xref: /qemu/linux-user/syscall.c (revision 8a64609e)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
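/*
 * For illustration (not an exhaustive list): glibc's pthread_create()
 * typically passes CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 * CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID, i.e. all of CLONE_THREAD_FLAGS plus a subset of
 * CLONE_OPTIONAL_THREAD_FLAGS, so none of the CLONE_INVALID_THREAD_FLAGS
 * bits are set. A plain fork()-style clone passes only an exit signal in
 * the CSIGNAL bits, which likewise clears CLONE_INVALID_FORK_FLAGS.
 */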
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
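/*
 * Local stand-ins for the VFAT readdir ioctl numbers, used because
 * <linux/msdos_fs.h> is not included (see the commented-out include above);
 * the _IOC size argument accounts for two directory entries of up to
 * 256 name bytes each, roughly matching what the ioctl fills in.
 */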
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
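/*
 * Illustrative expansion: together with an alias such as
 * "#define __NR_sys_gettid __NR_gettid" (see below),
 *     _syscall0(int, sys_gettid)
 * becomes
 *     static int sys_gettid(void) { return syscall(__NR_sys_gettid); }
 * i.e. a thin wrapper that issues the host syscall directly, bypassing any
 * libc wrapper. The __NR_sys_* aliases give these wrappers "sys_" names
 * that cannot collide with the libc functions of the same name.
 */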
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363     uint32_t size;
364     uint32_t sched_policy;
365     uint64_t sched_flags;
366     int32_t sched_nice;
367     uint32_t sched_priority;
368     uint64_t sched_runtime;
369     uint64_t sched_deadline;
370     uint64_t sched_period;
371     uint32_t sched_util_min;
372     uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
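/*
 * Each row is { target_mask, target_bits, host_mask, host_bits }: roughly,
 * the bitmask translation helpers (target_to_host_bitmask() and its
 * inverse) check whether the masked input equals the *_bits value and, if
 * so, OR in the corresponding bits for the other side. This lets e.g.
 * TARGET_O_NONBLOCK map to the host O_NONBLOCK even when the numeric
 * values differ between guest and host ABIs.
 */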
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439 #if defined(O_DIRECT)
440   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441 #endif
442 #if defined(O_NOATIME)
443   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444 #endif
445 #if defined(O_CLOEXEC)
446   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447 #endif
448 #if defined(O_PATH)
449   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450 #endif
451 #if defined(O_TMPFILE)
452   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453 #endif
454   /* Don't terminate the list prematurely on 64-bit host+guest.  */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458   { 0, 0, 0, 0 }
459 };
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485                          int newfd, const char *new, int flags)
486 {
487     if (flags == 0) {
488         return renameat(oldfd, old, newfd, new);
489     }
490     errno = ENOSYS;
491     return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be the one used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
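/*
 * Timer slot allocation is lock-free: next_free_host_timer() claims the
 * first free slot by atomically exchanging its flag from 0 to 1, and
 * free_host_timer_slot() releases it with a store-release so earlier writes
 * to the slot are visible before it can be reused.
 */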
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
544 
545 static inline int host_to_target_errno(int host_errno)
546 {
547     switch (host_errno) {
548 #define E(X)  case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551     default:
552         return host_errno;
553     }
554 }
555 
556 static inline int target_to_host_errno(int target_errno)
557 {
558     switch (target_errno) {
559 #define E(X)  case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562     default:
563         return target_errno;
564     }
565 }
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
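/*
 * Helper for syscalls that take a size-versioned struct (e.g. sched_attr):
 * returns 1 if the guest bytes in [addr + ksize, addr + usize) are all zero
 * (trivially so when usize <= ksize), 0 if any byte is non-zero, and
 * -TARGET_EFAULT if the range cannot be read.
 */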
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
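/*
 * The safe_##name wrappers generated below go through safe_syscall()
 * (see user/safe-syscall.h for the precise contract) rather than libc.
 * Like a raw syscall they return -1 and set errno on failure, but they also
 * guarantee that a guest signal which becomes pending does not leave the
 * syscall blocked: the call is interrupted so the signal can be delivered
 * and the syscall restarted (the QEMU_ERESTARTSYS machinery).
 */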
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664               char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672               struct timespec *, tsp, const sigset_t *, sigmask,
673               size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676               int, maxevents, int, timeout, const sigset_t *, sigmask,
677               size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680               const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684               const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697               socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707               const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710               int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713               struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716     defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718               const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723               void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726               void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731               int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735               long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739               unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742     defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744               size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747     defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749               size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753               int, outfd, loff_t *, poutoff, size_t, length,
754               unsigned int, flags)
755 #endif
756 
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758  * "third argument might be integer or pointer or not present" behaviour of
759  * the libc function.
760  */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764  *  use the flock64 struct rather than unsuffixed flock
765  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766  */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
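/*
 * Illustrative use: a record-lock query is issued as
 *     struct flock64 fl64;
 *     safe_fcntl(fd, F_GETLK64, &fl64);
 * so the 64-bit-offset layout is used on both 32-bit and 64-bit hosts.
 */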
772 
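/*
 * Translate a host socket type into the guest encoding: the base type
 * (SOCK_STREAM, SOCK_DGRAM, ...) is remapped where needed, the
 * SOCK_CLOEXEC/SOCK_NONBLOCK modifier bits are translated when the host
 * defines them, and unknown base types are passed through unchanged.
 */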
773 static inline int host_to_target_sock_type(int host_type)
774 {
775     int target_type;
776 
777     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778     case SOCK_DGRAM:
779         target_type = TARGET_SOCK_DGRAM;
780         break;
781     case SOCK_STREAM:
782         target_type = TARGET_SOCK_STREAM;
783         break;
784     default:
785         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786         break;
787     }
788 
789 #if defined(SOCK_CLOEXEC)
790     if (host_type & SOCK_CLOEXEC) {
791         target_type |= TARGET_SOCK_CLOEXEC;
792     }
793 #endif
794 
795 #if defined(SOCK_NONBLOCK)
796     if (host_type & SOCK_NONBLOCK) {
797         target_type |= TARGET_SOCK_NONBLOCK;
798     }
799 #endif
800 
801     return target_type;
802 }
803 
804 static abi_ulong target_brk, initial_target_brk;
805 
806 void target_set_brk(abi_ulong new_brk)
807 {
808     target_brk = TARGET_PAGE_ALIGN(new_brk);
809     initial_target_brk = target_brk;
810 }
811 
812 /* do_brk() must return target values and target errnos. */
813 abi_long do_brk(abi_ulong brk_val)
814 {
815     abi_long mapped_addr;
816     abi_ulong new_brk;
817     abi_ulong old_brk;
818 
819     /* brk pointers are always untagged */
820 
821     /* do not allow to shrink below initial brk value */
822     if (brk_val < initial_target_brk) {
823         return target_brk;
824     }
825 
826     new_brk = TARGET_PAGE_ALIGN(brk_val);
827     old_brk = TARGET_PAGE_ALIGN(target_brk);
828 
829     /* new and old target_brk might be on the same page */
830     if (new_brk == old_brk) {
831         target_brk = brk_val;
832         return target_brk;
833     }
834 
835     /* Release heap if necessary */
836     if (new_brk < old_brk) {
837         target_munmap(new_brk, old_brk - new_brk);
838 
839         target_brk = brk_val;
840         return target_brk;
841     }
842 
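    /*
     * Grow the heap. MAP_FIXED_NOREPLACE requests exactly [old_brk, new_brk)
     * but fails rather than clobbering an existing mapping; if the kernel
     * cannot place the block there, the break is left unchanged below
     * (or ENOMEM is reported on Alpha).
     */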
843     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
844                               PROT_READ | PROT_WRITE,
845                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
846                               -1, 0);
847 
848     if (mapped_addr == old_brk) {
849         target_brk = brk_val;
850         return target_brk;
851     }
852 
853 #if defined(TARGET_ALPHA)
854     /* We (partially) emulate OSF/1 on Alpha, which requires we
855        return a proper errno, not an unchanged brk value.  */
856     return -TARGET_ENOMEM;
857 #endif
858     /* For everything else, return the previous break. */
859     return target_brk;
860 }
861 
862 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
863     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
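/*
 * A guest fd_set is an array of abi_ulong words in guest byte order, with
 * bit j of word i representing fd number i * TARGET_ABI_BITS + j. These
 * helpers repack that layout into a host fd_set and back.
 */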
864 static inline abi_long copy_from_user_fdset(fd_set *fds,
865                                             abi_ulong target_fds_addr,
866                                             int n)
867 {
868     int i, nw, j, k;
869     abi_ulong b, *target_fds;
870 
871     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
872     if (!(target_fds = lock_user(VERIFY_READ,
873                                  target_fds_addr,
874                                  sizeof(abi_ulong) * nw,
875                                  1)))
876         return -TARGET_EFAULT;
877 
878     FD_ZERO(fds);
879     k = 0;
880     for (i = 0; i < nw; i++) {
881         /* grab the abi_ulong */
882         __get_user(b, &target_fds[i]);
883         for (j = 0; j < TARGET_ABI_BITS; j++) {
884             /* check the bit inside the abi_ulong */
885             if ((b >> j) & 1)
886                 FD_SET(k, fds);
887             k++;
888         }
889     }
890 
891     unlock_user(target_fds, target_fds_addr, 0);
892 
893     return 0;
894 }
895 
896 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
897                                                  abi_ulong target_fds_addr,
898                                                  int n)
899 {
900     if (target_fds_addr) {
901         if (copy_from_user_fdset(fds, target_fds_addr, n))
902             return -TARGET_EFAULT;
903         *fds_ptr = fds;
904     } else {
905         *fds_ptr = NULL;
906     }
907     return 0;
908 }
909 
910 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
911                                           const fd_set *fds,
912                                           int n)
913 {
914     int i, nw, j, k;
915     abi_long v;
916     abi_ulong *target_fds;
917 
918     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
919     if (!(target_fds = lock_user(VERIFY_WRITE,
920                                  target_fds_addr,
921                                  sizeof(abi_ulong) * nw,
922                                  0)))
923         return -TARGET_EFAULT;
924 
925     k = 0;
926     for (i = 0; i < nw; i++) {
927         v = 0;
928         for (j = 0; j < TARGET_ABI_BITS; j++) {
929             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
930             k++;
931         }
932         __put_user(v, &target_fds[i]);
933     }
934 
935     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
936 
937     return 0;
938 }
939 #endif
940 
941 #if defined(__alpha__)
942 #define HOST_HZ 1024
943 #else
944 #define HOST_HZ 100
945 #endif
946 
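/*
 * HOST_HZ is the host kernel's USER_HZ, the unit in which clock_t tick
 * counts (times(), etc.) are reported: 100 on most Linux hosts and
 * historically 1024 on Alpha. Guest-visible tick counts are rescaled to
 * TARGET_HZ below.
 */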
947 static inline abi_long host_to_target_clock_t(long ticks)
948 {
949 #if HOST_HZ == TARGET_HZ
950     return ticks;
951 #else
952     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
953 #endif
954 }
955 
956 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
957                                              const struct rusage *rusage)
958 {
959     struct target_rusage *target_rusage;
960 
961     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
962         return -TARGET_EFAULT;
963     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
964     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
965     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
966     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
967     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
968     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
969     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
970     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
971     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
972     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
973     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
974     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
975     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
976     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
977     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
978     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
979     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
980     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
981     unlock_user_struct(target_rusage, target_addr, 1);
982 
983     return 0;
984 }
985 
986 #ifdef TARGET_NR_setrlimit
987 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
988 {
989     abi_ulong target_rlim_swap;
990     rlim_t result;
991 
992     target_rlim_swap = tswapal(target_rlim);
993     if (target_rlim_swap == TARGET_RLIM_INFINITY)
994         return RLIM_INFINITY;
995 
996     result = target_rlim_swap;
997     if (target_rlim_swap != (rlim_t)result)
998         return RLIM_INFINITY;
999 
1000     return result;
1001 }
1002 #endif
1003 
1004 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
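/*
 * Convert a host rlim_t for the guest: values that cannot be represented in
 * the guest's abi_long are reported as TARGET_RLIM_INFINITY rather than
 * being silently truncated.
 */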
1005 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1006 {
1007     abi_ulong target_rlim_swap;
1008     abi_ulong result;
1009 
1010     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1011         target_rlim_swap = TARGET_RLIM_INFINITY;
1012     else
1013         target_rlim_swap = rlim;
1014     result = tswapal(target_rlim_swap);
1015 
1016     return result;
1017 }
1018 #endif
1019 
1020 static inline int target_to_host_resource(int code)
1021 {
1022     switch (code) {
1023     case TARGET_RLIMIT_AS:
1024         return RLIMIT_AS;
1025     case TARGET_RLIMIT_CORE:
1026         return RLIMIT_CORE;
1027     case TARGET_RLIMIT_CPU:
1028         return RLIMIT_CPU;
1029     case TARGET_RLIMIT_DATA:
1030         return RLIMIT_DATA;
1031     case TARGET_RLIMIT_FSIZE:
1032         return RLIMIT_FSIZE;
1033     case TARGET_RLIMIT_LOCKS:
1034         return RLIMIT_LOCKS;
1035     case TARGET_RLIMIT_MEMLOCK:
1036         return RLIMIT_MEMLOCK;
1037     case TARGET_RLIMIT_MSGQUEUE:
1038         return RLIMIT_MSGQUEUE;
1039     case TARGET_RLIMIT_NICE:
1040         return RLIMIT_NICE;
1041     case TARGET_RLIMIT_NOFILE:
1042         return RLIMIT_NOFILE;
1043     case TARGET_RLIMIT_NPROC:
1044         return RLIMIT_NPROC;
1045     case TARGET_RLIMIT_RSS:
1046         return RLIMIT_RSS;
1047     case TARGET_RLIMIT_RTPRIO:
1048         return RLIMIT_RTPRIO;
1049 #ifdef RLIMIT_RTTIME
1050     case TARGET_RLIMIT_RTTIME:
1051         return RLIMIT_RTTIME;
1052 #endif
1053     case TARGET_RLIMIT_SIGPENDING:
1054         return RLIMIT_SIGPENDING;
1055     case TARGET_RLIMIT_STACK:
1056         return RLIMIT_STACK;
1057     default:
1058         return code;
1059     }
1060 }
1061 
1062 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1063                                               abi_ulong target_tv_addr)
1064 {
1065     struct target_timeval *target_tv;
1066 
1067     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1068         return -TARGET_EFAULT;
1069     }
1070 
1071     __get_user(tv->tv_sec, &target_tv->tv_sec);
1072     __get_user(tv->tv_usec, &target_tv->tv_usec);
1073 
1074     unlock_user_struct(target_tv, target_tv_addr, 0);
1075 
1076     return 0;
1077 }
1078 
1079 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1080                                             const struct timeval *tv)
1081 {
1082     struct target_timeval *target_tv;
1083 
1084     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1085         return -TARGET_EFAULT;
1086     }
1087 
1088     __put_user(tv->tv_sec, &target_tv->tv_sec);
1089     __put_user(tv->tv_usec, &target_tv->tv_usec);
1090 
1091     unlock_user_struct(target_tv, target_tv_addr, 1);
1092 
1093     return 0;
1094 }
1095 
1096 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1097 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1098                                                 abi_ulong target_tv_addr)
1099 {
1100     struct target__kernel_sock_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __get_user(tv->tv_sec, &target_tv->tv_sec);
1107     __get_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 0);
1110 
1111     return 0;
1112 }
1113 #endif
1114 
1115 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1116                                               const struct timeval *tv)
1117 {
1118     struct target__kernel_sock_timeval *target_tv;
1119 
1120     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1121         return -TARGET_EFAULT;
1122     }
1123 
1124     __put_user(tv->tv_sec, &target_tv->tv_sec);
1125     __put_user(tv->tv_usec, &target_tv->tv_usec);
1126 
1127     unlock_user_struct(target_tv, target_tv_addr, 1);
1128 
1129     return 0;
1130 }
1131 
1132 #if defined(TARGET_NR_futex) || \
1133     defined(TARGET_NR_rt_sigtimedwait) || \
1134     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1135     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1136     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1137     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1138     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1139     defined(TARGET_NR_timer_settime) || \
1140     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1141 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1142                                                abi_ulong target_addr)
1143 {
1144     struct target_timespec *target_ts;
1145 
1146     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1147         return -TARGET_EFAULT;
1148     }
1149     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1150     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1151     unlock_user_struct(target_ts, target_addr, 0);
1152     return 0;
1153 }
1154 #endif
1155 
1156 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1157     defined(TARGET_NR_timer_settime64) || \
1158     defined(TARGET_NR_mq_timedsend_time64) || \
1159     defined(TARGET_NR_mq_timedreceive_time64) || \
1160     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1161     defined(TARGET_NR_clock_nanosleep_time64) || \
1162     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1163     defined(TARGET_NR_utimensat) || \
1164     defined(TARGET_NR_utimensat_time64) || \
1165     defined(TARGET_NR_semtimedop_time64) || \
1166     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1167 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1168                                                  abi_ulong target_addr)
1169 {
1170     struct target__kernel_timespec *target_ts;
1171 
1172     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1173         return -TARGET_EFAULT;
1174     }
1175     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1176     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1177     /* in 32bit mode, this drops the padding */
1178     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1179     unlock_user_struct(target_ts, target_addr, 0);
1180     return 0;
1181 }
1182 #endif
1183 
1184 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1185                                                struct timespec *host_ts)
1186 {
1187     struct target_timespec *target_ts;
1188 
1189     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1190         return -TARGET_EFAULT;
1191     }
1192     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1193     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1194     unlock_user_struct(target_ts, target_addr, 1);
1195     return 0;
1196 }
1197 
1198 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1199                                                  struct timespec *host_ts)
1200 {
1201     struct target__kernel_timespec *target_ts;
1202 
1203     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1204         return -TARGET_EFAULT;
1205     }
1206     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1207     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1208     unlock_user_struct(target_ts, target_addr, 1);
1209     return 0;
1210 }
1211 
1212 #if defined(TARGET_NR_gettimeofday)
1213 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1214                                              struct timezone *tz)
1215 {
1216     struct target_timezone *target_tz;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1219         return -TARGET_EFAULT;
1220     }
1221 
1222     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1223     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1224 
1225     unlock_user_struct(target_tz, target_tz_addr, 1);
1226 
1227     return 0;
1228 }
1229 #endif
1230 
1231 #if defined(TARGET_NR_settimeofday)
1232 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1233                                                abi_ulong target_tz_addr)
1234 {
1235     struct target_timezone *target_tz;
1236 
1237     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1238         return -TARGET_EFAULT;
1239     }
1240 
1241     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1242     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1243 
1244     unlock_user_struct(target_tz, target_tz_addr, 0);
1245 
1246     return 0;
1247 }
1248 #endif
1249 
1250 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1251 #include <mqueue.h>
1252 
1253 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1254                                               abi_ulong target_mq_attr_addr)
1255 {
1256     struct target_mq_attr *target_mq_attr;
1257 
1258     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1259                           target_mq_attr_addr, 1))
1260         return -TARGET_EFAULT;
1261 
1262     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1263     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1264     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1265     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1266 
1267     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1268 
1269     return 0;
1270 }
1271 
1272 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1273                                             const struct mq_attr *attr)
1274 {
1275     struct target_mq_attr *target_mq_attr;
1276 
1277     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1278                           target_mq_attr_addr, 0))
1279         return -TARGET_EFAULT;
1280 
1281     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1282     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1283     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1284     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1285 
1286     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1287 
1288     return 0;
1289 }
1290 #endif
1291 
1292 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1293 /* do_select() must return target values and target errnos. */
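/*
 * The guest timeval (if any) is converted to a timespec and the host
 * pselect6 is used with a NULL sigmask; on success the remaining time is
 * converted back and written to the guest along with the fd sets, matching
 * Linux select() semantics.
 */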
1294 static abi_long do_select(int n,
1295                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1296                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1297 {
1298     fd_set rfds, wfds, efds;
1299     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1300     struct timeval tv;
1301     struct timespec ts, *ts_ptr;
1302     abi_long ret;
1303 
1304     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1305     if (ret) {
1306         return ret;
1307     }
1308     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1309     if (ret) {
1310         return ret;
1311     }
1312     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1313     if (ret) {
1314         return ret;
1315     }
1316 
1317     if (target_tv_addr) {
1318         if (copy_from_user_timeval(&tv, target_tv_addr))
1319             return -TARGET_EFAULT;
1320         ts.tv_sec = tv.tv_sec;
1321         ts.tv_nsec = tv.tv_usec * 1000;
1322         ts_ptr = &ts;
1323     } else {
1324         ts_ptr = NULL;
1325     }
1326 
1327     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1328                                   ts_ptr, NULL));
1329 
1330     if (!is_error(ret)) {
1331         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1332             return -TARGET_EFAULT;
1333         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1334             return -TARGET_EFAULT;
1335         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1336             return -TARGET_EFAULT;
1337 
1338         if (target_tv_addr) {
1339             tv.tv_sec = ts.tv_sec;
1340             tv.tv_usec = ts.tv_nsec / 1000;
1341             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1342                 return -TARGET_EFAULT;
1343             }
1344         }
1345     }
1346 
1347     return ret;
1348 }
1349 
1350 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1351 static abi_long do_old_select(abi_ulong arg1)
1352 {
1353     struct target_sel_arg_struct *sel;
1354     abi_ulong inp, outp, exp, tvp;
1355     long nsel;
1356 
1357     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1358         return -TARGET_EFAULT;
1359     }
1360 
1361     nsel = tswapal(sel->n);
1362     inp = tswapal(sel->inp);
1363     outp = tswapal(sel->outp);
1364     exp = tswapal(sel->exp);
1365     tvp = tswapal(sel->tvp);
1366 
1367     unlock_user_struct(sel, arg1, 0);
1368 
1369     return do_select(nsel, inp, outp, exp, tvp);
1370 }
1371 #endif
1372 #endif
1373 
1374 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1375 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1376                             abi_long arg4, abi_long arg5, abi_long arg6,
1377                             bool time64)
1378 {
1379     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1380     fd_set rfds, wfds, efds;
1381     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1382     struct timespec ts, *ts_ptr;
1383     abi_long ret;
1384 
1385     /*
1386      * The 6th arg is actually two args smashed together,
1387      * so we cannot use the C library.
1388      */
1389     struct {
1390         sigset_t *set;
1391         size_t size;
1392     } sig, *sig_ptr;
1393 
1394     abi_ulong arg_sigset, arg_sigsize, *arg7;
1395 
1396     n = arg1;
1397     rfd_addr = arg2;
1398     wfd_addr = arg3;
1399     efd_addr = arg4;
1400     ts_addr = arg5;
1401 
1402     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1403     if (ret) {
1404         return ret;
1405     }
1406     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1407     if (ret) {
1408         return ret;
1409     }
1410     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1411     if (ret) {
1412         return ret;
1413     }
1414 
1415     /*
1416      * This takes a timespec, and not a timeval, so we cannot
1417      * use the do_select() helper ...
1418      */
1419     if (ts_addr) {
1420         if (time64) {
1421             if (target_to_host_timespec64(&ts, ts_addr)) {
1422                 return -TARGET_EFAULT;
1423             }
1424         } else {
1425             if (target_to_host_timespec(&ts, ts_addr)) {
1426                 return -TARGET_EFAULT;
1427             }
1428         }
1429         ts_ptr = &ts;
1430     } else {
1431         ts_ptr = NULL;
1432     }
1433 
1434     /* Extract the two packed args for the sigset */
1435     sig_ptr = NULL;
1436     if (arg6) {
1437         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1438         if (!arg7) {
1439             return -TARGET_EFAULT;
1440         }
1441         arg_sigset = tswapal(arg7[0]);
1442         arg_sigsize = tswapal(arg7[1]);
1443         unlock_user(arg7, arg6, 0);
1444 
1445         if (arg_sigset) {
1446             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1447             if (ret != 0) {
1448                 return ret;
1449             }
1450             sig_ptr = &sig;
1451             sig.size = SIGSET_T_SIZE;
1452         }
1453     }
1454 
1455     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1456                                   ts_ptr, sig_ptr));
1457 
1458     if (sig_ptr) {
1459         finish_sigsuspend_mask(ret);
1460     }
1461 
1462     if (!is_error(ret)) {
1463         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1464             return -TARGET_EFAULT;
1465         }
1466         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1467             return -TARGET_EFAULT;
1468         }
1469         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1470             return -TARGET_EFAULT;
1471         }
1472         if (time64) {
1473             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1474                 return -TARGET_EFAULT;
1475             }
1476         } else {
1477             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1478                 return -TARGET_EFAULT;
1479             }
1480         }
1481     }
1482     return ret;
1483 }
1484 #endif
1485 
1486 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1487     defined(TARGET_NR_ppoll_time64)
1488 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1489                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1490 {
1491     struct target_pollfd *target_pfd;
1492     unsigned int nfds = arg2;
1493     struct pollfd *pfd;
1494     unsigned int i;
1495     abi_long ret;
1496 
1497     pfd = NULL;
1498     target_pfd = NULL;
1499     if (nfds) {
1500         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1501             return -TARGET_EINVAL;
1502         }
1503         target_pfd = lock_user(VERIFY_WRITE, arg1,
1504                                sizeof(struct target_pollfd) * nfds, 1);
1505         if (!target_pfd) {
1506             return -TARGET_EFAULT;
1507         }
1508 
1509         pfd = alloca(sizeof(struct pollfd) * nfds);
1510         for (i = 0; i < nfds; i++) {
1511             pfd[i].fd = tswap32(target_pfd[i].fd);
1512             pfd[i].events = tswap16(target_pfd[i].events);
1513         }
1514     }
1515     if (ppoll) {
1516         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1517         sigset_t *set = NULL;
1518 
1519         if (arg3) {
1520             if (time64) {
1521                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1522                     unlock_user(target_pfd, arg1, 0);
1523                     return -TARGET_EFAULT;
1524                 }
1525             } else {
1526                 if (target_to_host_timespec(timeout_ts, arg3)) {
1527                     unlock_user(target_pfd, arg1, 0);
1528                     return -TARGET_EFAULT;
1529                 }
1530             }
1531         } else {
1532             timeout_ts = NULL;
1533         }
1534 
1535         if (arg4) {
1536             ret = process_sigsuspend_mask(&set, arg4, arg5);
1537             if (ret != 0) {
1538                 unlock_user(target_pfd, arg1, 0);
1539                 return ret;
1540             }
1541         }
1542 
1543         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1544                                    set, SIGSET_T_SIZE));
1545 
1546         if (set) {
1547             finish_sigsuspend_mask(ret);
1548         }
1549         if (!is_error(ret) && arg3) {
1550             if (time64) {
1551                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1552                     return -TARGET_EFAULT;
1553                 }
1554             } else {
1555                 if (host_to_target_timespec(arg3, timeout_ts)) {
1556                     return -TARGET_EFAULT;
1557                 }
1558             }
1559         }
1560     } else {
1561           struct timespec ts, *pts;
1562 
1563           if (arg3 >= 0) {
1564               /* Convert ms to secs, ns */
1565               ts.tv_sec = arg3 / 1000;
1566               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1567               pts = &ts;
1568           } else {
1569               /* A negative poll() timeout means "infinite" */
1570               pts = NULL;
1571           }
1572           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1573     }
1574 
1575     if (!is_error(ret)) {
1576         for (i = 0; i < nfds; i++) {
1577             target_pfd[i].revents = tswap16(pfd[i].revents);
1578         }
1579     }
1580     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1581     return ret;
1582 }
1583 #endif
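
/*
 * Worked example (illustrative, with made-up guest values): a guest
 * poll(fds, 3, 2500) lands in do_ppoll() with ppoll == false.  The
 * 2500 ms timeout is split into a host timespec:
 *
 *     ts.tv_sec  = 2500 / 1000             = 2
 *     ts.tv_nsec = (2500 % 1000) * 1000000 = 500000000
 *
 * while poll(fds, 3, -1) passes a NULL timespec ("wait forever").  For
 * ppoll()/ppoll_time64 the guest supplies the timespec itself, the
 * optional sigmask in arg4/arg5 is installed around the wait, and the
 * remaining time is written back through arg3 on a non-error return.
 */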
1584 
1585 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1586                         int flags, int is_pipe2)
1587 {
1588     int host_pipe[2];
1589     abi_long ret;
1590     ret = pipe2(host_pipe, flags);
1591 
1592     if (is_error(ret))
1593         return get_errno(ret);
1594 
1595     /* Several targets have special calling conventions for the original
1596        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1597     if (!is_pipe2) {
1598 #if defined(TARGET_ALPHA)
1599         cpu_env->ir[IR_A4] = host_pipe[1];
1600         return host_pipe[0];
1601 #elif defined(TARGET_MIPS)
1602         cpu_env->active_tc.gpr[3] = host_pipe[1];
1603         return host_pipe[0];
1604 #elif defined(TARGET_SH4)
1605         cpu_env->gregs[1] = host_pipe[1];
1606         return host_pipe[0];
1607 #elif defined(TARGET_SPARC)
1608         cpu_env->regwptr[1] = host_pipe[1];
1609         return host_pipe[0];
1610 #endif
1611     }
1612 
1613     if (put_user_s32(host_pipe[0], pipedes)
1614         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1615         return -TARGET_EFAULT;
1616     return get_errno(ret);
1617 }
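
/*
 * Example (sketch of the register conventions handled above): for a
 * plain pipe() on Alpha the two descriptors are returned in registers
 * rather than through memory -- host_pipe[0] goes back as the syscall
 * return value and host_pipe[1] is placed in IR_A4 -- so do_pipe()
 * returns early without touching guest memory.  For pipe2(), or on
 * targets without such a convention, both descriptors are stored into
 * the guest array:
 *
 *     put_user_s32(host_pipe[0], pipedes);
 *     put_user_s32(host_pipe[1], pipedes + sizeof(abi_int));
 */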
1618 
1619 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1620                                               abi_ulong target_addr,
1621                                               socklen_t len)
1622 {
1623     struct target_ip_mreqn *target_smreqn;
1624 
1625     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1626     if (!target_smreqn)
1627         return -TARGET_EFAULT;
1628     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1629     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1630     if (len == sizeof(struct target_ip_mreqn))
1631         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1632     unlock_user(target_smreqn, target_addr, 0);
1633 
1634     return 0;
1635 }
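
/*
 * Note (illustrative): the guest may pass either the short struct
 * ip_mreq (multicast address + interface address) or the full struct
 * ip_mreqn, which adds imr_ifindex.  The IPv4 addresses are already in
 * network byte order and are copied as-is; only the host-endian
 * imr_ifindex needs tswapal(), and only when optlen is
 * sizeof(struct target_ip_mreqn).  The caller then passes the original
 * optlen on to the host setsockopt(), so the kernel interprets the
 * buffer the same way the guest intended.
 */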
1636 
1637 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1638                                                abi_ulong target_addr,
1639                                                socklen_t len)
1640 {
1641     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1642     sa_family_t sa_family;
1643     struct target_sockaddr *target_saddr;
1644 
1645     if (fd_trans_target_to_host_addr(fd)) {
1646         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1647     }
1648 
1649     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1650     if (!target_saddr)
1651         return -TARGET_EFAULT;
1652 
1653     sa_family = tswap16(target_saddr->sa_family);
1654 
1655     /* Oops. The caller might send an incomplete sun_path; sun_path
1656      * must be terminated by \0 (see the manual page), but
1657      * unfortunately it is quite common to specify sockaddr_un
1658      * length as "strlen(x->sun_path)" while it should be
1659      * "strlen(...) + 1". We'll fix that here if needed.
1660      * The Linux kernel applies a similar fix-up.
1661      */
1662 
1663     if (sa_family == AF_UNIX) {
1664         if (len < unix_maxlen && len > 0) {
1665             char *cp = (char *)target_saddr;
1666 
1667             if (cp[len - 1] && !cp[len])
1668                 len++;
1669         }
1670         if (len > unix_maxlen)
1671             len = unix_maxlen;
1672     }
1673 
1674     memcpy(addr, target_saddr, len);
1675     addr->sa_family = sa_family;
1676     if (sa_family == AF_NETLINK) {
1677         struct sockaddr_nl *nladdr;
1678 
1679         nladdr = (struct sockaddr_nl *)addr;
1680         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1681         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1682     } else if (sa_family == AF_PACKET) {
1683         struct target_sockaddr_ll *lladdr;
1684 
1685         lladdr = (struct target_sockaddr_ll *)addr;
1686         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1687         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1688     } else if (sa_family == AF_INET6) {
1689         struct sockaddr_in6 *in6addr;
1690 
1691         in6addr = (struct sockaddr_in6 *)addr;
1692         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1693     }
1694     unlock_user(target_saddr, target_addr, 0);
1695 
1696     return 0;
1697 }
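
/*
 * Worked example for the AF_UNIX fix-up above (path and length are
 * hypothetical): a guest binds to "/tmp/sock" but passes
 *
 *     len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
 *
 * i.e. without counting the trailing NUL.  Because cp[len - 1] is the
 * final 'k' and cp[len] happens to be 0, len is bumped by one so the
 * host kernel sees a terminated path; len is also clamped to
 * sizeof(struct sockaddr_un) so we never copy past the host structure.
 */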
1698 
1699 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1700                                                struct sockaddr *addr,
1701                                                socklen_t len)
1702 {
1703     struct target_sockaddr *target_saddr;
1704 
1705     if (len == 0) {
1706         return 0;
1707     }
1708     assert(addr);
1709 
1710     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1711     if (!target_saddr)
1712         return -TARGET_EFAULT;
1713     memcpy(target_saddr, addr, len);
1714     if (len >= offsetof(struct target_sockaddr, sa_family) +
1715         sizeof(target_saddr->sa_family)) {
1716         target_saddr->sa_family = tswap16(addr->sa_family);
1717     }
1718     if (addr->sa_family == AF_NETLINK &&
1719         len >= sizeof(struct target_sockaddr_nl)) {
1720         struct target_sockaddr_nl *target_nl =
1721                (struct target_sockaddr_nl *)target_saddr;
1722         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1723         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1724     } else if (addr->sa_family == AF_PACKET) {
1725         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1726         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1727         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1728     } else if (addr->sa_family == AF_INET6 &&
1729                len >= sizeof(struct target_sockaddr_in6)) {
1730         struct target_sockaddr_in6 *target_in6 =
1731                (struct target_sockaddr_in6 *)target_saddr;
1732         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1733     }
1734     unlock_user(target_saddr, target_addr, len);
1735 
1736     return 0;
1737 }
1738 
1739 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1740                                            struct target_msghdr *target_msgh)
1741 {
1742     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1743     abi_long msg_controllen;
1744     abi_ulong target_cmsg_addr;
1745     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1746     socklen_t space = 0;
1747 
1748     msg_controllen = tswapal(target_msgh->msg_controllen);
1749     if (msg_controllen < sizeof (struct target_cmsghdr))
1750         goto the_end;
1751     target_cmsg_addr = tswapal(target_msgh->msg_control);
1752     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1753     target_cmsg_start = target_cmsg;
1754     if (!target_cmsg)
1755         return -TARGET_EFAULT;
1756 
1757     while (cmsg && target_cmsg) {
1758         void *data = CMSG_DATA(cmsg);
1759         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1760 
1761         int len = tswapal(target_cmsg->cmsg_len)
1762             - sizeof(struct target_cmsghdr);
1763 
1764         space += CMSG_SPACE(len);
1765         if (space > msgh->msg_controllen) {
1766             space -= CMSG_SPACE(len);
1767             /* This is a QEMU bug, since we allocated the payload
1768              * area ourselves (unlike overflow in host-to-target
1769              * conversion, which is just the guest giving us a buffer
1770              * that's too small). It can't happen for the payload types
1771              * we currently support; if it becomes an issue in future
1772              * we would need to improve our allocation strategy to
1773              * something more intelligent than "twice the size of the
1774              * target buffer we're reading from".
1775              */
1776             qemu_log_mask(LOG_UNIMP,
1777                           ("Unsupported ancillary data %d/%d: "
1778                            "unhandled msg size\n"),
1779                           tswap32(target_cmsg->cmsg_level),
1780                           tswap32(target_cmsg->cmsg_type));
1781             break;
1782         }
1783 
1784         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1785             cmsg->cmsg_level = SOL_SOCKET;
1786         } else {
1787             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1788         }
1789         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1790         cmsg->cmsg_len = CMSG_LEN(len);
1791 
1792         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1793             int *fd = (int *)data;
1794             int *target_fd = (int *)target_data;
1795             int i, numfds = len / sizeof(int);
1796 
1797             for (i = 0; i < numfds; i++) {
1798                 __get_user(fd[i], target_fd + i);
1799             }
1800         } else if (cmsg->cmsg_level == SOL_SOCKET
1801                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1802             struct ucred *cred = (struct ucred *)data;
1803             struct target_ucred *target_cred =
1804                 (struct target_ucred *)target_data;
1805 
1806             __get_user(cred->pid, &target_cred->pid);
1807             __get_user(cred->uid, &target_cred->uid);
1808             __get_user(cred->gid, &target_cred->gid);
1809         } else if (cmsg->cmsg_level == SOL_ALG) {
1810             uint32_t *dst = (uint32_t *)data;
1811 
1812             memcpy(dst, target_data, len);
1813             /* fix endianness of the first 32-bit word */
1814             if (len >= sizeof(uint32_t)) {
1815                 *dst = tswap32(*dst);
1816             }
1817         } else {
1818             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1819                           cmsg->cmsg_level, cmsg->cmsg_type);
1820             memcpy(data, target_data, len);
1821         }
1822 
1823         cmsg = CMSG_NXTHDR(msgh, cmsg);
1824         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1825                                          target_cmsg_start);
1826     }
1827     unlock_user(target_cmsg, target_cmsg_addr, 0);
1828  the_end:
1829     msgh->msg_controllen = space;
1830     return 0;
1831 }
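
/*
 * Example (sketch): a guest sendmsg() passing two file descriptors as
 * SCM_RIGHTS ancillary data arrives with
 *
 *     cmsg_level = TARGET_SOL_SOCKET
 *     cmsg_type  = SCM_RIGHTS
 *     cmsg_len   = TARGET_CMSG_LEN(2 * sizeof(int))
 *
 * The loop above rewrites the header using the host's CMSG_LEN(),
 * converts level/type to host values, and copies each descriptor with
 * __get_user(), so the host sendmsg() sees a native control message.
 */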
1832 
1833 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1834                                            struct msghdr *msgh)
1835 {
1836     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1837     abi_long msg_controllen;
1838     abi_ulong target_cmsg_addr;
1839     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1840     socklen_t space = 0;
1841 
1842     msg_controllen = tswapal(target_msgh->msg_controllen);
1843     if (msg_controllen < sizeof (struct target_cmsghdr))
1844         goto the_end;
1845     target_cmsg_addr = tswapal(target_msgh->msg_control);
1846     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1847     target_cmsg_start = target_cmsg;
1848     if (!target_cmsg)
1849         return -TARGET_EFAULT;
1850 
1851     while (cmsg && target_cmsg) {
1852         void *data = CMSG_DATA(cmsg);
1853         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1854 
1855         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1856         int tgt_len, tgt_space;
1857 
1858         /* We never copy a half-header but may copy half-data;
1859          * this is Linux's behaviour in put_cmsg(). Note that
1860          * truncation here is a guest problem (which we report
1861          * to the guest via the CTRUNC bit), unlike truncation
1862          * in target_to_host_cmsg, which is a QEMU bug.
1863          */
1864         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1865             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1866             break;
1867         }
1868 
1869         if (cmsg->cmsg_level == SOL_SOCKET) {
1870             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1871         } else {
1872             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1873         }
1874         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1875 
1876         /* Payload types which need a different size of payload on
1877          * the target must adjust tgt_len here.
1878          */
1879         tgt_len = len;
1880         switch (cmsg->cmsg_level) {
1881         case SOL_SOCKET:
1882             switch (cmsg->cmsg_type) {
1883             case SO_TIMESTAMP:
1884                 tgt_len = sizeof(struct target_timeval);
1885                 break;
1886             default:
1887                 break;
1888             }
1889             break;
1890         default:
1891             break;
1892         }
1893 
1894         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1895             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1896             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1897         }
1898 
1899         /* We must now copy-and-convert len bytes of payload
1900          * into tgt_len bytes of destination space. Bear in mind
1901          * that in both source and destination we may be dealing
1902          * with a truncated value!
1903          */
1904         switch (cmsg->cmsg_level) {
1905         case SOL_SOCKET:
1906             switch (cmsg->cmsg_type) {
1907             case SCM_RIGHTS:
1908             {
1909                 int *fd = (int *)data;
1910                 int *target_fd = (int *)target_data;
1911                 int i, numfds = tgt_len / sizeof(int);
1912 
1913                 for (i = 0; i < numfds; i++) {
1914                     __put_user(fd[i], target_fd + i);
1915                 }
1916                 break;
1917             }
1918             case SO_TIMESTAMP:
1919             {
1920                 struct timeval *tv = (struct timeval *)data;
1921                 struct target_timeval *target_tv =
1922                     (struct target_timeval *)target_data;
1923 
1924                 if (len != sizeof(struct timeval) ||
1925                     tgt_len != sizeof(struct target_timeval)) {
1926                     goto unimplemented;
1927                 }
1928 
1929                 /* copy struct timeval to target */
1930                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1931                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1932                 break;
1933             }
1934             case SCM_CREDENTIALS:
1935             {
1936                 struct ucred *cred = (struct ucred *)data;
1937                 struct target_ucred *target_cred =
1938                     (struct target_ucred *)target_data;
1939 
1940                 __put_user(cred->pid, &target_cred->pid);
1941                 __put_user(cred->uid, &target_cred->uid);
1942                 __put_user(cred->gid, &target_cred->gid);
1943                 break;
1944             }
1945             default:
1946                 goto unimplemented;
1947             }
1948             break;
1949 
1950         case SOL_IP:
1951             switch (cmsg->cmsg_type) {
1952             case IP_TTL:
1953             {
1954                 uint32_t *v = (uint32_t *)data;
1955                 uint32_t *t_int = (uint32_t *)target_data;
1956 
1957                 if (len != sizeof(uint32_t) ||
1958                     tgt_len != sizeof(uint32_t)) {
1959                     goto unimplemented;
1960                 }
1961                 __put_user(*v, t_int);
1962                 break;
1963             }
1964             case IP_RECVERR:
1965             {
1966                 struct errhdr_t {
1967                    struct sock_extended_err ee;
1968                    struct sockaddr_in offender;
1969                 };
1970                 struct errhdr_t *errh = (struct errhdr_t *)data;
1971                 struct errhdr_t *target_errh =
1972                     (struct errhdr_t *)target_data;
1973 
1974                 if (len != sizeof(struct errhdr_t) ||
1975                     tgt_len != sizeof(struct errhdr_t)) {
1976                     goto unimplemented;
1977                 }
1978                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1979                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1980                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1981                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1982                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1983                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1984                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1985                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1986                     (void *) &errh->offender, sizeof(errh->offender));
1987                 break;
1988             }
1989             default:
1990                 goto unimplemented;
1991             }
1992             break;
1993 
1994         case SOL_IPV6:
1995             switch (cmsg->cmsg_type) {
1996             case IPV6_HOPLIMIT:
1997             {
1998                 uint32_t *v = (uint32_t *)data;
1999                 uint32_t *t_int = (uint32_t *)target_data;
2000 
2001                 if (len != sizeof(uint32_t) ||
2002                     tgt_len != sizeof(uint32_t)) {
2003                     goto unimplemented;
2004                 }
2005                 __put_user(*v, t_int);
2006                 break;
2007             }
2008             case IPV6_RECVERR:
2009             {
2010                 struct errhdr6_t {
2011                    struct sock_extended_err ee;
2012                    struct sockaddr_in6 offender;
2013                 };
2014                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2015                 struct errhdr6_t *target_errh =
2016                     (struct errhdr6_t *)target_data;
2017 
2018                 if (len != sizeof(struct errhdr6_t) ||
2019                     tgt_len != sizeof(struct errhdr6_t)) {
2020                     goto unimplemented;
2021                 }
2022                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2023                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2024                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2025                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2026                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2027                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2028                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2029                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2030                     (void *) &errh->offender, sizeof(errh->offender));
2031                 break;
2032             }
2033             default:
2034                 goto unimplemented;
2035             }
2036             break;
2037 
2038         default:
2039         unimplemented:
2040             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2041                           cmsg->cmsg_level, cmsg->cmsg_type);
2042             memcpy(target_data, data, MIN(len, tgt_len));
2043             if (tgt_len > len) {
2044                 memset(target_data + len, 0, tgt_len - len);
2045             }
2046         }
2047 
2048         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2049         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2050         if (msg_controllen < tgt_space) {
2051             tgt_space = msg_controllen;
2052         }
2053         msg_controllen -= tgt_space;
2054         space += tgt_space;
2055         cmsg = CMSG_NXTHDR(msgh, cmsg);
2056         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2057                                          target_cmsg_start);
2058     }
2059     unlock_user(target_cmsg, target_cmsg_addr, space);
2060  the_end:
2061     target_msgh->msg_controllen = tswapal(space);
2062     return 0;
2063 }
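
/*
 * Example (sketch; sizes depend on the host/target pair): for a
 * SO_TIMESTAMP message on a 64-bit host the payload is a 16-byte
 * struct timeval, while a 32-bit guest expects an 8-byte
 * struct target_timeval, so tgt_len is adjusted before the per-field
 * conversion.  If the guest's control buffer cannot hold
 * TARGET_CMSG_LEN(tgt_len), MSG_CTRUNC is reported back in msg_flags
 * and only the part that fits is copied, mirroring the kernel's
 * put_cmsg() behaviour.
 */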
2064 
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long do_setsockopt(int sockfd, int level, int optname,
2067                               abi_ulong optval_addr, socklen_t optlen)
2068 {
2069     abi_long ret;
2070     int val;
2071     struct ip_mreqn *ip_mreq;
2072     struct ip_mreq_source *ip_mreq_source;
2073 
2074     switch(level) {
2075     case SOL_TCP:
2076     case SOL_UDP:
2077         /* TCP and UDP options all take an 'int' value.  */
2078         if (optlen < sizeof(uint32_t))
2079             return -TARGET_EINVAL;
2080 
2081         if (get_user_u32(val, optval_addr))
2082             return -TARGET_EFAULT;
2083         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2084         break;
2085     case SOL_IP:
2086         switch(optname) {
2087         case IP_TOS:
2088         case IP_TTL:
2089         case IP_HDRINCL:
2090         case IP_ROUTER_ALERT:
2091         case IP_RECVOPTS:
2092         case IP_RETOPTS:
2093         case IP_PKTINFO:
2094         case IP_MTU_DISCOVER:
2095         case IP_RECVERR:
2096         case IP_RECVTTL:
2097         case IP_RECVTOS:
2098 #ifdef IP_FREEBIND
2099         case IP_FREEBIND:
2100 #endif
2101         case IP_MULTICAST_TTL:
2102         case IP_MULTICAST_LOOP:
2103             val = 0;
2104             if (optlen >= sizeof(uint32_t)) {
2105                 if (get_user_u32(val, optval_addr))
2106                     return -TARGET_EFAULT;
2107             } else if (optlen >= 1) {
2108                 if (get_user_u8(val, optval_addr))
2109                     return -TARGET_EFAULT;
2110             }
2111             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2112             break;
2113         case IP_ADD_MEMBERSHIP:
2114         case IP_DROP_MEMBERSHIP:
2115             if (optlen < sizeof (struct target_ip_mreq) ||
2116                 optlen > sizeof (struct target_ip_mreqn))
2117                 return -TARGET_EINVAL;
2118 
2119             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2120             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2121             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2122             break;
2123 
2124         case IP_BLOCK_SOURCE:
2125         case IP_UNBLOCK_SOURCE:
2126         case IP_ADD_SOURCE_MEMBERSHIP:
2127         case IP_DROP_SOURCE_MEMBERSHIP:
2128             if (optlen != sizeof (struct target_ip_mreq_source))
2129                 return -TARGET_EINVAL;
2130 
2131             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2132             if (!ip_mreq_source) {
2133                 return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2136             unlock_user (ip_mreq_source, optval_addr, 0);
2137             break;
2138 
2139         default:
2140             goto unimplemented;
2141         }
2142         break;
2143     case SOL_IPV6:
2144         switch (optname) {
2145         case IPV6_MTU_DISCOVER:
2146         case IPV6_MTU:
2147         case IPV6_V6ONLY:
2148         case IPV6_RECVPKTINFO:
2149         case IPV6_UNICAST_HOPS:
2150         case IPV6_MULTICAST_HOPS:
2151         case IPV6_MULTICAST_LOOP:
2152         case IPV6_RECVERR:
2153         case IPV6_RECVHOPLIMIT:
2154         case IPV6_2292HOPLIMIT:
2155         case IPV6_CHECKSUM:
2156         case IPV6_ADDRFORM:
2157         case IPV6_2292PKTINFO:
2158         case IPV6_RECVTCLASS:
2159         case IPV6_RECVRTHDR:
2160         case IPV6_2292RTHDR:
2161         case IPV6_RECVHOPOPTS:
2162         case IPV6_2292HOPOPTS:
2163         case IPV6_RECVDSTOPTS:
2164         case IPV6_2292DSTOPTS:
2165         case IPV6_TCLASS:
2166         case IPV6_ADDR_PREFERENCES:
2167 #ifdef IPV6_RECVPATHMTU
2168         case IPV6_RECVPATHMTU:
2169 #endif
2170 #ifdef IPV6_TRANSPARENT
2171         case IPV6_TRANSPARENT:
2172 #endif
2173 #ifdef IPV6_FREEBIND
2174         case IPV6_FREEBIND:
2175 #endif
2176 #ifdef IPV6_RECVORIGDSTADDR
2177         case IPV6_RECVORIGDSTADDR:
2178 #endif
2179             val = 0;
2180             if (optlen < sizeof(uint32_t)) {
2181                 return -TARGET_EINVAL;
2182             }
2183             if (get_user_u32(val, optval_addr)) {
2184                 return -TARGET_EFAULT;
2185             }
2186             ret = get_errno(setsockopt(sockfd, level, optname,
2187                                        &val, sizeof(val)));
2188             break;
2189         case IPV6_PKTINFO:
2190         {
2191             struct in6_pktinfo pki;
2192 
2193             if (optlen < sizeof(pki)) {
2194                 return -TARGET_EINVAL;
2195             }
2196 
2197             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2198                 return -TARGET_EFAULT;
2199             }
2200 
2201             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2202 
2203             ret = get_errno(setsockopt(sockfd, level, optname,
2204                                        &pki, sizeof(pki)));
2205             break;
2206         }
2207         case IPV6_ADD_MEMBERSHIP:
2208         case IPV6_DROP_MEMBERSHIP:
2209         {
2210             struct ipv6_mreq ipv6mreq;
2211 
2212             if (optlen < sizeof(ipv6mreq)) {
2213                 return -TARGET_EINVAL;
2214             }
2215 
2216             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2217                 return -TARGET_EFAULT;
2218             }
2219 
2220             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2221 
2222             ret = get_errno(setsockopt(sockfd, level, optname,
2223                                        &ipv6mreq, sizeof(ipv6mreq)));
2224             break;
2225         }
2226         default:
2227             goto unimplemented;
2228         }
2229         break;
2230     case SOL_ICMPV6:
2231         switch (optname) {
2232         case ICMPV6_FILTER:
2233         {
2234             struct icmp6_filter icmp6f;
2235 
2236             if (optlen > sizeof(icmp6f)) {
2237                 optlen = sizeof(icmp6f);
2238             }
2239 
2240             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2241                 return -TARGET_EFAULT;
2242             }
2243 
2244             for (val = 0; val < 8; val++) {
2245                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2246             }
2247 
2248             ret = get_errno(setsockopt(sockfd, level, optname,
2249                                        &icmp6f, optlen));
2250             break;
2251         }
2252         default:
2253             goto unimplemented;
2254         }
2255         break;
2256     case SOL_RAW:
2257         switch (optname) {
2258         case ICMP_FILTER:
2259         case IPV6_CHECKSUM:
2260             /* these options take a u32 value */
2261             if (optlen < sizeof(uint32_t)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (get_user_u32(val, optval_addr)) {
2266                 return -TARGET_EFAULT;
2267             }
2268             ret = get_errno(setsockopt(sockfd, level, optname,
2269                                        &val, sizeof(val)));
2270             break;
2271 
2272         default:
2273             goto unimplemented;
2274         }
2275         break;
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2277     case SOL_ALG:
2278         switch (optname) {
2279         case ALG_SET_KEY:
2280         {
2281             char *alg_key = g_malloc(optlen);
2282 
2283             if (!alg_key) {
2284                 return -TARGET_ENOMEM;
2285             }
2286             if (copy_from_user(alg_key, optval_addr, optlen)) {
2287                 g_free(alg_key);
2288                 return -TARGET_EFAULT;
2289             }
2290             ret = get_errno(setsockopt(sockfd, level, optname,
2291                                        alg_key, optlen));
2292             g_free(alg_key);
2293             break;
2294         }
2295         case ALG_SET_AEAD_AUTHSIZE:
2296         {
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        NULL, optlen));
2299             break;
2300         }
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305 #endif
2306     case TARGET_SOL_SOCKET:
2307         switch (optname) {
2308         case TARGET_SO_RCVTIMEO:
2309         {
2310                 struct timeval tv;
2311 
2312                 optname = SO_RCVTIMEO;
2313 
2314 set_timeout:
2315                 if (optlen != sizeof(struct target_timeval)) {
2316                     return -TARGET_EINVAL;
2317                 }
2318 
2319                 if (copy_from_user_timeval(&tv, optval_addr)) {
2320                     return -TARGET_EFAULT;
2321                 }
2322 
2323                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2324                                 &tv, sizeof(tv)));
2325                 return ret;
2326         }
2327         case TARGET_SO_SNDTIMEO:
2328                 optname = SO_SNDTIMEO;
2329                 goto set_timeout;
2330         case TARGET_SO_ATTACH_FILTER:
2331         {
2332                 struct target_sock_fprog *tfprog;
2333                 struct target_sock_filter *tfilter;
2334                 struct sock_fprog fprog;
2335                 struct sock_filter *filter;
2336                 int i;
2337 
2338                 if (optlen != sizeof(*tfprog)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2342                     return -TARGET_EFAULT;
2343                 }
2344                 if (!lock_user_struct(VERIFY_READ, tfilter,
2345                                       tswapal(tfprog->filter), 0)) {
2346                     unlock_user_struct(tfprog, optval_addr, 1);
2347                     return -TARGET_EFAULT;
2348                 }
2349 
2350                 fprog.len = tswap16(tfprog->len);
2351                 filter = g_try_new(struct sock_filter, fprog.len);
2352                 if (filter == NULL) {
2353                     unlock_user_struct(tfilter, tfprog->filter, 1);
2354                     unlock_user_struct(tfprog, optval_addr, 1);
2355                     return -TARGET_ENOMEM;
2356                 }
2357                 for (i = 0; i < fprog.len; i++) {
2358                     filter[i].code = tswap16(tfilter[i].code);
2359                     filter[i].jt = tfilter[i].jt;
2360                     filter[i].jf = tfilter[i].jf;
2361                     filter[i].k = tswap32(tfilter[i].k);
2362                 }
2363                 fprog.filter = filter;
2364 
2365                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2366                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2367                 g_free(filter);
2368 
2369                 unlock_user_struct(tfilter, tfprog->filter, 1);
2370                 unlock_user_struct(tfprog, optval_addr, 1);
2371                 return ret;
2372         }
2373         case TARGET_SO_BINDTODEVICE:
2374         {
2375                 char *dev_ifname, *addr_ifname;
2376 
2377                 if (optlen > IFNAMSIZ - 1) {
2378                     optlen = IFNAMSIZ - 1;
2379                 }
2380                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2381                 if (!dev_ifname) {
2382                     return -TARGET_EFAULT;
2383                 }
2384                 optname = SO_BINDTODEVICE;
2385                 addr_ifname = alloca(IFNAMSIZ);
2386                 memcpy(addr_ifname, dev_ifname, optlen);
2387                 addr_ifname[optlen] = 0;
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2389                                            addr_ifname, optlen));
2390                 unlock_user(dev_ifname, optval_addr, 0);
2391                 return ret;
2392         }
2393         case TARGET_SO_LINGER:
2394         {
2395                 struct linger lg;
2396                 struct target_linger *tlg;
2397 
2398                 if (optlen != sizeof(struct target_linger)) {
2399                     return -TARGET_EINVAL;
2400                 }
2401                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2402                     return -TARGET_EFAULT;
2403                 }
2404                 __get_user(lg.l_onoff, &tlg->l_onoff);
2405                 __get_user(lg.l_linger, &tlg->l_linger);
2406                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2407                                 &lg, sizeof(lg)));
2408                 unlock_user_struct(tlg, optval_addr, 0);
2409                 return ret;
2410         }
2411             /* Options with 'int' argument.  */
2412         case TARGET_SO_DEBUG:
2413 		optname = SO_DEBUG;
2414 		break;
2415         case TARGET_SO_REUSEADDR:
2416 		optname = SO_REUSEADDR;
2417 		break;
2418 #ifdef SO_REUSEPORT
2419         case TARGET_SO_REUSEPORT:
2420                 optname = SO_REUSEPORT;
2421                 break;
2422 #endif
2423         case TARGET_SO_TYPE:
2424 		optname = SO_TYPE;
2425 		break;
2426         case TARGET_SO_ERROR:
2427 		optname = SO_ERROR;
2428 		break;
2429         case TARGET_SO_DONTROUTE:
2430 		optname = SO_DONTROUTE;
2431 		break;
2432         case TARGET_SO_BROADCAST:
2433 		optname = SO_BROADCAST;
2434 		break;
2435         case TARGET_SO_SNDBUF:
2436 		optname = SO_SNDBUF;
2437 		break;
2438         case TARGET_SO_SNDBUFFORCE:
2439                 optname = SO_SNDBUFFORCE;
2440                 break;
2441         case TARGET_SO_RCVBUF:
2442 		optname = SO_RCVBUF;
2443 		break;
2444         case TARGET_SO_RCVBUFFORCE:
2445                 optname = SO_RCVBUFFORCE;
2446                 break;
2447         case TARGET_SO_KEEPALIVE:
2448 		optname = SO_KEEPALIVE;
2449 		break;
2450         case TARGET_SO_OOBINLINE:
2451 		optname = SO_OOBINLINE;
2452 		break;
2453         case TARGET_SO_NO_CHECK:
2454 		optname = SO_NO_CHECK;
2455 		break;
2456         case TARGET_SO_PRIORITY:
2457 		optname = SO_PRIORITY;
2458 		break;
2459 #ifdef SO_BSDCOMPAT
2460         case TARGET_SO_BSDCOMPAT:
2461 		optname = SO_BSDCOMPAT;
2462 		break;
2463 #endif
2464         case TARGET_SO_PASSCRED:
2465 		optname = SO_PASSCRED;
2466 		break;
2467         case TARGET_SO_PASSSEC:
2468                 optname = SO_PASSSEC;
2469                 break;
2470         case TARGET_SO_TIMESTAMP:
2471 		optname = SO_TIMESTAMP;
2472 		break;
2473         case TARGET_SO_RCVLOWAT:
2474 		optname = SO_RCVLOWAT;
2475 		break;
2476         default:
2477             goto unimplemented;
2478         }
2479 	if (optlen < sizeof(uint32_t))
2480             return -TARGET_EINVAL;
2481 
2482 	if (get_user_u32(val, optval_addr))
2483             return -TARGET_EFAULT;
2484 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2485         break;
2486 #ifdef SOL_NETLINK
2487     case SOL_NETLINK:
2488         switch (optname) {
2489         case NETLINK_PKTINFO:
2490         case NETLINK_ADD_MEMBERSHIP:
2491         case NETLINK_DROP_MEMBERSHIP:
2492         case NETLINK_BROADCAST_ERROR:
2493         case NETLINK_NO_ENOBUFS:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495         case NETLINK_LISTEN_ALL_NSID:
2496         case NETLINK_CAP_ACK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499         case NETLINK_EXT_ACK:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502         case NETLINK_GET_STRICT_CHK:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2504             break;
2505         default:
2506             goto unimplemented;
2507         }
2508         val = 0;
2509         if (optlen < sizeof(uint32_t)) {
2510             return -TARGET_EINVAL;
2511         }
2512         if (get_user_u32(val, optval_addr)) {
2513             return -TARGET_EFAULT;
2514         }
2515         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2516                                    sizeof(val)));
2517         break;
2518 #endif /* SOL_NETLINK */
2519     default:
2520     unimplemented:
2521         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2522                       level, optname);
2523         ret = -TARGET_ENOPROTOOPT;
2524     }
2525     return ret;
2526 }
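
/*
 * Example (sketch, hypothetical values): a big-endian guest on a
 * little-endian host calling
 *
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))
 *
 * takes the TARGET_SO_RCVTIMEO branch above: the target_timeval is
 * converted with copy_from_user_timeval(), which byte-swaps tv_sec and
 * tv_usec, and the host setsockopt() is issued with a native struct
 * timeval.  Plain integer options such as SO_KEEPALIVE go through
 * get_user_u32(), which performs the same endianness conversion on the
 * int value.
 */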
2527 
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long do_getsockopt(int sockfd, int level, int optname,
2530                               abi_ulong optval_addr, abi_ulong optlen)
2531 {
2532     abi_long ret;
2533     int len, val;
2534     socklen_t lv;
2535 
2536     switch(level) {
2537     case TARGET_SOL_SOCKET:
2538         level = SOL_SOCKET;
2539         switch (optname) {
2540         /* These don't just return a single integer */
2541         case TARGET_SO_PEERNAME:
2542             goto unimplemented;
2543         case TARGET_SO_RCVTIMEO: {
2544             struct timeval tv;
2545             socklen_t tvlen;
2546 
2547             optname = SO_RCVTIMEO;
2548 
2549 get_timeout:
2550             if (get_user_u32(len, optlen)) {
2551                 return -TARGET_EFAULT;
2552             }
2553             if (len < 0) {
2554                 return -TARGET_EINVAL;
2555             }
2556 
2557             tvlen = sizeof(tv);
2558             ret = get_errno(getsockopt(sockfd, level, optname,
2559                                        &tv, &tvlen));
2560             if (ret < 0) {
2561                 return ret;
2562             }
2563             if (len > sizeof(struct target_timeval)) {
2564                 len = sizeof(struct target_timeval);
2565             }
2566             if (copy_to_user_timeval(optval_addr, &tv)) {
2567                 return -TARGET_EFAULT;
2568             }
2569             if (put_user_u32(len, optlen)) {
2570                 return -TARGET_EFAULT;
2571             }
2572             break;
2573         }
2574         case TARGET_SO_SNDTIMEO:
2575             optname = SO_SNDTIMEO;
2576             goto get_timeout;
2577         case TARGET_SO_PEERCRED: {
2578             struct ucred cr;
2579             socklen_t crlen;
2580             struct target_ucred *tcr;
2581 
2582             if (get_user_u32(len, optlen)) {
2583                 return -TARGET_EFAULT;
2584             }
2585             if (len < 0) {
2586                 return -TARGET_EINVAL;
2587             }
2588 
2589             crlen = sizeof(cr);
2590             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2591                                        &cr, &crlen));
2592             if (ret < 0) {
2593                 return ret;
2594             }
2595             if (len > crlen) {
2596                 len = crlen;
2597             }
2598             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2599                 return -TARGET_EFAULT;
2600             }
2601             __put_user(cr.pid, &tcr->pid);
2602             __put_user(cr.uid, &tcr->uid);
2603             __put_user(cr.gid, &tcr->gid);
2604             unlock_user_struct(tcr, optval_addr, 1);
2605             if (put_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             break;
2609         }
2610         case TARGET_SO_PEERSEC: {
2611             char *name;
2612 
2613             if (get_user_u32(len, optlen)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             if (len < 0) {
2617                 return -TARGET_EINVAL;
2618             }
2619             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2620             if (!name) {
2621                 return -TARGET_EFAULT;
2622             }
2623             lv = len;
2624             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2625                                        name, &lv));
2626             if (put_user_u32(lv, optlen)) {
2627                 ret = -TARGET_EFAULT;
2628             }
2629             unlock_user(name, optval_addr, lv);
2630             break;
2631         }
2632         case TARGET_SO_LINGER:
2633         {
2634             struct linger lg;
2635             socklen_t lglen;
2636             struct target_linger *tlg;
2637 
2638             if (get_user_u32(len, optlen)) {
2639                 return -TARGET_EFAULT;
2640             }
2641             if (len < 0) {
2642                 return -TARGET_EINVAL;
2643             }
2644 
2645             lglen = sizeof(lg);
2646             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2647                                        &lg, &lglen));
2648             if (ret < 0) {
2649                 return ret;
2650             }
2651             if (len > lglen) {
2652                 len = lglen;
2653             }
2654             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             __put_user(lg.l_onoff, &tlg->l_onoff);
2658             __put_user(lg.l_linger, &tlg->l_linger);
2659             unlock_user_struct(tlg, optval_addr, 1);
2660             if (put_user_u32(len, optlen)) {
2661                 return -TARGET_EFAULT;
2662             }
2663             break;
2664         }
2665         /* Options with 'int' argument.  */
2666         case TARGET_SO_DEBUG:
2667             optname = SO_DEBUG;
2668             goto int_case;
2669         case TARGET_SO_REUSEADDR:
2670             optname = SO_REUSEADDR;
2671             goto int_case;
2672 #ifdef SO_REUSEPORT
2673         case TARGET_SO_REUSEPORT:
2674             optname = SO_REUSEPORT;
2675             goto int_case;
2676 #endif
2677         case TARGET_SO_TYPE:
2678             optname = SO_TYPE;
2679             goto int_case;
2680         case TARGET_SO_ERROR:
2681             optname = SO_ERROR;
2682             goto int_case;
2683         case TARGET_SO_DONTROUTE:
2684             optname = SO_DONTROUTE;
2685             goto int_case;
2686         case TARGET_SO_BROADCAST:
2687             optname = SO_BROADCAST;
2688             goto int_case;
2689         case TARGET_SO_SNDBUF:
2690             optname = SO_SNDBUF;
2691             goto int_case;
2692         case TARGET_SO_RCVBUF:
2693             optname = SO_RCVBUF;
2694             goto int_case;
2695         case TARGET_SO_KEEPALIVE:
2696             optname = SO_KEEPALIVE;
2697             goto int_case;
2698         case TARGET_SO_OOBINLINE:
2699             optname = SO_OOBINLINE;
2700             goto int_case;
2701         case TARGET_SO_NO_CHECK:
2702             optname = SO_NO_CHECK;
2703             goto int_case;
2704         case TARGET_SO_PRIORITY:
2705             optname = SO_PRIORITY;
2706             goto int_case;
2707 #ifdef SO_BSDCOMPAT
2708         case TARGET_SO_BSDCOMPAT:
2709             optname = SO_BSDCOMPAT;
2710             goto int_case;
2711 #endif
2712         case TARGET_SO_PASSCRED:
2713             optname = SO_PASSCRED;
2714             goto int_case;
2715         case TARGET_SO_TIMESTAMP:
2716             optname = SO_TIMESTAMP;
2717             goto int_case;
2718         case TARGET_SO_RCVLOWAT:
2719             optname = SO_RCVLOWAT;
2720             goto int_case;
2721         case TARGET_SO_ACCEPTCONN:
2722             optname = SO_ACCEPTCONN;
2723             goto int_case;
2724         case TARGET_SO_PROTOCOL:
2725             optname = SO_PROTOCOL;
2726             goto int_case;
2727         case TARGET_SO_DOMAIN:
2728             optname = SO_DOMAIN;
2729             goto int_case;
2730         default:
2731             goto int_case;
2732         }
2733         break;
2734     case SOL_TCP:
2735     case SOL_UDP:
2736         /* TCP and UDP options all take an 'int' value.  */
2737     int_case:
2738         if (get_user_u32(len, optlen))
2739             return -TARGET_EFAULT;
2740         if (len < 0)
2741             return -TARGET_EINVAL;
2742         lv = sizeof(lv);
2743         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2744         if (ret < 0)
2745             return ret;
2746         switch (optname) {
2747         case SO_TYPE:
2748             val = host_to_target_sock_type(val);
2749             break;
2750         case SO_ERROR:
2751             val = host_to_target_errno(val);
2752             break;
2753         }
2754         if (len > lv)
2755             len = lv;
2756         if (len == 4) {
2757             if (put_user_u32(val, optval_addr))
2758                 return -TARGET_EFAULT;
2759         } else {
2760             if (put_user_u8(val, optval_addr))
2761                 return -TARGET_EFAULT;
2762         }
2763         if (put_user_u32(len, optlen))
2764             return -TARGET_EFAULT;
2765         break;
2766     case SOL_IP:
2767         switch(optname) {
2768         case IP_TOS:
2769         case IP_TTL:
2770         case IP_HDRINCL:
2771         case IP_ROUTER_ALERT:
2772         case IP_RECVOPTS:
2773         case IP_RETOPTS:
2774         case IP_PKTINFO:
2775         case IP_MTU_DISCOVER:
2776         case IP_RECVERR:
2777         case IP_RECVTOS:
2778 #ifdef IP_FREEBIND
2779         case IP_FREEBIND:
2780 #endif
2781         case IP_MULTICAST_TTL:
2782         case IP_MULTICAST_LOOP:
2783             if (get_user_u32(len, optlen))
2784                 return -TARGET_EFAULT;
2785             if (len < 0)
2786                 return -TARGET_EINVAL;
2787             lv = sizeof(lv);
2788             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2789             if (ret < 0)
2790                 return ret;
2791             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2792                 len = 1;
2793                 if (put_user_u32(len, optlen)
2794                     || put_user_u8(val, optval_addr))
2795                     return -TARGET_EFAULT;
2796             } else {
2797                 if (len > sizeof(int))
2798                     len = sizeof(int);
2799                 if (put_user_u32(len, optlen)
2800                     || put_user_u32(val, optval_addr))
2801                     return -TARGET_EFAULT;
2802             }
2803             break;
2804         default:
2805             ret = -TARGET_ENOPROTOOPT;
2806             break;
2807         }
2808         break;
2809     case SOL_IPV6:
2810         switch (optname) {
2811         case IPV6_MTU_DISCOVER:
2812         case IPV6_MTU:
2813         case IPV6_V6ONLY:
2814         case IPV6_RECVPKTINFO:
2815         case IPV6_UNICAST_HOPS:
2816         case IPV6_MULTICAST_HOPS:
2817         case IPV6_MULTICAST_LOOP:
2818         case IPV6_RECVERR:
2819         case IPV6_RECVHOPLIMIT:
2820         case IPV6_2292HOPLIMIT:
2821         case IPV6_CHECKSUM:
2822         case IPV6_ADDRFORM:
2823         case IPV6_2292PKTINFO:
2824         case IPV6_RECVTCLASS:
2825         case IPV6_RECVRTHDR:
2826         case IPV6_2292RTHDR:
2827         case IPV6_RECVHOPOPTS:
2828         case IPV6_2292HOPOPTS:
2829         case IPV6_RECVDSTOPTS:
2830         case IPV6_2292DSTOPTS:
2831         case IPV6_TCLASS:
2832         case IPV6_ADDR_PREFERENCES:
2833 #ifdef IPV6_RECVPATHMTU
2834         case IPV6_RECVPATHMTU:
2835 #endif
2836 #ifdef IPV6_TRANSPARENT
2837         case IPV6_TRANSPARENT:
2838 #endif
2839 #ifdef IPV6_FREEBIND
2840         case IPV6_FREEBIND:
2841 #endif
2842 #ifdef IPV6_RECVORIGDSTADDR
2843         case IPV6_RECVORIGDSTADDR:
2844 #endif
2845             if (get_user_u32(len, optlen))
2846                 return -TARGET_EFAULT;
2847             if (len < 0)
2848                 return -TARGET_EINVAL;
2849             lv = sizeof(lv);
2850             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2851             if (ret < 0)
2852                 return ret;
2853             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2854                 len = 1;
2855                 if (put_user_u32(len, optlen)
2856                     || put_user_u8(val, optval_addr))
2857                     return -TARGET_EFAULT;
2858             } else {
2859                 if (len > sizeof(int))
2860                     len = sizeof(int);
2861                 if (put_user_u32(len, optlen)
2862                     || put_user_u32(val, optval_addr))
2863                     return -TARGET_EFAULT;
2864             }
2865             break;
2866         default:
2867             ret = -TARGET_ENOPROTOOPT;
2868             break;
2869         }
2870         break;
2871 #ifdef SOL_NETLINK
2872     case SOL_NETLINK:
2873         switch (optname) {
2874         case NETLINK_PKTINFO:
2875         case NETLINK_BROADCAST_ERROR:
2876         case NETLINK_NO_ENOBUFS:
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2878         case NETLINK_LISTEN_ALL_NSID:
2879         case NETLINK_CAP_ACK:
2880 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2882         case NETLINK_EXT_ACK:
2883 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2884 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2885         case NETLINK_GET_STRICT_CHK:
2886 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2887             if (get_user_u32(len, optlen)) {
2888                 return -TARGET_EFAULT;
2889             }
2890             if (len != sizeof(val)) {
2891                 return -TARGET_EINVAL;
2892             }
2893             lv = len;
2894             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2895             if (ret < 0) {
2896                 return ret;
2897             }
2898             if (put_user_u32(lv, optlen)
2899                 || put_user_u32(val, optval_addr)) {
2900                 return -TARGET_EFAULT;
2901             }
2902             break;
2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2904         case NETLINK_LIST_MEMBERSHIPS:
2905         {
2906             uint32_t *results;
2907             int i;
2908             if (get_user_u32(len, optlen)) {
2909                 return -TARGET_EFAULT;
2910             }
2911             if (len < 0) {
2912                 return -TARGET_EINVAL;
2913             }
2914             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2915             if (!results && len > 0) {
2916                 return -TARGET_EFAULT;
2917             }
2918             lv = len;
2919             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2920             if (ret < 0) {
2921                 unlock_user(results, optval_addr, 0);
2922                 return ret;
2923             }
2924             /* swap host endianness to target endianness. */
2925             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2926                 results[i] = tswap32(results[i]);
2927             }
2928             if (put_user_u32(lv, optlen)) {
2929                 return -TARGET_EFAULT;
2930             }
2931             unlock_user(results, optval_addr, 0);
2932             break;
2933         }
2934 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2935         default:
2936             goto unimplemented;
2937         }
2938         break;
2939 #endif /* SOL_NETLINK */
2940     default:
2941     unimplemented:
2942         qemu_log_mask(LOG_UNIMP,
2943                       "getsockopt level=%d optname=%d not yet supported\n",
2944                       level, optname);
2945         ret = -TARGET_EOPNOTSUPP;
2946         break;
2947     }
2948     return ret;
2949 }
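
/*
 * Example (sketch): a guest getsockopt(fd, SOL_SOCKET, SO_ERROR, &v,
 * &len) with len == 4 reaches the int_case path above: the host value
 * is read into 'val', converted with host_to_target_errno() (errno
 * numbers can differ between host and target ABIs), stored with
 * put_user_u32(), and the possibly clamped length is written back
 * through optlen.  A guest buffer shorter than four bytes takes the
 * put_user_u8() branch instead.
 */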
2950 
2951 /* Convert a target low/high pair representing a file offset into the
2952  * host low/high pair. This function doesn't handle offsets bigger than
2953  * 64 bits, as the kernel doesn't handle them either.
2954  */
2955 static void target_to_host_low_high(abi_ulong tlow,
2956                                     abi_ulong thigh,
2957                                     unsigned long *hlow,
2958                                     unsigned long *hhigh)
2959 {
2960     uint64_t off = tlow |
2961         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2962         TARGET_LONG_BITS / 2;
2963 
2964     *hlow = off;
2965     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2966 }
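
/*
 * Worked example (illustrative): with a 32-bit target on a 64-bit host,
 * tlow = 0x00001000 and thigh = 0x00000002 combine to
 *
 *     off    = 0x0000000200001000
 *     *hlow  = 0x0000000200001000   (fits in a single host long)
 *     *hhigh = 0
 *
 * The shifts are done in two halves so that, when TARGET_LONG_BITS or
 * HOST_LONG_BITS is 64, we never shift a 64-bit value by 64 bits,
 * which would be undefined behaviour in C.
 */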
2967 
2968 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2969                                 abi_ulong count, int copy)
2970 {
2971     struct target_iovec *target_vec;
2972     struct iovec *vec;
2973     abi_ulong total_len, max_len;
2974     int i;
2975     int err = 0;
2976     bool bad_address = false;
2977 
2978     if (count == 0) {
2979         errno = 0;
2980         return NULL;
2981     }
2982     if (count > IOV_MAX) {
2983         errno = EINVAL;
2984         return NULL;
2985     }
2986 
2987     vec = g_try_new0(struct iovec, count);
2988     if (vec == NULL) {
2989         errno = ENOMEM;
2990         return NULL;
2991     }
2992 
2993     target_vec = lock_user(VERIFY_READ, target_addr,
2994                            count * sizeof(struct target_iovec), 1);
2995     if (target_vec == NULL) {
2996         err = EFAULT;
2997         goto fail2;
2998     }
2999 
3000     /* ??? If host page size > target page size, this will result in a
3001        value larger than what we can actually support.  */
3002     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3003     total_len = 0;
3004 
3005     for (i = 0; i < count; i++) {
3006         abi_ulong base = tswapal(target_vec[i].iov_base);
3007         abi_long len = tswapal(target_vec[i].iov_len);
3008 
3009         if (len < 0) {
3010             err = EINVAL;
3011             goto fail;
3012         } else if (len == 0) {
3013             /* Zero length pointer is ignored.  */
3014             vec[i].iov_base = 0;
3015         } else {
3016             vec[i].iov_base = lock_user(type, base, len, copy);
3017             /* If the first buffer pointer is bad, this is a fault.  But
3018              * subsequent bad buffers will result in a partial write; this
3019              * is realized by filling the vector with null pointers and
3020              * zero lengths. */
3021             if (!vec[i].iov_base) {
3022                 if (i == 0) {
3023                     err = EFAULT;
3024                     goto fail;
3025                 } else {
3026                     bad_address = true;
3027                 }
3028             }
3029             if (bad_address) {
3030                 len = 0;
3031             }
3032             if (len > max_len - total_len) {
3033                 len = max_len - total_len;
3034             }
3035         }
3036         vec[i].iov_len = len;
3037         total_len += len;
3038     }
3039 
3040     unlock_user(target_vec, target_addr, 0);
3041     return vec;
3042 
3043  fail:
3044     while (--i >= 0) {
3045         if (tswapal(target_vec[i].iov_len) > 0) {
3046             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3047         }
3048     }
3049     unlock_user(target_vec, target_addr, 0);
3050  fail2:
3051     g_free(vec);
3052     errno = err;
3053     return NULL;
3054 }
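
/*
 * Example (sketch, hypothetical guest buffers): a writev() with three
 * iovecs whose second base address is unmapped does not fail with
 * EFAULT.  The second entry gets a NULL base and every entry from there
 * on gets a zero length, so the host writev() performs a partial write
 * covering only the first buffer -- the same outcome the guest would
 * see from the kernel.  Only a bad first buffer makes the whole call
 * fail with EFAULT.
 */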
3055 
3056 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3057                          abi_ulong count, int copy)
3058 {
3059     struct target_iovec *target_vec;
3060     int i;
3061 
3062     target_vec = lock_user(VERIFY_READ, target_addr,
3063                            count * sizeof(struct target_iovec), 1);
3064     if (target_vec) {
3065         for (i = 0; i < count; i++) {
3066             abi_ulong base = tswapal(target_vec[i].iov_base);
3067             abi_long len = tswapal(target_vec[i].iov_len);
3068             if (len < 0) {
3069                 break;
3070             }
3071             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3072         }
3073         unlock_user(target_vec, target_addr, 0);
3074     }
3075 
3076     g_free(vec);
3077 }
3078 
3079 static inline int target_to_host_sock_type(int *type)
3080 {
3081     int host_type = 0;
3082     int target_type = *type;
3083 
3084     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3085     case TARGET_SOCK_DGRAM:
3086         host_type = SOCK_DGRAM;
3087         break;
3088     case TARGET_SOCK_STREAM:
3089         host_type = SOCK_STREAM;
3090         break;
3091     default:
3092         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3093         break;
3094     }
3095     if (target_type & TARGET_SOCK_CLOEXEC) {
3096 #if defined(SOCK_CLOEXEC)
3097         host_type |= SOCK_CLOEXEC;
3098 #else
3099         return -TARGET_EINVAL;
3100 #endif
3101     }
3102     if (target_type & TARGET_SOCK_NONBLOCK) {
3103 #if defined(SOCK_NONBLOCK)
3104         host_type |= SOCK_NONBLOCK;
3105 #elif !defined(O_NONBLOCK)
3106         return -TARGET_EINVAL;
3107 #endif
3108     }
3109     *type = host_type;
3110     return 0;
3111 }
3112 
3113 /* Try to emulate socket type flags after socket creation.  */
3114 static int sock_flags_fixup(int fd, int target_type)
3115 {
3116 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3117     if (target_type & TARGET_SOCK_NONBLOCK) {
3118         int flags = fcntl(fd, F_GETFL);
3119         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3120             close(fd);
3121             return -TARGET_EINVAL;
3122         }
3123     }
3124 #endif
3125     return fd;
3126 }
3127 
3128 /* do_socket() Must return target values and target errnos. */
3129 static abi_long do_socket(int domain, int type, int protocol)
3130 {
3131     int target_type = type;
3132     int ret;
3133 
3134     ret = target_to_host_sock_type(&type);
3135     if (ret) {
3136         return ret;
3137     }
3138 
3139     if (domain == PF_NETLINK && !(
3140 #ifdef CONFIG_RTNETLINK
3141          protocol == NETLINK_ROUTE ||
3142 #endif
3143          protocol == NETLINK_KOBJECT_UEVENT ||
3144          protocol == NETLINK_AUDIT)) {
3145         return -TARGET_EPROTONOSUPPORT;
3146     }
3147 
3148     if (domain == AF_PACKET ||
3149         (domain == AF_INET && type == SOCK_PACKET)) {
3150         protocol = tswap16(protocol);
3151     }
3152 
3153     ret = get_errno(socket(domain, type, protocol));
3154     if (ret >= 0) {
3155         ret = sock_flags_fixup(ret, target_type);
3156         if (type == SOCK_PACKET) {
3157             /* Handle an obsolete case: if the socket type is
3158              * SOCK_PACKET, the socket is bound by name.
3159              */
3160             fd_trans_register(ret, &target_packet_trans);
3161         } else if (domain == PF_NETLINK) {
3162             switch (protocol) {
3163 #ifdef CONFIG_RTNETLINK
3164             case NETLINK_ROUTE:
3165                 fd_trans_register(ret, &target_netlink_route_trans);
3166                 break;
3167 #endif
3168             case NETLINK_KOBJECT_UEVENT:
3169                 /* nothing to do: messages are strings */
3170                 break;
3171             case NETLINK_AUDIT:
3172                 fd_trans_register(ret, &target_netlink_audit_trans);
3173                 break;
3174             default:
3175                 g_assert_not_reached();
3176             }
3177         }
3178     }
3179     return ret;
3180 }
3181 
3182 /* do_bind() Must return target values and target errnos. */
3183 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3184                         socklen_t addrlen)
3185 {
3186     void *addr;
3187     abi_long ret;
3188 
3189     if ((int)addrlen < 0) {
3190         return -TARGET_EINVAL;
3191     }
3192 
3193     addr = alloca(addrlen+1);
3194 
3195     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3196     if (ret)
3197         return ret;
3198 
3199     return get_errno(bind(sockfd, addr, addrlen));
3200 }
3201 
3202 /* do_connect() Must return target values and target errnos. */
3203 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3204                            socklen_t addrlen)
3205 {
3206     void *addr;
3207     abi_long ret;
3208 
3209     if ((int)addrlen < 0) {
3210         return -TARGET_EINVAL;
3211     }
3212 
3213     addr = alloca(addrlen+1);
3214 
3215     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3216     if (ret)
3217         return ret;
3218 
3219     return get_errno(safe_connect(sockfd, addr, addrlen));
3220 }
3221 
3222 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3223 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3224                                       int flags, int send)
3225 {
3226     abi_long ret, len;
3227     struct msghdr msg;
3228     abi_ulong count;
3229     struct iovec *vec;
3230     abi_ulong target_vec;
3231 
3232     if (msgp->msg_name) {
3233         msg.msg_namelen = tswap32(msgp->msg_namelen);
3234         msg.msg_name = alloca(msg.msg_namelen+1);
3235         ret = target_to_host_sockaddr(fd, msg.msg_name,
3236                                       tswapal(msgp->msg_name),
3237                                       msg.msg_namelen);
3238         if (ret == -TARGET_EFAULT) {
3239             /* For connected sockets msg_name and msg_namelen must
3240              * be ignored, so returning EFAULT immediately is wrong.
3241              * Instead, pass a bad msg_name to the host kernel, and
3242              * let it decide whether to return EFAULT or not.
3243              */
3244             msg.msg_name = (void *)-1;
3245         } else if (ret) {
3246             goto out2;
3247         }
3248     } else {
3249         msg.msg_name = NULL;
3250         msg.msg_namelen = 0;
3251     }
3252     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3253     msg.msg_control = alloca(msg.msg_controllen);
3254     memset(msg.msg_control, 0, msg.msg_controllen);
3255 
3256     msg.msg_flags = tswap32(msgp->msg_flags);
3257 
3258     count = tswapal(msgp->msg_iovlen);
3259     target_vec = tswapal(msgp->msg_iov);
3260 
3261     if (count > IOV_MAX) {
3262         /* sendmsg/recvmsg return a different errno for this condition than
3263          * readv/writev, so we must catch it here before lock_iovec() does.
3264          */
3265         ret = -TARGET_EMSGSIZE;
3266         goto out2;
3267     }
3268 
3269     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3270                      target_vec, count, send);
3271     if (vec == NULL) {
3272         ret = -host_to_target_errno(errno);
3273         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3274         if (!send || ret) {
3275             goto out2;
3276         }
3277     }
3278     msg.msg_iovlen = count;
3279     msg.msg_iov = vec;
3280 
3281     if (send) {
3282         if (fd_trans_target_to_host_data(fd)) {
3283             void *host_msg;
3284 
3285             host_msg = g_malloc(msg.msg_iov->iov_len);
3286             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3287             ret = fd_trans_target_to_host_data(fd)(host_msg,
3288                                                    msg.msg_iov->iov_len);
3289             if (ret >= 0) {
3290                 msg.msg_iov->iov_base = host_msg;
3291                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3292             }
3293             g_free(host_msg);
3294         } else {
3295             ret = target_to_host_cmsg(&msg, msgp);
3296             if (ret == 0) {
3297                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3298             }
3299         }
3300     } else {
3301         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3302         if (!is_error(ret)) {
3303             len = ret;
3304             if (fd_trans_host_to_target_data(fd)) {
3305                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3306                                                MIN(msg.msg_iov->iov_len, len));
3307             }
3308             if (!is_error(ret)) {
3309                 ret = host_to_target_cmsg(msgp, &msg);
3310             }
3311             if (!is_error(ret)) {
3312                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3313                 msgp->msg_flags = tswap32(msg.msg_flags);
3314                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3315                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3316                                     msg.msg_name, msg.msg_namelen);
3317                     if (ret) {
3318                         goto out;
3319                     }
3320                 }
3321 
3322                 ret = len;
3323             }
3324         }
3325     }
3326 
3327 out:
3328     if (vec) {
3329         unlock_iovec(vec, target_vec, count, !send);
3330     }
3331 out2:
3332     return ret;
3333 }
3334 
3335 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3336                                int flags, int send)
3337 {
3338     abi_long ret;
3339     struct target_msghdr *msgp;
3340 
3341     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3342                           msgp,
3343                           target_msg,
3344                           send ? 1 : 0)) {
3345         return -TARGET_EFAULT;
3346     }
3347     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3348     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3349     return ret;
3350 }
3351 
3352 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3353  * so it might not have this *mmsg-specific flag either.
3354  */
3355 #ifndef MSG_WAITFORONE
3356 #define MSG_WAITFORONE 0x10000
3357 #endif
3358 
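/*
 * Emulate sendmmsg/recvmmsg by looping over the message vector and
 * calling do_sendrecvmsg_locked() once per entry, recording each
 * message length in msg_len.  MSG_WAITFORONE is implemented by setting
 * MSG_DONTWAIT once the first message has been handled.
 */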
3359 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3360                                 unsigned int vlen, unsigned int flags,
3361                                 int send)
3362 {
3363     struct target_mmsghdr *mmsgp;
3364     abi_long ret = 0;
3365     int i;
3366 
3367     if (vlen > UIO_MAXIOV) {
3368         vlen = UIO_MAXIOV;
3369     }
3370 
3371     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3372     if (!mmsgp) {
3373         return -TARGET_EFAULT;
3374     }
3375 
3376     for (i = 0; i < vlen; i++) {
3377         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3378         if (is_error(ret)) {
3379             break;
3380         }
3381         mmsgp[i].msg_len = tswap32(ret);
3382         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3383         if (flags & MSG_WAITFORONE) {
3384             flags |= MSG_DONTWAIT;
3385         }
3386     }
3387 
3388     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3389 
3390     /* Return the number of datagrams sent or received if we processed
3391      * any at all; otherwise return the error.
3392      */
3393     if (i) {
3394         return i;
3395     }
3396     return ret;
3397 }
3398 
3399 /* do_accept4() Must return target values and target errnos. */
3400 static abi_long do_accept4(int fd, abi_ulong target_addr,
3401                            abi_ulong target_addrlen_addr, int flags)
3402 {
3403     socklen_t addrlen, ret_addrlen;
3404     void *addr;
3405     abi_long ret;
3406     int host_flags;
3407 
3408     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3409         return -TARGET_EINVAL;
3410     }
3411 
3412     host_flags = 0;
3413     if (flags & TARGET_SOCK_NONBLOCK) {
3414         host_flags |= SOCK_NONBLOCK;
3415     }
3416     if (flags & TARGET_SOCK_CLOEXEC) {
3417         host_flags |= SOCK_CLOEXEC;
3418     }
3419 
3420     if (target_addr == 0) {
3421         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3422     }
3423 
3424     /* Linux returns EFAULT if the addrlen pointer is invalid */
3425     if (get_user_u32(addrlen, target_addrlen_addr))
3426         return -TARGET_EFAULT;
3427 
3428     if ((int)addrlen < 0) {
3429         return -TARGET_EINVAL;
3430     }
3431 
3432     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433         return -TARGET_EFAULT;
3434     }
3435 
3436     addr = alloca(addrlen);
3437 
3438     ret_addrlen = addrlen;
3439     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3440     if (!is_error(ret)) {
3441         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443             ret = -TARGET_EFAULT;
3444         }
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3451                                abi_ulong target_addrlen_addr)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456 
3457     if (get_user_u32(addrlen, target_addrlen_addr))
3458         return -TARGET_EFAULT;
3459 
3460     if ((int)addrlen < 0) {
3461         return -TARGET_EINVAL;
3462     }
3463 
3464     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465         return -TARGET_EFAULT;
3466     }
3467 
3468     addr = alloca(addrlen);
3469 
3470     ret_addrlen = addrlen;
3471     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3472     if (!is_error(ret)) {
3473         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475             ret = -TARGET_EFAULT;
3476         }
3477     }
3478     return ret;
3479 }
3480 
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3483                                abi_ulong target_addrlen_addr)
3484 {
3485     socklen_t addrlen, ret_addrlen;
3486     void *addr;
3487     abi_long ret;
3488 
3489     if (get_user_u32(addrlen, target_addrlen_addr))
3490         return -TARGET_EFAULT;
3491 
3492     if ((int)addrlen < 0) {
3493         return -TARGET_EINVAL;
3494     }
3495 
3496     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497         return -TARGET_EFAULT;
3498     }
3499 
3500     addr = alloca(addrlen);
3501 
3502     ret_addrlen = addrlen;
3503     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3504     if (!is_error(ret)) {
3505         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507             ret = -TARGET_EFAULT;
3508         }
3509     }
3510     return ret;
3511 }
3512 
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long do_socketpair(int domain, int type, int protocol,
3515                               abi_ulong target_tab_addr)
3516 {
3517     int tab[2];
3518     abi_long ret;
3519 
3520     target_to_host_sock_type(&type);
3521 
3522     ret = get_errno(socketpair(domain, type, protocol, tab));
3523     if (!is_error(ret)) {
3524         if (put_user_s32(tab[0], target_tab_addr)
3525             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3526             ret = -TARGET_EFAULT;
3527     }
3528     return ret;
3529 }
3530 
3531 /* do_sendto() Must return target values and target errnos. */
3532 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3533                           abi_ulong target_addr, socklen_t addrlen)
3534 {
3535     void *addr;
3536     void *host_msg;
3537     void *copy_msg = NULL;
3538     abi_long ret;
3539 
3540     if ((int)addrlen < 0) {
3541         return -TARGET_EINVAL;
3542     }
3543 
3544     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3545     if (!host_msg)
3546         return -TARGET_EFAULT;
3547     if (fd_trans_target_to_host_data(fd)) {
3548         copy_msg = host_msg;
3549         host_msg = g_malloc(len);
3550         memcpy(host_msg, copy_msg, len);
3551         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3552         if (ret < 0) {
3553             goto fail;
3554         }
3555     }
3556     if (target_addr) {
3557         addr = alloca(addrlen+1);
3558         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3559         if (ret) {
3560             goto fail;
3561         }
3562         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3563     } else {
3564         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3565     }
3566 fail:
3567     if (copy_msg) {
3568         g_free(host_msg);
3569         host_msg = copy_msg;
3570     }
3571     unlock_user(host_msg, msg, 0);
3572     return ret;
3573 }
3574 
3575 /* do_recvfrom() Must return target values and target errnos. */
3576 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3577                             abi_ulong target_addr,
3578                             abi_ulong target_addrlen)
3579 {
3580     socklen_t addrlen, ret_addrlen;
3581     void *addr;
3582     void *host_msg;
3583     abi_long ret;
3584 
3585     if (!msg) {
3586         host_msg = NULL;
3587     } else {
3588         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3589         if (!host_msg) {
3590             return -TARGET_EFAULT;
3591         }
3592     }
3593     if (target_addr) {
3594         if (get_user_u32(addrlen, target_addrlen)) {
3595             ret = -TARGET_EFAULT;
3596             goto fail;
3597         }
3598         if ((int)addrlen < 0) {
3599             ret = -TARGET_EINVAL;
3600             goto fail;
3601         }
3602         addr = alloca(addrlen);
3603         ret_addrlen = addrlen;
3604         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3605                                       addr, &ret_addrlen));
3606     } else {
3607         addr = NULL; /* To keep compiler quiet.  */
3608         addrlen = 0; /* To keep compiler quiet.  */
3609         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3610     }
3611     if (!is_error(ret)) {
3612         if (fd_trans_host_to_target_data(fd)) {
3613             abi_long trans;
3614             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3615             if (is_error(trans)) {
3616                 ret = trans;
3617                 goto fail;
3618             }
3619         }
3620         if (target_addr) {
3621             host_to_target_sockaddr(target_addr, addr,
3622                                     MIN(addrlen, ret_addrlen));
3623             if (put_user_u32(ret_addrlen, target_addrlen)) {
3624                 ret = -TARGET_EFAULT;
3625                 goto fail;
3626             }
3627         }
3628         unlock_user(host_msg, msg, len);
3629     } else {
3630 fail:
3631         unlock_user(host_msg, msg, 0);
3632     }
3633     return ret;
3634 }
3635 
3636 #ifdef TARGET_NR_socketcall
3637 /* do_socketcall() must return target values and target errnos. */
3638 static abi_long do_socketcall(int num, abi_ulong vptr)
3639 {
3640     static const unsigned nargs[] = { /* number of arguments per operation */
3641         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3642         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3643         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3644         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3645         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3646         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3647         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3648         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3649         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3650         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3651         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3652         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3653         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3654         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3655         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3656         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3657         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3658         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3659         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3660         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3661     };
3662     abi_long a[6]; /* max 6 args */
3663     unsigned i;
3664 
3665     /* check the range of the first argument num */
3666     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3667     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3668         return -TARGET_EINVAL;
3669     }
3670     /* ensure we have space for args */
3671     if (nargs[num] > ARRAY_SIZE(a)) {
3672         return -TARGET_EINVAL;
3673     }
3674     /* collect the arguments in a[] according to nargs[] */
3675     for (i = 0; i < nargs[num]; ++i) {
3676         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3677             return -TARGET_EFAULT;
3678         }
3679     }
3680     /* now when we have the args, invoke the appropriate underlying function */
3681     switch (num) {
3682     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3683         return do_socket(a[0], a[1], a[2]);
3684     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3685         return do_bind(a[0], a[1], a[2]);
3686     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3687         return do_connect(a[0], a[1], a[2]);
3688     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3689         return get_errno(listen(a[0], a[1]));
3690     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3691         return do_accept4(a[0], a[1], a[2], 0);
3692     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3693         return do_getsockname(a[0], a[1], a[2]);
3694     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3695         return do_getpeername(a[0], a[1], a[2]);
3696     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3697         return do_socketpair(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3699         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3700     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3701         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3702     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3703         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3704     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3705         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3706     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3707         return get_errno(shutdown(a[0], a[1]));
3708     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3709         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3710     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3711         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3712     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3713         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3714     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3715         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3716     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3717         return do_accept4(a[0], a[1], a[2], a[3]);
3718     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3719         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3720     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3721         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3722     default:
3723         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3724         return -TARGET_EINVAL;
3725     }
3726 }
3727 #endif
3728 
3729 #define N_SHM_REGIONS	32
3730 
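/*
 * Book-keeping for guest shmat() attachments: do_shmat() records the
 * guest address and size of each mapping in this fixed table of
 * N_SHM_REGIONS entries so that do_shmdt() can clear the corresponding
 * page flags when the segment is detached.
 */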
3731 static struct shm_region {
3732     abi_ulong start;
3733     abi_ulong size;
3734     bool in_use;
3735 } shm_regions[N_SHM_REGIONS];
3736 
3737 #ifndef TARGET_SEMID64_DS
3738 /* asm-generic version of this struct */
3739 struct target_semid64_ds
3740 {
3741   struct target_ipc_perm sem_perm;
3742   abi_ulong sem_otime;
3743 #if TARGET_ABI_BITS == 32
3744   abi_ulong __unused1;
3745 #endif
3746   abi_ulong sem_ctime;
3747 #if TARGET_ABI_BITS == 32
3748   abi_ulong __unused2;
3749 #endif
3750   abi_ulong sem_nsems;
3751   abi_ulong __unused3;
3752   abi_ulong __unused4;
3753 };
3754 #endif
3755 
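/*
 * Convert the ipc_perm embedded in a target semid64_ds.  The width of
 * the mode and __seq fields varies between targets, hence the
 * per-target tswap16/tswap32 variants below.
 */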
3756 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3757                                                abi_ulong target_addr)
3758 {
3759     struct target_ipc_perm *target_ip;
3760     struct target_semid64_ds *target_sd;
3761 
3762     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3763         return -TARGET_EFAULT;
3764     target_ip = &(target_sd->sem_perm);
3765     host_ip->__key = tswap32(target_ip->__key);
3766     host_ip->uid = tswap32(target_ip->uid);
3767     host_ip->gid = tswap32(target_ip->gid);
3768     host_ip->cuid = tswap32(target_ip->cuid);
3769     host_ip->cgid = tswap32(target_ip->cgid);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771     host_ip->mode = tswap32(target_ip->mode);
3772 #else
3773     host_ip->mode = tswap16(target_ip->mode);
3774 #endif
3775 #if defined(TARGET_PPC)
3776     host_ip->__seq = tswap32(target_ip->__seq);
3777 #else
3778     host_ip->__seq = tswap16(target_ip->__seq);
3779 #endif
3780     unlock_user_struct(target_sd, target_addr, 0);
3781     return 0;
3782 }
3783 
3784 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3785                                                struct ipc_perm *host_ip)
3786 {
3787     struct target_ipc_perm *target_ip;
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3791         return -TARGET_EFAULT;
3792     target_ip = &(target_sd->sem_perm);
3793     target_ip->__key = tswap32(host_ip->__key);
3794     target_ip->uid = tswap32(host_ip->uid);
3795     target_ip->gid = tswap32(host_ip->gid);
3796     target_ip->cuid = tswap32(host_ip->cuid);
3797     target_ip->cgid = tswap32(host_ip->cgid);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799     target_ip->mode = tswap32(host_ip->mode);
3800 #else
3801     target_ip->mode = tswap16(host_ip->mode);
3802 #endif
3803 #if defined(TARGET_PPC)
3804     target_ip->__seq = tswap32(host_ip->__seq);
3805 #else
3806     target_ip->__seq = tswap16(host_ip->__seq);
3807 #endif
3808     unlock_user_struct(target_sd, target_addr, 1);
3809     return 0;
3810 }
3811 
3812 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3813                                                abi_ulong target_addr)
3814 {
3815     struct target_semid64_ds *target_sd;
3816 
3817     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3818         return -TARGET_EFAULT;
3819     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3820         return -TARGET_EFAULT;
3821     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3822     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3823     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3824     unlock_user_struct(target_sd, target_addr, 0);
3825     return 0;
3826 }
3827 
3828 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3829                                                struct semid_ds *host_sd)
3830 {
3831     struct target_semid64_ds *target_sd;
3832 
3833     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834         return -TARGET_EFAULT;
3835     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3836         return -TARGET_EFAULT;
3837     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3838     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3839     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3840     unlock_user_struct(target_sd, target_addr, 1);
3841     return 0;
3842 }
3843 
3844 struct target_seminfo {
3845     int semmap;
3846     int semmni;
3847     int semmns;
3848     int semmnu;
3849     int semmsl;
3850     int semopm;
3851     int semume;
3852     int semusz;
3853     int semvmx;
3854     int semaem;
3855 };
3856 
3857 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3858                                               struct seminfo *host_seminfo)
3859 {
3860     struct target_seminfo *target_seminfo;
3861     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3862         return -TARGET_EFAULT;
3863     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3864     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3865     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3866     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3867     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3868     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3869     __put_user(host_seminfo->semume, &target_seminfo->semume);
3870     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3871     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3872     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3873     unlock_user_struct(target_seminfo, target_addr, 1);
3874     return 0;
3875 }
3876 
3877 union semun {
3878 	int val;
3879 	struct semid_ds *buf;
3880 	unsigned short *array;
3881 	struct seminfo *__buf;
3882 };
3883 
3884 union target_semun {
3885 	int val;
3886 	abi_ulong buf;
3887 	abi_ulong array;
3888 	abi_ulong __buf;
3889 };
3890 
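/*
 * Allocate a host semaphore value array and fill it from guest memory.
 * The number of semaphores is obtained via IPC_STAT on the semaphore
 * set.  The array is freed again by host_to_target_semarray(), which
 * copies the (possibly updated) values back to the guest.
 */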
3891 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3892                                                abi_ulong target_addr)
3893 {
3894     int nsems;
3895     unsigned short *array;
3896     union semun semun;
3897     struct semid_ds semid_ds;
3898     int i, ret;
3899 
3900     semun.buf = &semid_ds;
3901 
3902     ret = semctl(semid, 0, IPC_STAT, semun);
3903     if (ret == -1)
3904         return get_errno(ret);
3905 
3906     nsems = semid_ds.sem_nsems;
3907 
3908     *host_array = g_try_new(unsigned short, nsems);
3909     if (!*host_array) {
3910         return -TARGET_ENOMEM;
3911     }
3912     array = lock_user(VERIFY_READ, target_addr,
3913                       nsems*sizeof(unsigned short), 1);
3914     if (!array) {
3915         g_free(*host_array);
3916         return -TARGET_EFAULT;
3917     }
3918 
3919     for (i = 0; i < nsems; i++) {
3920         __get_user((*host_array)[i], &array[i]);
3921     }
3922     unlock_user(array, target_addr, 0);
3923 
3924     return 0;
3925 }
3926 
3927 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3928                                                unsigned short **host_array)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     array = lock_user(VERIFY_WRITE, target_addr,
3945                       nsems*sizeof(unsigned short), 0);
3946     if (!array)
3947         return -TARGET_EFAULT;
3948 
3949     for (i = 0; i < nsems; i++) {
3950         __put_user((*host_array)[i], &array[i]);
3951     }
3952     g_free(*host_array);
3953     unlock_user(array, target_addr, 1);
3954 
3955     return 0;
3956 }
3957 
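/*
 * do_semctl() must return target values and target errnos.  The semun
 * argument is marshalled according to the command: GETVAL/SETVAL pass a
 * plain value, GETALL/SETALL a semaphore array, IPC_STAT/IPC_SET/SEM_STAT
 * a semid_ds structure, and IPC_INFO/SEM_INFO a seminfo buffer.
 */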
3958 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3959                                  abi_ulong target_arg)
3960 {
3961     union target_semun target_su = { .buf = target_arg };
3962     union semun arg;
3963     struct semid_ds dsarg;
3964     unsigned short *array = NULL;
3965     struct seminfo seminfo;
3966     abi_long ret = -TARGET_EINVAL;
3967     abi_long err;
3968     cmd &= 0xff;
3969 
3970     switch( cmd ) {
3971 	case GETVAL:
3972 	case SETVAL:
3973             /* In 64 bit cross-endian situations, we will erroneously pick up
3974              * the wrong half of the union for the "val" element.  To rectify
3975              * this, the entire 8-byte structure is byteswapped, followed by
3976              * a swap of the 4 byte val field.  In other cases, the data is
3977              * already in proper host byte order. */
3978 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3979 		target_su.buf = tswapal(target_su.buf);
3980 		arg.val = tswap32(target_su.val);
3981 	    } else {
3982 		arg.val = target_su.val;
3983 	    }
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             break;
3986 	case GETALL:
3987 	case SETALL:
3988             err = target_to_host_semarray(semid, &array, target_su.array);
3989             if (err)
3990                 return err;
3991             arg.array = array;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_semarray(semid, target_su.array, &array);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_STAT:
3998 	case IPC_SET:
3999 	case SEM_STAT:
4000             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4001             if (err)
4002                 return err;
4003             arg.buf = &dsarg;
4004             ret = get_errno(semctl(semid, semnum, cmd, arg));
4005             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4006             if (err)
4007                 return err;
4008             break;
4009 	case IPC_INFO:
4010 	case SEM_INFO:
4011             arg.__buf = &seminfo;
4012             ret = get_errno(semctl(semid, semnum, cmd, arg));
4013             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4014             if (err)
4015                 return err;
4016             break;
4017 	case IPC_RMID:
4018 	case GETPID:
4019 	case GETNCNT:
4020 	case GETZCNT:
4021             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4022             break;
4023     }
4024 
4025     return ret;
4026 }
4027 
4028 struct target_sembuf {
4029     unsigned short sem_num;
4030     short sem_op;
4031     short sem_flg;
4032 };
4033 
4034 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4035                                              abi_ulong target_addr,
4036                                              unsigned nsops)
4037 {
4038     struct target_sembuf *target_sembuf;
4039     int i;
4040 
4041     target_sembuf = lock_user(VERIFY_READ, target_addr,
4042                               nsops*sizeof(struct target_sembuf), 1);
4043     if (!target_sembuf)
4044         return -TARGET_EFAULT;
4045 
4046     for (i = 0; i < nsops; i++) {
4047         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4048         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4049         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050     }
4051 
4052     unlock_user(target_sembuf, target_addr, 0);
4053 
4054     return 0;
4055 }
4056 
4057 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4058     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4059 
4060 /*
4061  * This macro is required to handle the s390 variant, which passes the
4062  * arguments in a different order than the default.
4063  */
4064 #ifdef __s390x__
4065 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4066   (__nsops), (__timeout), (__sops)
4067 #else
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069   (__nsops), 0, (__sops), (__timeout)
4070 #endif
4071 
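/*
 * Emulate semop()/semtimedop().  The optional timeout is converted from
 * either the 32-bit or the 64-bit target timespec layout, the sembuf
 * array is copied in from guest memory (at most TARGET_SEMOPM entries),
 * and the operation is issued via semtimedop(2) when available, falling
 * back to the multiplexed ipc(2) syscall otherwise.
 */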
4072 static inline abi_long do_semtimedop(int semid,
4073                                      abi_long ptr,
4074                                      unsigned nsops,
4075                                      abi_long timeout, bool time64)
4076 {
4077     struct sembuf *sops;
4078     struct timespec ts, *pts = NULL;
4079     abi_long ret;
4080 
4081     if (timeout) {
4082         pts = &ts;
4083         if (time64) {
4084             if (target_to_host_timespec64(pts, timeout)) {
4085                 return -TARGET_EFAULT;
4086             }
4087         } else {
4088             if (target_to_host_timespec(pts, timeout)) {
4089                 return -TARGET_EFAULT;
4090             }
4091         }
4092     }
4093 
4094     if (nsops > TARGET_SEMOPM) {
4095         return -TARGET_E2BIG;
4096     }
4097 
4098     sops = g_new(struct sembuf, nsops);
4099 
4100     if (target_to_host_sembuf(sops, ptr, nsops)) {
4101         g_free(sops);
4102         return -TARGET_EFAULT;
4103     }
4104 
4105     ret = -TARGET_ENOSYS;
4106 #ifdef __NR_semtimedop
4107     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4108 #endif
4109 #ifdef __NR_ipc
4110     if (ret == -TARGET_ENOSYS) {
4111         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4112                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4113     }
4114 #endif
4115     g_free(sops);
4116     return ret;
4117 }
4118 #endif
4119 
4120 struct target_msqid_ds
4121 {
4122     struct target_ipc_perm msg_perm;
4123     abi_ulong msg_stime;
4124 #if TARGET_ABI_BITS == 32
4125     abi_ulong __unused1;
4126 #endif
4127     abi_ulong msg_rtime;
4128 #if TARGET_ABI_BITS == 32
4129     abi_ulong __unused2;
4130 #endif
4131     abi_ulong msg_ctime;
4132 #if TARGET_ABI_BITS == 32
4133     abi_ulong __unused3;
4134 #endif
4135     abi_ulong __msg_cbytes;
4136     abi_ulong msg_qnum;
4137     abi_ulong msg_qbytes;
4138     abi_ulong msg_lspid;
4139     abi_ulong msg_lrpid;
4140     abi_ulong __unused4;
4141     abi_ulong __unused5;
4142 };
4143 
4144 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4145                                                abi_ulong target_addr)
4146 {
4147     struct target_msqid_ds *target_md;
4148 
4149     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4150         return -TARGET_EFAULT;
4151     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4152         return -TARGET_EFAULT;
4153     host_md->msg_stime = tswapal(target_md->msg_stime);
4154     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4155     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4156     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4157     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4158     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4159     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4160     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4161     unlock_user_struct(target_md, target_addr, 0);
4162     return 0;
4163 }
4164 
4165 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4166                                                struct msqid_ds *host_md)
4167 {
4168     struct target_msqid_ds *target_md;
4169 
4170     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4171         return -TARGET_EFAULT;
4172     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4173         return -TARGET_EFAULT;
4174     target_md->msg_stime = tswapal(host_md->msg_stime);
4175     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4176     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4177     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4178     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4179     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4180     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4181     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4182     unlock_user_struct(target_md, target_addr, 1);
4183     return 0;
4184 }
4185 
4186 struct target_msginfo {
4187     int msgpool;
4188     int msgmap;
4189     int msgmax;
4190     int msgmnb;
4191     int msgmni;
4192     int msgssz;
4193     int msgtql;
4194     unsigned short int msgseg;
4195 };
4196 
4197 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4198                                               struct msginfo *host_msginfo)
4199 {
4200     struct target_msginfo *target_msginfo;
4201     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4202         return -TARGET_EFAULT;
4203     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4204     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4205     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4206     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4207     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4208     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4209     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4210     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4211     unlock_user_struct(target_msginfo, target_addr, 1);
4212     return 0;
4213 }
4214 
4215 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4216 {
4217     struct msqid_ds dsarg;
4218     struct msginfo msginfo;
4219     abi_long ret = -TARGET_EINVAL;
4220 
4221     cmd &= 0xff;
4222 
4223     switch (cmd) {
4224     case IPC_STAT:
4225     case IPC_SET:
4226     case MSG_STAT:
4227         if (target_to_host_msqid_ds(&dsarg,ptr))
4228             return -TARGET_EFAULT;
4229         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4230         if (host_to_target_msqid_ds(ptr,&dsarg))
4231             return -TARGET_EFAULT;
4232         break;
4233     case IPC_RMID:
4234         ret = get_errno(msgctl(msgid, cmd, NULL));
4235         break;
4236     case IPC_INFO:
4237     case MSG_INFO:
4238         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4239         if (host_to_target_msginfo(ptr, &msginfo))
4240             return -TARGET_EFAULT;
4241         break;
4242     }
4243 
4244     return ret;
4245 }
4246 
4247 struct target_msgbuf {
4248     abi_long mtype;
4249     char	mtext[1];
4250 };
4251 
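/*
 * Emulate msgsnd(): copy the target msgbuf (mtype plus msgsz bytes of
 * mtext) into a freshly allocated host buffer, then try the msgsnd
 * syscall directly and fall back to the multiplexed ipc(2) syscall if
 * it is not available on this host.
 */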
4252 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4253                                  ssize_t msgsz, int msgflg)
4254 {
4255     struct target_msgbuf *target_mb;
4256     struct msgbuf *host_mb;
4257     abi_long ret = 0;
4258 
4259     if (msgsz < 0) {
4260         return -TARGET_EINVAL;
4261     }
4262 
4263     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4264         return -TARGET_EFAULT;
4265     host_mb = g_try_malloc(msgsz + sizeof(long));
4266     if (!host_mb) {
4267         unlock_user_struct(target_mb, msgp, 0);
4268         return -TARGET_ENOMEM;
4269     }
4270     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4271     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4272     ret = -TARGET_ENOSYS;
4273 #ifdef __NR_msgsnd
4274     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4275 #endif
4276 #ifdef __NR_ipc
4277     if (ret == -TARGET_ENOSYS) {
4278 #ifdef __s390x__
4279         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4280                                  host_mb));
4281 #else
4282         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4283                                  host_mb, 0));
4284 #endif
4285     }
4286 #endif
4287     g_free(host_mb);
4288     unlock_user_struct(target_mb, msgp, 0);
4289 
4290     return ret;
4291 }
4292 
4293 #ifdef __NR_ipc
4294 #if defined(__sparc__)
4295 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4296 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4297 #elif defined(__s390x__)
4298 /* The s390 sys_ipc variant has only five parameters.  */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4300     ((long int[]){(long int)__msgp, __msgtyp})
4301 #else
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303     ((long int[]){(long int)__msgp, __msgtyp}), 0
4304 #endif
4305 #endif
4306 
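/*
 * Emulate msgrcv(): receive into a temporary host buffer (via msgrcv or
 * the multiplexed ipc(2) syscall) and, on success, copy mtype and the
 * received mtext back into the guest msgbuf.
 */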
4307 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4308                                  ssize_t msgsz, abi_long msgtyp,
4309                                  int msgflg)
4310 {
4311     struct target_msgbuf *target_mb;
4312     char *target_mtext;
4313     struct msgbuf *host_mb;
4314     abi_long ret = 0;
4315 
4316     if (msgsz < 0) {
4317         return -TARGET_EINVAL;
4318     }
4319 
4320     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4321         return -TARGET_EFAULT;
4322 
4323     host_mb = g_try_malloc(msgsz + sizeof(long));
4324     if (!host_mb) {
4325         ret = -TARGET_ENOMEM;
4326         goto end;
4327     }
4328     ret = -TARGET_ENOSYS;
4329 #ifdef __NR_msgrcv
4330     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4331 #endif
4332 #ifdef __NR_ipc
4333     if (ret == -TARGET_ENOSYS) {
4334         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4335                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4336     }
4337 #endif
4338 
4339     if (ret > 0) {
4340         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4341         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4342         if (!target_mtext) {
4343             ret = -TARGET_EFAULT;
4344             goto end;
4345         }
4346         memcpy(target_mb->mtext, host_mb->mtext, ret);
4347         unlock_user(target_mtext, target_mtext_addr, ret);
4348     }
4349 
4350     target_mb->mtype = tswapal(host_mb->mtype);
4351 
4352 end:
4353     if (target_mb)
4354         unlock_user_struct(target_mb, msgp, 1);
4355     g_free(host_mb);
4356     return ret;
4357 }
4358 
4359 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4360                                                abi_ulong target_addr)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365         return -TARGET_EFAULT;
4366     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4367         return -TARGET_EFAULT;
4368     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 0);
4376     return 0;
4377 }
4378 
4379 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4380                                                struct shmid_ds *host_sd)
4381 {
4382     struct target_shmid_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4387         return -TARGET_EFAULT;
4388     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4390     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395     unlock_user_struct(target_sd, target_addr, 1);
4396     return 0;
4397 }
4398 
4399 struct  target_shminfo {
4400     abi_ulong shmmax;
4401     abi_ulong shmmin;
4402     abi_ulong shmmni;
4403     abi_ulong shmseg;
4404     abi_ulong shmall;
4405 };
4406 
4407 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4408                                               struct shminfo *host_shminfo)
4409 {
4410     struct target_shminfo *target_shminfo;
4411     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4412         return -TARGET_EFAULT;
4413     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4414     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4415     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4416     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4417     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4418     unlock_user_struct(target_shminfo, target_addr, 1);
4419     return 0;
4420 }
4421 
4422 struct target_shm_info {
4423     int used_ids;
4424     abi_ulong shm_tot;
4425     abi_ulong shm_rss;
4426     abi_ulong shm_swp;
4427     abi_ulong swap_attempts;
4428     abi_ulong swap_successes;
4429 };
4430 
4431 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4432                                                struct shm_info *host_shm_info)
4433 {
4434     struct target_shm_info *target_shm_info;
4435     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4436         return -TARGET_EFAULT;
4437     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4438     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4439     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4440     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4441     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4442     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4443     unlock_user_struct(target_shm_info, target_addr, 1);
4444     return 0;
4445 }
4446 
4447 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4448 {
4449     struct shmid_ds dsarg;
4450     struct shminfo shminfo;
4451     struct shm_info shm_info;
4452     abi_long ret = -TARGET_EINVAL;
4453 
4454     cmd &= 0xff;
4455 
4456     switch(cmd) {
4457     case IPC_STAT:
4458     case IPC_SET:
4459     case SHM_STAT:
4460         if (target_to_host_shmid_ds(&dsarg, buf))
4461             return -TARGET_EFAULT;
4462         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4463         if (host_to_target_shmid_ds(buf, &dsarg))
4464             return -TARGET_EFAULT;
4465         break;
4466     case IPC_INFO:
4467         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4468         if (host_to_target_shminfo(buf, &shminfo))
4469             return -TARGET_EFAULT;
4470         break;
4471     case SHM_INFO:
4472         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4473         if (host_to_target_shm_info(buf, &shm_info))
4474             return -TARGET_EFAULT;
4475         break;
4476     case IPC_RMID:
4477     case SHM_LOCK:
4478     case SHM_UNLOCK:
4479         ret = get_errno(shmctl(shmid, cmd, NULL));
4480         break;
4481     }
4482 
4483     return ret;
4484 }
4485 
4486 #ifndef TARGET_FORCE_SHMLBA
4487 /* For most architectures, SHMLBA is the same as the page size;
4488  * some architectures have larger values, in which case they should
4489  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4490  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4491  * and defining its own value for SHMLBA.
4492  *
4493  * The kernel also permits SHMLBA to be set by the architecture to a
4494  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4495  * this means that addresses are rounded to the large size if
4496  * SHM_RND is set but addresses not aligned to that size are not rejected
4497  * as long as they are at least page-aligned. Since the only architecture
4498  * which uses this is ia64, this code doesn't provide for that oddity.
4499  */
4500 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4501 {
4502     return TARGET_PAGE_SIZE;
4503 }
4504 #endif
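
/*
 * For illustration: as described above, a target with a larger SHMLBA
 * would define TARGET_FORCE_SHMLBA in its target headers and provide
 * its own helper instead, along the lines of (hypothetical value):
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x4000;
 *     }
 */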
4505 
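/*
 * Attach a SysV shared memory segment into the guest address space.
 * The segment size is queried with IPC_STAT, the requested address is
 * checked against the target SHMLBA alignment rules, and when no
 * address is given a suitable host VMA honouring the host SHMLBA is
 * found first.  The resulting mapping is recorded in shm_regions[] and
 * the guest page flags are updated accordingly.
 */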
4506 static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
4507                           abi_ulong shmaddr, int shmflg)
4508 {
4509     CPUState *cpu = env_cpu(cpu_env);
4510     abi_ulong raddr;
4511     void *host_raddr;
4512     struct shmid_ds shm_info;
4513     int i, ret;
4514     abi_ulong shmlba;
4515 
4516     /* shmat pointers are always untagged */
4517 
4518     /* find out the length of the shared memory segment */
4519     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4520     if (is_error(ret)) {
4521         /* can't get length, bail out */
4522         return ret;
4523     }
4524 
4525     shmlba = target_shmlba(cpu_env);
4526 
4527     if (shmaddr & (shmlba - 1)) {
4528         if (shmflg & SHM_RND) {
4529             shmaddr &= ~(shmlba - 1);
4530         } else {
4531             return -TARGET_EINVAL;
4532         }
4533     }
4534     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4535         return -TARGET_EINVAL;
4536     }
4537 
4538     mmap_lock();
4539 
4540     /*
4541      * We're mapping shared memory, so ensure we generate code for parallel
4542      * execution and flush old translations.  This will work up to the level
4543      * supported by the host -- anything that requires EXCP_ATOMIC will not
4544      * be atomic with respect to an external process.
4545      */
4546     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4547         cpu->tcg_cflags |= CF_PARALLEL;
4548         tb_flush(cpu);
4549     }
4550 
4551     if (shmaddr)
4552         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4553     else {
4554         abi_ulong mmap_start;
4555 
4556         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4557         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4558 
4559         if (mmap_start == -1) {
4560             errno = ENOMEM;
4561             host_raddr = (void *)-1;
4562         } else
4563             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4564                                shmflg | SHM_REMAP);
4565     }
4566 
4567     if (host_raddr == (void *)-1) {
4568         mmap_unlock();
4569         return get_errno((intptr_t)host_raddr);
4570     }
4571     raddr = h2g((uintptr_t)host_raddr);
4572 
4573     page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4574                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4575                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4576 
4577     for (i = 0; i < N_SHM_REGIONS; i++) {
4578         if (!shm_regions[i].in_use) {
4579             shm_regions[i].in_use = true;
4580             shm_regions[i].start = raddr;
4581             shm_regions[i].size = shm_info.shm_segsz;
4582             break;
4583         }
4584     }
4585 
4586     mmap_unlock();
4587     return raddr;
4588 }
4589 
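/*
 * Detach a shared memory segment previously attached with do_shmat():
 * clear the guest page flags for the recorded region and call shmdt()
 * on the corresponding host address.
 */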
4590 static inline abi_long do_shmdt(abi_ulong shmaddr)
4591 {
4592     int i;
4593     abi_long rv;
4594 
4595     /* shmdt pointers are always untagged */
4596 
4597     mmap_lock();
4598 
4599     for (i = 0; i < N_SHM_REGIONS; ++i) {
4600         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4601             shm_regions[i].in_use = false;
4602             page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4603             break;
4604         }
4605     }
4606     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4607 
4608     mmap_unlock();
4609 
4610     return rv;
4611 }
4612 
4613 #ifdef TARGET_NR_ipc
4614 /* ??? This only works with linear mappings.  */
4615 /* do_ipc() must return target values and target errnos. */
4616 static abi_long do_ipc(CPUArchState *cpu_env,
4617                        unsigned int call, abi_long first,
4618                        abi_long second, abi_long third,
4619                        abi_long ptr, abi_long fifth)
4620 {
4621     int version;
4622     abi_long ret = 0;
4623 
4624     version = call >> 16;
4625     call &= 0xffff;
4626 
4627     switch (call) {
4628     case IPCOP_semop:
4629         ret = do_semtimedop(first, ptr, second, 0, false);
4630         break;
4631     case IPCOP_semtimedop:
4632     /*
4633      * The s390 sys_ipc variant has only five parameters instead of six
4634      * (as in the default variant); the only difference is the handling of
4635      * SEMTIMEDOP, where on s390 the third parameter is a pointer to a
4636      * struct timespec while the generic variant uses the fifth parameter.
4637      */
4638 #if defined(TARGET_S390X)
4639         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4640 #else
4641         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4642 #endif
4643         break;
4644 
4645     case IPCOP_semget:
4646         ret = get_errno(semget(first, second, third));
4647         break;
4648 
4649     case IPCOP_semctl: {
4650         /* The semun argument to semctl is passed by value, so dereference the
4651          * ptr argument. */
4652         abi_ulong atptr;
4653         get_user_ual(atptr, ptr);
4654         ret = do_semctl(first, second, third, atptr);
4655         break;
4656     }
4657 
4658     case IPCOP_msgget:
4659         ret = get_errno(msgget(first, second));
4660         break;
4661 
4662     case IPCOP_msgsnd:
4663         ret = do_msgsnd(first, ptr, second, third);
4664         break;
4665 
4666     case IPCOP_msgctl:
4667         ret = do_msgctl(first, second, ptr);
4668         break;
4669 
4670     case IPCOP_msgrcv:
4671         switch (version) {
4672         case 0:
4673             {
4674                 struct target_ipc_kludge {
4675                     abi_long msgp;
4676                     abi_long msgtyp;
4677                 } *tmp;
4678 
4679                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4680                     ret = -TARGET_EFAULT;
4681                     break;
4682                 }
4683 
4684                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4685 
4686                 unlock_user_struct(tmp, ptr, 0);
4687                 break;
4688             }
4689         default:
4690             ret = do_msgrcv(first, ptr, second, fifth, third);
4691         }
4692         break;
4693 
4694     case IPCOP_shmat:
4695         switch (version) {
4696         default:
4697         {
4698             abi_ulong raddr;
4699             raddr = do_shmat(cpu_env, first, ptr, second);
4700             if (is_error(raddr))
4701                 return get_errno(raddr);
4702             if (put_user_ual(raddr, third))
4703                 return -TARGET_EFAULT;
4704             break;
4705         }
4706         case 1:
4707             ret = -TARGET_EINVAL;
4708             break;
4709         }
4710         break;
4711     case IPCOP_shmdt:
4712         ret = do_shmdt(ptr);
4713         break;
4714 
4715     case IPCOP_shmget:
4716         /* IPC_* flag values are the same on all linux platforms */
4717         ret = get_errno(shmget(first, second, third));
4718         break;
4719 
4720     /* IPC_* and SHM_* command values are the same on all linux platforms */
4721     case IPCOP_shmctl:
4722         ret = do_shmctl(first, second, ptr);
4723         break;
4724     default:
4725         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4726                       call, version);
4727         ret = -TARGET_ENOSYS;
4728         break;
4729     }
4730     return ret;
4731 }
4732 #endif
4733 
4734 /* kernel structure types definitions */
4735 
4736 #define STRUCT(name, ...) STRUCT_ ## name,
4737 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4738 enum {
4739 #include "syscall_types.h"
4740 STRUCT_MAX
4741 };
4742 #undef STRUCT
4743 #undef STRUCT_SPECIAL
4744 
4745 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4746 #define STRUCT_SPECIAL(name)
4747 #include "syscall_types.h"
4748 #undef STRUCT
4749 #undef STRUCT_SPECIAL
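/*
 * syscall_types.h is included twice with different definitions of the
 * STRUCT()/STRUCT_SPECIAL() macros (an X-macro pattern): the first pass
 * above turns every entry into a STRUCT_<name> enumerator, and the second
 * pass emits a matching struct_<name>_def[] argtype array, terminated by
 * TYPE_NULL, for the thunk conversion code.  As a sketch, a hypothetical
 * entry such as
 *
 *     STRUCT(foo, TYPE_INT, TYPE_SHORT)
 *
 * would yield the enumerator STRUCT_foo and the array
 * struct_foo_def[] = { TYPE_INT, TYPE_SHORT, TYPE_NULL }.
 */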
4750 
4751 #define MAX_STRUCT_SIZE 4096
4752 
4753 #ifdef CONFIG_FIEMAP
4754 /* So fiemap access checks don't overflow on 32 bit systems.
4755  * This is very slightly smaller than the limit imposed by
4756  * the underlying kernel.
4757  */
4758 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4759                             / sizeof(struct fiemap_extent))
4760 
4761 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4762                                        int fd, int cmd, abi_long arg)
4763 {
4764     /* The parameter for this ioctl is a struct fiemap followed
4765      * by an array of struct fiemap_extent whose size is set
4766      * in fiemap->fm_extent_count. The array is filled in by the
4767      * ioctl.
4768      */
4769     int target_size_in, target_size_out;
4770     struct fiemap *fm;
4771     const argtype *arg_type = ie->arg_type;
4772     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4773     void *argptr, *p;
4774     abi_long ret;
4775     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4776     uint32_t outbufsz;
4777     int free_fm = 0;
4778 
4779     assert(arg_type[0] == TYPE_PTR);
4780     assert(ie->access == IOC_RW);
4781     arg_type++;
4782     target_size_in = thunk_type_size(arg_type, 0);
4783     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4784     if (!argptr) {
4785         return -TARGET_EFAULT;
4786     }
4787     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4788     unlock_user(argptr, arg, 0);
4789     fm = (struct fiemap *)buf_temp;
4790     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4791         return -TARGET_EINVAL;
4792     }
4793 
4794     outbufsz = sizeof (*fm) +
4795         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4796 
4797     if (outbufsz > MAX_STRUCT_SIZE) {
4798         /* We can't fit all the extents into the fixed size buffer.
4799          * Allocate one that is large enough and use it instead.
4800          */
4801         fm = g_try_malloc(outbufsz);
4802         if (!fm) {
4803             return -TARGET_ENOMEM;
4804         }
4805         memcpy(fm, buf_temp, sizeof(struct fiemap));
4806         free_fm = 1;
4807     }
4808     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4809     if (!is_error(ret)) {
4810         target_size_out = target_size_in;
4811         /* An extent_count of 0 means we were only counting the extents
4812          * so there are no structs to copy
4813          */
4814         if (fm->fm_extent_count != 0) {
4815             target_size_out += fm->fm_mapped_extents * extent_size;
4816         }
4817         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4818         if (!argptr) {
4819             ret = -TARGET_EFAULT;
4820         } else {
4821             /* Convert the struct fiemap */
4822             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4823             if (fm->fm_extent_count != 0) {
4824                 p = argptr + target_size_in;
4825                 /* ...and then all the struct fiemap_extents */
4826                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4827                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4828                                   THUNK_TARGET);
4829                     p += extent_size;
4830                 }
4831             }
4832             unlock_user(argptr, arg, target_size_out);
4833         }
4834     }
4835     if (free_fm) {
4836         g_free(fm);
4837     }
4838     return ret;
4839 }
4840 #endif
4841 
4842 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4843                                 int fd, int cmd, abi_long arg)
4844 {
4845     const argtype *arg_type = ie->arg_type;
4846     int target_size;
4847     void *argptr;
4848     int ret;
4849     struct ifconf *host_ifconf;
4850     uint32_t outbufsz;
4851     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4852     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4853     int target_ifreq_size;
4854     int nb_ifreq;
4855     int free_buf = 0;
4856     int i;
4857     int target_ifc_len;
4858     abi_long target_ifc_buf;
4859     int host_ifc_len;
4860     char *host_ifc_buf;
4861 
4862     assert(arg_type[0] == TYPE_PTR);
4863     assert(ie->access == IOC_RW);
4864 
4865     arg_type++;
4866     target_size = thunk_type_size(arg_type, 0);
4867 
4868     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4869     if (!argptr)
4870         return -TARGET_EFAULT;
4871     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4872     unlock_user(argptr, arg, 0);
4873 
4874     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4875     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4876     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4877 
4878     if (target_ifc_buf != 0) {
4879         target_ifc_len = host_ifconf->ifc_len;
4880         nb_ifreq = target_ifc_len / target_ifreq_size;
4881         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4882 
4883         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4884         if (outbufsz > MAX_STRUCT_SIZE) {
4885             /*
4886              * We can't fit all the ifreq entries into the fixed size buffer.
4887              * Allocate one that is large enough and use it instead.
4888              */
4889             host_ifconf = g_try_malloc(outbufsz);
4890             if (!host_ifconf) {
4891                 return -TARGET_ENOMEM;
4892             }
4893             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4894             free_buf = 1;
4895         }
4896         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4897 
4898         host_ifconf->ifc_len = host_ifc_len;
4899     } else {
4900         host_ifc_buf = NULL;
4901     }
4902     host_ifconf->ifc_buf = host_ifc_buf;
4903 
4904     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4905     if (!is_error(ret)) {
4906         /* convert host ifc_len to target ifc_len */
4907 
4908         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4909         target_ifc_len = nb_ifreq * target_ifreq_size;
4910         host_ifconf->ifc_len = target_ifc_len;
4911 
4912         /* restore target ifc_buf */
4913 
4914         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4915 
4916         /* copy struct ifconf to target user */
4917 
4918         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4919         if (!argptr)
4920             return -TARGET_EFAULT;
4921         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4922         unlock_user(argptr, arg, target_size);
4923 
4924         if (target_ifc_buf != 0) {
4925             /* copy ifreq[] to target user */
4926             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4927             for (i = 0; i < nb_ifreq ; i++) {
4928                 thunk_convert(argptr + i * target_ifreq_size,
4929                               host_ifc_buf + i * sizeof(struct ifreq),
4930                               ifreq_arg_type, THUNK_TARGET);
4931             }
4932             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4933         }
4934     }
4935 
4936     if (free_buf) {
4937         g_free(host_ifconf);
4938     }
4939 
4940     return ret;
4941 }
4942 
4943 #if defined(CONFIG_USBFS)
4944 #if HOST_LONG_BITS > 64
4945 #error USBDEVFS thunks do not support >64 bit hosts yet.
4946 #endif
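/*
 * Asynchronous USBDEVFS URBs: each URB submitted by the guest is wrapped in
 * a live_urb that records the guest urb address, the guest buffer address,
 * the locked host view of that buffer, and the converted host usbdevfs_urb.
 * The wrappers live in a hash table keyed by the guest urb address, so that
 * USBDEVFS_DISCARDURB and USBDEVFS_REAPURB can translate between guest urb
 * pointers and their host counterparts.
 */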
4947 struct live_urb {
4948     uint64_t target_urb_adr;
4949     uint64_t target_buf_adr;
4950     char *target_buf_ptr;
4951     struct usbdevfs_urb host_urb;
4952 };
4953 
4954 static GHashTable *usbdevfs_urb_hashtable(void)
4955 {
4956     static GHashTable *urb_hashtable;
4957 
4958     if (!urb_hashtable) {
4959         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4960     }
4961     return urb_hashtable;
4962 }
4963 
4964 static void urb_hashtable_insert(struct live_urb *urb)
4965 {
4966     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4967     g_hash_table_insert(urb_hashtable, urb, urb);
4968 }
4969 
4970 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4971 {
4972     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4973     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4974 }
4975 
4976 static void urb_hashtable_remove(struct live_urb *urb)
4977 {
4978     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4979     g_hash_table_remove(urb_hashtable, urb);
4980 }
4981 
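/*
 * Reaping returns the host usbdevfs_urb pointer that was originally
 * submitted.  Since that urb is embedded inside a live_urb, subtracting
 * offsetof(struct live_urb, host_urb) -- a container_of-style calculation --
 * recovers the wrapper, and with it the guest addresses needed to unlock the
 * data buffer and write the completed urb back to the guest.
 */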
4982 static abi_long
4983 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4984                           int fd, int cmd, abi_long arg)
4985 {
4986     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4987     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4988     struct live_urb *lurb;
4989     void *argptr;
4990     uint64_t hurb;
4991     int target_size;
4992     uintptr_t target_urb_adr;
4993     abi_long ret;
4994 
4995     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4996 
4997     memset(buf_temp, 0, sizeof(uint64_t));
4998     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4999     if (is_error(ret)) {
5000         return ret;
5001     }
5002 
5003     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5004     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5005     if (!lurb->target_urb_adr) {
5006         return -TARGET_EFAULT;
5007     }
5008     urb_hashtable_remove(lurb);
5009     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5010         lurb->host_urb.buffer_length);
5011     lurb->target_buf_ptr = NULL;
5012 
5013     /* restore the guest buffer pointer */
5014     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5015 
5016     /* update the guest urb struct */
5017     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5018     if (!argptr) {
5019         g_free(lurb);
5020         return -TARGET_EFAULT;
5021     }
5022     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5023     unlock_user(argptr, lurb->target_urb_adr, target_size);
5024 
5025     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5026     /* write back the urb handle */
5027     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5028     if (!argptr) {
5029         g_free(lurb);
5030         return -TARGET_EFAULT;
5031     }
5032 
5033     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5034     target_urb_adr = lurb->target_urb_adr;
5035     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5036     unlock_user(argptr, arg, target_size);
5037 
5038     g_free(lurb);
5039     return ret;
5040 }
5041 
5042 static abi_long
5043 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5044                              uint8_t *buf_temp __attribute__((unused)),
5045                              int fd, int cmd, abi_long arg)
5046 {
5047     struct live_urb *lurb;
5048 
5049     /* map target address back to host URB with metadata. */
5050     lurb = urb_hashtable_lookup(arg);
5051     if (!lurb) {
5052         return -TARGET_EFAULT;
5053     }
5054     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5055 }
5056 
5057 static abi_long
5058 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5059                             int fd, int cmd, abi_long arg)
5060 {
5061     const argtype *arg_type = ie->arg_type;
5062     int target_size;
5063     abi_long ret;
5064     void *argptr;
5065     int rw_dir;
5066     struct live_urb *lurb;
5067 
5068     /*
5069      * each submitted URB needs to map to a unique ID for the
5070      * kernel, and that unique ID needs to be a pointer to
5071      * host memory.  hence, we need to malloc for each URB.
5072      * isochronous transfers have a variable length struct.
5073      */
5074     arg_type++;
5075     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5076 
5077     /* construct host copy of urb and metadata */
5078     lurb = g_try_new0(struct live_urb, 1);
5079     if (!lurb) {
5080         return -TARGET_ENOMEM;
5081     }
5082 
5083     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5084     if (!argptr) {
5085         g_free(lurb);
5086         return -TARGET_EFAULT;
5087     }
5088     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5089     unlock_user(argptr, arg, 0);
5090 
5091     lurb->target_urb_adr = arg;
5092     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5093 
5094     /* buffer space used depends on endpoint type so lock the entire buffer */
5095     /* control type urbs should check the buffer contents for true direction */
5096     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5097     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5098         lurb->host_urb.buffer_length, 1);
5099     if (lurb->target_buf_ptr == NULL) {
5100         g_free(lurb);
5101         return -TARGET_EFAULT;
5102     }
5103 
5104     /* update buffer pointer in host copy */
5105     lurb->host_urb.buffer = lurb->target_buf_ptr;
5106 
5107     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5108     if (is_error(ret)) {
5109         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5110         g_free(lurb);
5111     } else {
5112         urb_hashtable_insert(lurb);
5113     }
5114 
5115     return ret;
5116 }
5117 #endif /* CONFIG_USBFS */
5118 
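/*
 * Device-mapper ioctls use a struct dm_ioctl header followed by a variable
 * amount of command-specific payload: data_start is the offset of the
 * payload from the start of the header and data_size is the total size of
 * header plus payload.  The fixed-size buf_temp is therefore too small in
 * general, so the request is staged in a temporary allocation sized from
 * data_size, converted per command on the way in, and converted back into
 * the guest's buffer on the way out.
 */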
5119 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5120                             int cmd, abi_long arg)
5121 {
5122     void *argptr;
5123     struct dm_ioctl *host_dm;
5124     abi_long guest_data;
5125     uint32_t guest_data_size;
5126     int target_size;
5127     const argtype *arg_type = ie->arg_type;
5128     abi_long ret;
5129     void *big_buf = NULL;
5130     char *host_data;
5131 
5132     arg_type++;
5133     target_size = thunk_type_size(arg_type, 0);
5134     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5135     if (!argptr) {
5136         ret = -TARGET_EFAULT;
5137         goto out;
5138     }
5139     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5140     unlock_user(argptr, arg, 0);
5141 
5142     /* buf_temp is too small, so fetch things into a bigger buffer */
5143     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5144     memcpy(big_buf, buf_temp, target_size);
5145     buf_temp = big_buf;
5146     host_dm = big_buf;
5147 
5148     guest_data = arg + host_dm->data_start;
5149     if ((guest_data - arg) < 0) {
5150         ret = -TARGET_EINVAL;
5151         goto out;
5152     }
5153     guest_data_size = host_dm->data_size - host_dm->data_start;
5154     host_data = (char*)host_dm + host_dm->data_start;
5155 
5156     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5157     if (!argptr) {
5158         ret = -TARGET_EFAULT;
5159         goto out;
5160     }
5161 
5162     switch (ie->host_cmd) {
5163     case DM_REMOVE_ALL:
5164     case DM_LIST_DEVICES:
5165     case DM_DEV_CREATE:
5166     case DM_DEV_REMOVE:
5167     case DM_DEV_SUSPEND:
5168     case DM_DEV_STATUS:
5169     case DM_DEV_WAIT:
5170     case DM_TABLE_STATUS:
5171     case DM_TABLE_CLEAR:
5172     case DM_TABLE_DEPS:
5173     case DM_LIST_VERSIONS:
5174         /* no input data */
5175         break;
5176     case DM_DEV_RENAME:
5177     case DM_DEV_SET_GEOMETRY:
5178         /* data contains only strings */
5179         memcpy(host_data, argptr, guest_data_size);
5180         break;
5181     case DM_TARGET_MSG:
5182         memcpy(host_data, argptr, guest_data_size);
5183         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5184         break;
5185     case DM_TABLE_LOAD:
5186     {
5187         void *gspec = argptr;
5188         void *cur_data = host_data;
5189         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5190         int spec_size = thunk_type_size(arg_type, 0);
5191         int i;
5192 
5193         for (i = 0; i < host_dm->target_count; i++) {
5194             struct dm_target_spec *spec = cur_data;
5195             uint32_t next;
5196             int slen;
5197 
5198             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5199             slen = strlen((char*)gspec + spec_size) + 1;
5200             next = spec->next;
5201             spec->next = sizeof(*spec) + slen;
5202             strcpy((char*)&spec[1], gspec + spec_size);
5203             gspec += next;
5204             cur_data += spec->next;
5205         }
5206         break;
5207     }
5208     default:
5209         ret = -TARGET_EINVAL;
5210         unlock_user(argptr, guest_data, 0);
5211         goto out;
5212     }
5213     unlock_user(argptr, guest_data, 0);
5214 
5215     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5216     if (!is_error(ret)) {
5217         guest_data = arg + host_dm->data_start;
5218         guest_data_size = host_dm->data_size - host_dm->data_start;
5219         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5220         switch (ie->host_cmd) {
5221         case DM_REMOVE_ALL:
5222         case DM_DEV_CREATE:
5223         case DM_DEV_REMOVE:
5224         case DM_DEV_RENAME:
5225         case DM_DEV_SUSPEND:
5226         case DM_DEV_STATUS:
5227         case DM_TABLE_LOAD:
5228         case DM_TABLE_CLEAR:
5229         case DM_TARGET_MSG:
5230         case DM_DEV_SET_GEOMETRY:
5231             /* no return data */
5232             break;
5233         case DM_LIST_DEVICES:
5234         {
5235             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5236             uint32_t remaining_data = guest_data_size;
5237             void *cur_data = argptr;
5238             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5239             int nl_size = 12; /* can't use thunk_size due to alignment */
5240             int nl_size = 12; /* can't use thunk_type_size() due to alignment */
5241             while (1) {
5242                 uint32_t next = nl->next;
5243                 if (next) {
5244                     nl->next = nl_size + (strlen(nl->name) + 1);
5245                 }
5246                 if (remaining_data < nl->next) {
5247                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5248                     break;
5249                 }
5250                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5251                 strcpy(cur_data + nl_size, nl->name);
5252                 cur_data += nl->next;
5253                 remaining_data -= nl->next;
5254                 if (!next) {
5255                     break;
5256                 }
5257                 nl = (void*)nl + next;
5258             }
5259             break;
5260         }
5261         case DM_DEV_WAIT:
5262         case DM_TABLE_STATUS:
5263         {
5264             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5265             void *cur_data = argptr;
5266             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5267             int spec_size = thunk_type_size(arg_type, 0);
5268             int i;
5269 
5270             for (i = 0; i < host_dm->target_count; i++) {
5271                 uint32_t next = spec->next;
5272                 int slen = strlen((char*)&spec[1]) + 1;
5273                 spec->next = (cur_data - argptr) + spec_size + slen;
5274                 if (guest_data_size < spec->next) {
5275                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5276                     break;
5277                 }
5278                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5279                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5280                 cur_data = argptr + spec->next;
5281                 spec = (void*)host_dm + host_dm->data_start + next;
5282             }
5283             break;
5284         }
5285         case DM_TABLE_DEPS:
5286         {
5287             void *hdata = (void*)host_dm + host_dm->data_start;
5288             int count = *(uint32_t*)hdata;
5289             uint64_t *hdev = hdata + 8;
5290             uint64_t *gdev = argptr + 8;
5291             int i;
5292 
5293             *(uint32_t*)argptr = tswap32(count);
5294             for (i = 0; i < count; i++) {
5295                 *gdev = tswap64(*hdev);
5296                 gdev++;
5297                 hdev++;
5298             }
5299             break;
5300         }
5301         case DM_LIST_VERSIONS:
5302         {
5303             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5304             uint32_t remaining_data = guest_data_size;
5305             void *cur_data = argptr;
5306             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5307             int vers_size = thunk_type_size(arg_type, 0);
5308 
5309             while (1) {
5310                 uint32_t next = vers->next;
5311                 if (next) {
5312                     vers->next = vers_size + (strlen(vers->name) + 1);
5313                 }
5314                 if (remaining_data < vers->next) {
5315                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5316                     break;
5317                 }
5318                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5319                 strcpy(cur_data + vers_size, vers->name);
5320                 cur_data += vers->next;
5321                 remaining_data -= vers->next;
5322                 if (!next) {
5323                     break;
5324                 }
5325                 vers = (void*)vers + next;
5326             }
5327             break;
5328         }
5329         default:
5330             unlock_user(argptr, guest_data, 0);
5331             ret = -TARGET_EINVAL;
5332             goto out;
5333         }
5334         unlock_user(argptr, guest_data, guest_data_size);
5335 
5336         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5337         if (!argptr) {
5338             ret = -TARGET_EFAULT;
5339             goto out;
5340         }
5341         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5342         unlock_user(argptr, arg, target_size);
5343     }
5344 out:
5345     g_free(big_buf);
5346     return ret;
5347 }
5348 
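/*
 * BLKPG is a two-level ioctl: the guest passes a struct blkpg_ioctl_arg
 * whose 'data' member points at a struct blkpg_partition.  Both levels are
 * converted separately, and the host copy's data pointer is then redirected
 * to the converted partition before the host ioctl is issued.
 */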
5349 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5350                                int cmd, abi_long arg)
5351 {
5352     void *argptr;
5353     int target_size;
5354     const argtype *arg_type = ie->arg_type;
5355     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5356     abi_long ret;
5357 
5358     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5359     struct blkpg_partition host_part;
5360 
5361     /* Read and convert blkpg */
5362     arg_type++;
5363     target_size = thunk_type_size(arg_type, 0);
5364     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5365     if (!argptr) {
5366         ret = -TARGET_EFAULT;
5367         goto out;
5368     }
5369     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5370     unlock_user(argptr, arg, 0);
5371 
5372     switch (host_blkpg->op) {
5373     case BLKPG_ADD_PARTITION:
5374     case BLKPG_DEL_PARTITION:
5375         /* payload is struct blkpg_partition */
5376         break;
5377     default:
5378         /* Unknown opcode */
5379         ret = -TARGET_EINVAL;
5380         goto out;
5381     }
5382 
5383     /* Read and convert blkpg->data */
5384     arg = (abi_long)(uintptr_t)host_blkpg->data;
5385     target_size = thunk_type_size(part_arg_type, 0);
5386     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5387     if (!argptr) {
5388         ret = -TARGET_EFAULT;
5389         goto out;
5390     }
5391     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5392     unlock_user(argptr, arg, 0);
5393 
5394     /* Swizzle the data pointer to our local copy and call! */
5395     host_blkpg->data = &host_part;
5396     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5397 
5398 out:
5399     return ret;
5400 }
5401 
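/*
 * The struct rtentry converted here embeds a rt_dev string pointer, which
 * the generic field-by-field thunk cannot follow.  The struct is therefore
 * converted one field at a time; when the rt_dev field is reached, the
 * guest string it points to is locked into host memory and that host
 * pointer is substituted before the ioctl, then unlocked again afterwards.
 */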
5402 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5403                                 int fd, int cmd, abi_long arg)
5404 {
5405     const argtype *arg_type = ie->arg_type;
5406     const StructEntry *se;
5407     const argtype *field_types;
5408     const int *dst_offsets, *src_offsets;
5409     int target_size;
5410     void *argptr;
5411     abi_ulong *target_rt_dev_ptr = NULL;
5412     unsigned long *host_rt_dev_ptr = NULL;
5413     abi_long ret;
5414     int i;
5415 
5416     assert(ie->access == IOC_W);
5417     assert(*arg_type == TYPE_PTR);
5418     arg_type++;
5419     assert(*arg_type == TYPE_STRUCT);
5420     target_size = thunk_type_size(arg_type, 0);
5421     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5422     if (!argptr) {
5423         return -TARGET_EFAULT;
5424     }
5425     arg_type++;
5426     assert(*arg_type == (int)STRUCT_rtentry);
5427     se = struct_entries + *arg_type++;
5428     assert(se->convert[0] == NULL);
5429     /* convert struct here to be able to catch rt_dev string */
5430     field_types = se->field_types;
5431     dst_offsets = se->field_offsets[THUNK_HOST];
5432     src_offsets = se->field_offsets[THUNK_TARGET];
5433     for (i = 0; i < se->nb_fields; i++) {
5434         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5435             assert(*field_types == TYPE_PTRVOID);
5436             target_rt_dev_ptr = argptr + src_offsets[i];
5437             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5438             if (*target_rt_dev_ptr != 0) {
5439                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5440                                                   tswapal(*target_rt_dev_ptr));
5441                 if (!*host_rt_dev_ptr) {
5442                     unlock_user(argptr, arg, 0);
5443                     return -TARGET_EFAULT;
5444                 }
5445             } else {
5446                 *host_rt_dev_ptr = 0;
5447             }
5448             field_types++;
5449             continue;
5450         }
5451         field_types = thunk_convert(buf_temp + dst_offsets[i],
5452                                     argptr + src_offsets[i],
5453                                     field_types, THUNK_HOST);
5454     }
5455     unlock_user(argptr, arg, 0);
5456 
5457     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5458 
5459     assert(host_rt_dev_ptr != NULL);
5460     assert(target_rt_dev_ptr != NULL);
5461     if (*host_rt_dev_ptr != 0) {
5462         unlock_user((void *)*host_rt_dev_ptr,
5463                     *target_rt_dev_ptr, 0);
5464     }
5465     return ret;
5466 }
5467 
5468 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5469                                      int fd, int cmd, abi_long arg)
5470 {
5471     int sig = target_to_host_signal(arg);
5472     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5473 }
5474 
5475 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5476                                     int fd, int cmd, abi_long arg)
5477 {
5478     struct timeval tv;
5479     abi_long ret;
5480 
5481     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5482     if (is_error(ret)) {
5483         return ret;
5484     }
5485 
5486     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5487         if (copy_to_user_timeval(arg, &tv)) {
5488             return -TARGET_EFAULT;
5489         }
5490     } else {
5491         if (copy_to_user_timeval64(arg, &tv)) {
5492             return -TARGET_EFAULT;
5493         }
5494     }
5495 
5496     return ret;
5497 }
5498 
5499 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5500                                       int fd, int cmd, abi_long arg)
5501 {
5502     struct timespec ts;
5503     abi_long ret;
5504 
5505     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5506     if (is_error(ret)) {
5507         return ret;
5508     }
5509 
5510     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5511         if (host_to_target_timespec(arg, &ts)) {
5512             return -TARGET_EFAULT;
5513         }
5514     } else {
5515         if (host_to_target_timespec64(arg, &ts)) {
5516             return -TARGET_EFAULT;
5517         }
5518     }
5519 
5520     return ret;
5521 }
5522 
5523 #ifdef TIOCGPTPEER
5524 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5525                                      int fd, int cmd, abi_long arg)
5526 {
5527     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5528     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5529 }
5530 #endif
5531 
5532 #ifdef HAVE_DRM_H
5533 
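/*
 * DRM_IOCTL_VERSION: struct drm_version carries three (length, pointer)
 * pairs for the driver name, date and description strings.  The helpers
 * below lock the guest-supplied buffers so the host kernel can fill them in
 * directly, then copy the lengths back to the guest and unlock the buffers.
 */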
5534 static void unlock_drm_version(struct drm_version *host_ver,
5535                                struct target_drm_version *target_ver,
5536                                bool copy)
5537 {
5538     unlock_user(host_ver->name, target_ver->name,
5539                                 copy ? host_ver->name_len : 0);
5540     unlock_user(host_ver->date, target_ver->date,
5541                                 copy ? host_ver->date_len : 0);
5542     unlock_user(host_ver->desc, target_ver->desc,
5543                                 copy ? host_ver->desc_len : 0);
5544 }
5545 
5546 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5547                                           struct target_drm_version *target_ver)
5548 {
5549     memset(host_ver, 0, sizeof(*host_ver));
5550 
5551     __get_user(host_ver->name_len, &target_ver->name_len);
5552     if (host_ver->name_len) {
5553         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5554                                    target_ver->name_len, 0);
5555         if (!host_ver->name) {
5556             return -EFAULT;
5557         }
5558     }
5559 
5560     __get_user(host_ver->date_len, &target_ver->date_len);
5561     if (host_ver->date_len) {
5562         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5563                                    target_ver->date_len, 0);
5564         if (!host_ver->date) {
5565             goto err;
5566         }
5567     }
5568 
5569     __get_user(host_ver->desc_len, &target_ver->desc_len);
5570     if (host_ver->desc_len) {
5571         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5572                                    target_ver->desc_len, 0);
5573         if (!host_ver->desc) {
5574             goto err;
5575         }
5576     }
5577 
5578     return 0;
5579 err:
5580     unlock_drm_version(host_ver, target_ver, false);
5581     return -EFAULT;
5582 }
5583 
5584 static inline void host_to_target_drmversion(
5585                                           struct target_drm_version *target_ver,
5586                                           struct drm_version *host_ver)
5587 {
5588     __put_user(host_ver->version_major, &target_ver->version_major);
5589     __put_user(host_ver->version_minor, &target_ver->version_minor);
5590     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5591     __put_user(host_ver->name_len, &target_ver->name_len);
5592     __put_user(host_ver->date_len, &target_ver->date_len);
5593     __put_user(host_ver->desc_len, &target_ver->desc_len);
5594     unlock_drm_version(host_ver, target_ver, true);
5595 }
5596 
5597 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5598                              int fd, int cmd, abi_long arg)
5599 {
5600     struct drm_version *ver;
5601     struct target_drm_version *target_ver;
5602     abi_long ret;
5603 
5604     switch (ie->host_cmd) {
5605     case DRM_IOCTL_VERSION:
5606         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5607             return -TARGET_EFAULT;
5608         }
5609         ver = (struct drm_version *)buf_temp;
5610         ret = target_to_host_drmversion(ver, target_ver);
5611         if (!is_error(ret)) {
5612             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5613             if (is_error(ret)) {
5614                 unlock_drm_version(ver, target_ver, false);
5615             } else {
5616                 host_to_target_drmversion(target_ver, ver);
5617             }
5618         }
5619         unlock_user_struct(target_ver, arg, 0);
5620         return ret;
5621     }
5622     return -TARGET_ENOSYS;
5623 }
5624 
5625 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5626                                            struct drm_i915_getparam *gparam,
5627                                            int fd, abi_long arg)
5628 {
5629     abi_long ret;
5630     int value;
5631     struct target_drm_i915_getparam *target_gparam;
5632 
5633     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5634         return -TARGET_EFAULT;
5635     }
5636 
5637     __get_user(gparam->param, &target_gparam->param);
5638     gparam->value = &value;
5639     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5640     put_user_s32(value, target_gparam->value);
5641 
5642     unlock_user_struct(target_gparam, arg, 0);
5643     return ret;
5644 }
5645 
5646 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5647                                   int fd, int cmd, abi_long arg)
5648 {
5649     switch (ie->host_cmd) {
5650     case DRM_IOCTL_I915_GETPARAM:
5651         return do_ioctl_drm_i915_getparam(ie,
5652                                           (struct drm_i915_getparam *)buf_temp,
5653                                           fd, arg);
5654     default:
5655         return -TARGET_ENOSYS;
5656     }
5657 }
5658 
5659 #endif
5660 
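/*
 * TUNSETTXFILTER passes a variable-length struct tun_filter: a fixed header
 * (flags, count) followed by 'count' ETH_ALEN-byte MAC addresses.  The
 * header and the address array are copied in separately, with the total
 * size checked against MAX_STRUCT_SIZE.
 */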
5661 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5662                                         int fd, int cmd, abi_long arg)
5663 {
5664     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5665     struct tun_filter *target_filter;
5666     char *target_addr;
5667 
5668     assert(ie->access == IOC_W);
5669 
5670     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5671     if (!target_filter) {
5672         return -TARGET_EFAULT;
5673     }
5674     filter->flags = tswap16(target_filter->flags);
5675     filter->count = tswap16(target_filter->count);
5676     unlock_user(target_filter, arg, 0);
5677 
5678     if (filter->count) {
5679         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5680             MAX_STRUCT_SIZE) {
5681             return -TARGET_EFAULT;
5682         }
5683 
5684         target_addr = lock_user(VERIFY_READ,
5685                                 arg + offsetof(struct tun_filter, addr),
5686                                 filter->count * ETH_ALEN, 1);
5687         if (!target_addr) {
5688             return -TARGET_EFAULT;
5689         }
5690         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5691         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5692     }
5693 
5694     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5695 }
5696 
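/*
 * The ioctl dispatch table is generated by including ioctls.h with the
 * three macros below: IOCTL() creates an entry handled generically from its
 * argtype description, IOCTL_SPECIAL() additionally names one of the
 * do_ioctl_*() helpers above as a custom handler, and IOCTL_IGNORE()
 * creates an entry with no host command, so the request fails with ENOTTY
 * without being logged as an unimplemented ioctl.  The table is terminated
 * by an all-zero entry.
 */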
5697 IOCTLEntry ioctl_entries[] = {
5698 #define IOCTL(cmd, access, ...) \
5699     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5700 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5701     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5702 #define IOCTL_IGNORE(cmd) \
5703     { TARGET_ ## cmd, 0, #cmd },
5704 #include "ioctls.h"
5705     { 0, 0, },
5706 };
5707 
5708 /* ??? Implement proper locking for ioctls.  */
5709 /* do_ioctl() must return target values and target errnos. */
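/*
 * Generic flow for table-driven ioctls: a matching IOCTLEntry either
 * provides a custom do_ioctl handler, or its argument is handled below
 * according to the argtype description -- scalar arguments are passed
 * through unchanged, while TYPE_PTR arguments are converted guest-to-host
 * into buf_temp before the host ioctl (for IOC_W and IOC_RW) and converted
 * back host-to-guest afterwards (for IOC_R and IOC_RW).
 */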
5710 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5711 {
5712     const IOCTLEntry *ie;
5713     const argtype *arg_type;
5714     abi_long ret;
5715     uint8_t buf_temp[MAX_STRUCT_SIZE];
5716     int target_size;
5717     void *argptr;
5718 
5719     ie = ioctl_entries;
5720     for(;;) {
5721         if (ie->target_cmd == 0) {
5722             qemu_log_mask(
5723                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5724             return -TARGET_ENOTTY;
5725         }
5726         if (ie->target_cmd == cmd)
5727             break;
5728         ie++;
5729     }
5730     arg_type = ie->arg_type;
5731     if (ie->do_ioctl) {
5732         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5733     } else if (!ie->host_cmd) {
5734         /* Some architectures define BSD ioctls in their headers
5735            that are not implemented in Linux.  */
5736         return -TARGET_ENOTTY;
5737     }
5738 
5739     switch(arg_type[0]) {
5740     case TYPE_NULL:
5741         /* no argument */
5742         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5743         break;
5744     case TYPE_PTRVOID:
5745     case TYPE_INT:
5746     case TYPE_LONG:
5747     case TYPE_ULONG:
5748         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5749         break;
5750     case TYPE_PTR:
5751         arg_type++;
5752         target_size = thunk_type_size(arg_type, 0);
5753         switch(ie->access) {
5754         case IOC_R:
5755             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5756             if (!is_error(ret)) {
5757                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5758                 if (!argptr)
5759                     return -TARGET_EFAULT;
5760                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5761                 unlock_user(argptr, arg, target_size);
5762             }
5763             break;
5764         case IOC_W:
5765             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5766             if (!argptr)
5767                 return -TARGET_EFAULT;
5768             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5769             unlock_user(argptr, arg, 0);
5770             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5771             break;
5772         default:
5773         case IOC_RW:
5774             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5775             if (!argptr)
5776                 return -TARGET_EFAULT;
5777             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5778             unlock_user(argptr, arg, 0);
5779             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5780             if (!is_error(ret)) {
5781                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5782                 if (!argptr)
5783                     return -TARGET_EFAULT;
5784                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5785                 unlock_user(argptr, arg, target_size);
5786             }
5787             break;
5788         }
5789         break;
5790     default:
5791         qemu_log_mask(LOG_UNIMP,
5792                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5793                       (long)cmd, arg_type[0]);
5794         ret = -TARGET_ENOTTY;
5795         break;
5796     }
5797     return ret;
5798 }
5799 
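/*
 * Terminal flag translation tables.  Each bitmask_transtbl entry is a
 * { target_mask, target_bits, host_mask, host_bits } tuple: when the bits
 * selected by target_mask match target_bits, the corresponding host_bits
 * are set in the translated value (and symmetrically for the host-to-target
 * direction).  Multi-bit fields such as CBAUD or CSIZE therefore get one
 * entry per possible value, while simple on/off flags repeat the same
 * constant in both positions.
 */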
5800 static const bitmask_transtbl iflag_tbl[] = {
5801         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5802         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5803         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5804         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5805         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5806         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5807         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5808         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5809         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5810         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5811         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5812         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5813         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5814         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5815         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
5816         { 0, 0, 0, 0 }
5817 };
5818 
5819 static const bitmask_transtbl oflag_tbl[] = {
5820         { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5821         { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5822         { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5823         { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5824         { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5825         { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5826         { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5827         { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5828         { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5829         { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5830         { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5831         { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5832         { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5833         { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5834         { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5835         { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5836         { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5837         { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5838         { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5839         { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5840         { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5841         { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5842         { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5843         { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5844         { 0, 0, 0, 0 }
5845 };
5846 
5847 static const bitmask_transtbl cflag_tbl[] = {
5848         { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5849         { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5850         { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5851         { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5852         { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5853         { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5854         { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5855         { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5856         { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5857         { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5858         { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5859         { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5860         { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5861         { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5862         { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5863         { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5864         { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5865         { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5866         { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5867         { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5868         { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5869         { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5870         { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5871         { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5872         { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5873         { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5874         { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5875         { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5876         { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5877         { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5878         { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5879         { 0, 0, 0, 0 }
5880 };
5881 
5882 static const bitmask_transtbl lflag_tbl[] = {
5883         { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5884         { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5885         { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5886         { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5887         { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5888         { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5889         { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5890         { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5891         { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5892         { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5893         { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5894         { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5895         { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5896         { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5897         { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5898         { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
5899         { 0, 0, 0, 0 }
5900 };
5901 
5902 static void target_to_host_termios (void *dst, const void *src)
5903 {
5904     struct host_termios *host = dst;
5905     const struct target_termios *target = src;
5906 
5907     host->c_iflag =
5908         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5909     host->c_oflag =
5910         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5911     host->c_cflag =
5912         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5913     host->c_lflag =
5914         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5915     host->c_line = target->c_line;
5916 
5917     memset(host->c_cc, 0, sizeof(host->c_cc));
5918     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5919     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5920     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5921     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5922     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5923     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5924     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5925     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5926     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5927     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5928     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5929     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5930     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5931     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5932     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5933     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5934     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5935 }
5936 
5937 static void host_to_target_termios (void *dst, const void *src)
5938 {
5939     struct target_termios *target = dst;
5940     const struct host_termios *host = src;
5941 
5942     target->c_iflag =
5943         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5944     target->c_oflag =
5945         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5946     target->c_cflag =
5947         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5948     target->c_lflag =
5949         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5950     target->c_line = host->c_line;
5951 
5952     memset(target->c_cc, 0, sizeof(target->c_cc));
5953     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5954     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5955     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5956     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5957     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5958     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5959     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5960     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5961     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5962     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5963     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5964     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5965     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5966     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5967     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5968     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5969     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5970 }
5971 
5972 static const StructEntry struct_termios_def = {
5973     .convert = { host_to_target_termios, target_to_host_termios },
5974     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5975     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5976     .print = print_termios,
5977 };
5978 
5979 /* If the host does not provide these bits, they may be safely discarded. */
5980 #ifndef MAP_SYNC
5981 #define MAP_SYNC 0
5982 #endif
5983 #ifndef MAP_UNINITIALIZED
5984 #define MAP_UNINITIALIZED 0
5985 #endif
5986 
5987 static const bitmask_transtbl mmap_flags_tbl[] = {
5988     { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
5989     { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
5990     { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
5991       MAP_TYPE, MAP_SHARED_VALIDATE },
5992     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5993     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5994       MAP_ANONYMOUS, MAP_ANONYMOUS },
5995     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5996       MAP_GROWSDOWN, MAP_GROWSDOWN },
5997     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5998       MAP_DENYWRITE, MAP_DENYWRITE },
5999     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6000       MAP_EXECUTABLE, MAP_EXECUTABLE },
6001     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6002     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6003       MAP_NORESERVE, MAP_NORESERVE },
6004     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6005     /* MAP_STACK has been ignored by the kernel for quite some time.
6006        Recognize it for the target insofar as we do not want to pass
6007        it through to the host.  */
6008     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6009     { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
6010     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
6011     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
6012     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
6013       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
6014     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
6015       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
6016     { 0, 0, 0, 0 }
6017 };
6018 
6019 /*
6020  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6021  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6022  */
6023 #if defined(TARGET_I386)
6024 
6025 /* NOTE: there is really one LDT for all the threads */
6026 static uint8_t *ldt_table;
6027 
6028 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6029 {
6030     int size;
6031     void *p;
6032 
6033     if (!ldt_table)
6034         return 0;
6035     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6036     if (size > bytecount)
6037         size = bytecount;
6038     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6039     if (!p)
6040         return -TARGET_EFAULT;
6041     /* ??? Should this be byteswapped?  */
6042     memcpy(p, ldt_table, size);
6043     unlock_user(p, ptr, size);
6044     return size;
6045 }
6046 
6047 /* XXX: add locking support */
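/*
 * write_ldt() mirrors the kernel's modify_ldt() handling: the descriptor's
 * flags word is unpacked (bit 0 seg_32bit, bits 1-2 contents, bit 3
 * read_exec_only, bit 4 limit_in_pages, bit 5 seg_not_present, bit 6
 * useable, bit 7 lm on 64-bit ABIs), and entry_1/entry_2 are assembled as
 * the low and high 32-bit words of the resulting x86 segment descriptor:
 * entry_1 holds base[15:0] and limit[15:0], entry_2 holds the remaining
 * base and limit bits together with the access and flag bits, before the
 * pair is written into the guest's LDT slot.
 */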
6048 static abi_long write_ldt(CPUX86State *env,
6049                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6050 {
6051     struct target_modify_ldt_ldt_s ldt_info;
6052     struct target_modify_ldt_ldt_s *target_ldt_info;
6053     int seg_32bit, contents, read_exec_only, limit_in_pages;
6054     int seg_not_present, useable, lm;
6055     uint32_t *lp, entry_1, entry_2;
6056 
6057     if (bytecount != sizeof(ldt_info))
6058         return -TARGET_EINVAL;
6059     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6060         return -TARGET_EFAULT;
6061     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6062     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6063     ldt_info.limit = tswap32(target_ldt_info->limit);
6064     ldt_info.flags = tswap32(target_ldt_info->flags);
6065     unlock_user_struct(target_ldt_info, ptr, 0);
6066 
6067     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6068         return -TARGET_EINVAL;
6069     seg_32bit = ldt_info.flags & 1;
6070     contents = (ldt_info.flags >> 1) & 3;
6071     read_exec_only = (ldt_info.flags >> 3) & 1;
6072     limit_in_pages = (ldt_info.flags >> 4) & 1;
6073     seg_not_present = (ldt_info.flags >> 5) & 1;
6074     useable = (ldt_info.flags >> 6) & 1;
6075 #ifdef TARGET_ABI32
6076     lm = 0;
6077 #else
6078     lm = (ldt_info.flags >> 7) & 1;
6079 #endif
6080     if (contents == 3) {
6081         if (oldmode)
6082             return -TARGET_EINVAL;
6083         if (seg_not_present == 0)
6084             return -TARGET_EINVAL;
6085     }
6086     /* allocate the LDT */
6087     if (!ldt_table) {
6088         env->ldt.base = target_mmap(0,
6089                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6090                                     PROT_READ|PROT_WRITE,
6091                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6092         if (env->ldt.base == -1)
6093             return -TARGET_ENOMEM;
6094         memset(g2h_untagged(env->ldt.base), 0,
6095                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6096         env->ldt.limit = 0xffff;
6097         ldt_table = g2h_untagged(env->ldt.base);
6098     }
6099 
6100     /* NOTE: same code as Linux kernel */
6101     /* Allow LDTs to be cleared by the user. */
6102     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6103         if (oldmode ||
6104             (contents == 0             &&
6105              read_exec_only == 1       &&
6106              seg_32bit == 0            &&
6107              limit_in_pages == 0       &&
6108              seg_not_present == 1      &&
6109              useable == 0 )) {
6110             entry_1 = 0;
6111             entry_2 = 0;
6112             goto install;
6113         }
6114     }
6115 
6116     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6117         (ldt_info.limit & 0x0ffff);
6118     entry_2 = (ldt_info.base_addr & 0xff000000) |
6119         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6120         (ldt_info.limit & 0xf0000) |
6121         ((read_exec_only ^ 1) << 9) |
6122         (contents << 10) |
6123         ((seg_not_present ^ 1) << 15) |
6124         (seg_32bit << 22) |
6125         (limit_in_pages << 23) |
6126         (lm << 21) |
6127         0x7000;
6128     if (!oldmode)
6129         entry_2 |= (useable << 20);
6130 
6131     /* Install the new entry ...  */
6132 install:
6133     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6134     lp[0] = tswap32(entry_1);
6135     lp[1] = tswap32(entry_2);
6136     return 0;
6137 }
6138 
6139 /* specific and weird i386 syscalls */
6140 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6141                               unsigned long bytecount)
6142 {
6143     abi_long ret;
6144 
6145     switch (func) {
6146     case 0:
6147         ret = read_ldt(ptr, bytecount);
6148         break;
6149     case 1:
6150         ret = write_ldt(env, ptr, bytecount, 1);
6151         break;
6152     case 0x11:
6153         ret = write_ldt(env, ptr, bytecount, 0);
6154         break;
6155     default:
6156         ret = -TARGET_ENOSYS;
6157         break;
6158     }
6159     return ret;
6160 }
6161 
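/*
 * A minimal guest-side sketch of how the write_ldt() path above is reached.
 * The example_install_ldt_entry() helper is hypothetical and assumes a
 * 32-bit x86 guest with struct user_desc from <asm/ldt.h>; func 0x11 selects
 * the "new mode" branch of do_modify_ldt().
 */
#include <asm/ldt.h>            /* struct user_desc */
#include <sys/syscall.h>

static int example_install_ldt_entry(unsigned int base, unsigned int limit)
{
    struct user_desc d = {
        .entry_number   = 0,    /* LDT slot 0 */
        .base_addr      = base,
        .limit          = limit,
        .seg_32bit      = 1,
        .contents       = 0,    /* data segment */
        .limit_in_pages = 1,
        .useable        = 1,
    };

    /* func 1 = old-mode write_ldt, 0x11 = new-mode write_ldt */
    return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
}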
6162 #if defined(TARGET_ABI32)
6163 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6164 {
6165     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6166     struct target_modify_ldt_ldt_s ldt_info;
6167     struct target_modify_ldt_ldt_s *target_ldt_info;
6168     int seg_32bit, contents, read_exec_only, limit_in_pages;
6169     int seg_not_present, useable, lm;
6170     uint32_t *lp, entry_1, entry_2;
6171     int i;
6172 
6173     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6174     if (!target_ldt_info)
6175         return -TARGET_EFAULT;
6176     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6177     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6178     ldt_info.limit = tswap32(target_ldt_info->limit);
6179     ldt_info.flags = tswap32(target_ldt_info->flags);
6180     if (ldt_info.entry_number == -1) {
6181         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6182             if (gdt_table[i] == 0) {
6183                 ldt_info.entry_number = i;
6184                 target_ldt_info->entry_number = tswap32(i);
6185                 break;
6186             }
6187         }
6188     }
6189     unlock_user_struct(target_ldt_info, ptr, 1);
6190 
6191     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6192         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6193         return -TARGET_EINVAL;
6194     seg_32bit = ldt_info.flags & 1;
6195     contents = (ldt_info.flags >> 1) & 3;
6196     read_exec_only = (ldt_info.flags >> 3) & 1;
6197     limit_in_pages = (ldt_info.flags >> 4) & 1;
6198     seg_not_present = (ldt_info.flags >> 5) & 1;
6199     useable = (ldt_info.flags >> 6) & 1;
6200 #ifdef TARGET_ABI32
6201     lm = 0;
6202 #else
6203     lm = (ldt_info.flags >> 7) & 1;
6204 #endif
6205 
6206     if (contents == 3) {
6207         if (seg_not_present == 0)
6208             return -TARGET_EINVAL;
6209     }
6210 
6211     /* NOTE: same code as Linux kernel */
6212     /* Allow LDTs to be cleared by the user. */
6213     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6214         if ((contents == 0             &&
6215              read_exec_only == 1       &&
6216              seg_32bit == 0            &&
6217              limit_in_pages == 0       &&
6218              seg_not_present == 1      &&
6219              useable == 0)) {
6220             entry_1 = 0;
6221             entry_2 = 0;
6222             goto install;
6223         }
6224     }
6225 
6226     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6227         (ldt_info.limit & 0x0ffff);
6228     entry_2 = (ldt_info.base_addr & 0xff000000) |
6229         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6230         (ldt_info.limit & 0xf0000) |
6231         ((read_exec_only ^ 1) << 9) |
6232         (contents << 10) |
6233         ((seg_not_present ^ 1) << 15) |
6234         (seg_32bit << 22) |
6235         (limit_in_pages << 23) |
6236         (useable << 20) |
6237         (lm << 21) |
6238         0x7000;
6239 
6240     /* Install the new entry ...  */
6241 install:
6242     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6243     lp[0] = tswap32(entry_1);
6244     lp[1] = tswap32(entry_2);
6245     return 0;
6246 }
6247 
6248 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6249 {
6250     struct target_modify_ldt_ldt_s *target_ldt_info;
6251     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6252     uint32_t base_addr, limit, flags;
6253     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6254     int seg_not_present, useable, lm;
6255     uint32_t *lp, entry_1, entry_2;
6256 
6257     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6258     if (!target_ldt_info)
6259         return -TARGET_EFAULT;
6260     idx = tswap32(target_ldt_info->entry_number);
6261     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6262         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6263         unlock_user_struct(target_ldt_info, ptr, 1);
6264         return -TARGET_EINVAL;
6265     }
6266     lp = (uint32_t *)(gdt_table + idx);
6267     entry_1 = tswap32(lp[0]);
6268     entry_2 = tswap32(lp[1]);
6269 
6270     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6271     contents = (entry_2 >> 10) & 3;
6272     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6273     seg_32bit = (entry_2 >> 22) & 1;
6274     limit_in_pages = (entry_2 >> 23) & 1;
6275     useable = (entry_2 >> 20) & 1;
6276 #ifdef TARGET_ABI32
6277     lm = 0;
6278 #else
6279     lm = (entry_2 >> 21) & 1;
6280 #endif
6281     flags = (seg_32bit << 0) | (contents << 1) |
6282         (read_exec_only << 3) | (limit_in_pages << 4) |
6283         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6284     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6285     base_addr = (entry_1 >> 16) |
6286         (entry_2 & 0xff000000) |
6287         ((entry_2 & 0xff) << 16);
6288     target_ldt_info->base_addr = tswapal(base_addr);
6289     target_ldt_info->limit = tswap32(limit);
6290     target_ldt_info->flags = tswap32(flags);
6291     unlock_user_struct(target_ldt_info, ptr, 1);
6292     return 0;
6293 }
6294 
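/*
 * Guest-side sketch of the entry_number == -1 convention handled by
 * do_set_thread_area() above: -1 asks for a free TLS GDT slot and the
 * chosen index is written back into the descriptor.  The helper is
 * hypothetical and assumes a 32-bit x86 guest with <asm/ldt.h> and
 * <sys/syscall.h> available.
 */
static int example_alloc_tls_slot(unsigned int base)
{
    struct user_desc d = {
        .entry_number   = (unsigned int)-1,  /* "pick a free slot for me" */
        .base_addr      = base,
        .limit          = 0xfffff,
        .seg_32bit      = 1,
        .limit_in_pages = 1,
        .useable        = 1,
    };

    if (syscall(SYS_set_thread_area, &d) != 0) {
        return -1;
    }
    return d.entry_number;                   /* slot chosen for us */
}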
6295 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6296 {
6297     return -TARGET_ENOSYS;
6298 }
6299 #else
6300 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6301 {
6302     abi_long ret = 0;
6303     abi_ulong val;
6304     int idx;
6305 
6306     switch(code) {
6307     case TARGET_ARCH_SET_GS:
6308     case TARGET_ARCH_SET_FS:
6309         if (code == TARGET_ARCH_SET_GS)
6310             idx = R_GS;
6311         else
6312             idx = R_FS;
6313         cpu_x86_load_seg(env, idx, 0);
6314         env->segs[idx].base = addr;
6315         break;
6316     case TARGET_ARCH_GET_GS:
6317     case TARGET_ARCH_GET_FS:
6318         if (code == TARGET_ARCH_GET_GS)
6319             idx = R_GS;
6320         else
6321             idx = R_FS;
6322         val = env->segs[idx].base;
6323         if (put_user(val, addr, abi_ulong))
6324             ret = -TARGET_EFAULT;
6325         break;
6326     default:
6327         ret = -TARGET_EINVAL;
6328         break;
6329     }
6330     return ret;
6331 }
6332 #endif /* defined(TARGET_ABI32) */
6333 #endif /* defined(TARGET_I386) */
6334 
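/*
 * Guest-side sketch of the TARGET_ARCH_GET_FS handling above.  The helper
 * is hypothetical and assumes an x86_64 guest where <asm/prctl.h> provides
 * ARCH_GET_FS and <sys/syscall.h> provides SYS_arch_prctl.
 */
#include <asm/prctl.h>          /* ARCH_GET_FS, ARCH_SET_FS */
#include <sys/syscall.h>

static unsigned long example_read_fs_base(void)
{
    unsigned long base = 0;

    /* arch_prctl(ARCH_GET_FS, &base) stores the current FS base. */
    if (syscall(SYS_arch_prctl, ARCH_GET_FS, &base) != 0) {
        return 0;
    }
    return base;
}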
6335 /*
6336  * These constants are generic.  Supply any that are missing from the host.
6337  */
6338 #ifndef PR_SET_NAME
6339 # define PR_SET_NAME    15
6340 # define PR_GET_NAME    16
6341 #endif
6342 #ifndef PR_SET_FP_MODE
6343 # define PR_SET_FP_MODE 45
6344 # define PR_GET_FP_MODE 46
6345 # define PR_FP_MODE_FR   (1 << 0)
6346 # define PR_FP_MODE_FRE  (1 << 1)
6347 #endif
6348 #ifndef PR_SVE_SET_VL
6349 # define PR_SVE_SET_VL  50
6350 # define PR_SVE_GET_VL  51
6351 # define PR_SVE_VL_LEN_MASK  0xffff
6352 # define PR_SVE_VL_INHERIT   (1 << 17)
6353 #endif
6354 #ifndef PR_PAC_RESET_KEYS
6355 # define PR_PAC_RESET_KEYS  54
6356 # define PR_PAC_APIAKEY   (1 << 0)
6357 # define PR_PAC_APIBKEY   (1 << 1)
6358 # define PR_PAC_APDAKEY   (1 << 2)
6359 # define PR_PAC_APDBKEY   (1 << 3)
6360 # define PR_PAC_APGAKEY   (1 << 4)
6361 #endif
6362 #ifndef PR_SET_TAGGED_ADDR_CTRL
6363 # define PR_SET_TAGGED_ADDR_CTRL 55
6364 # define PR_GET_TAGGED_ADDR_CTRL 56
6365 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6366 #endif
6367 #ifndef PR_MTE_TCF_SHIFT
6368 # define PR_MTE_TCF_SHIFT       1
6369 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6370 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6371 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6372 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6373 # define PR_MTE_TAG_SHIFT       3
6374 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6375 #endif
6376 #ifndef PR_SET_IO_FLUSHER
6377 # define PR_SET_IO_FLUSHER 57
6378 # define PR_GET_IO_FLUSHER 58
6379 #endif
6380 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6381 # define PR_SET_SYSCALL_USER_DISPATCH 59
6382 #endif
6383 #ifndef PR_SME_SET_VL
6384 # define PR_SME_SET_VL  63
6385 # define PR_SME_GET_VL  64
6386 # define PR_SME_VL_LEN_MASK  0xffff
6387 # define PR_SME_VL_INHERIT   (1 << 17)
6388 #endif
6389 
6390 #include "target_prctl.h"
6391 
6392 static abi_long do_prctl_inval0(CPUArchState *env)
6393 {
6394     return -TARGET_EINVAL;
6395 }
6396 
6397 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6398 {
6399     return -TARGET_EINVAL;
6400 }
6401 
6402 #ifndef do_prctl_get_fp_mode
6403 #define do_prctl_get_fp_mode do_prctl_inval0
6404 #endif
6405 #ifndef do_prctl_set_fp_mode
6406 #define do_prctl_set_fp_mode do_prctl_inval1
6407 #endif
6408 #ifndef do_prctl_sve_get_vl
6409 #define do_prctl_sve_get_vl do_prctl_inval0
6410 #endif
6411 #ifndef do_prctl_sve_set_vl
6412 #define do_prctl_sve_set_vl do_prctl_inval1
6413 #endif
6414 #ifndef do_prctl_reset_keys
6415 #define do_prctl_reset_keys do_prctl_inval1
6416 #endif
6417 #ifndef do_prctl_set_tagged_addr_ctrl
6418 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6419 #endif
6420 #ifndef do_prctl_get_tagged_addr_ctrl
6421 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6422 #endif
6423 #ifndef do_prctl_get_unalign
6424 #define do_prctl_get_unalign do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_set_unalign
6427 #define do_prctl_set_unalign do_prctl_inval1
6428 #endif
6429 #ifndef do_prctl_sme_get_vl
6430 #define do_prctl_sme_get_vl do_prctl_inval0
6431 #endif
6432 #ifndef do_prctl_sme_set_vl
6433 #define do_prctl_sme_set_vl do_prctl_inval1
6434 #endif
6435 
6436 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6437                          abi_long arg3, abi_long arg4, abi_long arg5)
6438 {
6439     abi_long ret;
6440 
6441     switch (option) {
6442     case PR_GET_PDEATHSIG:
6443         {
6444             int deathsig;
6445             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6446                                   arg3, arg4, arg5));
6447             if (!is_error(ret) &&
6448                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6449                 return -TARGET_EFAULT;
6450             }
6451             return ret;
6452         }
6453     case PR_SET_PDEATHSIG:
6454         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6455                                arg3, arg4, arg5));
6456     case PR_GET_NAME:
6457         {
6458             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6459             if (!name) {
6460                 return -TARGET_EFAULT;
6461             }
6462             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6463                                   arg3, arg4, arg5));
6464             unlock_user(name, arg2, 16);
6465             return ret;
6466         }
6467     case PR_SET_NAME:
6468         {
6469             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6470             if (!name) {
6471                 return -TARGET_EFAULT;
6472             }
6473             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6474                                   arg3, arg4, arg5));
6475             unlock_user(name, arg2, 0);
6476             return ret;
6477         }
6478     case PR_GET_FP_MODE:
6479         return do_prctl_get_fp_mode(env);
6480     case PR_SET_FP_MODE:
6481         return do_prctl_set_fp_mode(env, arg2);
6482     case PR_SVE_GET_VL:
6483         return do_prctl_sve_get_vl(env);
6484     case PR_SVE_SET_VL:
6485         return do_prctl_sve_set_vl(env, arg2);
6486     case PR_SME_GET_VL:
6487         return do_prctl_sme_get_vl(env);
6488     case PR_SME_SET_VL:
6489         return do_prctl_sme_set_vl(env, arg2);
6490     case PR_PAC_RESET_KEYS:
6491         if (arg3 || arg4 || arg5) {
6492             return -TARGET_EINVAL;
6493         }
6494         return do_prctl_reset_keys(env, arg2);
6495     case PR_SET_TAGGED_ADDR_CTRL:
6496         if (arg3 || arg4 || arg5) {
6497             return -TARGET_EINVAL;
6498         }
6499         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6500     case PR_GET_TAGGED_ADDR_CTRL:
6501         if (arg2 || arg3 || arg4 || arg5) {
6502             return -TARGET_EINVAL;
6503         }
6504         return do_prctl_get_tagged_addr_ctrl(env);
6505 
6506     case PR_GET_UNALIGN:
6507         return do_prctl_get_unalign(env, arg2);
6508     case PR_SET_UNALIGN:
6509         return do_prctl_set_unalign(env, arg2);
6510 
6511     case PR_CAP_AMBIENT:
6512     case PR_CAPBSET_READ:
6513     case PR_CAPBSET_DROP:
6514     case PR_GET_DUMPABLE:
6515     case PR_SET_DUMPABLE:
6516     case PR_GET_KEEPCAPS:
6517     case PR_SET_KEEPCAPS:
6518     case PR_GET_SECUREBITS:
6519     case PR_SET_SECUREBITS:
6520     case PR_GET_TIMING:
6521     case PR_SET_TIMING:
6522     case PR_GET_TIMERSLACK:
6523     case PR_SET_TIMERSLACK:
6524     case PR_MCE_KILL:
6525     case PR_MCE_KILL_GET:
6526     case PR_GET_NO_NEW_PRIVS:
6527     case PR_SET_NO_NEW_PRIVS:
6528     case PR_GET_IO_FLUSHER:
6529     case PR_SET_IO_FLUSHER:
6530         /* No pointer arguments, so these can be passed straight through. */
6531         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6532 
6533     case PR_GET_CHILD_SUBREAPER:
6534     case PR_SET_CHILD_SUBREAPER:
6535     case PR_GET_SPECULATION_CTRL:
6536     case PR_SET_SPECULATION_CTRL:
6537     case PR_GET_TID_ADDRESS:
6538         /* TODO */
6539         return -TARGET_EINVAL;
6540 
6541     case PR_GET_FPEXC:
6542     case PR_SET_FPEXC:
6543         /* Was used for SPE on PowerPC. */
6544         return -TARGET_EINVAL;
6545 
6546     case PR_GET_ENDIAN:
6547     case PR_SET_ENDIAN:
6548     case PR_GET_FPEMU:
6549     case PR_SET_FPEMU:
6550     case PR_SET_MM:
6551     case PR_GET_SECCOMP:
6552     case PR_SET_SECCOMP:
6553     case PR_SET_SYSCALL_USER_DISPATCH:
6554     case PR_GET_THP_DISABLE:
6555     case PR_SET_THP_DISABLE:
6556     case PR_GET_TSC:
6557     case PR_SET_TSC:
6558         /* Disabled to stop the target from disabling features we rely on. */
6559         return -TARGET_EINVAL;
6560 
6561     default:
6562         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6563                       option);
6564         return -TARGET_EINVAL;
6565     }
6566 }
6567 
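/*
 * Guest-side sketch of the PR_SET_NAME / PR_GET_NAME cases above (both are
 * plain prctl(2) calls; the 16-byte buffer matches the kernel's
 * TASK_COMM_LEN, which is why do_prctl() locks exactly 16 guest bytes).
 * The helper is hypothetical and assumes the usual <sys/prctl.h>
 * declarations.
 */
static void example_rename_current_thread(const char *name)
{
    char current[16];

    prctl(PR_SET_NAME, name);       /* name is truncated to 15 chars + NUL */
    prctl(PR_GET_NAME, current);    /* reads the comm name back */
}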
6568 #define NEW_STACK_SIZE 0x40000
6569 
6570 
6571 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6572 typedef struct {
6573     CPUArchState *env;
6574     pthread_mutex_t mutex;
6575     pthread_cond_t cond;
6576     pthread_t thread;
6577     uint32_t tid;
6578     abi_ulong child_tidptr;
6579     abi_ulong parent_tidptr;
6580     sigset_t sigmask;
6581 } new_thread_info;
6582 
6583 static void *clone_func(void *arg)
6584 {
6585     new_thread_info *info = arg;
6586     CPUArchState *env;
6587     CPUState *cpu;
6588     TaskState *ts;
6589 
6590     rcu_register_thread();
6591     tcg_register_thread();
6592     env = info->env;
6593     cpu = env_cpu(env);
6594     thread_cpu = cpu;
6595     ts = (TaskState *)cpu->opaque;
6596     info->tid = sys_gettid();
6597     task_settid(ts);
6598     if (info->child_tidptr)
6599         put_user_u32(info->tid, info->child_tidptr);
6600     if (info->parent_tidptr)
6601         put_user_u32(info->tid, info->parent_tidptr);
6602     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6603     /* Enable signals.  */
6604     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6605     /* Signal to the parent that we're ready.  */
6606     pthread_mutex_lock(&info->mutex);
6607     pthread_cond_broadcast(&info->cond);
6608     pthread_mutex_unlock(&info->mutex);
6609     /* Wait until the parent has finished initializing the tls state.  */
6610     pthread_mutex_lock(&clone_lock);
6611     pthread_mutex_unlock(&clone_lock);
6612     cpu_loop(env);
6613     /* never exits */
6614     return NULL;
6615 }
6616 
6617 /* do_fork() must return host values and target errnos (unlike most
6618    other do_*() functions). */
6619 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6620                    abi_ulong parent_tidptr, target_ulong newtls,
6621                    abi_ulong child_tidptr)
6622 {
6623     CPUState *cpu = env_cpu(env);
6624     int ret;
6625     TaskState *ts;
6626     CPUState *new_cpu;
6627     CPUArchState *new_env;
6628     sigset_t sigmask;
6629 
6630     flags &= ~CLONE_IGNORED_FLAGS;
6631 
6632     /* Emulate vfork() with fork() */
6633     if (flags & CLONE_VFORK)
6634         flags &= ~(CLONE_VFORK | CLONE_VM);
6635 
6636     if (flags & CLONE_VM) {
6637         TaskState *parent_ts = (TaskState *)cpu->opaque;
6638         new_thread_info info;
6639         pthread_attr_t attr;
6640 
6641         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6642             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6643             return -TARGET_EINVAL;
6644         }
6645 
6646         ts = g_new0(TaskState, 1);
6647         init_task_state(ts);
6648 
6649         /* Grab a mutex so that thread setup appears atomic.  */
6650         pthread_mutex_lock(&clone_lock);
6651 
6652         /*
6653          * If this is our first additional thread, we need to ensure we
6654          * generate code for parallel execution and flush old translations.
6655          * Do this now so that the copy gets CF_PARALLEL too.
6656          */
6657         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6658             cpu->tcg_cflags |= CF_PARALLEL;
6659             tb_flush(cpu);
6660         }
6661 
6662         /* we create a new CPU instance. */
6663         new_env = cpu_copy(env);
6664         /* Init regs that differ from the parent.  */
6665         cpu_clone_regs_child(new_env, newsp, flags);
6666         cpu_clone_regs_parent(env, flags);
6667         new_cpu = env_cpu(new_env);
6668         new_cpu->opaque = ts;
6669         ts->bprm = parent_ts->bprm;
6670         ts->info = parent_ts->info;
6671         ts->signal_mask = parent_ts->signal_mask;
6672 
6673         if (flags & CLONE_CHILD_CLEARTID) {
6674             ts->child_tidptr = child_tidptr;
6675         }
6676 
6677         if (flags & CLONE_SETTLS) {
6678             cpu_set_tls (new_env, newtls);
6679         }
6680 
6681         memset(&info, 0, sizeof(info));
6682         pthread_mutex_init(&info.mutex, NULL);
6683         pthread_mutex_lock(&info.mutex);
6684         pthread_cond_init(&info.cond, NULL);
6685         info.env = new_env;
6686         if (flags & CLONE_CHILD_SETTID) {
6687             info.child_tidptr = child_tidptr;
6688         }
6689         if (flags & CLONE_PARENT_SETTID) {
6690             info.parent_tidptr = parent_tidptr;
6691         }
6692 
6693         ret = pthread_attr_init(&attr);
6694         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6695         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6696         /* It is not safe to deliver signals until the child has finished
6697            initializing, so temporarily block all signals.  */
6698         sigfillset(&sigmask);
6699         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6700         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6701 
6702         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6703         /* TODO: Free new CPU state if thread creation failed.  */
6704 
6705         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6706         pthread_attr_destroy(&attr);
6707         if (ret == 0) {
6708             /* Wait for the child to initialize.  */
6709             pthread_cond_wait(&info.cond, &info.mutex);
6710             ret = info.tid;
6711         } else {
6712             ret = -1;
6713         }
6714         pthread_mutex_unlock(&info.mutex);
6715         pthread_cond_destroy(&info.cond);
6716         pthread_mutex_destroy(&info.mutex);
6717         pthread_mutex_unlock(&clone_lock);
6718     } else {
6719         /* if no CLONE_VM, we consider it is a fork */
6720         if (flags & CLONE_INVALID_FORK_FLAGS) {
6721             return -TARGET_EINVAL;
6722         }
6723 
6724         /* We can't support custom termination signals */
6725         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6726             return -TARGET_EINVAL;
6727         }
6728 
6729 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6730         if (flags & CLONE_PIDFD) {
6731             return -TARGET_EINVAL;
6732         }
6733 #endif
6734 
6735         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID. */
6736         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6737             return -TARGET_EINVAL;
6738         }
6739 
6740         if (block_signals()) {
6741             return -QEMU_ERESTARTSYS;
6742         }
6743 
6744         fork_start();
6745         ret = fork();
6746         if (ret == 0) {
6747             /* Child Process.  */
6748             cpu_clone_regs_child(env, newsp, flags);
6749             fork_end(1);
6750             /* There is a race condition here.  The parent process could
6751                theoretically read the TID in the child process before the child
6752                tid is set.  This would require using either ptrace
6753                (not implemented) or having *_tidptr to point at a shared memory
6754                mapping.  We can't repeat the spinlock hack used above because
6755                the child process gets its own copy of the lock.  */
6756             if (flags & CLONE_CHILD_SETTID)
6757                 put_user_u32(sys_gettid(), child_tidptr);
6758             if (flags & CLONE_PARENT_SETTID)
6759                 put_user_u32(sys_gettid(), parent_tidptr);
6760             ts = (TaskState *)cpu->opaque;
6761             if (flags & CLONE_SETTLS)
6762                 cpu_set_tls (env, newtls);
6763             if (flags & CLONE_CHILD_CLEARTID)
6764                 ts->child_tidptr = child_tidptr;
6765         } else {
6766             cpu_clone_regs_parent(env, flags);
6767             if (flags & CLONE_PIDFD) {
6768                 int pid_fd = 0;
6769 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6770                 int pid_child = ret;
6771                 pid_fd = pidfd_open(pid_child, 0);
6772                 if (pid_fd >= 0) {
6773                     fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6774                                            | FD_CLOEXEC);
6775                 } else {
6776                     pid_fd = 0;
6777                 }
6778 #endif
6779                 put_user_u32(pid_fd, parent_tidptr);
6780             }
6781             fork_end(0);
6782         }
6783         g_assert(!cpu_in_exclusive_context(cpu));
6784     }
6785     return ret;
6786 }
6787 
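/*
 * Guest-side sketch of the CLONE_PIDFD handling above, assuming the glibc
 * clone(3) wrapper and a CLONE_PIDFD definition available via <sched.h>.
 * With CLONE_PIDFD the parent_tid slot receives a pid file descriptor,
 * which do_fork() emulates with pidfd_open() on the host.  Both helpers
 * are hypothetical.
 */
static int example_child_fn(void *arg)
{
    return 0;
}

static int example_fork_with_pidfd(char *stack_top, int *pidfd)
{
    /* Fork-like clone: no CLONE_VM, SIGCHLD as the termination signal. */
    return clone(example_child_fn, stack_top, CLONE_PIDFD | SIGCHLD,
                 NULL, pidfd);
}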
6788 /* Warning: doesn't handle Linux-specific flags... */
6789 static int target_to_host_fcntl_cmd(int cmd)
6790 {
6791     int ret;
6792 
6793     switch(cmd) {
6794     case TARGET_F_DUPFD:
6795     case TARGET_F_GETFD:
6796     case TARGET_F_SETFD:
6797     case TARGET_F_GETFL:
6798     case TARGET_F_SETFL:
6799     case TARGET_F_OFD_GETLK:
6800     case TARGET_F_OFD_SETLK:
6801     case TARGET_F_OFD_SETLKW:
6802         ret = cmd;
6803         break;
6804     case TARGET_F_GETLK:
6805         ret = F_GETLK64;
6806         break;
6807     case TARGET_F_SETLK:
6808         ret = F_SETLK64;
6809         break;
6810     case TARGET_F_SETLKW:
6811         ret = F_SETLKW64;
6812         break;
6813     case TARGET_F_GETOWN:
6814         ret = F_GETOWN;
6815         break;
6816     case TARGET_F_SETOWN:
6817         ret = F_SETOWN;
6818         break;
6819     case TARGET_F_GETSIG:
6820         ret = F_GETSIG;
6821         break;
6822     case TARGET_F_SETSIG:
6823         ret = F_SETSIG;
6824         break;
6825 #if TARGET_ABI_BITS == 32
6826     case TARGET_F_GETLK64:
6827         ret = F_GETLK64;
6828         break;
6829     case TARGET_F_SETLK64:
6830         ret = F_SETLK64;
6831         break;
6832     case TARGET_F_SETLKW64:
6833         ret = F_SETLKW64;
6834         break;
6835 #endif
6836     case TARGET_F_SETLEASE:
6837         ret = F_SETLEASE;
6838         break;
6839     case TARGET_F_GETLEASE:
6840         ret = F_GETLEASE;
6841         break;
6842 #ifdef F_DUPFD_CLOEXEC
6843     case TARGET_F_DUPFD_CLOEXEC:
6844         ret = F_DUPFD_CLOEXEC;
6845         break;
6846 #endif
6847     case TARGET_F_NOTIFY:
6848         ret = F_NOTIFY;
6849         break;
6850 #ifdef F_GETOWN_EX
6851     case TARGET_F_GETOWN_EX:
6852         ret = F_GETOWN_EX;
6853         break;
6854 #endif
6855 #ifdef F_SETOWN_EX
6856     case TARGET_F_SETOWN_EX:
6857         ret = F_SETOWN_EX;
6858         break;
6859 #endif
6860 #ifdef F_SETPIPE_SZ
6861     case TARGET_F_SETPIPE_SZ:
6862         ret = F_SETPIPE_SZ;
6863         break;
6864     case TARGET_F_GETPIPE_SZ:
6865         ret = F_GETPIPE_SZ;
6866         break;
6867 #endif
6868 #ifdef F_ADD_SEALS
6869     case TARGET_F_ADD_SEALS:
6870         ret = F_ADD_SEALS;
6871         break;
6872     case TARGET_F_GET_SEALS:
6873         ret = F_GET_SEALS;
6874         break;
6875 #endif
6876     default:
6877         ret = -TARGET_EINVAL;
6878         break;
6879     }
6880 
6881 #if defined(__powerpc64__)
6882     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6883      * are not supported by the kernel. The glibc fcntl wrapper adjusts
6884      * them to 5, 6 and 7 before making the syscall. Since we make the
6885      * syscall directly, adjust to what the kernel supports.
6886      */
6887     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6888         ret -= F_GETLK64 - 5;
6889     }
6890 #endif
6891 
6892     return ret;
6893 }
6894 
6895 #define FLOCK_TRANSTBL \
6896     switch (type) { \
6897     TRANSTBL_CONVERT(F_RDLCK); \
6898     TRANSTBL_CONVERT(F_WRLCK); \
6899     TRANSTBL_CONVERT(F_UNLCK); \
6900     }
6901 
6902 static int target_to_host_flock(int type)
6903 {
6904 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6905     FLOCK_TRANSTBL
6906 #undef  TRANSTBL_CONVERT
6907     return -TARGET_EINVAL;
6908 }
6909 
6910 static int host_to_target_flock(int type)
6911 {
6912 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6913     FLOCK_TRANSTBL
6914 #undef  TRANSTBL_CONVERT
6915     /* If we don't know how to convert the value coming from the host,
6916      * copy it to the target field as-is.
6917      */
6918     return type;
6919 }
6920 
6921 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6922                                             abi_ulong target_flock_addr)
6923 {
6924     struct target_flock *target_fl;
6925     int l_type;
6926 
6927     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6928         return -TARGET_EFAULT;
6929     }
6930 
6931     __get_user(l_type, &target_fl->l_type);
6932     l_type = target_to_host_flock(l_type);
6933     if (l_type < 0) {
6934         return l_type;
6935     }
6936     fl->l_type = l_type;
6937     __get_user(fl->l_whence, &target_fl->l_whence);
6938     __get_user(fl->l_start, &target_fl->l_start);
6939     __get_user(fl->l_len, &target_fl->l_len);
6940     __get_user(fl->l_pid, &target_fl->l_pid);
6941     unlock_user_struct(target_fl, target_flock_addr, 0);
6942     return 0;
6943 }
6944 
6945 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6946                                           const struct flock64 *fl)
6947 {
6948     struct target_flock *target_fl;
6949     short l_type;
6950 
6951     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6952         return -TARGET_EFAULT;
6953     }
6954 
6955     l_type = host_to_target_flock(fl->l_type);
6956     __put_user(l_type, &target_fl->l_type);
6957     __put_user(fl->l_whence, &target_fl->l_whence);
6958     __put_user(fl->l_start, &target_fl->l_start);
6959     __put_user(fl->l_len, &target_fl->l_len);
6960     __put_user(fl->l_pid, &target_fl->l_pid);
6961     unlock_user_struct(target_fl, target_flock_addr, 1);
6962     return 0;
6963 }
6964 
6965 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6966 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6967 
6968 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6969 struct target_oabi_flock64 {
6970     abi_short l_type;
6971     abi_short l_whence;
6972     abi_llong l_start;
6973     abi_llong l_len;
6974     abi_int   l_pid;
6975 } QEMU_PACKED;
6976 
6977 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6978                                                    abi_ulong target_flock_addr)
6979 {
6980     struct target_oabi_flock64 *target_fl;
6981     int l_type;
6982 
6983     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6984         return -TARGET_EFAULT;
6985     }
6986 
6987     __get_user(l_type, &target_fl->l_type);
6988     l_type = target_to_host_flock(l_type);
6989     if (l_type < 0) {
6990         return l_type;
6991     }
6992     fl->l_type = l_type;
6993     __get_user(fl->l_whence, &target_fl->l_whence);
6994     __get_user(fl->l_start, &target_fl->l_start);
6995     __get_user(fl->l_len, &target_fl->l_len);
6996     __get_user(fl->l_pid, &target_fl->l_pid);
6997     unlock_user_struct(target_fl, target_flock_addr, 0);
6998     return 0;
6999 }
7000 
7001 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7002                                                  const struct flock64 *fl)
7003 {
7004     struct target_oabi_flock64 *target_fl;
7005     short l_type;
7006 
7007     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7008         return -TARGET_EFAULT;
7009     }
7010 
7011     l_type = host_to_target_flock(fl->l_type);
7012     __put_user(l_type, &target_fl->l_type);
7013     __put_user(fl->l_whence, &target_fl->l_whence);
7014     __put_user(fl->l_start, &target_fl->l_start);
7015     __put_user(fl->l_len, &target_fl->l_len);
7016     __put_user(fl->l_pid, &target_fl->l_pid);
7017     unlock_user_struct(target_fl, target_flock_addr, 1);
7018     return 0;
7019 }
7020 #endif
7021 
7022 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7023                                               abi_ulong target_flock_addr)
7024 {
7025     struct target_flock64 *target_fl;
7026     int l_type;
7027 
7028     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7029         return -TARGET_EFAULT;
7030     }
7031 
7032     __get_user(l_type, &target_fl->l_type);
7033     l_type = target_to_host_flock(l_type);
7034     if (l_type < 0) {
7035         return l_type;
7036     }
7037     fl->l_type = l_type;
7038     __get_user(fl->l_whence, &target_fl->l_whence);
7039     __get_user(fl->l_start, &target_fl->l_start);
7040     __get_user(fl->l_len, &target_fl->l_len);
7041     __get_user(fl->l_pid, &target_fl->l_pid);
7042     unlock_user_struct(target_fl, target_flock_addr, 0);
7043     return 0;
7044 }
7045 
7046 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7047                                             const struct flock64 *fl)
7048 {
7049     struct target_flock64 *target_fl;
7050     short l_type;
7051 
7052     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7053         return -TARGET_EFAULT;
7054     }
7055 
7056     l_type = host_to_target_flock(fl->l_type);
7057     __put_user(l_type, &target_fl->l_type);
7058     __put_user(fl->l_whence, &target_fl->l_whence);
7059     __put_user(fl->l_start, &target_fl->l_start);
7060     __put_user(fl->l_len, &target_fl->l_len);
7061     __put_user(fl->l_pid, &target_fl->l_pid);
7062     unlock_user_struct(target_fl, target_flock_addr, 1);
7063     return 0;
7064 }
7065 
7066 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7067 {
7068     struct flock64 fl64;
7069 #ifdef F_GETOWN_EX
7070     struct f_owner_ex fox;
7071     struct target_f_owner_ex *target_fox;
7072 #endif
7073     abi_long ret;
7074     int host_cmd = target_to_host_fcntl_cmd(cmd);
7075 
7076     if (host_cmd == -TARGET_EINVAL)
7077         return host_cmd;
7078 
7079     switch(cmd) {
7080     case TARGET_F_GETLK:
7081         ret = copy_from_user_flock(&fl64, arg);
7082         if (ret) {
7083             return ret;
7084         }
7085         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7086         if (ret == 0) {
7087             ret = copy_to_user_flock(arg, &fl64);
7088         }
7089         break;
7090 
7091     case TARGET_F_SETLK:
7092     case TARGET_F_SETLKW:
7093         ret = copy_from_user_flock(&fl64, arg);
7094         if (ret) {
7095             return ret;
7096         }
7097         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7098         break;
7099 
7100     case TARGET_F_GETLK64:
7101     case TARGET_F_OFD_GETLK:
7102         ret = copy_from_user_flock64(&fl64, arg);
7103         if (ret) {
7104             return ret;
7105         }
7106         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7107         if (ret == 0) {
7108             ret = copy_to_user_flock64(arg, &fl64);
7109         }
7110         break;
7111     case TARGET_F_SETLK64:
7112     case TARGET_F_SETLKW64:
7113     case TARGET_F_OFD_SETLK:
7114     case TARGET_F_OFD_SETLKW:
7115         ret = copy_from_user_flock64(&fl64, arg);
7116         if (ret) {
7117             return ret;
7118         }
7119         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7120         break;
7121 
7122     case TARGET_F_GETFL:
7123         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7124         if (ret >= 0) {
7125             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7126             /* Tell 32-bit guests that 64-bit hosts always use O_LARGEFILE. */
7127             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7128                 ret |= TARGET_O_LARGEFILE;
7129             }
7130         }
7131         break;
7132 
7133     case TARGET_F_SETFL:
7134         ret = get_errno(safe_fcntl(fd, host_cmd,
7135                                    target_to_host_bitmask(arg,
7136                                                           fcntl_flags_tbl)));
7137         break;
7138 
7139 #ifdef F_GETOWN_EX
7140     case TARGET_F_GETOWN_EX:
7141         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7142         if (ret >= 0) {
7143             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7144                 return -TARGET_EFAULT;
7145             target_fox->type = tswap32(fox.type);
7146             target_fox->pid = tswap32(fox.pid);
7147             unlock_user_struct(target_fox, arg, 1);
7148         }
7149         break;
7150 #endif
7151 
7152 #ifdef F_SETOWN_EX
7153     case TARGET_F_SETOWN_EX:
7154         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7155             return -TARGET_EFAULT;
7156         fox.type = tswap32(target_fox->type);
7157         fox.pid = tswap32(target_fox->pid);
7158         unlock_user_struct(target_fox, arg, 0);
7159         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7160         break;
7161 #endif
7162 
7163     case TARGET_F_SETSIG:
7164         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7165         break;
7166 
7167     case TARGET_F_GETSIG:
7168         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7169         break;
7170 
7171     case TARGET_F_SETOWN:
7172     case TARGET_F_GETOWN:
7173     case TARGET_F_SETLEASE:
7174     case TARGET_F_GETLEASE:
7175     case TARGET_F_SETPIPE_SZ:
7176     case TARGET_F_GETPIPE_SZ:
7177     case TARGET_F_ADD_SEALS:
7178     case TARGET_F_GET_SEALS:
7179         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7180         break;
7181 
7182     default:
7183         ret = get_errno(safe_fcntl(fd, cmd, arg));
7184         break;
7185     }
7186     return ret;
7187 }
7188 
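/*
 * Guest-side sketch of the TARGET_F_OFD_SETLK path above: open file
 * description locks take the same struct flock as POSIX locks, but l_pid
 * must be zero.  The helper is hypothetical and assumes F_OFD_SETLK is
 * exposed by the guest libc's <fcntl.h>.
 */
static int example_lock_whole_file(int fd)
{
    struct flock fl = {
        .l_type   = F_WRLCK,
        .l_whence = SEEK_SET,
        .l_start  = 0,
        .l_len    = 0,          /* 0 means "to end of file" */
        .l_pid    = 0,          /* must be 0 for OFD locks */
    };

    return fcntl(fd, F_OFD_SETLK, &fl);
}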
7189 #ifdef USE_UID16
7190 
7191 static inline int high2lowuid(int uid)
7192 {
7193     if (uid > 65535)
7194         return 65534;
7195     else
7196         return uid;
7197 }
7198 
7199 static inline int high2lowgid(int gid)
7200 {
7201     if (gid > 65535)
7202         return 65534;
7203     else
7204         return gid;
7205 }
7206 
7207 static inline int low2highuid(int uid)
7208 {
7209     if ((int16_t)uid == -1)
7210         return -1;
7211     else
7212         return uid;
7213 }
7214 
7215 static inline int low2highgid(int gid)
7216 {
7217     if ((int16_t)gid == -1)
7218         return -1;
7219     else
7220         return gid;
7221 }
7222 static inline int tswapid(int id)
7223 {
7224     return tswap16(id);
7225 }
7226 
7227 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7228 
7229 #else /* !USE_UID16 */
7230 static inline int high2lowuid(int uid)
7231 {
7232     return uid;
7233 }
7234 static inline int high2lowgid(int gid)
7235 {
7236     return gid;
7237 }
7238 static inline int low2highuid(int uid)
7239 {
7240     return uid;
7241 }
7242 static inline int low2highgid(int gid)
7243 {
7244     return gid;
7245 }
7246 static inline int tswapid(int id)
7247 {
7248     return tswap32(id);
7249 }
7250 
7251 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7252 
7253 #endif /* USE_UID16 */
7254 
7255 /* We must do direct syscalls for setting UID/GID, because we want to
7256  * implement the Linux system call semantics of "change only for this thread",
7257  * not the libc/POSIX semantics of "change for all threads in process".
7258  * (See http://ewontfix.com/17/ for more details.)
7259  * We use the 32-bit version of the syscalls if present; if it is not
7260  * then either the host architecture supports 32-bit UIDs natively with
7261  * the standard syscall, or the 16-bit UID is the best we can do.
7262  */
7263 #ifdef __NR_setuid32
7264 #define __NR_sys_setuid __NR_setuid32
7265 #else
7266 #define __NR_sys_setuid __NR_setuid
7267 #endif
7268 #ifdef __NR_setgid32
7269 #define __NR_sys_setgid __NR_setgid32
7270 #else
7271 #define __NR_sys_setgid __NR_setgid
7272 #endif
7273 #ifdef __NR_setresuid32
7274 #define __NR_sys_setresuid __NR_setresuid32
7275 #else
7276 #define __NR_sys_setresuid __NR_setresuid
7277 #endif
7278 #ifdef __NR_setresgid32
7279 #define __NR_sys_setresgid __NR_setresgid32
7280 #else
7281 #define __NR_sys_setresgid __NR_setresgid
7282 #endif
7283 
7284 _syscall1(int, sys_setuid, uid_t, uid)
7285 _syscall1(int, sys_setgid, gid_t, gid)
7286 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7287 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7288 
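/*
 * Sketch of the distinction described in the comment above: the raw
 * syscall changes credentials only for the calling thread (Linux
 * semantics), while the glibc wrapper broadcasts the change to every
 * thread in the process (POSIX semantics).  The helpers are hypothetical
 * and reuse the __NR_sys_setuid selection made above.
 */
static int example_setuid_this_thread_only(uid_t uid)
{
    return syscall(__NR_sys_setuid, uid);   /* calling thread only */
}

static int example_setuid_whole_process(uid_t uid)
{
    return setuid(uid);                     /* all threads, via libc */
}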
7289 void syscall_init(void)
7290 {
7291     IOCTLEntry *ie;
7292     const argtype *arg_type;
7293     int size;
7294 
7295     thunk_init(STRUCT_MAX);
7296 
7297 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7298 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7299 #include "syscall_types.h"
7300 #undef STRUCT
7301 #undef STRUCT_SPECIAL
7302 
7303     /* We patch the ioctl size if necessary. We rely on the fact that
7304        no ioctl has every bit of its size field set to '1'. */
7305     ie = ioctl_entries;
7306     while (ie->target_cmd != 0) {
7307         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7308             TARGET_IOC_SIZEMASK) {
7309             arg_type = ie->arg_type;
7310             if (arg_type[0] != TYPE_PTR) {
7311                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7312                         ie->target_cmd);
7313                 exit(1);
7314             }
7315             arg_type++;
7316             size = thunk_type_size(arg_type, 0);
7317             ie->target_cmd = (ie->target_cmd &
7318                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7319                 (size << TARGET_IOC_SIZESHIFT);
7320         }
7321 
7322         /* automatic consistency check if same arch */
7323 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7324     (defined(__x86_64__) && defined(TARGET_X86_64))
7325         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7326             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7327                     ie->name, ie->target_cmd, ie->host_cmd);
7328         }
7329 #endif
7330         ie++;
7331     }
7332 }
7333 
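/*
 * Sketch of the size patching done in syscall_init() above, written with
 * the host-side _IOC_* macros from <linux/ioctl.h> (TARGET_IOC_SIZEMASK and
 * TARGET_IOC_SIZESHIFT are the target-side equivalents).  The helper is
 * hypothetical: an all-ones size field marks "fill the size in from the
 * thunk type".
 */
static unsigned int example_patch_ioctl_size(unsigned int cmd,
                                             unsigned int size)
{
    return (cmd & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT)) |
           (size << _IOC_SIZESHIFT);
}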
7334 #ifdef TARGET_NR_truncate64
7335 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7336                                          abi_long arg2,
7337                                          abi_long arg3,
7338                                          abi_long arg4)
7339 {
7340     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7341         arg2 = arg3;
7342         arg3 = arg4;
7343     }
7344     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7345 }
7346 #endif
7347 
7348 #ifdef TARGET_NR_ftruncate64
7349 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7350                                           abi_long arg2,
7351                                           abi_long arg3,
7352                                           abi_long arg4)
7353 {
7354     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7355         arg2 = arg3;
7356         arg3 = arg4;
7357     }
7358     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7359 }
7360 #endif
7361 
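/*
 * Sketch of what target_offset64() does with the two register halves once
 * regpairs_aligned() has shifted them into arg2/arg3 above.  The helper is
 * hypothetical and assumes the low word is passed first; the real helper
 * also accounts for the target's endianness and register-pair convention.
 */
static uint64_t example_join_offset64(uint32_t low, uint32_t high)
{
    return ((uint64_t)high << 32) | low;
}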
7362 #if defined(TARGET_NR_timer_settime) || \
7363     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7364 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7365                                                  abi_ulong target_addr)
7366 {
7367     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7368                                 offsetof(struct target_itimerspec,
7369                                          it_interval)) ||
7370         target_to_host_timespec(&host_its->it_value, target_addr +
7371                                 offsetof(struct target_itimerspec,
7372                                          it_value))) {
7373         return -TARGET_EFAULT;
7374     }
7375 
7376     return 0;
7377 }
7378 #endif
7379 
7380 #if defined(TARGET_NR_timer_settime64) || \
7381     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7382 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7383                                                    abi_ulong target_addr)
7384 {
7385     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7386                                   offsetof(struct target__kernel_itimerspec,
7387                                            it_interval)) ||
7388         target_to_host_timespec64(&host_its->it_value, target_addr +
7389                                   offsetof(struct target__kernel_itimerspec,
7390                                            it_value))) {
7391         return -TARGET_EFAULT;
7392     }
7393 
7394     return 0;
7395 }
7396 #endif
7397 
7398 #if ((defined(TARGET_NR_timerfd_gettime) || \
7399       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7400       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7401 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7402                                                  struct itimerspec *host_its)
7403 {
7404     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7405                                                        it_interval),
7406                                 &host_its->it_interval) ||
7407         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7408                                                        it_value),
7409                                 &host_its->it_value)) {
7410         return -TARGET_EFAULT;
7411     }
7412     return 0;
7413 }
7414 #endif
7415 
7416 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7417       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7418       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7419 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7420                                                    struct itimerspec *host_its)
7421 {
7422     if (host_to_target_timespec64(target_addr +
7423                                   offsetof(struct target__kernel_itimerspec,
7424                                            it_interval),
7425                                   &host_its->it_interval) ||
7426         host_to_target_timespec64(target_addr +
7427                                   offsetof(struct target__kernel_itimerspec,
7428                                            it_value),
7429                                   &host_its->it_value)) {
7430         return -TARGET_EFAULT;
7431     }
7432     return 0;
7433 }
7434 #endif
7435 
7436 #if defined(TARGET_NR_adjtimex) || \
7437     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7438 static inline abi_long target_to_host_timex(struct timex *host_tx,
7439                                             abi_long target_addr)
7440 {
7441     struct target_timex *target_tx;
7442 
7443     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7444         return -TARGET_EFAULT;
7445     }
7446 
7447     __get_user(host_tx->modes, &target_tx->modes);
7448     __get_user(host_tx->offset, &target_tx->offset);
7449     __get_user(host_tx->freq, &target_tx->freq);
7450     __get_user(host_tx->maxerror, &target_tx->maxerror);
7451     __get_user(host_tx->esterror, &target_tx->esterror);
7452     __get_user(host_tx->status, &target_tx->status);
7453     __get_user(host_tx->constant, &target_tx->constant);
7454     __get_user(host_tx->precision, &target_tx->precision);
7455     __get_user(host_tx->tolerance, &target_tx->tolerance);
7456     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7457     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7458     __get_user(host_tx->tick, &target_tx->tick);
7459     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7460     __get_user(host_tx->jitter, &target_tx->jitter);
7461     __get_user(host_tx->shift, &target_tx->shift);
7462     __get_user(host_tx->stabil, &target_tx->stabil);
7463     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7464     __get_user(host_tx->calcnt, &target_tx->calcnt);
7465     __get_user(host_tx->errcnt, &target_tx->errcnt);
7466     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7467     __get_user(host_tx->tai, &target_tx->tai);
7468 
7469     unlock_user_struct(target_tx, target_addr, 0);
7470     return 0;
7471 }
7472 
7473 static inline abi_long host_to_target_timex(abi_long target_addr,
7474                                             struct timex *host_tx)
7475 {
7476     struct target_timex *target_tx;
7477 
7478     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7479         return -TARGET_EFAULT;
7480     }
7481 
7482     __put_user(host_tx->modes, &target_tx->modes);
7483     __put_user(host_tx->offset, &target_tx->offset);
7484     __put_user(host_tx->freq, &target_tx->freq);
7485     __put_user(host_tx->maxerror, &target_tx->maxerror);
7486     __put_user(host_tx->esterror, &target_tx->esterror);
7487     __put_user(host_tx->status, &target_tx->status);
7488     __put_user(host_tx->constant, &target_tx->constant);
7489     __put_user(host_tx->precision, &target_tx->precision);
7490     __put_user(host_tx->tolerance, &target_tx->tolerance);
7491     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7492     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7493     __put_user(host_tx->tick, &target_tx->tick);
7494     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7495     __put_user(host_tx->jitter, &target_tx->jitter);
7496     __put_user(host_tx->shift, &target_tx->shift);
7497     __put_user(host_tx->stabil, &target_tx->stabil);
7498     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7499     __put_user(host_tx->calcnt, &target_tx->calcnt);
7500     __put_user(host_tx->errcnt, &target_tx->errcnt);
7501     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7502     __put_user(host_tx->tai, &target_tx->tai);
7503 
7504     unlock_user_struct(target_tx, target_addr, 1);
7505     return 0;
7506 }
7507 #endif
7508 
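/*
 * Guest-side sketch of the adjtimex path that the converters above serve:
 * with modes == 0, adjtimex(2) only reads the kernel's time state.  The
 * helper is hypothetical and assumes the usual <sys/timex.h> declarations.
 */
static int example_read_clock_state(long *offset_out)
{
    struct timex tx = { .modes = 0 };
    int state = adjtimex(&tx);      /* TIME_OK, TIME_INS, ... or -1 */

    if (state >= 0 && offset_out) {
        *offset_out = tx.offset;
    }
    return state;
}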
7509 
7510 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7511 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7512                                               abi_long target_addr)
7513 {
7514     struct target__kernel_timex *target_tx;
7515 
7516     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7517                                  offsetof(struct target__kernel_timex,
7518                                           time))) {
7519         return -TARGET_EFAULT;
7520     }
7521 
7522     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7523         return -TARGET_EFAULT;
7524     }
7525 
7526     __get_user(host_tx->modes, &target_tx->modes);
7527     __get_user(host_tx->offset, &target_tx->offset);
7528     __get_user(host_tx->freq, &target_tx->freq);
7529     __get_user(host_tx->maxerror, &target_tx->maxerror);
7530     __get_user(host_tx->esterror, &target_tx->esterror);
7531     __get_user(host_tx->status, &target_tx->status);
7532     __get_user(host_tx->constant, &target_tx->constant);
7533     __get_user(host_tx->precision, &target_tx->precision);
7534     __get_user(host_tx->tolerance, &target_tx->tolerance);
7535     __get_user(host_tx->tick, &target_tx->tick);
7536     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7537     __get_user(host_tx->jitter, &target_tx->jitter);
7538     __get_user(host_tx->shift, &target_tx->shift);
7539     __get_user(host_tx->stabil, &target_tx->stabil);
7540     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7541     __get_user(host_tx->calcnt, &target_tx->calcnt);
7542     __get_user(host_tx->errcnt, &target_tx->errcnt);
7543     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7544     __get_user(host_tx->tai, &target_tx->tai);
7545 
7546     unlock_user_struct(target_tx, target_addr, 0);
7547     return 0;
7548 }
7549 
7550 static inline abi_long host_to_target_timex64(abi_long target_addr,
7551                                               struct timex *host_tx)
7552 {
7553     struct target__kernel_timex *target_tx;
7554 
7555     if (copy_to_user_timeval64(target_addr +
7556                                offsetof(struct target__kernel_timex, time),
7557                                &host_tx->time)) {
7558         return -TARGET_EFAULT;
7559     }
7560 
7561     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7562         return -TARGET_EFAULT;
7563     }
7564 
7565     __put_user(host_tx->modes, &target_tx->modes);
7566     __put_user(host_tx->offset, &target_tx->offset);
7567     __put_user(host_tx->freq, &target_tx->freq);
7568     __put_user(host_tx->maxerror, &target_tx->maxerror);
7569     __put_user(host_tx->esterror, &target_tx->esterror);
7570     __put_user(host_tx->status, &target_tx->status);
7571     __put_user(host_tx->constant, &target_tx->constant);
7572     __put_user(host_tx->precision, &target_tx->precision);
7573     __put_user(host_tx->tolerance, &target_tx->tolerance);
7574     __put_user(host_tx->tick, &target_tx->tick);
7575     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7576     __put_user(host_tx->jitter, &target_tx->jitter);
7577     __put_user(host_tx->shift, &target_tx->shift);
7578     __put_user(host_tx->stabil, &target_tx->stabil);
7579     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7580     __put_user(host_tx->calcnt, &target_tx->calcnt);
7581     __put_user(host_tx->errcnt, &target_tx->errcnt);
7582     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7583     __put_user(host_tx->tai, &target_tx->tai);
7584 
7585     unlock_user_struct(target_tx, target_addr, 1);
7586     return 0;
7587 }
7588 #endif
7589 
7590 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7591 #define sigev_notify_thread_id _sigev_un._tid
7592 #endif
7593 
7594 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7595                                                abi_ulong target_addr)
7596 {
7597     struct target_sigevent *target_sevp;
7598 
7599     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7600         return -TARGET_EFAULT;
7601     }
7602 
7603     /* This union is awkward on 64 bit systems because it has a 32 bit
7604      * integer and a pointer in it; we follow the conversion approach
7605      * used for handling sigval types in signal.c so the guest should get
7606      * the correct value back even if we did a 64 bit byteswap and it's
7607      * using the 32 bit integer.
7608      */
7609     host_sevp->sigev_value.sival_ptr =
7610         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7611     host_sevp->sigev_signo =
7612         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7613     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7614     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7615 
7616     unlock_user_struct(target_sevp, target_addr, 1);
7617     return 0;
7618 }
7619 
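/*
 * Guest-side sketch of a sigevent as converted by target_to_host_sigevent()
 * above, using plain signal notification; SIGEV_THREAD_ID would
 * additionally fill the thread-id field handled via sigev_notify_thread_id.
 * The helper is hypothetical and assumes the usual <signal.h> and <time.h>
 * declarations for struct sigevent and timer_create().
 */
static int example_create_timer(timer_t *out)
{
    struct sigevent sev = {
        .sigev_notify          = SIGEV_SIGNAL,
        .sigev_signo           = SIGRTMIN,
        .sigev_value.sival_int = 42,    /* passed back in siginfo */
    };

    return timer_create(CLOCK_MONOTONIC, &sev, out);
}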
7620 #if defined(TARGET_NR_mlockall)
7621 static inline int target_to_host_mlockall_arg(int arg)
7622 {
7623     int result = 0;
7624 
7625     if (arg & TARGET_MCL_CURRENT) {
7626         result |= MCL_CURRENT;
7627     }
7628     if (arg & TARGET_MCL_FUTURE) {
7629         result |= MCL_FUTURE;
7630     }
7631 #ifdef MCL_ONFAULT
7632     if (arg & TARGET_MCL_ONFAULT) {
7633         result |= MCL_ONFAULT;
7634     }
7635 #endif
7636 
7637     return result;
7638 }
7639 #endif
7640 
7641 static inline int target_to_host_msync_arg(abi_long arg)
7642 {
7643     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7644            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7645            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7646            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7647 }
7648 
7649 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7650      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7651      defined(TARGET_NR_newfstatat))
7652 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7653                                              abi_ulong target_addr,
7654                                              struct stat *host_st)
7655 {
7656 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7657     if (cpu_env->eabi) {
7658         struct target_eabi_stat64 *target_st;
7659 
7660         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7661             return -TARGET_EFAULT;
7662         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7663         __put_user(host_st->st_dev, &target_st->st_dev);
7664         __put_user(host_st->st_ino, &target_st->st_ino);
7665 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7666         __put_user(host_st->st_ino, &target_st->__st_ino);
7667 #endif
7668         __put_user(host_st->st_mode, &target_st->st_mode);
7669         __put_user(host_st->st_nlink, &target_st->st_nlink);
7670         __put_user(host_st->st_uid, &target_st->st_uid);
7671         __put_user(host_st->st_gid, &target_st->st_gid);
7672         __put_user(host_st->st_rdev, &target_st->st_rdev);
7673         __put_user(host_st->st_size, &target_st->st_size);
7674         __put_user(host_st->st_blksize, &target_st->st_blksize);
7675         __put_user(host_st->st_blocks, &target_st->st_blocks);
7676         __put_user(host_st->st_atime, &target_st->target_st_atime);
7677         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7678         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7679 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7680         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7681         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7682         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7683 #endif
7684         unlock_user_struct(target_st, target_addr, 1);
7685     } else
7686 #endif
7687     {
7688 #if defined(TARGET_HAS_STRUCT_STAT64)
7689         struct target_stat64 *target_st;
7690 #else
7691         struct target_stat *target_st;
7692 #endif
7693 
7694         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7695             return -TARGET_EFAULT;
7696         memset(target_st, 0, sizeof(*target_st));
7697         __put_user(host_st->st_dev, &target_st->st_dev);
7698         __put_user(host_st->st_ino, &target_st->st_ino);
7699 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7700         __put_user(host_st->st_ino, &target_st->__st_ino);
7701 #endif
7702         __put_user(host_st->st_mode, &target_st->st_mode);
7703         __put_user(host_st->st_nlink, &target_st->st_nlink);
7704         __put_user(host_st->st_uid, &target_st->st_uid);
7705         __put_user(host_st->st_gid, &target_st->st_gid);
7706         __put_user(host_st->st_rdev, &target_st->st_rdev);
7707         /* XXX: better use of kernel struct */
7708         __put_user(host_st->st_size, &target_st->st_size);
7709         __put_user(host_st->st_blksize, &target_st->st_blksize);
7710         __put_user(host_st->st_blocks, &target_st->st_blocks);
7711         __put_user(host_st->st_atime, &target_st->target_st_atime);
7712         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7713         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7714 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7715         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7716         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7717         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7718 #endif
7719         unlock_user_struct(target_st, target_addr, 1);
7720     }
7721 
7722     return 0;
7723 }
7724 #endif
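
/*
 * Callers in the TARGET_NR_stat64 family pair this with a host stat call,
 * roughly as follows (sketch only):
 *
 *     ret = get_errno(stat(path(p), &st));
 *     if (!is_error(ret)) {
 *         ret = host_to_target_stat64(cpu_env, arg2, &st);
 *     }
 */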
7725 
7726 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7727 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7728                                             abi_ulong target_addr)
7729 {
7730     struct target_statx *target_stx;
7731 
7732     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7733         return -TARGET_EFAULT;
7734     }
7735     memset(target_stx, 0, sizeof(*target_stx));
7736 
7737     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7738     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7739     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7740     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7741     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7742     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7743     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7744     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7745     __put_user(host_stx->stx_size, &target_stx->stx_size);
7746     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7747     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7748     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7749     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7750     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7751     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7752     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7753     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7754     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7755     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7756     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7757     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7758     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7759     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7760 
7761     unlock_user_struct(target_stx, target_addr, 1);
7762 
7763     return 0;
7764 }
7765 #endif
7766 
7767 static int do_sys_futex(int *uaddr, int op, int val,
7768                          const struct timespec *timeout, int *uaddr2,
7769                          int val3)
7770 {
7771 #if HOST_LONG_BITS == 64
7772 #if defined(__NR_futex)
7773     /* on a 64-bit host time_t is always 64-bit; there is no _time64 variant */
7774     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7775 
7776 #endif
7777 #else /* HOST_LONG_BITS == 64 */
7778 #if defined(__NR_futex_time64)
7779     if (sizeof(timeout->tv_sec) == 8) {
7780         /* _time64 function on 32-bit arch */
7781         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7782     }
7783 #endif
7784 #if defined(__NR_futex)
7785     /* old function on 32-bit arch */
7786     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7787 #endif
7788 #endif /* HOST_LONG_BITS == 64 */
7789     g_assert_not_reached();
7790 }
7791 
7792 static int do_safe_futex(int *uaddr, int op, int val,
7793                          const struct timespec *timeout, int *uaddr2,
7794                          int val3)
7795 {
7796 #if HOST_LONG_BITS == 64
7797 #if defined(__NR_futex)
7798     /* always a 64-bit time_t, it doesn't define _time64 version  */
7799     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7800 #endif
7801 #else /* HOST_LONG_BITS == 64 */
7802 #if defined(__NR_futex_time64)
7803     if (sizeof(timeout->tv_sec) == 8) {
7804         /* _time64 function on 32-bit arch */
7805         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7806                                            val3));
7807     }
7808 #endif
7809 #if defined(__NR_futex)
7810     /* old function on 32-bit arch */
7811     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7812 #endif
7813 #endif /* HOST_LONG_BITS == 64 */
7814     return -TARGET_ENOSYS;
7815 }
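
/*
 * On a 32-bit host built with a 64-bit time_t, sizeof(timeout->tv_sec) is 8,
 * so the _time64 syscall is preferred when the kernel provides it; otherwise
 * the legacy __NR_futex path is used.  A typical in-file use is a plain wake
 * with no timeout, e.g. the TARGET_NR_exit path below clears child_tidptr
 * with a call along these lines (sketch only):
 *
 *     do_sys_futex(g2h(cpu, ts->child_tidptr), FUTEX_WAKE, INT_MAX,
 *                  NULL, NULL, 0);
 */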
7816 
7817 /* ??? Using host futex calls even when target atomic operations
7818    are not really atomic probably breaks things.  However, implementing
7819    futexes locally would make futexes shared between multiple processes
7820    tricky; and they are probably useless anyway, because guest atomic
7821    operations won't work either.  */
7822 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7823 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7824                     int op, int val, target_ulong timeout,
7825                     target_ulong uaddr2, int val3)
7826 {
7827     struct timespec ts, *pts = NULL;
7828     void *haddr2 = NULL;
7829     int base_op;
7830 
7831     /* We assume FUTEX_* constants are the same on both host and target. */
7832 #ifdef FUTEX_CMD_MASK
7833     base_op = op & FUTEX_CMD_MASK;
7834 #else
7835     base_op = op;
7836 #endif
7837     switch (base_op) {
7838     case FUTEX_WAIT:
7839     case FUTEX_WAIT_BITSET:
7840         val = tswap32(val);
7841         break;
7842     case FUTEX_WAIT_REQUEUE_PI:
7843         val = tswap32(val);
7844         haddr2 = g2h(cpu, uaddr2);
7845         break;
7846     case FUTEX_LOCK_PI:
7847     case FUTEX_LOCK_PI2:
7848         break;
7849     case FUTEX_WAKE:
7850     case FUTEX_WAKE_BITSET:
7851     case FUTEX_TRYLOCK_PI:
7852     case FUTEX_UNLOCK_PI:
7853         timeout = 0;
7854         break;
7855     case FUTEX_FD:
7856         val = target_to_host_signal(val);
7857         timeout = 0;
7858         break;
7859     case FUTEX_CMP_REQUEUE:
7860     case FUTEX_CMP_REQUEUE_PI:
7861         val3 = tswap32(val3);
7862         /* fall through */
7863     case FUTEX_REQUEUE:
7864     case FUTEX_WAKE_OP:
7865         /*
7866          * For these, the 4th argument is not TIMEOUT, but VAL2.
7867          * But the prototype of do_safe_futex takes a pointer, so
7868          * insert casts to satisfy the compiler.  We do not need
7869          * to tswap VAL2 since it's not compared to guest memory.
7870          */
7871         pts = (struct timespec *)(uintptr_t)timeout;
7872         timeout = 0;
7873         haddr2 = g2h(cpu, uaddr2);
7874         break;
7875     default:
7876         return -TARGET_ENOSYS;
7877     }
7878     if (timeout) {
7879         pts = &ts;
7880         if (time64
7881             ? target_to_host_timespec64(pts, timeout)
7882             : target_to_host_timespec(pts, timeout)) {
7883             return -TARGET_EFAULT;
7884         }
7885     }
7886     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7887 }
7888 #endif
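
/*
 * Guest-side sketch (illustrative only) of the kind of request do_futex()
 * services.  For FUTEX_WAIT the host kernel compares the value argument with
 * the word at uaddr, which is guest memory in target byte order; that is why
 * 'val' is byteswapped above before the call:
 *
 *     #include <linux/futex.h>
 *     #include <sys/syscall.h>
 *     #include <time.h>
 *     #include <unistd.h>
 *
 *     static int futex_wait(int *uaddr, int expected,
 *                           const struct timespec *ts)
 *     {
 *         return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, ts, NULL, 0);
 *     }
 */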
7889 
7890 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7891 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7892                                      abi_long handle, abi_long mount_id,
7893                                      abi_long flags)
7894 {
7895     struct file_handle *target_fh;
7896     struct file_handle *fh;
7897     int mid = 0;
7898     abi_long ret;
7899     char *name;
7900     unsigned int size, total_size;
7901 
7902     if (get_user_s32(size, handle)) {
7903         return -TARGET_EFAULT;
7904     }
7905 
7906     name = lock_user_string(pathname);
7907     if (!name) {
7908         return -TARGET_EFAULT;
7909     }
7910 
7911     total_size = sizeof(struct file_handle) + size;
7912     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7913     if (!target_fh) {
7914         unlock_user(name, pathname, 0);
7915         return -TARGET_EFAULT;
7916     }
7917 
7918     fh = g_malloc0(total_size);
7919     fh->handle_bytes = size;
7920 
7921     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7922     unlock_user(name, pathname, 0);
7923 
7924     /* man name_to_handle_at(2):
7925      * Other than the use of the handle_bytes field, the caller should treat
7926      * the file_handle structure as an opaque data type
7927      */
7928 
7929     memcpy(target_fh, fh, total_size);
7930     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7931     target_fh->handle_type = tswap32(fh->handle_type);
7932     g_free(fh);
7933     unlock_user(target_fh, handle, total_size);
7934 
7935     if (put_user_s32(mid, mount_id)) {
7936         return -TARGET_EFAULT;
7937     }
7938 
7939     return ret;
7940 
7941 }
7942 #endif
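
/*
 * Guest-side sketch (illustrative only) of the usual two-call pattern this
 * handler sees, per name_to_handle_at(2): the first call, made with
 * handle_bytes == 0, fails with EOVERFLOW but reports the required size, so
 * the 'size' read from the guest handle above can legitimately be zero:
 *
 *     struct file_handle *fh = malloc(sizeof(*fh));
 *     int mount_id;
 *     fh->handle_bytes = 0;
 *     name_to_handle_at(AT_FDCWD, "/etc/passwd", fh, &mount_id, 0);
 *     fh = realloc(fh, sizeof(*fh) + fh->handle_bytes);
 *     name_to_handle_at(AT_FDCWD, "/etc/passwd", fh, &mount_id, 0);
 */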
7943 
7944 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7945 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7946                                      abi_long flags)
7947 {
7948     struct file_handle *target_fh;
7949     struct file_handle *fh;
7950     unsigned int size, total_size;
7951     abi_long ret;
7952 
7953     if (get_user_s32(size, handle)) {
7954         return -TARGET_EFAULT;
7955     }
7956 
7957     total_size = sizeof(struct file_handle) + size;
7958     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7959     if (!target_fh) {
7960         return -TARGET_EFAULT;
7961     }
7962 
7963     fh = g_memdup(target_fh, total_size);
7964     fh->handle_bytes = size;
7965     fh->handle_type = tswap32(target_fh->handle_type);
7966 
7967     ret = get_errno(open_by_handle_at(mount_fd, fh,
7968                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7969 
7970     g_free(fh);
7971 
7972     unlock_user(target_fh, handle, total_size);
7973 
7974     return ret;
7975 }
7976 #endif
7977 
7978 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7979 
7980 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7981 {
7982     int host_flags;
7983     target_sigset_t *target_mask;
7984     sigset_t host_mask;
7985     abi_long ret;
7986 
7987     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7988         return -TARGET_EINVAL;
7989     }
7990     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7991         return -TARGET_EFAULT;
7992     }
7993 
7994     target_to_host_sigset(&host_mask, target_mask);
7995 
7996     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7997 
7998     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7999     if (ret >= 0) {
8000         fd_trans_register(ret, &target_signalfd_trans);
8001     }
8002 
8003     unlock_user_struct(target_mask, mask, 0);
8004 
8005     return ret;
8006 }
8007 #endif
8008 
8009 /* Map host to target signal numbers for the wait family of syscalls.
8010    Assume all other status bits are the same.  */
8011 int host_to_target_waitstatus(int status)
8012 {
8013     if (WIFSIGNALED(status)) {
8014         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8015     }
8016     if (WIFSTOPPED(status)) {
8017         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8018                | (status & 0xff);
8019     }
8020     return status;
8021 }
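
/*
 * Worked example: a host stop status of 0x137f (WIFSTOPPED, host
 * WSTOPSIG == 19 == SIGSTOP on e.g. x86) is rewritten for a guest that
 * numbers SIGSTOP differently (23 on MIPS) as (23 << 8) | 0x7f, so the
 * guest's own WSTOPSIG() still reports SIGSTOP.
 */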
8022 
8023 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8024 {
8025     CPUState *cpu = env_cpu(cpu_env);
8026     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8027     int i;
8028 
8029     for (i = 0; i < bprm->argc; i++) {
8030         size_t len = strlen(bprm->argv[i]) + 1;
8031 
8032         if (write(fd, bprm->argv[i], len) != len) {
8033             return -1;
8034         }
8035     }
8036 
8037     return 0;
8038 }
8039 
8040 static void show_smaps(int fd, unsigned long size)
8041 {
8042     unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8043     unsigned long size_kb = size >> 10;
8044 
8045     dprintf(fd, "Size:                  %lu kB\n"
8046                 "KernelPageSize:        %lu kB\n"
8047                 "MMUPageSize:           %lu kB\n"
8048                 "Rss:                   0 kB\n"
8049                 "Pss:                   0 kB\n"
8050                 "Pss_Dirty:             0 kB\n"
8051                 "Shared_Clean:          0 kB\n"
8052                 "Shared_Dirty:          0 kB\n"
8053                 "Private_Clean:         0 kB\n"
8054                 "Private_Dirty:         0 kB\n"
8055                 "Referenced:            0 kB\n"
8056                 "Anonymous:             0 kB\n"
8057                 "LazyFree:              0 kB\n"
8058                 "AnonHugePages:         0 kB\n"
8059                 "ShmemPmdMapped:        0 kB\n"
8060                 "FilePmdMapped:         0 kB\n"
8061                 "Shared_Hugetlb:        0 kB\n"
8062                 "Private_Hugetlb:       0 kB\n"
8063                 "Swap:                  0 kB\n"
8064                 "SwapPss:               0 kB\n"
8065                 "Locked:                0 kB\n"
8066                 "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
8067 }
8068 
8069 static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
8070 {
8071     CPUState *cpu = env_cpu(cpu_env);
8072     TaskState *ts = cpu->opaque;
8073     GSList *map_info = read_self_maps();
8074     GSList *s;
8075     int count;
8076 
8077     for (s = map_info; s; s = g_slist_next(s)) {
8078         MapInfo *e = (MapInfo *) s->data;
8079 
8080         if (h2g_valid(e->start)) {
8081             unsigned long min = e->start;
8082             unsigned long max = e->end;
8083             int flags = page_get_flags(h2g(min));
8084             const char *path;
8085 
8086             max = h2g_valid(max - 1) ?
8087                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8088 
8089             if (!page_check_range(h2g(min), max - min, flags)) {
8090                 continue;
8091             }
8092 
8093 #ifdef TARGET_HPPA
8094             if (h2g(max) == ts->info->stack_limit) {
8095 #else
8096             if (h2g(min) == ts->info->stack_limit) {
8097 #endif
8098                 path = "[stack]";
8099             } else {
8100                 path = e->path;
8101             }
8102 
8103             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8104                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8105                             h2g(min), h2g(max - 1) + 1,
8106                             (flags & PAGE_READ) ? 'r' : '-',
8107                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8108                             (flags & PAGE_EXEC) ? 'x' : '-',
8109                             e->is_priv ? 'p' : 's',
8110                             (uint64_t) e->offset, e->dev, e->inode);
8111             if (path) {
8112                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8113             } else {
8114                 dprintf(fd, "\n");
8115             }
8116             if (smaps) {
8117                 show_smaps(fd, max - min);
8118                 dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
8119                         (flags & PAGE_READ) ? " rd" : "",
8120                         (flags & PAGE_WRITE_ORG) ? " wr" : "",
8121                         (flags & PAGE_EXEC) ? " ex" : "",
8122                         e->is_priv ? "" : " sh",
8123                         (flags & PAGE_READ) ? " mr" : "",
8124                         (flags & PAGE_WRITE_ORG) ? " mw" : "",
8125                         (flags & PAGE_EXEC) ? " me" : "",
8126                         e->is_priv ? "" : " ms");
8127             }
8128         }
8129     }
8130 
8131     free_self_maps(map_info);
8132 
8133 #ifdef TARGET_VSYSCALL_PAGE
8134     /*
8135      * We only support execution from the vsyscall page.
8136      * This matches CONFIG_LEGACY_VSYSCALL_XONLY=y, available since v5.3.
8137      */
8138     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8139                     " --xp 00000000 00:00 0",
8140                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8141     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8142     if (smaps) {
8143         show_smaps(fd, TARGET_PAGE_SIZE);
8144         dprintf(fd, "VmFlags: ex\n");
8145     }
8146 #endif
8147 
8148     return 0;
8149 }
8150 
8151 static int open_self_maps(CPUArchState *cpu_env, int fd)
8152 {
8153     return open_self_maps_1(cpu_env, fd, false);
8154 }
8155 
8156 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8157 {
8158     return open_self_maps_1(cpu_env, fd, true);
8159 }
8160 
8161 static int open_self_stat(CPUArchState *cpu_env, int fd)
8162 {
8163     CPUState *cpu = env_cpu(cpu_env);
8164     TaskState *ts = cpu->opaque;
8165     g_autoptr(GString) buf = g_string_new(NULL);
8166     int i;
8167 
8168     for (i = 0; i < 44; i++) {
8169         if (i == 0) {
8170             /* pid */
8171             g_string_printf(buf, FMT_pid " ", getpid());
8172         } else if (i == 1) {
8173             /* app name */
8174             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8175             bin = bin ? bin + 1 : ts->bprm->argv[0];
8176             g_string_printf(buf, "(%.15s) ", bin);
8177         } else if (i == 2) {
8178             /* task state */
8179             g_string_assign(buf, "R "); /* we are running right now */
8180         } else if (i == 3) {
8181             /* ppid */
8182             g_string_printf(buf, FMT_pid " ", getppid());
8183         } else if (i == 21) {
8184             /* starttime */
8185             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8186         } else if (i == 27) {
8187             /* stack bottom */
8188             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8189         } else {
8190             /* the remaining fields are not emulated; report them as 0 */
8191             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8192         }
8193 
8194         if (write(fd, buf->str, buf->len) != buf->len) {
8195             return -1;
8196         }
8197     }
8198 
8199     return 0;
8200 }
8201 
8202 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8203 {
8204     CPUState *cpu = env_cpu(cpu_env);
8205     TaskState *ts = cpu->opaque;
8206     abi_ulong auxv = ts->info->saved_auxv;
8207     abi_ulong len = ts->info->auxv_len;
8208     char *ptr;
8209 
8210     /*
8211      * The auxiliary vector is stored on the target process stack.
8212      * Read the whole auxv vector and copy it to the file.
8213      */
8214     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8215     if (ptr != NULL) {
8216         while (len > 0) {
8217             ssize_t r;
8218             r = write(fd, ptr, len);
8219             if (r <= 0) {
8220                 break;
8221             }
8222             len -= r;
8223             ptr += r;
8224         }
8225         lseek(fd, 0, SEEK_SET);
8226         unlock_user(ptr, auxv, len);
8227     }
8228 
8229     return 0;
8230 }
8231 
8232 static int is_proc_myself(const char *filename, const char *entry)
8233 {
8234     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8235         filename += strlen("/proc/");
8236         if (!strncmp(filename, "self/", strlen("self/"))) {
8237             filename += strlen("self/");
8238         } else if (*filename >= '1' && *filename <= '9') {
8239             char myself[80];
8240             snprintf(myself, sizeof(myself), "%d/", getpid());
8241             if (!strncmp(filename, myself, strlen(myself))) {
8242                 filename += strlen(myself);
8243             } else {
8244                 return 0;
8245             }
8246         } else {
8247             return 0;
8248         }
8249         if (!strcmp(filename, entry)) {
8250             return 1;
8251         }
8252     }
8253     return 0;
8254 }
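
/*
 * For example, with getpid() == 1234:
 *
 *     is_proc_myself("/proc/self/maps", "maps")  -> 1
 *     is_proc_myself("/proc/1234/maps", "maps")  -> 1
 *     is_proc_myself("/proc/999/maps", "maps")   -> 0
 *     is_proc_myself("/proc/self/mapsx", "maps") -> 0  (exact match required)
 */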
8255 
8256 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8257                       const char *fmt, int code)
8258 {
8259     if (logfile) {
8260         CPUState *cs = env_cpu(env);
8261 
8262         fprintf(logfile, fmt, code);
8263         fprintf(logfile, "Failing executable: %s\n", exec_path);
8264         cpu_dump_state(cs, logfile, 0);
8265         open_self_maps(env, fileno(logfile));
8266     }
8267 }
8268 
8269 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8270 {
8271     /* dump to console */
8272     excp_dump_file(stderr, env, fmt, code);
8273 
8274     /* dump to log file */
8275     if (qemu_log_separate()) {
8276         FILE *logfile = qemu_log_trylock();
8277 
8278         excp_dump_file(logfile, env, fmt, code);
8279         qemu_log_unlock(logfile);
8280     }
8281 }
8282 
8283 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8284     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8285     defined(TARGET_RISCV) || defined(TARGET_S390X)
8286 static int is_proc(const char *filename, const char *entry)
8287 {
8288     return strcmp(filename, entry) == 0;
8289 }
8290 #endif
8291 
8292 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8293 static int open_net_route(CPUArchState *cpu_env, int fd)
8294 {
8295     FILE *fp;
8296     char *line = NULL;
8297     size_t len = 0;
8298     ssize_t read;
8299 
8300     fp = fopen("/proc/net/route", "r");
8301     if (fp == NULL) {
8302         return -1;
8303     }
8304 
8305     /* read header */
8306 
8307     read = getline(&line, &len, fp);
8308     dprintf(fd, "%s", line);
8309 
8310     /* read routes */
8311 
8312     while ((read = getline(&line, &len, fp)) != -1) {
8313         char iface[16];
8314         uint32_t dest, gw, mask;
8315         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8316         int fields;
8317 
8318         fields = sscanf(line,
8319                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8320                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8321                         &mask, &mtu, &window, &irtt);
8322         if (fields != 11) {
8323             continue;
8324         }
8325         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8326                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8327                 metric, tswap32(mask), mtu, window, irtt);
8328     }
8329 
8330     free(line);
8331     fclose(fp);
8332 
8333     return 0;
8334 }
8335 #endif
8336 
8337 #if defined(TARGET_SPARC)
8338 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8339 {
8340     dprintf(fd, "type\t\t: sun4u\n");
8341     return 0;
8342 }
8343 #endif
8344 
8345 #if defined(TARGET_HPPA)
8346 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8347 {
8348     int i, num_cpus;
8349 
8350     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8351     for (i = 0; i < num_cpus; i++) {
8352         dprintf(fd, "processor\t: %d\n", i);
8353         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8354         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8355         dprintf(fd, "capabilities\t: os32\n");
8356         dprintf(fd, "model\t\t: 9000/778/B160L - "
8357                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8358     }
8359     return 0;
8360 }
8361 #endif
8362 
8363 #if defined(TARGET_RISCV)
8364 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8365 {
8366     int i;
8367     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8368     RISCVCPU *cpu = env_archcpu(cpu_env);
8369     const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
8370     char *isa_string = riscv_isa_string(cpu);
8371     const char *mmu;
8372 
8373     if (cfg->mmu) {
8374         mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
8375     } else {
8376         mmu = "none";
8377     }
8378 
8379     for (i = 0; i < num_cpus; i++) {
8380         dprintf(fd, "processor\t: %d\n", i);
8381         dprintf(fd, "hart\t\t: %d\n", i);
8382         dprintf(fd, "isa\t\t: %s\n", isa_string);
8383         dprintf(fd, "mmu\t\t: %s\n", mmu);
8384         dprintf(fd, "uarch\t\t: qemu\n\n");
8385     }
8386 
8387     g_free(isa_string);
8388     return 0;
8389 }
8390 #endif
8391 
8392 #if defined(TARGET_S390X)
8393 /*
8394  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8395  * show in /proc/cpuinfo.
8396  *
8397  * Skip the following in order to match the missing support in op_ecag():
8398  * - show_cacheinfo().
8399  * - show_cpu_topology().
8400  * - show_cpu_mhz().
8401  *
8402  * Use fixed values for certain fields:
8403  * - bogomips per cpu - from a qemu-system-s390x run.
8404  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8405  *
8406  * Keep the code structure close to arch/s390/kernel/processor.c.
8407  */
8408 
8409 static void show_facilities(int fd)
8410 {
8411     size_t sizeof_stfl_bytes = 2048;
8412     g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
8413     unsigned int bit;
8414 
8415     dprintf(fd, "facilities      :");
8416     s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
8417     for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
8418         if (test_be_bit(bit, stfl_bytes)) {
8419             dprintf(fd, " %d", bit);
8420         }
8421     }
8422     dprintf(fd, "\n");
8423 }
8424 
8425 static int cpu_ident(unsigned long n)
8426 {
8427     return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
8428                      n);
8429 }
8430 
8431 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8432 {
8433     S390CPUModel *model = env_archcpu(cpu_env)->model;
8434     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8435     uint32_t elf_hwcap = get_elf_hwcap();
8436     const char *hwcap_str;
8437     int i;
8438 
8439     dprintf(fd, "vendor_id       : IBM/S390\n"
8440                 "# processors    : %i\n"
8441                 "bogomips per cpu: 13370.00\n",
8442             num_cpus);
8443     dprintf(fd, "max thread id   : 0\n");
8444     dprintf(fd, "features\t: ");
8445     for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8446         if (!(elf_hwcap & (1 << i))) {
8447             continue;
8448         }
8449         hwcap_str = elf_hwcap_str(i);
8450         if (hwcap_str) {
8451             dprintf(fd, "%s ", hwcap_str);
8452         }
8453     }
8454     dprintf(fd, "\n");
8455     show_facilities(fd);
8456     for (i = 0; i < num_cpus; i++) {
8457         dprintf(fd, "processor %d: "
8458                "version = %02X,  "
8459                "identification = %06X,  "
8460                "machine = %04X\n",
8461                i, model->cpu_ver, cpu_ident(i), model->def->type);
8462     }
8463 }
8464 
8465 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
8466 {
8467     S390CPUModel *model = env_archcpu(cpu_env)->model;
8468 
8469     dprintf(fd, "version         : %02X\n", model->cpu_ver);
8470     dprintf(fd, "identification  : %06X\n", cpu_ident(n));
8471     dprintf(fd, "machine         : %04X\n", model->def->type);
8472 }
8473 
8474 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
8475 {
8476     dprintf(fd, "\ncpu number      : %ld\n", n);
8477     show_cpu_ids(cpu_env, fd, n);
8478 }
8479 
8480 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8481 {
8482     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8483     int i;
8484 
8485     show_cpu_summary(cpu_env, fd);
8486     for (i = 0; i < num_cpus; i++) {
8487         show_cpuinfo(cpu_env, fd, i);
8488     }
8489     return 0;
8490 }
8491 #endif
8492 
8493 #if defined(TARGET_M68K)
8494 static int open_hardware(CPUArchState *cpu_env, int fd)
8495 {
8496     dprintf(fd, "Model:\t\tqemu-m68k\n");
8497     return 0;
8498 }
8499 #endif
8500 
8501 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8502                     int flags, mode_t mode, bool safe)
8503 {
8504     struct fake_open {
8505         const char *filename;
8506         int (*fill)(CPUArchState *cpu_env, int fd);
8507         int (*cmp)(const char *s1, const char *s2);
8508     };
8509     const struct fake_open *fake_open;
8510     static const struct fake_open fakes[] = {
8511         { "maps", open_self_maps, is_proc_myself },
8512         { "smaps", open_self_smaps, is_proc_myself },
8513         { "stat", open_self_stat, is_proc_myself },
8514         { "auxv", open_self_auxv, is_proc_myself },
8515         { "cmdline", open_self_cmdline, is_proc_myself },
8516 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8517         { "/proc/net/route", open_net_route, is_proc },
8518 #endif
8519 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8520     defined(TARGET_RISCV) || defined(TARGET_S390X)
8521         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8522 #endif
8523 #if defined(TARGET_M68K)
8524         { "/proc/hardware", open_hardware, is_proc },
8525 #endif
8526         { NULL, NULL, NULL }
8527     };
8528 
8529     if (is_proc_myself(pathname, "exe")) {
8530         if (safe) {
8531             return safe_openat(dirfd, exec_path, flags, mode);
8532         } else {
8533             return openat(dirfd, exec_path, flags, mode);
8534         }
8535     }
8536 
8537     for (fake_open = fakes; fake_open->filename; fake_open++) {
8538         if (fake_open->cmp(pathname, fake_open->filename)) {
8539             break;
8540         }
8541     }
8542 
8543     if (fake_open->filename) {
8544         const char *tmpdir;
8545         char filename[PATH_MAX];
8546         int fd, r;
8547 
8548         fd = memfd_create("qemu-open", 0);
8549         if (fd < 0) {
8550             if (errno != ENOSYS) {
8551                 return fd;
8552             }
8553             /* fall back to a temporary file for the synthesized contents */
8554             tmpdir = getenv("TMPDIR");
8555             if (!tmpdir)
8556                 tmpdir = "/tmp";
8557             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8558             fd = mkstemp(filename);
8559             if (fd < 0) {
8560                 return fd;
8561             }
8562             unlink(filename);
8563         }
8564 
8565         if ((r = fake_open->fill(cpu_env, fd))) {
8566             int e = errno;
8567             close(fd);
8568             errno = e;
8569             return r;
8570         }
8571         lseek(fd, 0, SEEK_SET);
8572 
8573         return fd;
8574     }
8575 
8576     if (safe) {
8577         return safe_openat(dirfd, path(pathname), flags, mode);
8578     } else {
8579         return openat(dirfd, path(pathname), flags, mode);
8580     }
8581 }
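
/*
 * To emulate another /proc file, add a fill callback plus an entry in the
 * fakes[] table above.  Minimal sketch (open_self_uptime is a hypothetical
 * name, not an existing helper):
 *
 *     static int open_self_uptime(CPUArchState *cpu_env, int fd)
 *     {
 *         dprintf(fd, "0.00 0.00\n");
 *         return 0;
 *     }
 *
 *     ...and in fakes[]:
 *         { "uptime", open_self_uptime, is_proc_myself },
 */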
8582 
8583 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8584 {
8585     ssize_t ret;
8586 
8587     if (!pathname || !buf) {
8588         errno = EFAULT;
8589         return -1;
8590     }
8591 
8592     if (!bufsiz) {
8593         /* Short circuit this for the magic exe check. */
8594         errno = EINVAL;
8595         return -1;
8596     }
8597 
8598     if (is_proc_myself((const char *)pathname, "exe")) {
8599         /*
8600          * Don't worry about sign mismatch as earlier mapping
8601          * logic would have thrown a bad address error.
8602          */
8603         ret = MIN(strlen(exec_path), bufsiz);
8604         /* We cannot NUL terminate the string. */
8605         memcpy(buf, exec_path, ret);
8606     } else {
8607         ret = readlink(path(pathname), buf, bufsiz);
8608     }
8609 
8610     return ret;
8611 }
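
/*
 * For example, a guest readlink("/proc/self/exe", buf, sizeof(buf)) is
 * answered from exec_path: up to bufsiz bytes are copied and, as with the
 * real syscall, no terminating NUL is appended.
 */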
8612 
8613 static int do_execv(CPUArchState *cpu_env, int dirfd,
8614                     abi_long pathname, abi_long guest_argp,
8615                     abi_long guest_envp, int flags, bool is_execveat)
8616 {
8617     int ret;
8618     char **argp, **envp;
8619     int argc, envc;
8620     abi_ulong gp;
8621     abi_ulong addr;
8622     char **q;
8623     void *p;
8624 
8625     argc = 0;
8626 
8627     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8628         if (get_user_ual(addr, gp)) {
8629             return -TARGET_EFAULT;
8630         }
8631         if (!addr) {
8632             break;
8633         }
8634         argc++;
8635     }
8636     envc = 0;
8637     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8638         if (get_user_ual(addr, gp)) {
8639             return -TARGET_EFAULT;
8640         }
8641         if (!addr) {
8642             break;
8643         }
8644         envc++;
8645     }
8646 
8647     argp = g_new0(char *, argc + 1);
8648     envp = g_new0(char *, envc + 1);
8649 
8650     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8651         if (get_user_ual(addr, gp)) {
8652             goto execve_efault;
8653         }
8654         if (!addr) {
8655             break;
8656         }
8657         *q = lock_user_string(addr);
8658         if (!*q) {
8659             goto execve_efault;
8660         }
8661     }
8662     *q = NULL;
8663 
8664     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8665         if (get_user_ual(addr, gp)) {
8666             goto execve_efault;
8667         }
8668         if (!addr) {
8669             break;
8670         }
8671         *q = lock_user_string(addr);
8672         if (!*q) {
8673             goto execve_efault;
8674         }
8675     }
8676     *q = NULL;
8677 
8678     /*
8679      * Although execve() is not an interruptible syscall it is
8680      * a special case where we must use the safe_syscall wrapper:
8681      * if we allow a signal to happen before we make the host
8682      * syscall then we will 'lose' it, because at the point of
8683      * execve the process leaves QEMU's control. So we use the
8684      * safe syscall wrapper to ensure that we either take the
8685      * signal as a guest signal, or else it does not happen
8686      * before the execve completes and makes it the other
8687      * program's problem.
8688      */
8689     p = lock_user_string(pathname);
8690     if (!p) {
8691         goto execve_efault;
8692     }
8693 
8694     const char *exe = p;
8695     if (is_proc_myself(p, "exe")) {
8696         exe = exec_path;
8697     }
8698     ret = is_execveat
8699         ? safe_execveat(dirfd, exe, argp, envp, flags)
8700         : safe_execve(exe, argp, envp);
8701     ret = get_errno(ret);
8702 
8703     unlock_user(p, pathname, 0);
8704 
8705     goto execve_end;
8706 
8707 execve_efault:
8708     ret = -TARGET_EFAULT;
8709 
8710 execve_end:
8711     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8712         if (get_user_ual(addr, gp) || !addr) {
8713             break;
8714         }
8715         unlock_user(*q, addr, 0);
8716     }
8717     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8718         if (get_user_ual(addr, gp) || !addr) {
8719             break;
8720         }
8721         unlock_user(*q, addr, 0);
8722     }
8723 
8724     g_free(argp);
8725     g_free(envp);
8726     return ret;
8727 }
8728 
8729 #define TIMER_MAGIC 0x0caf0000
8730 #define TIMER_MAGIC_MASK 0xffff0000
8731 
8732 /* Convert a QEMU-provided timer ID back to the internal 16-bit index */
8733 static target_timer_t get_timer_id(abi_long arg)
8734 {
8735     target_timer_t timerid = arg;
8736 
8737     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8738         return -TARGET_EINVAL;
8739     }
8740 
8741     timerid &= 0xffff;
8742 
8743     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8744         return -TARGET_EINVAL;
8745     }
8746 
8747     return timerid;
8748 }
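
/*
 * Worked example: an ID of (TIMER_MAGIC | 3), i.e. 0x0caf0003, maps back to
 * index 3.  A value whose top half is not TIMER_MAGIC (say 0x12340003), or
 * an index at or beyond ARRAY_SIZE(g_posix_timers), yields -TARGET_EINVAL.
 */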
8749 
8750 static int target_to_host_cpu_mask(unsigned long *host_mask,
8751                                    size_t host_size,
8752                                    abi_ulong target_addr,
8753                                    size_t target_size)
8754 {
8755     unsigned target_bits = sizeof(abi_ulong) * 8;
8756     unsigned host_bits = sizeof(*host_mask) * 8;
8757     abi_ulong *target_mask;
8758     unsigned i, j;
8759 
8760     assert(host_size >= target_size);
8761 
8762     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8763     if (!target_mask) {
8764         return -TARGET_EFAULT;
8765     }
8766     memset(host_mask, 0, host_size);
8767 
8768     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8769         unsigned bit = i * target_bits;
8770         abi_ulong val;
8771 
8772         __get_user(val, &target_mask[i]);
8773         for (j = 0; j < target_bits; j++, bit++) {
8774             if (val & (1UL << j)) {
8775                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8776             }
8777         }
8778     }
8779 
8780     unlock_user(target_mask, target_addr, 0);
8781     return 0;
8782 }
8783 
8784 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8785                                    size_t host_size,
8786                                    abi_ulong target_addr,
8787                                    size_t target_size)
8788 {
8789     unsigned target_bits = sizeof(abi_ulong) * 8;
8790     unsigned host_bits = sizeof(*host_mask) * 8;
8791     abi_ulong *target_mask;
8792     unsigned i, j;
8793 
8794     assert(host_size >= target_size);
8795 
8796     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8797     if (!target_mask) {
8798         return -TARGET_EFAULT;
8799     }
8800 
8801     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8802         unsigned bit = i * target_bits;
8803         abi_ulong val = 0;
8804 
8805         for (j = 0; j < target_bits; j++, bit++) {
8806             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8807                 val |= 1UL << j;
8808             }
8809         }
8810         __put_user(val, &target_mask[i]);
8811     }
8812 
8813     unlock_user(target_mask, target_addr, target_size);
8814     return 0;
8815 }
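
/*
 * Worked example for a 32-bit guest on a 64-bit host (abi_ulong is 4 bytes,
 * unsigned long is 8): target mask words { 0x00000001, 0x80000000 } set
 * guest CPU bits 0 and 63, which target_to_host_cpu_mask() packs into the
 * single host word 0x8000000000000001; host_to_target_cpu_mask() performs
 * the inverse split.
 */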
8816 
8817 #ifdef TARGET_NR_getdents
8818 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8819 {
8820     g_autofree void *hdirp = NULL;
8821     void *tdirp;
8822     int hlen, hoff, toff;
8823     int hreclen, treclen;
8824     off64_t prev_diroff = 0;
8825 
8826     hdirp = g_try_malloc(count);
8827     if (!hdirp) {
8828         return -TARGET_ENOMEM;
8829     }
8830 
8831 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8832     hlen = sys_getdents(dirfd, hdirp, count);
8833 #else
8834     hlen = sys_getdents64(dirfd, hdirp, count);
8835 #endif
8836 
8837     hlen = get_errno(hlen);
8838     if (is_error(hlen)) {
8839         return hlen;
8840     }
8841 
8842     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8843     if (!tdirp) {
8844         return -TARGET_EFAULT;
8845     }
8846 
8847     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8848 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8849         struct linux_dirent *hde = hdirp + hoff;
8850 #else
8851         struct linux_dirent64 *hde = hdirp + hoff;
8852 #endif
8853         struct target_dirent *tde = tdirp + toff;
8854         int namelen;
8855         uint8_t type;
8856 
8857         namelen = strlen(hde->d_name);
8858         hreclen = hde->d_reclen;
8859         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8860         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8861 
8862         if (toff + treclen > count) {
8863             /*
8864              * If the host struct is smaller than the target struct, or
8865              * requires less alignment and thus packs into less space,
8866              * then the host can return more entries than we can pass
8867              * on to the guest.
8868              */
8869             if (toff == 0) {
8870                 toff = -TARGET_EINVAL; /* result buffer is too small */
8871                 break;
8872             }
8873             /*
8874              * Return what we have, resetting the file pointer to the
8875              * location of the first record not returned.
8876              */
8877             lseek64(dirfd, prev_diroff, SEEK_SET);
8878             break;
8879         }
8880 
8881         prev_diroff = hde->d_off;
8882         tde->d_ino = tswapal(hde->d_ino);
8883         tde->d_off = tswapal(hde->d_off);
8884         tde->d_reclen = tswap16(treclen);
8885         memcpy(tde->d_name, hde->d_name, namelen + 1);
8886 
8887         /*
8888          * The getdents type is in what was formerly a padding byte at the
8889          * end of the structure.
8890          */
8891 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8892         type = *((uint8_t *)hde + hreclen - 1);
8893 #else
8894         type = hde->d_type;
8895 #endif
8896         *((uint8_t *)tde + treclen - 1) = type;
8897     }
8898 
8899     unlock_user(tdirp, arg2, toff);
8900     return toff;
8901 }
8902 #endif /* TARGET_NR_getdents */
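
/*
 * Worked example of the record-size computation above, assuming a 32-bit
 * guest where struct target_dirent is laid out as { abi_ulong d_ino;
 * abi_ulong d_off; unsigned short d_reclen; char d_name[]; } with 4-byte
 * alignment: for the name "foo", namelen == 3 and d_name starts at offset
 * 10, so treclen = 10 + 3 + 2 = 15 (the +2 covers the trailing NUL plus the
 * d_type byte stored in the final byte), rounded up to 16.
 */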
8903 
8904 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8905 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8906 {
8907     g_autofree void *hdirp = NULL;
8908     void *tdirp;
8909     int hlen, hoff, toff;
8910     int hreclen, treclen;
8911     off64_t prev_diroff = 0;
8912 
8913     hdirp = g_try_malloc(count);
8914     if (!hdirp) {
8915         return -TARGET_ENOMEM;
8916     }
8917 
8918     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8919     if (is_error(hlen)) {
8920         return hlen;
8921     }
8922 
8923     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8924     if (!tdirp) {
8925         return -TARGET_EFAULT;
8926     }
8927 
8928     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8929         struct linux_dirent64 *hde = hdirp + hoff;
8930         struct target_dirent64 *tde = tdirp + toff;
8931         int namelen;
8932 
8933         namelen = strlen(hde->d_name) + 1;
8934         hreclen = hde->d_reclen;
8935         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8936         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8937 
8938         if (toff + treclen > count) {
8939             /*
8940              * If the host struct is smaller than the target struct, or
8941              * requires less alignment and thus packs into less space,
8942              * then the host can return more entries than we can pass
8943              * on to the guest.
8944              */
8945             if (toff == 0) {
8946                 toff = -TARGET_EINVAL; /* result buffer is too small */
8947                 break;
8948             }
8949             /*
8950              * Return what we have, resetting the file pointer to the
8951              * location of the first record not returned.
8952              */
8953             lseek64(dirfd, prev_diroff, SEEK_SET);
8954             break;
8955         }
8956 
8957         prev_diroff = hde->d_off;
8958         tde->d_ino = tswap64(hde->d_ino);
8959         tde->d_off = tswap64(hde->d_off);
8960         tde->d_reclen = tswap16(treclen);
8961         tde->d_type = hde->d_type;
8962         memcpy(tde->d_name, hde->d_name, namelen);
8963     }
8964 
8965     unlock_user(tdirp, arg2, toff);
8966     return toff;
8967 }
8968 #endif /* TARGET_NR_getdents64 */
8969 
8970 #if defined(TARGET_NR_riscv_hwprobe)
8971 
8972 #define RISCV_HWPROBE_KEY_MVENDORID     0
8973 #define RISCV_HWPROBE_KEY_MARCHID       1
8974 #define RISCV_HWPROBE_KEY_MIMPID        2
8975 
8976 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8977 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8978 
8979 #define RISCV_HWPROBE_KEY_IMA_EXT_0     4
8980 #define     RISCV_HWPROBE_IMA_FD       (1 << 0)
8981 #define     RISCV_HWPROBE_IMA_C        (1 << 1)
8982 
8983 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8984 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8985 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8986 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8987 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8988 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8989 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8990 
8991 struct riscv_hwprobe {
8992     abi_llong  key;
8993     abi_ullong value;
8994 };
8995 
8996 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8997                                     struct riscv_hwprobe *pair,
8998                                     size_t pair_count)
8999 {
9000     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9001 
9002     for (; pair_count > 0; pair_count--, pair++) {
9003         abi_llong key;
9004         abi_ullong value;
9005         __put_user(0, &pair->value);
9006         __get_user(key, &pair->key);
9007         switch (key) {
9008         case RISCV_HWPROBE_KEY_MVENDORID:
9009             __put_user(cfg->mvendorid, &pair->value);
9010             break;
9011         case RISCV_HWPROBE_KEY_MARCHID:
9012             __put_user(cfg->marchid, &pair->value);
9013             break;
9014         case RISCV_HWPROBE_KEY_MIMPID:
9015             __put_user(cfg->mimpid, &pair->value);
9016             break;
9017         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9018             value = riscv_has_ext(env, RVI) &&
9019                     riscv_has_ext(env, RVM) &&
9020                     riscv_has_ext(env, RVA) ?
9021                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9022             __put_user(value, &pair->value);
9023             break;
9024         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9025             value = riscv_has_ext(env, RVF) &&
9026                     riscv_has_ext(env, RVD) ?
9027                     RISCV_HWPROBE_IMA_FD : 0;
9028             value |= riscv_has_ext(env, RVC) ?
9029                      RISCV_HWPROBE_IMA_C : 0;
9030             __put_user(value, &pair->value);
9031             break;
9032         case RISCV_HWPROBE_KEY_CPUPERF_0:
9033             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9034             break;
9035         default:
9036             __put_user(-1, &pair->key);
9037             break;
9038         }
9039     }
9040 }
9041 
9042 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9043 {
9044     int ret, i, tmp;
9045     size_t host_mask_size, target_mask_size;
9046     unsigned long *host_mask;
9047 
9048     /*
9049      * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
9050      * arg3 contains the CPU count.
9051      */
9052     tmp = (8 * sizeof(abi_ulong));
9053     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9054     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9055                      ~(sizeof(*host_mask) - 1);
9056 
9057     host_mask = alloca(host_mask_size);
9058 
9059     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9060                                   arg4, target_mask_size);
9061     if (ret != 0) {
9062         return ret;
9063     }
9064 
9065     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9066         if (host_mask[i] != 0) {
9067             return 0;
9068         }
9069     }
9070     return -TARGET_EINVAL;
9071 }
9072 
9073 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9074                                  abi_long arg2, abi_long arg3,
9075                                  abi_long arg4, abi_long arg5)
9076 {
9077     int ret;
9078     struct riscv_hwprobe *host_pairs;
9079 
9080     /* flags must be 0 */
9081     if (arg5 != 0) {
9082         return -TARGET_EINVAL;
9083     }
9084 
9085     /* check cpu_set */
9086     if (arg3 != 0) {
9087         ret = cpu_set_valid(arg3, arg4);
9088         if (ret != 0) {
9089             return ret;
9090         }
9091     } else if (arg4 != 0) {
9092         return -TARGET_EINVAL;
9093     }
9094 
9095     /* no pairs */
9096     if (arg2 == 0) {
9097         return 0;
9098     }
9099 
9100     host_pairs = lock_user(VERIFY_WRITE, arg1,
9101                            sizeof(*host_pairs) * (size_t)arg2, 0);
9102     if (host_pairs == NULL) {
9103         return -TARGET_EFAULT;
9104     }
9105     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9106     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9107     return 0;
9108 }
9109 #endif /* TARGET_NR_riscv_hwprobe */
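
/*
 * Guest-side sketch (illustrative only) of a riscv_hwprobe query that the
 * handler above services; the key and flag values mirror the definitions
 * above, and passing cpusetsize == 0 with cpus == NULL probes all CPUs:
 *
 *     struct riscv_hwprobe pairs[1] = {
 *         { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *     };
 *     long rc = syscall(__NR_riscv_hwprobe, pairs, 1, 0, NULL, 0);
 *     if (rc == 0 && (pairs[0].value & RISCV_HWPROBE_IMA_C)) {
 *         // the C (compressed) extension is available
 *     }
 */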
9110 
9111 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9112 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9113 #endif
9114 
9115 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9116 #define __NR_sys_open_tree __NR_open_tree
9117 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9118           unsigned int, __flags)
9119 #endif
9120 
9121 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9122 #define __NR_sys_move_mount __NR_move_mount
9123 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9124            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9125 #endif
9126 
9127 /* This is an internal helper for do_syscall so that it is easier
9128  * to have a single return point, which in turn makes it possible to
9129  * perform actions such as logging of syscall results in one place.
9130  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9131  */
9132 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9133                             abi_long arg2, abi_long arg3, abi_long arg4,
9134                             abi_long arg5, abi_long arg6, abi_long arg7,
9135                             abi_long arg8)
9136 {
9137     CPUState *cpu = env_cpu(cpu_env);
9138     abi_long ret;
9139 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9140     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9141     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9142     || defined(TARGET_NR_statx)
9143     struct stat st;
9144 #endif
9145 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9146     || defined(TARGET_NR_fstatfs)
9147     struct statfs stfs;
9148 #endif
9149     void *p;
9150 
9151     switch(num) {
9152     case TARGET_NR_exit:
9153         /* In old applications this may be used to implement _exit(2).
9154            However, in threaded applications it is used for thread termination,
9155            and _exit_group is used for application termination.
9156            Do thread termination if we have more than one thread.  */
9157 
9158         if (block_signals()) {
9159             return -QEMU_ERESTARTSYS;
9160         }
9161 
9162         pthread_mutex_lock(&clone_lock);
9163 
9164         if (CPU_NEXT(first_cpu)) {
9165             TaskState *ts = cpu->opaque;
9166 
9167             if (ts->child_tidptr) {
9168                 put_user_u32(0, ts->child_tidptr);
9169                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9170                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9171             }
9172 
9173             object_unparent(OBJECT(cpu));
9174             object_unref(OBJECT(cpu));
9175             /*
9176              * At this point the CPU should be unrealized and removed
9177              * from cpu lists. We can clean-up the rest of the thread
9178              * data without the lock held.
9179              */
9180 
9181             pthread_mutex_unlock(&clone_lock);
9182 
9183             thread_cpu = NULL;
9184             g_free(ts);
9185             rcu_unregister_thread();
9186             pthread_exit(NULL);
9187         }
9188 
9189         pthread_mutex_unlock(&clone_lock);
9190         preexit_cleanup(cpu_env, arg1);
9191         _exit(arg1);
9192         return 0; /* avoid warning */
9193     case TARGET_NR_read:
9194         if (arg2 == 0 && arg3 == 0) {
9195             return get_errno(safe_read(arg1, 0, 0));
9196         } else {
9197             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9198                 return -TARGET_EFAULT;
9199             ret = get_errno(safe_read(arg1, p, arg3));
9200             if (ret >= 0 &&
9201                 fd_trans_host_to_target_data(arg1)) {
9202                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9203             }
9204             unlock_user(p, arg2, ret);
9205         }
9206         return ret;
9207     case TARGET_NR_write:
9208         if (arg2 == 0 && arg3 == 0) {
9209             return get_errno(safe_write(arg1, 0, 0));
9210         }
9211         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9212             return -TARGET_EFAULT;
9213         if (fd_trans_target_to_host_data(arg1)) {
9214             void *copy = g_malloc(arg3);
9215             memcpy(copy, p, arg3);
9216             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9217             if (ret >= 0) {
9218                 ret = get_errno(safe_write(arg1, copy, ret));
9219             }
9220             g_free(copy);
9221         } else {
9222             ret = get_errno(safe_write(arg1, p, arg3));
9223         }
9224         unlock_user(p, arg2, 0);
9225         return ret;
9226 
9227 #ifdef TARGET_NR_open
9228     case TARGET_NR_open:
9229         if (!(p = lock_user_string(arg1)))
9230             return -TARGET_EFAULT;
9231         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9232                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9233                                   arg3, true));
9234         fd_trans_unregister(ret);
9235         unlock_user(p, arg1, 0);
9236         return ret;
9237 #endif
9238     case TARGET_NR_openat:
9239         if (!(p = lock_user_string(arg2)))
9240             return -TARGET_EFAULT;
9241         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9242                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9243                                   arg4, true));
9244         fd_trans_unregister(ret);
9245         unlock_user(p, arg2, 0);
9246         return ret;
9247 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9248     case TARGET_NR_name_to_handle_at:
9249         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9250         return ret;
9251 #endif
9252 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9253     case TARGET_NR_open_by_handle_at:
9254         ret = do_open_by_handle_at(arg1, arg2, arg3);
9255         fd_trans_unregister(ret);
9256         return ret;
9257 #endif
9258 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9259     case TARGET_NR_pidfd_open:
9260         return get_errno(pidfd_open(arg1, arg2));
9261 #endif
9262 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9263     case TARGET_NR_pidfd_send_signal:
9264         {
9265             siginfo_t uinfo, *puinfo;
9266 
9267             if (arg3) {
9268                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9269                 if (!p) {
9270                     return -TARGET_EFAULT;
9271                  }
9272                  target_to_host_siginfo(&uinfo, p);
9273                  unlock_user(p, arg3, 0);
9274                  puinfo = &uinfo;
9275             } else {
9276                  puinfo = NULL;
9277             }
9278             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9279                                               puinfo, arg4));
9280         }
9281         return ret;
9282 #endif
9283 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9284     case TARGET_NR_pidfd_getfd:
9285         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9286 #endif
9287     case TARGET_NR_close:
9288         fd_trans_unregister(arg1);
9289         return get_errno(close(arg1));
9290 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9291     case TARGET_NR_close_range:
9292         ret = get_errno(sys_close_range(arg1, arg2, arg3));
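        /* On success, drop the fd translators for the descriptors that were
         * actually closed; CLOSE_RANGE_CLOEXEC does not close anything. */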
9293         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9294             abi_long fd, maxfd;
9295             maxfd = MIN(arg2, target_fd_max);
9296             for (fd = arg1; fd < maxfd; fd++) {
9297                 fd_trans_unregister(fd);
9298             }
9299         }
9300         return ret;
9301 #endif
9302 
9303     case TARGET_NR_brk:
9304         return do_brk(arg1);
9305 #ifdef TARGET_NR_fork
9306     case TARGET_NR_fork:
9307         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9308 #endif
9309 #ifdef TARGET_NR_waitpid
9310     case TARGET_NR_waitpid:
9311         {
9312             int status;
9313             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
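            /* A return value of 0 (e.g. WNOHANG with no state change) means no
             * status was stored, so only copy it out for a reaped child. */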
9314             if (!is_error(ret) && arg2 && ret
9315                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9316                 return -TARGET_EFAULT;
9317         }
9318         return ret;
9319 #endif
9320 #ifdef TARGET_NR_waitid
9321     case TARGET_NR_waitid:
9322         {
9323             siginfo_t info;
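            /* Pre-clear si_pid so we can tell below whether anything was
             * actually reported and needs copying back to the guest. */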
9324             info.si_pid = 0;
9325             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9326             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9327                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9328                     return -TARGET_EFAULT;
9329                 host_to_target_siginfo(p, &info);
9330                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9331             }
9332         }
9333         return ret;
9334 #endif
9335 #ifdef TARGET_NR_creat /* not on alpha */
9336     case TARGET_NR_creat:
9337         if (!(p = lock_user_string(arg1)))
9338             return -TARGET_EFAULT;
9339         ret = get_errno(creat(p, arg2));
9340         fd_trans_unregister(ret);
9341         unlock_user(p, arg1, 0);
9342         return ret;
9343 #endif
9344 #ifdef TARGET_NR_link
9345     case TARGET_NR_link:
9346         {
9347             void * p2;
9348             p = lock_user_string(arg1);
9349             p2 = lock_user_string(arg2);
9350             if (!p || !p2)
9351                 ret = -TARGET_EFAULT;
9352             else
9353                 ret = get_errno(link(p, p2));
9354             unlock_user(p2, arg2, 0);
9355             unlock_user(p, arg1, 0);
9356         }
9357         return ret;
9358 #endif
9359 #if defined(TARGET_NR_linkat)
9360     case TARGET_NR_linkat:
9361         {
9362             void * p2 = NULL;
9363             if (!arg2 || !arg4)
9364                 return -TARGET_EFAULT;
9365             p  = lock_user_string(arg2);
9366             p2 = lock_user_string(arg4);
9367             if (!p || !p2)
9368                 ret = -TARGET_EFAULT;
9369             else
9370                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9371             unlock_user(p, arg2, 0);
9372             unlock_user(p2, arg4, 0);
9373         }
9374         return ret;
9375 #endif
9376 #ifdef TARGET_NR_unlink
9377     case TARGET_NR_unlink:
9378         if (!(p = lock_user_string(arg1)))
9379             return -TARGET_EFAULT;
9380         ret = get_errno(unlink(p));
9381         unlock_user(p, arg1, 0);
9382         return ret;
9383 #endif
9384 #if defined(TARGET_NR_unlinkat)
9385     case TARGET_NR_unlinkat:
9386         if (!(p = lock_user_string(arg2)))
9387             return -TARGET_EFAULT;
9388         ret = get_errno(unlinkat(arg1, p, arg3));
9389         unlock_user(p, arg2, 0);
9390         return ret;
9391 #endif
9392     case TARGET_NR_execveat:
9393         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9394     case TARGET_NR_execve:
9395         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9396     case TARGET_NR_chdir:
9397         if (!(p = lock_user_string(arg1)))
9398             return -TARGET_EFAULT;
9399         ret = get_errno(chdir(p));
9400         unlock_user(p, arg1, 0);
9401         return ret;
9402 #ifdef TARGET_NR_time
9403     case TARGET_NR_time:
9404         {
9405             time_t host_time;
9406             ret = get_errno(time(&host_time));
9407             if (!is_error(ret)
9408                 && arg1
9409                 && put_user_sal(host_time, arg1))
9410                 return -TARGET_EFAULT;
9411         }
9412         return ret;
9413 #endif
9414 #ifdef TARGET_NR_mknod
9415     case TARGET_NR_mknod:
9416         if (!(p = lock_user_string(arg1)))
9417             return -TARGET_EFAULT;
9418         ret = get_errno(mknod(p, arg2, arg3));
9419         unlock_user(p, arg1, 0);
9420         return ret;
9421 #endif
9422 #if defined(TARGET_NR_mknodat)
9423     case TARGET_NR_mknodat:
9424         if (!(p = lock_user_string(arg2)))
9425             return -TARGET_EFAULT;
9426         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9427         unlock_user(p, arg2, 0);
9428         return ret;
9429 #endif
9430 #ifdef TARGET_NR_chmod
9431     case TARGET_NR_chmod:
9432         if (!(p = lock_user_string(arg1)))
9433             return -TARGET_EFAULT;
9434         ret = get_errno(chmod(p, arg2));
9435         unlock_user(p, arg1, 0);
9436         return ret;
9437 #endif
9438 #ifdef TARGET_NR_lseek
9439     case TARGET_NR_lseek:
9440         return get_errno(lseek(arg1, arg2, arg3));
9441 #endif
9442 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9443     /* Alpha specific: getxpid returns the pid and also places the parent pid in a4. */
9444     case TARGET_NR_getxpid:
9445         cpu_env->ir[IR_A4] = getppid();
9446         return get_errno(getpid());
9447 #endif
9448 #ifdef TARGET_NR_getpid
9449     case TARGET_NR_getpid:
9450         return get_errno(getpid());
9451 #endif
9452     case TARGET_NR_mount:
9453         {
9454             /* need to look at the data field */
9455             void *p2, *p3;
9456 
9457             if (arg1) {
9458                 p = lock_user_string(arg1);
9459                 if (!p) {
9460                     return -TARGET_EFAULT;
9461                 }
9462             } else {
9463                 p = NULL;
9464             }
9465 
9466             p2 = lock_user_string(arg2);
9467             if (!p2) {
9468                 if (arg1) {
9469                     unlock_user(p, arg1, 0);
9470                 }
9471                 return -TARGET_EFAULT;
9472             }
9473 
9474             if (arg3) {
9475                 p3 = lock_user_string(arg3);
9476                 if (!p3) {
9477                     if (arg1) {
9478                         unlock_user(p, arg1, 0);
9479                     }
9480                     unlock_user(p2, arg2, 0);
9481                     return -TARGET_EFAULT;
9482                 }
9483             } else {
9484                 p3 = NULL;
9485             }
9486 
9487             /* FIXME - arg5 should be locked, but it isn't clear how to
9488              * do that since it's not guaranteed to be a NULL-terminated
9489              * string.
9490              */
9491             if (!arg5) {
9492                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9493             } else {
9494                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9495             }
9496             ret = get_errno(ret);
9497 
9498             if (arg1) {
9499                 unlock_user(p, arg1, 0);
9500             }
9501             unlock_user(p2, arg2, 0);
9502             if (arg3) {
9503                 unlock_user(p3, arg3, 0);
9504             }
9505         }
9506         return ret;
9507 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9508 #if defined(TARGET_NR_umount)
9509     case TARGET_NR_umount:
9510 #endif
9511 #if defined(TARGET_NR_oldumount)
9512     case TARGET_NR_oldumount:
9513 #endif
9514         if (!(p = lock_user_string(arg1)))
9515             return -TARGET_EFAULT;
9516         ret = get_errno(umount(p));
9517         unlock_user(p, arg1, 0);
9518         return ret;
9519 #endif
9520 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9521     case TARGET_NR_move_mount:
9522         {
9523             void *p2, *p4;
9524 
9525             if (!arg2 || !arg4) {
9526                 return -TARGET_EFAULT;
9527             }
9528 
9529             p2 = lock_user_string(arg2);
9530             if (!p2) {
9531                 return -TARGET_EFAULT;
9532             }
9533 
9534             p4 = lock_user_string(arg4);
9535             if (!p4) {
9536                 unlock_user(p2, arg2, 0);
9537                 return -TARGET_EFAULT;
9538             }
9539             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9540 
9541             unlock_user(p2, arg2, 0);
9542             unlock_user(p4, arg4, 0);
9543 
9544             return ret;
9545         }
9546 #endif
9547 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9548     case TARGET_NR_open_tree:
9549         {
9550             void *p2;
9551             int host_flags;
9552 
9553             if (!arg2) {
9554                 return -TARGET_EFAULT;
9555             }
9556 
9557             p2 = lock_user_string(arg2);
9558             if (!p2) {
9559                 return -TARGET_EFAULT;
9560             }
9561 
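            /* Only the close-on-exec bit needs translating here; the remaining
             * flag bits are passed through to the host unchanged. */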
9562             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9563             if (arg3 & TARGET_O_CLOEXEC) {
9564                 host_flags |= O_CLOEXEC;
9565             }
9566 
9567             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9568 
9569             unlock_user(p2, arg2, 0);
9570 
9571             return ret;
9572         }
9573 #endif
9574 #ifdef TARGET_NR_stime /* not on alpha */
9575     case TARGET_NR_stime:
9576         {
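            /* Implemented via clock_settime(CLOCK_REALTIME); the guest only
             * supplies the seconds value. */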
9577             struct timespec ts;
9578             ts.tv_nsec = 0;
9579             if (get_user_sal(ts.tv_sec, arg1)) {
9580                 return -TARGET_EFAULT;
9581             }
9582             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9583         }
9584 #endif
9585 #ifdef TARGET_NR_alarm /* not on alpha */
9586     case TARGET_NR_alarm:
9587         return alarm(arg1);
9588 #endif
9589 #ifdef TARGET_NR_pause /* not on alpha */
9590     case TARGET_NR_pause:
9591         if (!block_signals()) {
9592             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9593         }
9594         return -TARGET_EINTR;
9595 #endif
9596 #ifdef TARGET_NR_utime
9597     case TARGET_NR_utime:
9598         {
9599             struct utimbuf tbuf, *host_tbuf;
9600             struct target_utimbuf *target_tbuf;
9601             if (arg2) {
9602                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9603                     return -TARGET_EFAULT;
9604                 tbuf.actime = tswapal(target_tbuf->actime);
9605                 tbuf.modtime = tswapal(target_tbuf->modtime);
9606                 unlock_user_struct(target_tbuf, arg2, 0);
9607                 host_tbuf = &tbuf;
9608             } else {
9609                 host_tbuf = NULL;
9610             }
9611             if (!(p = lock_user_string(arg1)))
9612                 return -TARGET_EFAULT;
9613             ret = get_errno(utime(p, host_tbuf));
9614             unlock_user(p, arg1, 0);
9615         }
9616         return ret;
9617 #endif
9618 #ifdef TARGET_NR_utimes
9619     case TARGET_NR_utimes:
9620         {
9621             struct timeval *tvp, tv[2];
9622             if (arg2) {
9623                 if (copy_from_user_timeval(&tv[0], arg2)
9624                     || copy_from_user_timeval(&tv[1],
9625                                               arg2 + sizeof(struct target_timeval)))
9626                     return -TARGET_EFAULT;
9627                 tvp = tv;
9628             } else {
9629                 tvp = NULL;
9630             }
9631             if (!(p = lock_user_string(arg1)))
9632                 return -TARGET_EFAULT;
9633             ret = get_errno(utimes(p, tvp));
9634             unlock_user(p, arg1, 0);
9635         }
9636         return ret;
9637 #endif
9638 #if defined(TARGET_NR_futimesat)
9639     case TARGET_NR_futimesat:
9640         {
9641             struct timeval *tvp, tv[2];
9642             if (arg3) {
9643                 if (copy_from_user_timeval(&tv[0], arg3)
9644                     || copy_from_user_timeval(&tv[1],
9645                                               arg3 + sizeof(struct target_timeval)))
9646                     return -TARGET_EFAULT;
9647                 tvp = tv;
9648             } else {
9649                 tvp = NULL;
9650             }
9651             if (!(p = lock_user_string(arg2))) {
9652                 return -TARGET_EFAULT;
9653             }
9654             ret = get_errno(futimesat(arg1, path(p), tvp));
9655             unlock_user(p, arg2, 0);
9656         }
9657         return ret;
9658 #endif
9659 #ifdef TARGET_NR_access
9660     case TARGET_NR_access:
9661         if (!(p = lock_user_string(arg1))) {
9662             return -TARGET_EFAULT;
9663         }
9664         ret = get_errno(access(path(p), arg2));
9665         unlock_user(p, arg1, 0);
9666         return ret;
9667 #endif
9668 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9669     case TARGET_NR_faccessat:
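        /* The faccessat syscall itself has no flags argument; flags only
         * arrived with faccessat2, handled below. */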
9670         if (!(p = lock_user_string(arg2))) {
9671             return -TARGET_EFAULT;
9672         }
9673         ret = get_errno(faccessat(arg1, p, arg3, 0));
9674         unlock_user(p, arg2, 0);
9675         return ret;
9676 #endif
9677 #if defined(TARGET_NR_faccessat2)
9678     case TARGET_NR_faccessat2:
9679         if (!(p = lock_user_string(arg2))) {
9680             return -TARGET_EFAULT;
9681         }
9682         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9683         unlock_user(p, arg2, 0);
9684         return ret;
9685 #endif
9686 #ifdef TARGET_NR_nice /* not on alpha */
9687     case TARGET_NR_nice:
9688         return get_errno(nice(arg1));
9689 #endif
9690     case TARGET_NR_sync:
9691         sync();
9692         return 0;
9693 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9694     case TARGET_NR_syncfs:
9695         return get_errno(syncfs(arg1));
9696 #endif
9697     case TARGET_NR_kill:
9698         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9699 #ifdef TARGET_NR_rename
9700     case TARGET_NR_rename:
9701         {
9702             void *p2;
9703             p = lock_user_string(arg1);
9704             p2 = lock_user_string(arg2);
9705             if (!p || !p2)
9706                 ret = -TARGET_EFAULT;
9707             else
9708                 ret = get_errno(rename(p, p2));
9709             unlock_user(p2, arg2, 0);
9710             unlock_user(p, arg1, 0);
9711         }
9712         return ret;
9713 #endif
9714 #if defined(TARGET_NR_renameat)
9715     case TARGET_NR_renameat:
9716         {
9717             void *p2;
9718             p  = lock_user_string(arg2);
9719             p2 = lock_user_string(arg4);
9720             if (!p || !p2)
9721                 ret = -TARGET_EFAULT;
9722             else
9723                 ret = get_errno(renameat(arg1, p, arg3, p2));
9724             unlock_user(p2, arg4, 0);
9725             unlock_user(p, arg2, 0);
9726         }
9727         return ret;
9728 #endif
9729 #if defined(TARGET_NR_renameat2)
9730     case TARGET_NR_renameat2:
9731         {
9732             void *p2;
9733             p  = lock_user_string(arg2);
9734             p2 = lock_user_string(arg4);
9735             if (!p || !p2) {
9736                 ret = -TARGET_EFAULT;
9737             } else {
9738                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9739             }
9740             unlock_user(p2, arg4, 0);
9741             unlock_user(p, arg2, 0);
9742         }
9743         return ret;
9744 #endif
9745 #ifdef TARGET_NR_mkdir
9746     case TARGET_NR_mkdir:
9747         if (!(p = lock_user_string(arg1)))
9748             return -TARGET_EFAULT;
9749         ret = get_errno(mkdir(p, arg2));
9750         unlock_user(p, arg1, 0);
9751         return ret;
9752 #endif
9753 #if defined(TARGET_NR_mkdirat)
9754     case TARGET_NR_mkdirat:
9755         if (!(p = lock_user_string(arg2)))
9756             return -TARGET_EFAULT;
9757         ret = get_errno(mkdirat(arg1, p, arg3));
9758         unlock_user(p, arg2, 0);
9759         return ret;
9760 #endif
9761 #ifdef TARGET_NR_rmdir
9762     case TARGET_NR_rmdir:
9763         if (!(p = lock_user_string(arg1)))
9764             return -TARGET_EFAULT;
9765         ret = get_errno(rmdir(p));
9766         unlock_user(p, arg1, 0);
9767         return ret;
9768 #endif
9769     case TARGET_NR_dup:
9770         ret = get_errno(dup(arg1));
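        /* Propagate any fd translator registered for the old descriptor. */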
9771         if (ret >= 0) {
9772             fd_trans_dup(arg1, ret);
9773         }
9774         return ret;
9775 #ifdef TARGET_NR_pipe
9776     case TARGET_NR_pipe:
9777         return do_pipe(cpu_env, arg1, 0, 0);
9778 #endif
9779 #ifdef TARGET_NR_pipe2
9780     case TARGET_NR_pipe2:
9781         return do_pipe(cpu_env, arg1,
9782                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9783 #endif
9784     case TARGET_NR_times:
9785         {
9786             struct target_tms *tmsp;
9787             struct tms tms;
9788             ret = get_errno(times(&tms));
9789             if (arg1) {
9790                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9791                 if (!tmsp)
9792                     return -TARGET_EFAULT;
9793                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9794                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9795                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9796                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9797             }
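            /* The return value of times() is also a clock_t and must be
             * converted for the guest. */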
9798             if (!is_error(ret))
9799                 ret = host_to_target_clock_t(ret);
9800         }
9801         return ret;
9802     case TARGET_NR_acct:
9803         if (arg1 == 0) {
9804             ret = get_errno(acct(NULL));
9805         } else {
9806             if (!(p = lock_user_string(arg1))) {
9807                 return -TARGET_EFAULT;
9808             }
9809             ret = get_errno(acct(path(p)));
9810             unlock_user(p, arg1, 0);
9811         }
9812         return ret;
9813 #ifdef TARGET_NR_umount2
9814     case TARGET_NR_umount2:
9815         if (!(p = lock_user_string(arg1)))
9816             return -TARGET_EFAULT;
9817         ret = get_errno(umount2(p, arg2));
9818         unlock_user(p, arg1, 0);
9819         return ret;
9820 #endif
9821     case TARGET_NR_ioctl:
9822         return do_ioctl(arg1, arg2, arg3);
9823 #ifdef TARGET_NR_fcntl
9824     case TARGET_NR_fcntl:
9825         return do_fcntl(arg1, arg2, arg3);
9826 #endif
9827     case TARGET_NR_setpgid:
9828         return get_errno(setpgid(arg1, arg2));
9829     case TARGET_NR_umask:
9830         return get_errno(umask(arg1));
9831     case TARGET_NR_chroot:
9832         if (!(p = lock_user_string(arg1)))
9833             return -TARGET_EFAULT;
9834         ret = get_errno(chroot(p));
9835         unlock_user(p, arg1, 0);
9836         return ret;
9837 #ifdef TARGET_NR_dup2
9838     case TARGET_NR_dup2:
9839         ret = get_errno(dup2(arg1, arg2));
9840         if (ret >= 0) {
9841             fd_trans_dup(arg1, arg2);
9842         }
9843         return ret;
9844 #endif
9845 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9846     case TARGET_NR_dup3:
9847     {
9848         int host_flags;
9849 
9850         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9851             return -TARGET_EINVAL;
9852         }
9853         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9854         ret = get_errno(dup3(arg1, arg2, host_flags));
9855         if (ret >= 0) {
9856             fd_trans_dup(arg1, arg2);
9857         }
9858         return ret;
9859     }
9860 #endif
9861 #ifdef TARGET_NR_getppid /* not on alpha */
9862     case TARGET_NR_getppid:
9863         return get_errno(getppid());
9864 #endif
9865 #ifdef TARGET_NR_getpgrp
9866     case TARGET_NR_getpgrp:
9867         return get_errno(getpgrp());
9868 #endif
9869     case TARGET_NR_setsid:
9870         return get_errno(setsid());
9871 #ifdef TARGET_NR_sigaction
9872     case TARGET_NR_sigaction:
9873         {
9874 #if defined(TARGET_MIPS)
9875             struct target_sigaction act, oact, *pact, *old_act;
9876 
9877             if (arg2) {
9878                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9879                     return -TARGET_EFAULT;
9880                 act._sa_handler = old_act->_sa_handler;
9881                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9882                 act.sa_flags = old_act->sa_flags;
9883                 unlock_user_struct(old_act, arg2, 0);
9884                 pact = &act;
9885             } else {
9886                 pact = NULL;
9887             }
9888 
9889             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9890 
9891             if (!is_error(ret) && arg3) {
9892                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9893                     return -TARGET_EFAULT;
9894                 old_act->_sa_handler = oact._sa_handler;
9895                 old_act->sa_flags = oact.sa_flags;
9896                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9897                 old_act->sa_mask.sig[1] = 0;
9898                 old_act->sa_mask.sig[2] = 0;
9899                 old_act->sa_mask.sig[3] = 0;
9900                 unlock_user_struct(old_act, arg3, 1);
9901             }
9902 #else
9903             struct target_old_sigaction *old_act;
9904             struct target_sigaction act, oact, *pact;
9905             if (arg2) {
9906                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9907                     return -TARGET_EFAULT;
9908                 act._sa_handler = old_act->_sa_handler;
9909                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9910                 act.sa_flags = old_act->sa_flags;
9911 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9912                 act.sa_restorer = old_act->sa_restorer;
9913 #endif
9914                 unlock_user_struct(old_act, arg2, 0);
9915                 pact = &act;
9916             } else {
9917                 pact = NULL;
9918             }
9919             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9920             if (!is_error(ret) && arg3) {
9921                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9922                     return -TARGET_EFAULT;
9923                 old_act->_sa_handler = oact._sa_handler;
9924                 old_act->sa_mask = oact.sa_mask.sig[0];
9925                 old_act->sa_flags = oact.sa_flags;
9926 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9927                 old_act->sa_restorer = oact.sa_restorer;
9928 #endif
9929                 unlock_user_struct(old_act, arg3, 1);
9930             }
9931 #endif
9932         }
9933         return ret;
9934 #endif
9935     case TARGET_NR_rt_sigaction:
9936         {
9937             /*
9938              * For Alpha and SPARC this is a 5 argument syscall, with
9939              * a 'restorer' parameter which must be copied into the
9940              * sa_restorer field of the sigaction struct.
9941              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9942              * and arg5 is the sigsetsize.
9943              */
9944 #if defined(TARGET_ALPHA)
9945             target_ulong sigsetsize = arg4;
9946             target_ulong restorer = arg5;
9947 #elif defined(TARGET_SPARC)
9948             target_ulong restorer = arg4;
9949             target_ulong sigsetsize = arg5;
9950 #else
9951             target_ulong sigsetsize = arg4;
9952             target_ulong restorer = 0;
9953 #endif
9954             struct target_sigaction *act = NULL;
9955             struct target_sigaction *oact = NULL;
9956 
9957             if (sigsetsize != sizeof(target_sigset_t)) {
9958                 return -TARGET_EINVAL;
9959             }
9960             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9961                 return -TARGET_EFAULT;
9962             }
9963             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9964                 ret = -TARGET_EFAULT;
9965             } else {
9966                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9967                 if (oact) {
9968                     unlock_user_struct(oact, arg3, 1);
9969                 }
9970             }
9971             if (act) {
9972                 unlock_user_struct(act, arg2, 0);
9973             }
9974         }
9975         return ret;
9976 #ifdef TARGET_NR_sgetmask /* not on alpha */
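    /* sgetmask and ssetmask (below) operate on the obsolete single-word signal mask. */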
9977     case TARGET_NR_sgetmask:
9978         {
9979             sigset_t cur_set;
9980             abi_ulong target_set;
9981             ret = do_sigprocmask(0, NULL, &cur_set);
9982             if (!ret) {
9983                 host_to_target_old_sigset(&target_set, &cur_set);
9984                 ret = target_set;
9985             }
9986         }
9987         return ret;
9988 #endif
9989 #ifdef TARGET_NR_ssetmask /* not on alpha */
9990     case TARGET_NR_ssetmask:
9991         {
9992             sigset_t set, oset;
9993             abi_ulong target_set = arg1;
9994             target_to_host_old_sigset(&set, &target_set);
9995             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9996             if (!ret) {
9997                 host_to_target_old_sigset(&target_set, &oset);
9998                 ret = target_set;
9999             }
10000         }
10001         return ret;
10002 #endif
10003 #ifdef TARGET_NR_sigprocmask
10004     case TARGET_NR_sigprocmask:
10005         {
10006 #if defined(TARGET_ALPHA)
10007             sigset_t set, oldset;
10008             abi_ulong mask;
10009             int how;
10010 
10011             switch (arg1) {
10012             case TARGET_SIG_BLOCK:
10013                 how = SIG_BLOCK;
10014                 break;
10015             case TARGET_SIG_UNBLOCK:
10016                 how = SIG_UNBLOCK;
10017                 break;
10018             case TARGET_SIG_SETMASK:
10019                 how = SIG_SETMASK;
10020                 break;
10021             default:
10022                 return -TARGET_EINVAL;
10023             }
10024             mask = arg2;
10025             target_to_host_old_sigset(&set, &mask);
10026 
10027             ret = do_sigprocmask(how, &set, &oldset);
10028             if (!is_error(ret)) {
10029                 host_to_target_old_sigset(&mask, &oldset);
10030                 ret = mask;
10031                 cpu_env->ir[IR_V0] = 0; /* force no error */
10032             }
10033 #else
10034             sigset_t set, oldset, *set_ptr;
10035             int how;
10036 
10037             if (arg2) {
10038                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10039                 if (!p) {
10040                     return -TARGET_EFAULT;
10041                 }
10042                 target_to_host_old_sigset(&set, p);
10043                 unlock_user(p, arg2, 0);
10044                 set_ptr = &set;
10045                 switch (arg1) {
10046                 case TARGET_SIG_BLOCK:
10047                     how = SIG_BLOCK;
10048                     break;
10049                 case TARGET_SIG_UNBLOCK:
10050                     how = SIG_UNBLOCK;
10051                     break;
10052                 case TARGET_SIG_SETMASK:
10053                     how = SIG_SETMASK;
10054                     break;
10055                 default:
10056                     return -TARGET_EINVAL;
10057                 }
10058             } else {
10059                 how = 0;
10060                 set_ptr = NULL;
10061             }
10062             ret = do_sigprocmask(how, set_ptr, &oldset);
10063             if (!is_error(ret) && arg3) {
10064                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10065                     return -TARGET_EFAULT;
10066                 host_to_target_old_sigset(p, &oldset);
10067                 unlock_user(p, arg3, sizeof(target_sigset_t));
10068             }
10069 #endif
10070         }
10071         return ret;
10072 #endif
10073     case TARGET_NR_rt_sigprocmask:
10074         {
10075             int how = arg1;
10076             sigset_t set, oldset, *set_ptr;
10077 
10078             if (arg4 != sizeof(target_sigset_t)) {
10079                 return -TARGET_EINVAL;
10080             }
10081 
10082             if (arg2) {
10083                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10084                 if (!p) {
10085                     return -TARGET_EFAULT;
10086                 }
10087                 target_to_host_sigset(&set, p);
10088                 unlock_user(p, arg2, 0);
10089                 set_ptr = &set;
10090                 switch(how) {
10091                 case TARGET_SIG_BLOCK:
10092                     how = SIG_BLOCK;
10093                     break;
10094                 case TARGET_SIG_UNBLOCK:
10095                     how = SIG_UNBLOCK;
10096                     break;
10097                 case TARGET_SIG_SETMASK:
10098                     how = SIG_SETMASK;
10099                     break;
10100                 default:
10101                     return -TARGET_EINVAL;
10102                 }
10103             } else {
10104                 how = 0;
10105                 set_ptr = NULL;
10106             }
10107             ret = do_sigprocmask(how, set_ptr, &oldset);
10108             if (!is_error(ret) && arg3) {
10109                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10110                     return -TARGET_EFAULT;
10111                 host_to_target_sigset(p, &oldset);
10112                 unlock_user(p, arg3, sizeof(target_sigset_t));
10113             }
10114         }
10115         return ret;
10116 #ifdef TARGET_NR_sigpending
10117     case TARGET_NR_sigpending:
10118         {
10119             sigset_t set;
10120             ret = get_errno(sigpending(&set));
10121             if (!is_error(ret)) {
10122                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10123                     return -TARGET_EFAULT;
10124                 host_to_target_old_sigset(p, &set);
10125                 unlock_user(p, arg1, sizeof(target_sigset_t));
10126             }
10127         }
10128         return ret;
10129 #endif
10130     case TARGET_NR_rt_sigpending:
10131         {
10132             sigset_t set;
10133 
10134             /* Yes, this check is >, not != like most others. We follow the
10135              * kernel's logic here: it implements NR_sigpending through the
10136              * same code path, where the old_sigset_t it copies out is
10137              * smaller in size.
10138              */
10139             if (arg2 > sizeof(target_sigset_t)) {
10140                 return -TARGET_EINVAL;
10141             }
10142 
10143             ret = get_errno(sigpending(&set));
10144             if (!is_error(ret)) {
10145                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10146                     return -TARGET_EFAULT;
10147                 host_to_target_sigset(p, &set);
10148                 unlock_user(p, arg1, sizeof(target_sigset_t));
10149             }
10150         }
10151         return ret;
10152 #ifdef TARGET_NR_sigsuspend
10153     case TARGET_NR_sigsuspend:
10154         {
10155             sigset_t *set;
10156 
10157 #if defined(TARGET_ALPHA)
10158             TaskState *ts = cpu->opaque;
10159             /* target_to_host_old_sigset will bswap back */
10160             abi_ulong mask = tswapal(arg1);
10161             set = &ts->sigsuspend_mask;
10162             target_to_host_old_sigset(set, &mask);
10163 #else
10164             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10165             if (ret != 0) {
10166                 return ret;
10167             }
10168 #endif
10169             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10170             finish_sigsuspend_mask(ret);
10171         }
10172         return ret;
10173 #endif
10174     case TARGET_NR_rt_sigsuspend:
10175         {
10176             sigset_t *set;
10177 
10178             ret = process_sigsuspend_mask(&set, arg1, arg2);
10179             if (ret != 0) {
10180                 return ret;
10181             }
10182             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10183             finish_sigsuspend_mask(ret);
10184         }
10185         return ret;
10186 #ifdef TARGET_NR_rt_sigtimedwait
10187     case TARGET_NR_rt_sigtimedwait:
10188         {
10189             sigset_t set;
10190             struct timespec uts, *puts;
10191             siginfo_t uinfo;
10192 
10193             if (arg4 != sizeof(target_sigset_t)) {
10194                 return -TARGET_EINVAL;
10195             }
10196 
10197             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10198                 return -TARGET_EFAULT;
10199             target_to_host_sigset(&set, p);
10200             unlock_user(p, arg1, 0);
10201             if (arg3) {
10202                 puts = &uts;
10203                 if (target_to_host_timespec(puts, arg3)) {
10204                     return -TARGET_EFAULT;
10205                 }
10206             } else {
10207                 puts = NULL;
10208             }
10209             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10210                                                  SIGSET_T_SIZE));
10211             if (!is_error(ret)) {
10212                 if (arg2) {
10213                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10214                                   0);
10215                     if (!p) {
10216                         return -TARGET_EFAULT;
10217                     }
10218                     host_to_target_siginfo(p, &uinfo);
10219                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10220                 }
10221                 ret = host_to_target_signal(ret);
10222             }
10223         }
10224         return ret;
10225 #endif
10226 #ifdef TARGET_NR_rt_sigtimedwait_time64
10227     case TARGET_NR_rt_sigtimedwait_time64:
10228         {
10229             sigset_t set;
10230             struct timespec uts, *puts;
10231             siginfo_t uinfo;
10232 
10233             if (arg4 != sizeof(target_sigset_t)) {
10234                 return -TARGET_EINVAL;
10235             }
10236 
10237             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10238             if (!p) {
10239                 return -TARGET_EFAULT;
10240             }
10241             target_to_host_sigset(&set, p);
10242             unlock_user(p, arg1, 0);
10243             if (arg3) {
10244                 puts = &uts;
10245                 if (target_to_host_timespec64(puts, arg3)) {
10246                     return -TARGET_EFAULT;
10247                 }
10248             } else {
10249                 puts = NULL;
10250             }
10251             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10252                                                  SIGSET_T_SIZE));
10253             if (!is_error(ret)) {
10254                 if (arg2) {
10255                     p = lock_user(VERIFY_WRITE, arg2,
10256                                   sizeof(target_siginfo_t), 0);
10257                     if (!p) {
10258                         return -TARGET_EFAULT;
10259                     }
10260                     host_to_target_siginfo(p, &uinfo);
10261                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10262                 }
10263                 ret = host_to_target_signal(ret);
10264             }
10265         }
10266         return ret;
10267 #endif
10268     case TARGET_NR_rt_sigqueueinfo:
10269         {
10270             siginfo_t uinfo;
10271 
10272             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10273             if (!p) {
10274                 return -TARGET_EFAULT;
10275             }
10276             target_to_host_siginfo(&uinfo, p);
10277             unlock_user(p, arg3, 0);
10278             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10279         }
10280         return ret;
10281     case TARGET_NR_rt_tgsigqueueinfo:
10282         {
10283             siginfo_t uinfo;
10284 
10285             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10286             if (!p) {
10287                 return -TARGET_EFAULT;
10288             }
10289             target_to_host_siginfo(&uinfo, p);
10290             unlock_user(p, arg4, 0);
10291             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10292         }
10293         return ret;
10294 #ifdef TARGET_NR_sigreturn
10295     case TARGET_NR_sigreturn:
10296         if (block_signals()) {
10297             return -QEMU_ERESTARTSYS;
10298         }
10299         return do_sigreturn(cpu_env);
10300 #endif
10301     case TARGET_NR_rt_sigreturn:
10302         if (block_signals()) {
10303             return -QEMU_ERESTARTSYS;
10304         }
10305         return do_rt_sigreturn(cpu_env);
10306     case TARGET_NR_sethostname:
10307         if (!(p = lock_user_string(arg1)))
10308             return -TARGET_EFAULT;
10309         ret = get_errno(sethostname(p, arg2));
10310         unlock_user(p, arg1, 0);
10311         return ret;
10312 #ifdef TARGET_NR_setrlimit
10313     case TARGET_NR_setrlimit:
10314         {
10315             int resource = target_to_host_resource(arg1);
10316             struct target_rlimit *target_rlim;
10317             struct rlimit rlim;
10318             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10319                 return -TARGET_EFAULT;
10320             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10321             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10322             unlock_user_struct(target_rlim, arg2, 0);
10323             /*
10324              * If we just passed through resource limit settings for memory then
10325              * they would also apply to QEMU's own allocations, and QEMU will
10326              * crash or hang or die if its allocations fail. Ideally we would
10327              * track the guest allocations in QEMU and apply the limits ourselves.
10328              * For now, just tell the guest the call succeeded but don't actually
10329              * limit anything.
10330              */
10331             if (resource != RLIMIT_AS &&
10332                 resource != RLIMIT_DATA &&
10333                 resource != RLIMIT_STACK) {
10334                 return get_errno(setrlimit(resource, &rlim));
10335             } else {
10336                 return 0;
10337             }
10338         }
10339 #endif
10340 #ifdef TARGET_NR_getrlimit
10341     case TARGET_NR_getrlimit:
10342         {
10343             int resource = target_to_host_resource(arg1);
10344             struct target_rlimit *target_rlim;
10345             struct rlimit rlim;
10346 
10347             ret = get_errno(getrlimit(resource, &rlim));
10348             if (!is_error(ret)) {
10349                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10350                     return -TARGET_EFAULT;
10351                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10352                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10353                 unlock_user_struct(target_rlim, arg2, 1);
10354             }
10355         }
10356         return ret;
10357 #endif
10358     case TARGET_NR_getrusage:
10359         {
10360             struct rusage rusage;
10361             ret = get_errno(getrusage(arg1, &rusage));
10362             if (!is_error(ret)) {
10363                 ret = host_to_target_rusage(arg2, &rusage);
10364             }
10365         }
10366         return ret;
10367 #if defined(TARGET_NR_gettimeofday)
10368     case TARGET_NR_gettimeofday:
10369         {
10370             struct timeval tv;
10371             struct timezone tz;
10372 
10373             ret = get_errno(gettimeofday(&tv, &tz));
10374             if (!is_error(ret)) {
10375                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10376                     return -TARGET_EFAULT;
10377                 }
10378                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10379                     return -TARGET_EFAULT;
10380                 }
10381             }
10382         }
10383         return ret;
10384 #endif
10385 #if defined(TARGET_NR_settimeofday)
10386     case TARGET_NR_settimeofday:
10387         {
10388             struct timeval tv, *ptv = NULL;
10389             struct timezone tz, *ptz = NULL;
10390 
10391             if (arg1) {
10392                 if (copy_from_user_timeval(&tv, arg1)) {
10393                     return -TARGET_EFAULT;
10394                 }
10395                 ptv = &tv;
10396             }
10397 
10398             if (arg2) {
10399                 if (copy_from_user_timezone(&tz, arg2)) {
10400                     return -TARGET_EFAULT;
10401                 }
10402                 ptz = &tz;
10403             }
10404 
10405             return get_errno(settimeofday(ptv, ptz));
10406         }
10407 #endif
10408 #if defined(TARGET_NR_select)
10409     case TARGET_NR_select:
10410 #if defined(TARGET_WANT_NI_OLD_SELECT)
10411         /* Some architectures used to wire up old_select here,
10412          * but they now return ENOSYS for it.
10413          */
10414         ret = -TARGET_ENOSYS;
10415 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10416         ret = do_old_select(arg1);
10417 #else
10418         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10419 #endif
10420         return ret;
10421 #endif
10422 #ifdef TARGET_NR_pselect6
10423     case TARGET_NR_pselect6:
10424         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10425 #endif
10426 #ifdef TARGET_NR_pselect6_time64
10427     case TARGET_NR_pselect6_time64:
10428         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10429 #endif
10430 #ifdef TARGET_NR_symlink
10431     case TARGET_NR_symlink:
10432         {
10433             void *p2;
10434             p = lock_user_string(arg1);
10435             p2 = lock_user_string(arg2);
10436             if (!p || !p2)
10437                 ret = -TARGET_EFAULT;
10438             else
10439                 ret = get_errno(symlink(p, p2));
10440             unlock_user(p2, arg2, 0);
10441             unlock_user(p, arg1, 0);
10442         }
10443         return ret;
10444 #endif
10445 #if defined(TARGET_NR_symlinkat)
10446     case TARGET_NR_symlinkat:
10447         {
10448             void *p2;
10449             p  = lock_user_string(arg1);
10450             p2 = lock_user_string(arg3);
10451             if (!p || !p2)
10452                 ret = -TARGET_EFAULT;
10453             else
10454                 ret = get_errno(symlinkat(p, arg2, p2));
10455             unlock_user(p2, arg3, 0);
10456             unlock_user(p, arg1, 0);
10457         }
10458         return ret;
10459 #endif
10460 #ifdef TARGET_NR_readlink
10461     case TARGET_NR_readlink:
10462         {
10463             void *p2;
10464             p = lock_user_string(arg1);
10465             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10466             if (!p || !p2) {
                      ret = -TARGET_EFAULT;
                  } else {
                      ret = get_errno(do_guest_readlink(p, p2, arg3));
                  }
10467             unlock_user(p2, arg2, ret);
10468             unlock_user(p, arg1, 0);
10469         }
10470         return ret;
10471 #endif
10472 #if defined(TARGET_NR_readlinkat)
10473     case TARGET_NR_readlinkat:
10474         {
10475             void *p2;
10476             p  = lock_user_string(arg2);
10477             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10478             if (!p || !p2) {
10479                 ret = -TARGET_EFAULT;
10480             } else if (!arg4) {
10481                 /* Short circuit this for the magic exe check. */
10482                 ret = -TARGET_EINVAL;
10483             } else if (is_proc_myself((const char *)p, "exe")) {
10484                 /*
10485                  * Don't worry about sign mismatch as earlier mapping
10486                  * logic would have thrown a bad address error.
10487                  */
10488                 ret = MIN(strlen(exec_path), arg4);
10489                 /* We cannot NUL terminate the string. */
10490                 memcpy(p2, exec_path, ret);
10491             } else {
10492                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10493             }
10494             unlock_user(p2, arg3, ret);
10495             unlock_user(p, arg2, 0);
10496         }
10497         return ret;
10498 #endif
10499 #ifdef TARGET_NR_swapon
10500     case TARGET_NR_swapon:
10501         if (!(p = lock_user_string(arg1)))
10502             return -TARGET_EFAULT;
10503         ret = get_errno(swapon(p, arg2));
10504         unlock_user(p, arg1, 0);
10505         return ret;
10506 #endif
10507     case TARGET_NR_reboot:
10508         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10509            /* arg4 is only used for RESTART2; it must be ignored in all other cases */
10510            p = lock_user_string(arg4);
10511            if (!p) {
10512                return -TARGET_EFAULT;
10513            }
10514            ret = get_errno(reboot(arg1, arg2, arg3, p));
10515            unlock_user(p, arg4, 0);
10516         } else {
10517            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10518         }
10519         return ret;
10520 #ifdef TARGET_NR_mmap
10521     case TARGET_NR_mmap:
10522 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10523     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10524     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10525     || defined(TARGET_S390X)
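        /* On these targets the old mmap syscall takes a pointer to a block of
         * six arguments in guest memory instead of passing them in registers. */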
10526         {
10527             abi_ulong *v;
10528             abi_ulong v1, v2, v3, v4, v5, v6;
10529             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10530                 return -TARGET_EFAULT;
10531             v1 = tswapal(v[0]);
10532             v2 = tswapal(v[1]);
10533             v3 = tswapal(v[2]);
10534             v4 = tswapal(v[3]);
10535             v5 = tswapal(v[4]);
10536             v6 = tswapal(v[5]);
10537             unlock_user(v, arg1, 0);
10538             ret = get_errno(target_mmap(v1, v2, v3,
10539                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10540                                         v5, v6));
10541         }
10542 #else
10543         /* mmap pointers are always untagged */
10544         ret = get_errno(target_mmap(arg1, arg2, arg3,
10545                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10546                                     arg5,
10547                                     arg6));
10548 #endif
10549         return ret;
10550 #endif
10551 #ifdef TARGET_NR_mmap2
10552     case TARGET_NR_mmap2:
10553 #ifndef MMAP_SHIFT
10554 #define MMAP_SHIFT 12
10555 #endif
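        /* mmap2 passes the file offset in units of (1 << MMAP_SHIFT) bytes,
         * 4096 by default. */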
10556         ret = target_mmap(arg1, arg2, arg3,
10557                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10558                           arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10559         return get_errno(ret);
10560 #endif
10561     case TARGET_NR_munmap:
10562         arg1 = cpu_untagged_addr(cpu, arg1);
10563         return get_errno(target_munmap(arg1, arg2));
10564     case TARGET_NR_mprotect:
10565         arg1 = cpu_untagged_addr(cpu, arg1);
10566         {
10567             TaskState *ts = cpu->opaque;
10568             /* Special hack to detect libc making the stack executable.  */
10569             if ((arg3 & PROT_GROWSDOWN)
10570                 && arg1 >= ts->info->stack_limit
10571                 && arg1 <= ts->info->start_stack) {
10572                 arg3 &= ~PROT_GROWSDOWN;
10573                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10574                 arg1 = ts->info->stack_limit;
10575             }
10576         }
10577         return get_errno(target_mprotect(arg1, arg2, arg3));
10578 #ifdef TARGET_NR_mremap
10579     case TARGET_NR_mremap:
10580         arg1 = cpu_untagged_addr(cpu, arg1);
10581         /* mremap new_addr (arg5) is always untagged */
10582         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10583 #endif
10584         /* ??? msync/mlock/munlock are broken for softmmu.  */
10585 #ifdef TARGET_NR_msync
10586     case TARGET_NR_msync:
10587         return get_errno(msync(g2h(cpu, arg1), arg2,
10588                                target_to_host_msync_arg(arg3)));
10589 #endif
10590 #ifdef TARGET_NR_mlock
10591     case TARGET_NR_mlock:
10592         return get_errno(mlock(g2h(cpu, arg1), arg2));
10593 #endif
10594 #ifdef TARGET_NR_munlock
10595     case TARGET_NR_munlock:
10596         return get_errno(munlock(g2h(cpu, arg1), arg2));
10597 #endif
10598 #ifdef TARGET_NR_mlockall
10599     case TARGET_NR_mlockall:
10600         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10601 #endif
10602 #ifdef TARGET_NR_munlockall
10603     case TARGET_NR_munlockall:
10604         return get_errno(munlockall());
10605 #endif
10606 #ifdef TARGET_NR_truncate
10607     case TARGET_NR_truncate:
10608         if (!(p = lock_user_string(arg1)))
10609             return -TARGET_EFAULT;
10610         ret = get_errno(truncate(p, arg2));
10611         unlock_user(p, arg1, 0);
10612         return ret;
10613 #endif
10614 #ifdef TARGET_NR_ftruncate
10615     case TARGET_NR_ftruncate:
10616         return get_errno(ftruncate(arg1, arg2));
10617 #endif
10618     case TARGET_NR_fchmod:
10619         return get_errno(fchmod(arg1, arg2));
10620 #if defined(TARGET_NR_fchmodat)
10621     case TARGET_NR_fchmodat:
10622         if (!(p = lock_user_string(arg2)))
10623             return -TARGET_EFAULT;
10624         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10625         unlock_user(p, arg2, 0);
10626         return ret;
10627 #endif
10628     case TARGET_NR_getpriority:
10629         /* Note that negative values are valid for getpriority, so we must
10630            differentiate based on errno settings.  */
10631         errno = 0;
10632         ret = getpriority(arg1, arg2);
10633         if (ret == -1 && errno != 0) {
10634             return -host_to_target_errno(errno);
10635         }
10636 #ifdef TARGET_ALPHA
10637         /* Return value is the unbiased priority.  Signal no error.  */
10638         cpu_env->ir[IR_V0] = 0;
10639 #else
10640         /* Return value is a biased priority to avoid negative numbers.  */
10641         ret = 20 - ret;
10642 #endif
10643         return ret;
10644     case TARGET_NR_setpriority:
10645         return get_errno(setpriority(arg1, arg2, arg3));
10646 #ifdef TARGET_NR_statfs
10647     case TARGET_NR_statfs:
10648         if (!(p = lock_user_string(arg1))) {
10649             return -TARGET_EFAULT;
10650         }
10651         ret = get_errno(statfs(path(p), &stfs));
10652         unlock_user(p, arg1, 0);
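    /* fstatfs below shares this conversion code via the convert_statfs label. */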
10653     convert_statfs:
10654         if (!is_error(ret)) {
10655             struct target_statfs *target_stfs;
10656 
10657             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10658                 return -TARGET_EFAULT;
10659             __put_user(stfs.f_type, &target_stfs->f_type);
10660             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10661             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10662             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10663             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10664             __put_user(stfs.f_files, &target_stfs->f_files);
10665             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10666             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10667             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10668             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10669             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10670 #ifdef _STATFS_F_FLAGS
10671             __put_user(stfs.f_flags, &target_stfs->f_flags);
10672 #else
10673             __put_user(0, &target_stfs->f_flags);
10674 #endif
10675             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10676             unlock_user_struct(target_stfs, arg2, 1);
10677         }
10678         return ret;
10679 #endif
10680 #ifdef TARGET_NR_fstatfs
10681     case TARGET_NR_fstatfs:
10682         ret = get_errno(fstatfs(arg1, &stfs));
10683         goto convert_statfs;
10684 #endif
10685 #ifdef TARGET_NR_statfs64
10686     case TARGET_NR_statfs64:
10687         if (!(p = lock_user_string(arg1))) {
10688             return -TARGET_EFAULT;
10689         }
10690         ret = get_errno(statfs(path(p), &stfs));
10691         unlock_user(p, arg1, 0);
10692     convert_statfs64:
10693         if (!is_error(ret)) {
10694             struct target_statfs64 *target_stfs;
10695 
10696             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10697                 return -TARGET_EFAULT;
10698             __put_user(stfs.f_type, &target_stfs->f_type);
10699             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10700             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10701             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10702             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10703             __put_user(stfs.f_files, &target_stfs->f_files);
10704             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10705             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10706             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10707             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10708             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10709 #ifdef _STATFS_F_FLAGS
10710             __put_user(stfs.f_flags, &target_stfs->f_flags);
10711 #else
10712             __put_user(0, &target_stfs->f_flags);
10713 #endif
10714             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10715             unlock_user_struct(target_stfs, arg3, 1);
10716         }
10717         return ret;
10718     case TARGET_NR_fstatfs64:
10719         ret = get_errno(fstatfs(arg1, &stfs));
10720         goto convert_statfs64;
10721 #endif
10722 #ifdef TARGET_NR_socketcall
10723     case TARGET_NR_socketcall:
10724         return do_socketcall(arg1, arg2);
10725 #endif
10726 #ifdef TARGET_NR_accept
10727     case TARGET_NR_accept:
10728         return do_accept4(arg1, arg2, arg3, 0);
10729 #endif
10730 #ifdef TARGET_NR_accept4
10731     case TARGET_NR_accept4:
10732         return do_accept4(arg1, arg2, arg3, arg4);
10733 #endif
10734 #ifdef TARGET_NR_bind
10735     case TARGET_NR_bind:
10736         return do_bind(arg1, arg2, arg3);
10737 #endif
10738 #ifdef TARGET_NR_connect
10739     case TARGET_NR_connect:
10740         return do_connect(arg1, arg2, arg3);
10741 #endif
10742 #ifdef TARGET_NR_getpeername
10743     case TARGET_NR_getpeername:
10744         return do_getpeername(arg1, arg2, arg3);
10745 #endif
10746 #ifdef TARGET_NR_getsockname
10747     case TARGET_NR_getsockname:
10748         return do_getsockname(arg1, arg2, arg3);
10749 #endif
10750 #ifdef TARGET_NR_getsockopt
10751     case TARGET_NR_getsockopt:
10752         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10753 #endif
10754 #ifdef TARGET_NR_listen
10755     case TARGET_NR_listen:
10756         return get_errno(listen(arg1, arg2));
10757 #endif
10758 #ifdef TARGET_NR_recv
10759     case TARGET_NR_recv:
10760         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10761 #endif
10762 #ifdef TARGET_NR_recvfrom
10763     case TARGET_NR_recvfrom:
10764         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10765 #endif
10766 #ifdef TARGET_NR_recvmsg
10767     case TARGET_NR_recvmsg:
10768         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10769 #endif
10770 #ifdef TARGET_NR_send
10771     case TARGET_NR_send:
10772         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10773 #endif
10774 #ifdef TARGET_NR_sendmsg
10775     case TARGET_NR_sendmsg:
10776         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10777 #endif
10778 #ifdef TARGET_NR_sendmmsg
10779     case TARGET_NR_sendmmsg:
10780         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10781 #endif
10782 #ifdef TARGET_NR_recvmmsg
10783     case TARGET_NR_recvmmsg:
10784         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10785 #endif
10786 #ifdef TARGET_NR_sendto
10787     case TARGET_NR_sendto:
10788         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10789 #endif
10790 #ifdef TARGET_NR_shutdown
10791     case TARGET_NR_shutdown:
10792         return get_errno(shutdown(arg1, arg2));
10793 #endif
10794 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10795     case TARGET_NR_getrandom:
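              /*
               * Note: unlock_user() is given ret as the copy-back length, so
               * on success only the bytes the host getrandom() actually
               * filled in are copied back to guest memory.
               */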
10796         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10797         if (!p) {
10798             return -TARGET_EFAULT;
10799         }
10800         ret = get_errno(getrandom(p, arg2, arg3));
10801         unlock_user(p, arg1, ret);
10802         return ret;
10803 #endif
10804 #ifdef TARGET_NR_socket
10805     case TARGET_NR_socket:
10806         return do_socket(arg1, arg2, arg3);
10807 #endif
10808 #ifdef TARGET_NR_socketpair
10809     case TARGET_NR_socketpair:
10810         return do_socketpair(arg1, arg2, arg3, arg4);
10811 #endif
10812 #ifdef TARGET_NR_setsockopt
10813     case TARGET_NR_setsockopt:
10814         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10815 #endif
10816 #if defined(TARGET_NR_syslog)
10817     case TARGET_NR_syslog:
10818         {
10819             int len = arg2;
10820 
10821             switch (arg1) {
10822             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10823             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10824             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10825             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10826             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10827             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10828             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10829             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10830                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10831             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10832             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10833             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10834                 {
10835                     if (len < 0) {
10836                         return -TARGET_EINVAL;
10837                     }
10838                     if (len == 0) {
10839                         return 0;
10840                     }
10841                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10842                     if (!p) {
10843                         return -TARGET_EFAULT;
10844                     }
10845                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10846                     unlock_user(p, arg2, arg3);
10847                 }
10848                 return ret;
10849             default:
10850                 return -TARGET_EINVAL;
10851             }
10852         }
10853         break;
10854 #endif
10855     case TARGET_NR_setitimer:
10856         {
10857             struct itimerval value, ovalue, *pvalue;
10858 
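                  /*
                   * A target itimerval is two consecutive target_timevals
                   * (it_interval followed by it_value), hence the
                   * "+ sizeof(struct target_timeval)" offsets used below.
                   */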
10859             if (arg2) {
10860                 pvalue = &value;
10861                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10862                     || copy_from_user_timeval(&pvalue->it_value,
10863                                               arg2 + sizeof(struct target_timeval)))
10864                     return -TARGET_EFAULT;
10865             } else {
10866                 pvalue = NULL;
10867             }
10868             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10869             if (!is_error(ret) && arg3) {
10870                 if (copy_to_user_timeval(arg3,
10871                                          &ovalue.it_interval)
10872                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10873                                             &ovalue.it_value))
10874                     return -TARGET_EFAULT;
10875             }
10876         }
10877         return ret;
10878     case TARGET_NR_getitimer:
10879         {
10880             struct itimerval value;
10881 
10882             ret = get_errno(getitimer(arg1, &value));
10883             if (!is_error(ret) && arg2) {
10884                 if (copy_to_user_timeval(arg2,
10885                                          &value.it_interval)
10886                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10887                                             &value.it_value))
10888                     return -TARGET_EFAULT;
10889             }
10890         }
10891         return ret;
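          /*
           * stat, lstat and fstat share the host-to-target struct stat
           * conversion: the first two jump into the fstat case at the
           * "do_stat" label once the host call has been made.
           */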
10892 #ifdef TARGET_NR_stat
10893     case TARGET_NR_stat:
10894         if (!(p = lock_user_string(arg1))) {
10895             return -TARGET_EFAULT;
10896         }
10897         ret = get_errno(stat(path(p), &st));
10898         unlock_user(p, arg1, 0);
10899         goto do_stat;
10900 #endif
10901 #ifdef TARGET_NR_lstat
10902     case TARGET_NR_lstat:
10903         if (!(p = lock_user_string(arg1))) {
10904             return -TARGET_EFAULT;
10905         }
10906         ret = get_errno(lstat(path(p), &st));
10907         unlock_user(p, arg1, 0);
10908         goto do_stat;
10909 #endif
10910 #ifdef TARGET_NR_fstat
10911     case TARGET_NR_fstat:
10912         {
10913             ret = get_errno(fstat(arg1, &st));
10914 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10915         do_stat:
10916 #endif
10917             if (!is_error(ret)) {
10918                 struct target_stat *target_st;
10919 
10920                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10921                     return -TARGET_EFAULT;
10922                 memset(target_st, 0, sizeof(*target_st));
10923                 __put_user(st.st_dev, &target_st->st_dev);
10924                 __put_user(st.st_ino, &target_st->st_ino);
10925                 __put_user(st.st_mode, &target_st->st_mode);
10926                 __put_user(st.st_uid, &target_st->st_uid);
10927                 __put_user(st.st_gid, &target_st->st_gid);
10928                 __put_user(st.st_nlink, &target_st->st_nlink);
10929                 __put_user(st.st_rdev, &target_st->st_rdev);
10930                 __put_user(st.st_size, &target_st->st_size);
10931                 __put_user(st.st_blksize, &target_st->st_blksize);
10932                 __put_user(st.st_blocks, &target_st->st_blocks);
10933                 __put_user(st.st_atime, &target_st->target_st_atime);
10934                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10935                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10936 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10937                 __put_user(st.st_atim.tv_nsec,
10938                            &target_st->target_st_atime_nsec);
10939                 __put_user(st.st_mtim.tv_nsec,
10940                            &target_st->target_st_mtime_nsec);
10941                 __put_user(st.st_ctim.tv_nsec,
10942                            &target_st->target_st_ctime_nsec);
10943 #endif
10944                 unlock_user_struct(target_st, arg2, 1);
10945             }
10946         }
10947         return ret;
10948 #endif
10949     case TARGET_NR_vhangup:
10950         return get_errno(vhangup());
10951 #ifdef TARGET_NR_syscall
10952     case TARGET_NR_syscall:
10953         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10954                           arg6, arg7, arg8, 0);
10955 #endif
10956 #if defined(TARGET_NR_wait4)
10957     case TARGET_NR_wait4:
10958         {
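                  /*
                   * The guest status word is written back only when a child
                   * was actually reaped (ret != 0); a failure while
                   * converting the rusage buffer replaces the return value.
                   */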
10959             int status;
10960             abi_long status_ptr = arg2;
10961             struct rusage rusage, *rusage_ptr;
10962             abi_ulong target_rusage = arg4;
10963             abi_long rusage_err;
10964             if (target_rusage)
10965                 rusage_ptr = &rusage;
10966             else
10967                 rusage_ptr = NULL;
10968             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10969             if (!is_error(ret)) {
10970                 if (status_ptr && ret) {
10971                     status = host_to_target_waitstatus(status);
10972                     if (put_user_s32(status, status_ptr))
10973                         return -TARGET_EFAULT;
10974                 }
10975                 if (target_rusage) {
10976                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10977                     if (rusage_err) {
10978                         ret = rusage_err;
10979                     }
10980                 }
10981             }
10982         }
10983         return ret;
10984 #endif
10985 #ifdef TARGET_NR_swapoff
10986     case TARGET_NR_swapoff:
10987         if (!(p = lock_user_string(arg1)))
10988             return -TARGET_EFAULT;
10989         ret = get_errno(swapoff(p));
10990         unlock_user(p, arg1, 0);
10991         return ret;
10992 #endif
10993     case TARGET_NR_sysinfo:
10994         {
10995             struct target_sysinfo *target_value;
10996             struct sysinfo value;
10997             ret = get_errno(sysinfo(&value));
10998             if (!is_error(ret) && arg1)
10999             {
11000                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11001                     return -TARGET_EFAULT;
11002                 __put_user(value.uptime, &target_value->uptime);
11003                 __put_user(value.loads[0], &target_value->loads[0]);
11004                 __put_user(value.loads[1], &target_value->loads[1]);
11005                 __put_user(value.loads[2], &target_value->loads[2]);
11006                 __put_user(value.totalram, &target_value->totalram);
11007                 __put_user(value.freeram, &target_value->freeram);
11008                 __put_user(value.sharedram, &target_value->sharedram);
11009                 __put_user(value.bufferram, &target_value->bufferram);
11010                 __put_user(value.totalswap, &target_value->totalswap);
11011                 __put_user(value.freeswap, &target_value->freeswap);
11012                 __put_user(value.procs, &target_value->procs);
11013                 __put_user(value.totalhigh, &target_value->totalhigh);
11014                 __put_user(value.freehigh, &target_value->freehigh);
11015                 __put_user(value.mem_unit, &target_value->mem_unit);
11016                 unlock_user_struct(target_value, arg1, 1);
11017             }
11018         }
11019         return ret;
11020 #ifdef TARGET_NR_ipc
11021     case TARGET_NR_ipc:
11022         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11023 #endif
11024 #ifdef TARGET_NR_semget
11025     case TARGET_NR_semget:
11026         return get_errno(semget(arg1, arg2, arg3));
11027 #endif
11028 #ifdef TARGET_NR_semop
11029     case TARGET_NR_semop:
11030         return do_semtimedop(arg1, arg2, arg3, 0, false);
11031 #endif
11032 #ifdef TARGET_NR_semtimedop
11033     case TARGET_NR_semtimedop:
11034         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11035 #endif
11036 #ifdef TARGET_NR_semtimedop_time64
11037     case TARGET_NR_semtimedop_time64:
11038         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11039 #endif
11040 #ifdef TARGET_NR_semctl
11041     case TARGET_NR_semctl:
11042         return do_semctl(arg1, arg2, arg3, arg4);
11043 #endif
11044 #ifdef TARGET_NR_msgctl
11045     case TARGET_NR_msgctl:
11046         return do_msgctl(arg1, arg2, arg3);
11047 #endif
11048 #ifdef TARGET_NR_msgget
11049     case TARGET_NR_msgget:
11050         return get_errno(msgget(arg1, arg2));
11051 #endif
11052 #ifdef TARGET_NR_msgrcv
11053     case TARGET_NR_msgrcv:
11054         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11055 #endif
11056 #ifdef TARGET_NR_msgsnd
11057     case TARGET_NR_msgsnd:
11058         return do_msgsnd(arg1, arg2, arg3, arg4);
11059 #endif
11060 #ifdef TARGET_NR_shmget
11061     case TARGET_NR_shmget:
11062         return get_errno(shmget(arg1, arg2, arg3));
11063 #endif
11064 #ifdef TARGET_NR_shmctl
11065     case TARGET_NR_shmctl:
11066         return do_shmctl(arg1, arg2, arg3);
11067 #endif
11068 #ifdef TARGET_NR_shmat
11069     case TARGET_NR_shmat:
11070         return do_shmat(cpu_env, arg1, arg2, arg3);
11071 #endif
11072 #ifdef TARGET_NR_shmdt
11073     case TARGET_NR_shmdt:
11074         return do_shmdt(arg1);
11075 #endif
11076     case TARGET_NR_fsync:
11077         return get_errno(fsync(arg1));
11078     case TARGET_NR_clone:
11079         /* Linux manages to have three different orderings for its
11080          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11081          * match the kernel's CONFIG_CLONE_* settings.
11082          * Microblaze is further special in that it uses a sixth
11083          * implicit argument to clone for the TLS pointer.
11084          */
11085 #if defined(TARGET_MICROBLAZE)
11086         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11087 #elif defined(TARGET_CLONE_BACKWARDS)
11088         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11089 #elif defined(TARGET_CLONE_BACKWARDS2)
11090         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11091 #else
11092         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11093 #endif
11094         return ret;
11095 #ifdef __NR_exit_group
11096         /* new thread calls */
11097     case TARGET_NR_exit_group:
11098         preexit_cleanup(cpu_env, arg1);
11099         return get_errno(exit_group(arg1));
11100 #endif
11101     case TARGET_NR_setdomainname:
11102         if (!(p = lock_user_string(arg1)))
11103             return -TARGET_EFAULT;
11104         ret = get_errno(setdomainname(p, arg2));
11105         unlock_user(p, arg1, 0);
11106         return ret;
11107     case TARGET_NR_uname:
11108         /* no need to transcode because we use the Linux syscall */
11109         {
11110             struct new_utsname * buf;
11111 
11112             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11113                 return -TARGET_EFAULT;
11114             ret = get_errno(sys_uname(buf));
11115             if (!is_error(ret)) {
11116                 /* Overwrite the native machine name with whatever is being
11117                    emulated. */
11118                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11119                           sizeof(buf->machine));
11120                 /* Allow the user to override the reported release.  */
11121                 if (qemu_uname_release && *qemu_uname_release) {
11122                     g_strlcpy(buf->release, qemu_uname_release,
11123                               sizeof(buf->release));
11124                 }
11125             }
11126             unlock_user_struct(buf, arg1, 1);
11127         }
11128         return ret;
11129 #ifdef TARGET_I386
11130     case TARGET_NR_modify_ldt:
11131         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11132 #if !defined(TARGET_X86_64)
11133     case TARGET_NR_vm86:
11134         return do_vm86(cpu_env, arg1, arg2);
11135 #endif
11136 #endif
11137 #if defined(TARGET_NR_adjtimex)
11138     case TARGET_NR_adjtimex:
11139         {
11140             struct timex host_buf;
11141 
11142             if (target_to_host_timex(&host_buf, arg1) != 0) {
11143                 return -TARGET_EFAULT;
11144             }
11145             ret = get_errno(adjtimex(&host_buf));
11146             if (!is_error(ret)) {
11147                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11148                     return -TARGET_EFAULT;
11149                 }
11150             }
11151         }
11152         return ret;
11153 #endif
11154 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11155     case TARGET_NR_clock_adjtime:
11156         {
11157             struct timex htx;
11158 
11159             if (target_to_host_timex(&htx, arg2) != 0) {
11160                 return -TARGET_EFAULT;
11161             }
11162             ret = get_errno(clock_adjtime(arg1, &htx));
11163             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11164                 return -TARGET_EFAULT;
11165             }
11166         }
11167         return ret;
11168 #endif
11169 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11170     case TARGET_NR_clock_adjtime64:
11171         {
11172             struct timex htx;
11173 
11174             if (target_to_host_timex64(&htx, arg2) != 0) {
11175                 return -TARGET_EFAULT;
11176             }
11177             ret = get_errno(clock_adjtime(arg1, &htx));
11178             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11179                 return -TARGET_EFAULT;
11180             }
11181         }
11182         return ret;
11183 #endif
11184     case TARGET_NR_getpgid:
11185         return get_errno(getpgid(arg1));
11186     case TARGET_NR_fchdir:
11187         return get_errno(fchdir(arg1));
11188     case TARGET_NR_personality:
11189         return get_errno(personality(arg1));
11190 #ifdef TARGET_NR__llseek /* Not on alpha */
11191     case TARGET_NR__llseek:
11192         {
11193             int64_t res;
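                  /*
                   * Hosts without __NR_llseek emulate it with lseek(): arg2
                   * and arg3 are the high and low halves of the 64-bit
                   * offset and arg5 is the whence value. Either way the
                   * resulting position is stored through the guest pointer
                   * in arg4.
                   */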
11194 #if !defined(__NR_llseek)
11195             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11196             if (res == -1) {
11197                 ret = get_errno(res);
11198             } else {
11199                 ret = 0;
11200             }
11201 #else
11202             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11203 #endif
11204             if ((ret == 0) && put_user_s64(res, arg4)) {
11205                 return -TARGET_EFAULT;
11206             }
11207         }
11208         return ret;
11209 #endif
11210 #ifdef TARGET_NR_getdents
11211     case TARGET_NR_getdents:
11212         return do_getdents(arg1, arg2, arg3);
11213 #endif /* TARGET_NR_getdents */
11214 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11215     case TARGET_NR_getdents64:
11216         return do_getdents64(arg1, arg2, arg3);
11217 #endif /* TARGET_NR_getdents64 */
11218 #if defined(TARGET_NR__newselect)
11219     case TARGET_NR__newselect:
11220         return do_select(arg1, arg2, arg3, arg4, arg5);
11221 #endif
11222 #ifdef TARGET_NR_poll
11223     case TARGET_NR_poll:
11224         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11225 #endif
11226 #ifdef TARGET_NR_ppoll
11227     case TARGET_NR_ppoll:
11228         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11229 #endif
11230 #ifdef TARGET_NR_ppoll_time64
11231     case TARGET_NR_ppoll_time64:
11232         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11233 #endif
11234     case TARGET_NR_flock:
11235         /* NOTE: the flock constant seems to be the same for every
11236            Linux platform */
11237         return get_errno(safe_flock(arg1, arg2));
11238     case TARGET_NR_readv:
11239         {
11240             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11241             if (vec != NULL) {
11242                 ret = get_errno(safe_readv(arg1, vec, arg3));
11243                 unlock_iovec(vec, arg2, arg3, 1);
11244             } else {
11245                 ret = -host_to_target_errno(errno);
11246             }
11247         }
11248         return ret;
11249     case TARGET_NR_writev:
11250         {
11251             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11252             if (vec != NULL) {
11253                 ret = get_errno(safe_writev(arg1, vec, arg3));
11254                 unlock_iovec(vec, arg2, arg3, 0);
11255             } else {
11256                 ret = -host_to_target_errno(errno);
11257             }
11258         }
11259         return ret;
11260 #if defined(TARGET_NR_preadv)
11261     case TARGET_NR_preadv:
11262         {
11263             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11264             if (vec != NULL) {
11265                 unsigned long low, high;
11266 
11267                 target_to_host_low_high(arg4, arg5, &low, &high);
11268                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11269                 unlock_iovec(vec, arg2, arg3, 1);
11270             } else {
11271                 ret = -host_to_target_errno(errno);
11272             }
11273         }
11274         return ret;
11275 #endif
11276 #if defined(TARGET_NR_pwritev)
11277     case TARGET_NR_pwritev:
11278         {
11279             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11280             if (vec != NULL) {
11281                 unsigned long low, high;
11282 
11283                 target_to_host_low_high(arg4, arg5, &low, &high);
11284                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11285                 unlock_iovec(vec, arg2, arg3, 0);
11286             } else {
11287                 ret = -host_to_target_errno(errno);
11288             }
11289         }
11290         return ret;
11291 #endif
11292     case TARGET_NR_getsid:
11293         return get_errno(getsid(arg1));
11294 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11295     case TARGET_NR_fdatasync:
11296         return get_errno(fdatasync(arg1));
11297 #endif
11298     case TARGET_NR_sched_getaffinity:
11299         {
11300             unsigned int mask_size;
11301             unsigned long *mask;
11302 
11303             /*
11304              * sched_getaffinity needs multiples of ulong, so we need to take
11305              * care of mismatches between target ulong and host ulong sizes.
11306              */
11307             if (arg2 & (sizeof(abi_ulong) - 1)) {
11308                 return -TARGET_EINVAL;
11309             }
11310             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11311 
11312             mask = alloca(mask_size);
11313             memset(mask, 0, mask_size);
11314             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11315 
11316             if (!is_error(ret)) {
11317                 if (ret > arg2) {
11318                     /* More data returned than the caller's buffer will fit.
11319                      * This only happens if sizeof(abi_long) < sizeof(long)
11320                      * and the caller passed us a buffer holding an odd number
11321                      * of abi_longs. If the host kernel is actually using the
11322                      * extra 4 bytes then fail EINVAL; otherwise we can just
11323                      * ignore them and only copy the interesting part.
11324                      */
11325                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11326                     if (numcpus > arg2 * 8) {
11327                         return -TARGET_EINVAL;
11328                     }
11329                     ret = arg2;
11330                 }
11331 
11332                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11333                     return -TARGET_EFAULT;
11334                 }
11335             }
11336         }
11337         return ret;
11338     case TARGET_NR_sched_setaffinity:
11339         {
11340             unsigned int mask_size;
11341             unsigned long *mask;
11342 
11343             /*
11344              * sched_setaffinity needs multiples of ulong, so we need to take
11345              * care of mismatches between target ulong and host ulong sizes.
11346              */
11347             if (arg2 & (sizeof(abi_ulong) - 1)) {
11348                 return -TARGET_EINVAL;
11349             }
11350             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11351             mask = alloca(mask_size);
11352 
11353             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11354             if (ret) {
11355                 return ret;
11356             }
11357 
11358             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11359         }
11360     case TARGET_NR_getcpu:
11361         {
11362             unsigned cpu, node;
11363             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
11364                                        arg2 ? &node : NULL,
11365                                        NULL));
11366             if (is_error(ret)) {
11367                 return ret;
11368             }
11369             if (arg1 && put_user_u32(cpu, arg1)) {
11370                 return -TARGET_EFAULT;
11371             }
11372             if (arg2 && put_user_u32(node, arg2)) {
11373                 return -TARGET_EFAULT;
11374             }
11375         }
11376         return ret;
11377     case TARGET_NR_sched_setparam:
11378         {
11379             struct target_sched_param *target_schp;
11380             struct sched_param schp;
11381 
11382             if (arg2 == 0) {
11383                 return -TARGET_EINVAL;
11384             }
11385             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11386                 return -TARGET_EFAULT;
11387             }
11388             schp.sched_priority = tswap32(target_schp->sched_priority);
11389             unlock_user_struct(target_schp, arg2, 0);
11390             return get_errno(sys_sched_setparam(arg1, &schp));
11391         }
11392     case TARGET_NR_sched_getparam:
11393         {
11394             struct target_sched_param *target_schp;
11395             struct sched_param schp;
11396 
11397             if (arg2 == 0) {
11398                 return -TARGET_EINVAL;
11399             }
11400             ret = get_errno(sys_sched_getparam(arg1, &schp));
11401             if (!is_error(ret)) {
11402                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11403                     return -TARGET_EFAULT;
11404                 }
11405                 target_schp->sched_priority = tswap32(schp.sched_priority);
11406                 unlock_user_struct(target_schp, arg2, 1);
11407             }
11408         }
11409         return ret;
11410     case TARGET_NR_sched_setscheduler:
11411         {
11412             struct target_sched_param *target_schp;
11413             struct sched_param schp;
11414             if (arg3 == 0) {
11415                 return -TARGET_EINVAL;
11416             }
11417             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11418                 return -TARGET_EFAULT;
11419             }
11420             schp.sched_priority = tswap32(target_schp->sched_priority);
11421             unlock_user_struct(target_schp, arg3, 0);
11422             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11423         }
11424     case TARGET_NR_sched_getscheduler:
11425         return get_errno(sys_sched_getscheduler(arg1));
11426     case TARGET_NR_sched_getattr:
11427         {
11428             struct target_sched_attr *target_scha;
11429             struct sched_attr scha;
11430             if (arg2 == 0) {
11431                 return -TARGET_EINVAL;
11432             }
11433             if (arg3 > sizeof(scha)) {
11434                 arg3 = sizeof(scha);
11435             }
11436             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11437             if (!is_error(ret)) {
11438                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11439                 if (!target_scha) {
11440                     return -TARGET_EFAULT;
11441                 }
11442                 target_scha->size = tswap32(scha.size);
11443                 target_scha->sched_policy = tswap32(scha.sched_policy);
11444                 target_scha->sched_flags = tswap64(scha.sched_flags);
11445                 target_scha->sched_nice = tswap32(scha.sched_nice);
11446                 target_scha->sched_priority = tswap32(scha.sched_priority);
11447                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11448                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11449                 target_scha->sched_period = tswap64(scha.sched_period);
11450                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11451                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11452                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11453                 }
11454                 unlock_user(target_scha, arg2, arg3);
11455             }
11456             return ret;
11457         }
11458     case TARGET_NR_sched_setattr:
11459         {
11460             struct target_sched_attr *target_scha;
11461             struct sched_attr scha;
11462             uint32_t size;
11463             int zeroed;
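                  /*
                   * Validate the guest-supplied sched_attr.size much as the
                   * kernel does: a zero size means the original layout
                   * (without the util-clamp fields), an undersized struct
                   * gets E2BIG after the supported size is written back, and
                   * any bytes beyond the fields we understand must be zero.
                   */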
11464             if (arg2 == 0) {
11465                 return -TARGET_EINVAL;
11466             }
11467             if (get_user_u32(size, arg2)) {
11468                 return -TARGET_EFAULT;
11469             }
11470             if (!size) {
11471                 size = offsetof(struct target_sched_attr, sched_util_min);
11472             }
11473             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11474                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11475                     return -TARGET_EFAULT;
11476                 }
11477                 return -TARGET_E2BIG;
11478             }
11479 
11480             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11481             if (zeroed < 0) {
11482                 return zeroed;
11483             } else if (zeroed == 0) {
11484                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11485                     return -TARGET_EFAULT;
11486                 }
11487                 return -TARGET_E2BIG;
11488             }
11489             if (size > sizeof(struct target_sched_attr)) {
11490                 size = sizeof(struct target_sched_attr);
11491             }
11492 
11493             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11494             if (!target_scha) {
11495                 return -TARGET_EFAULT;
11496             }
11497             scha.size = size;
11498             scha.sched_policy = tswap32(target_scha->sched_policy);
11499             scha.sched_flags = tswap64(target_scha->sched_flags);
11500             scha.sched_nice = tswap32(target_scha->sched_nice);
11501             scha.sched_priority = tswap32(target_scha->sched_priority);
11502             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11503             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11504             scha.sched_period = tswap64(target_scha->sched_period);
11505             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11506                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11507                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11508             }
11509             unlock_user(target_scha, arg2, 0);
11510             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11511         }
11512     case TARGET_NR_sched_yield:
11513         return get_errno(sched_yield());
11514     case TARGET_NR_sched_get_priority_max:
11515         return get_errno(sched_get_priority_max(arg1));
11516     case TARGET_NR_sched_get_priority_min:
11517         return get_errno(sched_get_priority_min(arg1));
11518 #ifdef TARGET_NR_sched_rr_get_interval
11519     case TARGET_NR_sched_rr_get_interval:
11520         {
11521             struct timespec ts;
11522             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11523             if (!is_error(ret)) {
11524                 ret = host_to_target_timespec(arg2, &ts);
11525             }
11526         }
11527         return ret;
11528 #endif
11529 #ifdef TARGET_NR_sched_rr_get_interval_time64
11530     case TARGET_NR_sched_rr_get_interval_time64:
11531         {
11532             struct timespec ts;
11533             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11534             if (!is_error(ret)) {
11535                 ret = host_to_target_timespec64(arg2, &ts);
11536             }
11537         }
11538         return ret;
11539 #endif
11540 #if defined(TARGET_NR_nanosleep)
11541     case TARGET_NR_nanosleep:
11542         {
11543             struct timespec req, rem;
11544             target_to_host_timespec(&req, arg1);
11545             ret = get_errno(safe_nanosleep(&req, &rem));
11546             if (is_error(ret) && arg2) {
11547                 host_to_target_timespec(arg2, &rem);
11548             }
11549         }
11550         return ret;
11551 #endif
11552     case TARGET_NR_prctl:
11553         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11554         break;
11555 #ifdef TARGET_NR_arch_prctl
11556     case TARGET_NR_arch_prctl:
11557         return do_arch_prctl(cpu_env, arg1, arg2);
11558 #endif
11559 #ifdef TARGET_NR_pread64
11560     case TARGET_NR_pread64:
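              /*
               * On ABIs that pass 64-bit values in aligned register pairs a
               * padding argument precedes the offset, so the offset halves
               * arrive one slot later; shift them down into arg4/arg5.
               */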
11561         if (regpairs_aligned(cpu_env, num)) {
11562             arg4 = arg5;
11563             arg5 = arg6;
11564         }
11565         if (arg2 == 0 && arg3 == 0) {
11566             /* Special-case NULL buffer and zero length, which should succeed */
11567             p = 0;
11568         } else {
11569             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11570             if (!p) {
11571                 return -TARGET_EFAULT;
11572             }
11573         }
11574         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11575         unlock_user(p, arg2, ret);
11576         return ret;
11577     case TARGET_NR_pwrite64:
11578         if (regpairs_aligned(cpu_env, num)) {
11579             arg4 = arg5;
11580             arg5 = arg6;
11581         }
11582         if (arg2 == 0 && arg3 == 0) {
11583             /* Special-case NULL buffer and zero length, which should succeed */
11584             p = 0;
11585         } else {
11586             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11587             if (!p) {
11588                 return -TARGET_EFAULT;
11589             }
11590         }
11591         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11592         unlock_user(p, arg2, 0);
11593         return ret;
11594 #endif
11595     case TARGET_NR_getcwd:
11596         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11597             return -TARGET_EFAULT;
11598         ret = get_errno(sys_getcwd1(p, arg2));
11599         unlock_user(p, arg1, ret);
11600         return ret;
11601     case TARGET_NR_capget:
11602     case TARGET_NR_capset:
11603     {
11604         struct target_user_cap_header *target_header;
11605         struct target_user_cap_data *target_data = NULL;
11606         struct __user_cap_header_struct header;
11607         struct __user_cap_data_struct data[2];
11608         struct __user_cap_data_struct *dataptr = NULL;
11609         int i, target_datalen;
11610         int data_items = 1;
11611 
11612         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11613             return -TARGET_EFAULT;
11614         }
11615         header.version = tswap32(target_header->version);
11616         header.pid = tswap32(target_header->pid);
11617 
11618         if (header.version != _LINUX_CAPABILITY_VERSION) {
11619             /* Versions 2 and up take a pointer to two user_data structs */
11620             data_items = 2;
11621         }
11622 
11623         target_datalen = sizeof(*target_data) * data_items;
11624 
11625         if (arg2) {
11626             if (num == TARGET_NR_capget) {
11627                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11628             } else {
11629                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11630             }
11631             if (!target_data) {
11632                 unlock_user_struct(target_header, arg1, 0);
11633                 return -TARGET_EFAULT;
11634             }
11635 
11636             if (num == TARGET_NR_capset) {
11637                 for (i = 0; i < data_items; i++) {
11638                     data[i].effective = tswap32(target_data[i].effective);
11639                     data[i].permitted = tswap32(target_data[i].permitted);
11640                     data[i].inheritable = tswap32(target_data[i].inheritable);
11641                 }
11642             }
11643 
11644             dataptr = data;
11645         }
11646 
11647         if (num == TARGET_NR_capget) {
11648             ret = get_errno(capget(&header, dataptr));
11649         } else {
11650             ret = get_errno(capset(&header, dataptr));
11651         }
11652 
11653         /* The kernel always updates version for both capget and capset */
11654         target_header->version = tswap32(header.version);
11655         unlock_user_struct(target_header, arg1, 1);
11656 
11657         if (arg2) {
11658             if (num == TARGET_NR_capget) {
11659                 for (i = 0; i < data_items; i++) {
11660                     target_data[i].effective = tswap32(data[i].effective);
11661                     target_data[i].permitted = tswap32(data[i].permitted);
11662                     target_data[i].inheritable = tswap32(data[i].inheritable);
11663                 }
11664                 unlock_user(target_data, arg2, target_datalen);
11665             } else {
11666                 unlock_user(target_data, arg2, 0);
11667             }
11668         }
11669         return ret;
11670     }
11671     case TARGET_NR_sigaltstack:
11672         return do_sigaltstack(arg1, arg2, cpu_env);
11673 
11674 #ifdef CONFIG_SENDFILE
11675 #ifdef TARGET_NR_sendfile
11676     case TARGET_NR_sendfile:
11677     {
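              /*
               * sendfile takes an abi_long offset (read and written back via
               * get_user_sal/put_user_sal) while sendfile64 below uses a
               * full 64-bit offset; in both cases the offset updated by the
               * host sendfile() is written back to the guest when an offset
               * pointer was supplied.
               */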
11678         off_t *offp = NULL;
11679         off_t off;
11680         if (arg3) {
11681             ret = get_user_sal(off, arg3);
11682             if (is_error(ret)) {
11683                 return ret;
11684             }
11685             offp = &off;
11686         }
11687         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11688         if (!is_error(ret) && arg3) {
11689             abi_long ret2 = put_user_sal(off, arg3);
11690             if (is_error(ret2)) {
11691                 ret = ret2;
11692             }
11693         }
11694         return ret;
11695     }
11696 #endif
11697 #ifdef TARGET_NR_sendfile64
11698     case TARGET_NR_sendfile64:
11699     {
11700         off_t *offp = NULL;
11701         off_t off;
11702         if (arg3) {
11703             ret = get_user_s64(off, arg3);
11704             if (is_error(ret)) {
11705                 return ret;
11706             }
11707             offp = &off;
11708         }
11709         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11710         if (!is_error(ret) && arg3) {
11711             abi_long ret2 = put_user_s64(off, arg3);
11712             if (is_error(ret2)) {
11713                 ret = ret2;
11714             }
11715         }
11716         return ret;
11717     }
11718 #endif
11719 #endif
11720 #ifdef TARGET_NR_vfork
11721     case TARGET_NR_vfork:
11722         return get_errno(do_fork(cpu_env,
11723                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11724                          0, 0, 0, 0));
11725 #endif
11726 #ifdef TARGET_NR_ugetrlimit
11727     case TARGET_NR_ugetrlimit:
11728     {
11729         struct rlimit rlim;
11730         int resource = target_to_host_resource(arg1);
11731         ret = get_errno(getrlimit(resource, &rlim));
11732         if (!is_error(ret)) {
11733             struct target_rlimit *target_rlim;
11734             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11735                 return -TARGET_EFAULT;
11736             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11737             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11738             unlock_user_struct(target_rlim, arg2, 1);
11739         }
11740         return ret;
11741     }
11742 #endif
11743 #ifdef TARGET_NR_truncate64
11744     case TARGET_NR_truncate64:
11745         if (!(p = lock_user_string(arg1)))
11746             return -TARGET_EFAULT;
11747         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11748         unlock_user(p, arg1, 0);
11749         return ret;
11750 #endif
11751 #ifdef TARGET_NR_ftruncate64
11752     case TARGET_NR_ftruncate64:
11753         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11754 #endif
11755 #ifdef TARGET_NR_stat64
11756     case TARGET_NR_stat64:
11757         if (!(p = lock_user_string(arg1))) {
11758             return -TARGET_EFAULT;
11759         }
11760         ret = get_errno(stat(path(p), &st));
11761         unlock_user(p, arg1, 0);
11762         if (!is_error(ret))
11763             ret = host_to_target_stat64(cpu_env, arg2, &st);
11764         return ret;
11765 #endif
11766 #ifdef TARGET_NR_lstat64
11767     case TARGET_NR_lstat64:
11768         if (!(p = lock_user_string(arg1))) {
11769             return -TARGET_EFAULT;
11770         }
11771         ret = get_errno(lstat(path(p), &st));
11772         unlock_user(p, arg1, 0);
11773         if (!is_error(ret))
11774             ret = host_to_target_stat64(cpu_env, arg2, &st);
11775         return ret;
11776 #endif
11777 #ifdef TARGET_NR_fstat64
11778     case TARGET_NR_fstat64:
11779         ret = get_errno(fstat(arg1, &st));
11780         if (!is_error(ret))
11781             ret = host_to_target_stat64(cpu_env, arg2, &st);
11782         return ret;
11783 #endif
11784 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11785 #ifdef TARGET_NR_fstatat64
11786     case TARGET_NR_fstatat64:
11787 #endif
11788 #ifdef TARGET_NR_newfstatat
11789     case TARGET_NR_newfstatat:
11790 #endif
11791         if (!(p = lock_user_string(arg2))) {
11792             return -TARGET_EFAULT;
11793         }
11794         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11795         unlock_user(p, arg2, 0);
11796         if (!is_error(ret))
11797             ret = host_to_target_stat64(cpu_env, arg3, &st);
11798         return ret;
11799 #endif
11800 #if defined(TARGET_NR_statx)
11801     case TARGET_NR_statx:
11802         {
11803             struct target_statx *target_stx;
11804             int dirfd = arg1;
11805             int flags = arg3;
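                  /*
                   * Prefer the host statx(2) when available (struct statx has
                   * the same layout everywhere, so target_statx doubles as
                   * the host buffer); if it is missing or returns ENOSYS,
                   * fall back to fstatat() and fill in the basic fields only.
                   */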
11806 
11807             p = lock_user_string(arg2);
11808             if (p == NULL) {
11809                 return -TARGET_EFAULT;
11810             }
11811 #if defined(__NR_statx)
11812             {
11813                 /*
11814                  * It is assumed that struct statx is architecture independent.
11815                  */
11816                 struct target_statx host_stx;
11817                 int mask = arg4;
11818 
11819                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11820                 if (!is_error(ret)) {
11821                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11822                         unlock_user(p, arg2, 0);
11823                         return -TARGET_EFAULT;
11824                     }
11825                 }
11826 
11827                 if (ret != -TARGET_ENOSYS) {
11828                     unlock_user(p, arg2, 0);
11829                     return ret;
11830                 }
11831             }
11832 #endif
11833             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11834             unlock_user(p, arg2, 0);
11835 
11836             if (!is_error(ret)) {
11837                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11838                     return -TARGET_EFAULT;
11839                 }
11840                 memset(target_stx, 0, sizeof(*target_stx));
11841                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11842                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11843                 __put_user(st.st_ino, &target_stx->stx_ino);
11844                 __put_user(st.st_mode, &target_stx->stx_mode);
11845                 __put_user(st.st_uid, &target_stx->stx_uid);
11846                 __put_user(st.st_gid, &target_stx->stx_gid);
11847                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11848                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11849                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11850                 __put_user(st.st_size, &target_stx->stx_size);
11851                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11852                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11853                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11854                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11855                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11856                 unlock_user_struct(target_stx, arg5, 1);
11857             }
11858         }
11859         return ret;
11860 #endif
11861 #ifdef TARGET_NR_lchown
11862     case TARGET_NR_lchown:
11863         if (!(p = lock_user_string(arg1)))
11864             return -TARGET_EFAULT;
11865         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11866         unlock_user(p, arg1, 0);
11867         return ret;
11868 #endif
11869 #ifdef TARGET_NR_getuid
11870     case TARGET_NR_getuid:
11871         return get_errno(high2lowuid(getuid()));
11872 #endif
11873 #ifdef TARGET_NR_getgid
11874     case TARGET_NR_getgid:
11875         return get_errno(high2lowgid(getgid()));
11876 #endif
11877 #ifdef TARGET_NR_geteuid
11878     case TARGET_NR_geteuid:
11879         return get_errno(high2lowuid(geteuid()));
11880 #endif
11881 #ifdef TARGET_NR_getegid
11882     case TARGET_NR_getegid:
11883         return get_errno(high2lowgid(getegid()));
11884 #endif
11885     case TARGET_NR_setreuid:
11886         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11887     case TARGET_NR_setregid:
11888         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11889     case TARGET_NR_getgroups:
11890         { /* the same code as for TARGET_NR_getgroups32 */
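                  /*
                   * target_id is the guest's (possibly 16-bit) legacy gid
                   * type: high2lowgid() squashes host gids that do not fit
                   * in it and tswapid() fixes the byte order before the
                   * list is copied out.
                   */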
11891             int gidsetsize = arg1;
11892             target_id *target_grouplist;
11893             g_autofree gid_t *grouplist = NULL;
11894             int i;
11895 
11896             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11897                 return -TARGET_EINVAL;
11898             }
11899             if (gidsetsize > 0) {
11900                 grouplist = g_try_new(gid_t, gidsetsize);
11901                 if (!grouplist) {
11902                     return -TARGET_ENOMEM;
11903                 }
11904             }
11905             ret = get_errno(getgroups(gidsetsize, grouplist));
11906             if (!is_error(ret) && gidsetsize > 0) {
11907                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11908                                              gidsetsize * sizeof(target_id), 0);
11909                 if (!target_grouplist) {
11910                     return -TARGET_EFAULT;
11911                 }
11912                 for (i = 0; i < ret; i++) {
11913                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11914                 }
11915                 unlock_user(target_grouplist, arg2,
11916                             gidsetsize * sizeof(target_id));
11917             }
11918             return ret;
11919         }
11920     case TARGET_NR_setgroups:
11921         { /* the same code as for TARGET_NR_setgroups32 */
11922             int gidsetsize = arg1;
11923             target_id *target_grouplist;
11924             g_autofree gid_t *grouplist = NULL;
11925             int i;
11926 
11927             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11928                 return -TARGET_EINVAL;
11929             }
11930             if (gidsetsize > 0) {
11931                 grouplist = g_try_new(gid_t, gidsetsize);
11932                 if (!grouplist) {
11933                     return -TARGET_ENOMEM;
11934                 }
11935                 target_grouplist = lock_user(VERIFY_READ, arg2,
11936                                              gidsetsize * sizeof(target_id), 1);
11937                 if (!target_grouplist) {
11938                     return -TARGET_EFAULT;
11939                 }
11940                 for (i = 0; i < gidsetsize; i++) {
11941                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11942                 }
11943                 unlock_user(target_grouplist, arg2,
11944                             gidsetsize * sizeof(target_id));
11945             }
11946             return get_errno(setgroups(gidsetsize, grouplist));
11947         }
11948     case TARGET_NR_fchown:
11949         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11950 #if defined(TARGET_NR_fchownat)
11951     case TARGET_NR_fchownat:
11952         if (!(p = lock_user_string(arg2)))
11953             return -TARGET_EFAULT;
11954         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11955                                  low2highgid(arg4), arg5));
11956         unlock_user(p, arg2, 0);
11957         return ret;
11958 #endif
11959 #ifdef TARGET_NR_setresuid
11960     case TARGET_NR_setresuid:
11961         return get_errno(sys_setresuid(low2highuid(arg1),
11962                                        low2highuid(arg2),
11963                                        low2highuid(arg3)));
11964 #endif
11965 #ifdef TARGET_NR_getresuid
11966     case TARGET_NR_getresuid:
11967         {
11968             uid_t ruid, euid, suid;
11969             ret = get_errno(getresuid(&ruid, &euid, &suid));
11970             if (!is_error(ret)) {
11971                 if (put_user_id(high2lowuid(ruid), arg1)
11972                     || put_user_id(high2lowuid(euid), arg2)
11973                     || put_user_id(high2lowuid(suid), arg3))
11974                     return -TARGET_EFAULT;
11975             }
11976         }
11977         return ret;
11978 #endif
11979 #ifdef TARGET_NR_getresgid
11980     case TARGET_NR_setresgid:
11981         return get_errno(sys_setresgid(low2highgid(arg1),
11982                                        low2highgid(arg2),
11983                                        low2highgid(arg3)));
11984 #endif
11985 #ifdef TARGET_NR_getresgid
11986     case TARGET_NR_getresgid:
11987         {
11988             gid_t rgid, egid, sgid;
11989             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11990             if (!is_error(ret)) {
11991                 if (put_user_id(high2lowgid(rgid), arg1)
11992                     || put_user_id(high2lowgid(egid), arg2)
11993                     || put_user_id(high2lowgid(sgid), arg3))
11994                     return -TARGET_EFAULT;
11995             }
11996         }
11997         return ret;
11998 #endif
11999 #ifdef TARGET_NR_chown
12000     case TARGET_NR_chown:
12001         if (!(p = lock_user_string(arg1)))
12002             return -TARGET_EFAULT;
12003         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12004         unlock_user(p, arg1, 0);
12005         return ret;
12006 #endif
12007     case TARGET_NR_setuid:
12008         return get_errno(sys_setuid(low2highuid(arg1)));
12009     case TARGET_NR_setgid:
12010         return get_errno(sys_setgid(low2highgid(arg1)));
12011     case TARGET_NR_setfsuid:
12012         return get_errno(setfsuid(arg1));
12013     case TARGET_NR_setfsgid:
12014         return get_errno(setfsgid(arg1));
12015 
12016 #ifdef TARGET_NR_lchown32
12017     case TARGET_NR_lchown32:
12018         if (!(p = lock_user_string(arg1)))
12019             return -TARGET_EFAULT;
12020         ret = get_errno(lchown(p, arg2, arg3));
12021         unlock_user(p, arg1, 0);
12022         return ret;
12023 #endif
12024 #ifdef TARGET_NR_getuid32
12025     case TARGET_NR_getuid32:
12026         return get_errno(getuid());
12027 #endif
12028 
12029 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12030    /* Alpha specific */
12031     case TARGET_NR_getxuid:
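              /*
               * Alpha's getxuid returns two values: the real uid as the
               * normal syscall result and the effective uid in register a4.
               */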
12032         {
12033             uid_t euid;
12034             euid = geteuid();
12035             cpu_env->ir[IR_A4] = euid;
12036         }
12037         return get_errno(getuid());
12038 #endif
12039 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12040    /* Alpha specific */
12041     case TARGET_NR_getxgid:
12042         {
12043             uid_t egid;
12044             egid = getegid();
12045             cpu_env->ir[IR_A4] = egid;
12046         }
12047         return get_errno(getgid());
12048 #endif
12049 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12050     /* Alpha specific */
12051     case TARGET_NR_osf_getsysinfo:
12052         ret = -TARGET_EOPNOTSUPP;
12053         switch (arg1) {
12054           case TARGET_GSI_IEEE_FP_CONTROL:
12055             {
12056                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12057                 uint64_t swcr = cpu_env->swcr;
12058 
12059                 swcr &= ~SWCR_STATUS_MASK;
12060                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12061 
12062                 if (put_user_u64(swcr, arg2))
12063                     return -TARGET_EFAULT;
12064                 ret = 0;
12065             }
12066             break;
12067 
12068           /* case GSI_IEEE_STATE_AT_SIGNAL:
12069              -- Not implemented in linux kernel.
12070              case GSI_UACPROC:
12071              -- Retrieves current unaligned access state; not much used.
12072              case GSI_PROC_TYPE:
12073              -- Retrieves implver information; surely not used.
12074              case GSI_GET_HWRPB:
12075              -- Grabs a copy of the HWRPB; surely not used.
12076           */
12077         }
12078         return ret;
12079 #endif
12080 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12081     /* Alpha specific */
12082     case TARGET_NR_osf_setsysinfo:
12083         ret = -TARGET_EOPNOTSUPP;
12084         switch (arg1) {
12085           case TARGET_SSI_IEEE_FP_CONTROL:
12086             {
12087                 uint64_t swcr, fpcr;
12088 
12089                 if (get_user_u64 (swcr, arg2)) {
12090                     return -TARGET_EFAULT;
12091                 }
12092 
12093                 /*
12094                  * The kernel calls swcr_update_status to update the
12095                  * status bits from the fpcr at every point that it
12096                  * could be queried.  Therefore, we store the status
12097                  * bits only in FPCR.
12098                  */
12099                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12100 
12101                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12102                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12103                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12104                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12105                 ret = 0;
12106             }
12107             break;
12108 
12109           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12110             {
12111                 uint64_t exc, fpcr, fex;
12112 
12113                 if (get_user_u64(exc, arg2)) {
12114                     return -TARGET_EFAULT;
12115                 }
12116                 exc &= SWCR_STATUS_MASK;
12117                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12118 
12119                 /* Old exceptions are not signaled.  */
12120                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12121                 fex = exc & ~fex;
12122                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12123                 fex &= (cpu_env)->swcr;
12124 
12125                 /* Update the hardware fpcr.  */
12126                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12127                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12128 
12129                 if (fex) {
12130                     int si_code = TARGET_FPE_FLTUNK;
12131                     target_siginfo_t info;
12132 
12133                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12134                         si_code = TARGET_FPE_FLTUND;
12135                     }
12136                     if (fex & SWCR_TRAP_ENABLE_INE) {
12137                         si_code = TARGET_FPE_FLTRES;
12138                     }
12139                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12140                         si_code = TARGET_FPE_FLTUND;
12141                     }
12142                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12143                         si_code = TARGET_FPE_FLTOVF;
12144                     }
12145                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12146                         si_code = TARGET_FPE_FLTDIV;
12147                     }
12148                     if (fex & SWCR_TRAP_ENABLE_INV) {
12149                         si_code = TARGET_FPE_FLTINV;
12150                     }
12151 
12152                     info.si_signo = SIGFPE;
12153                     info.si_errno = 0;
12154                     info.si_code = si_code;
12155                     info._sifields._sigfault._addr = (cpu_env)->pc;
12156                     queue_signal(cpu_env, info.si_signo,
12157                                  QEMU_SI_FAULT, &info);
12158                 }
12159                 ret = 0;
12160             }
12161             break;
12162 
12163           /* case SSI_NVPAIRS:
12164              -- Used with SSIN_UACPROC to enable unaligned accesses.
12165              case SSI_IEEE_STATE_AT_SIGNAL:
12166              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12167              -- Not implemented in linux kernel
12168           */
12169         }
12170         return ret;
12171 #endif
12172 #ifdef TARGET_NR_osf_sigprocmask
12173     /* Alpha specific.  */
12174     case TARGET_NR_osf_sigprocmask:
12175         {
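                  /*
                   * Unlike sigprocmask(2), the previous mask is handed back
                   * as the syscall return value, not through a user pointer.
                   */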
12176             abi_ulong mask;
12177             int how;
12178             sigset_t set, oldset;
12179 
12180             switch (arg1) {
12181             case TARGET_SIG_BLOCK:
12182                 how = SIG_BLOCK;
12183                 break;
12184             case TARGET_SIG_UNBLOCK:
12185                 how = SIG_UNBLOCK;
12186                 break;
12187             case TARGET_SIG_SETMASK:
12188                 how = SIG_SETMASK;
12189                 break;
12190             default:
12191                 return -TARGET_EINVAL;
12192             }
12193             mask = arg2;
12194             target_to_host_old_sigset(&set, &mask);
12195             ret = do_sigprocmask(how, &set, &oldset);
12196             if (!ret) {
12197                 host_to_target_old_sigset(&mask, &oldset);
12198                 ret = mask;
12199             }
12200         }
12201         return ret;
12202 #endif
12203 
12204 #ifdef TARGET_NR_getgid32
12205     case TARGET_NR_getgid32:
12206         return get_errno(getgid());
12207 #endif
12208 #ifdef TARGET_NR_geteuid32
12209     case TARGET_NR_geteuid32:
12210         return get_errno(geteuid());
12211 #endif
12212 #ifdef TARGET_NR_getegid32
12213     case TARGET_NR_getegid32:
12214         return get_errno(getegid());
12215 #endif
12216 #ifdef TARGET_NR_setreuid32
12217     case TARGET_NR_setreuid32:
12218         return get_errno(setreuid(arg1, arg2));
12219 #endif
12220 #ifdef TARGET_NR_setregid32
12221     case TARGET_NR_setregid32:
12222         return get_errno(setregid(arg1, arg2));
12223 #endif
12224 #ifdef TARGET_NR_getgroups32
12225     case TARGET_NR_getgroups32:
12226         { /* the same code as for TARGET_NR_getgroups */
12227             int gidsetsize = arg1;
12228             uint32_t *target_grouplist;
12229             g_autofree gid_t *grouplist = NULL;
12230             int i;
12231 
12232             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12233                 return -TARGET_EINVAL;
12234             }
12235             if (gidsetsize > 0) {
12236                 grouplist = g_try_new(gid_t, gidsetsize);
12237                 if (!grouplist) {
12238                     return -TARGET_ENOMEM;
12239                 }
12240             }
12241             ret = get_errno(getgroups(gidsetsize, grouplist));
12242             if (!is_error(ret) && gidsetsize > 0) {
12243                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12244                                              gidsetsize * 4, 0);
12245                 if (!target_grouplist) {
12246                     return -TARGET_EFAULT;
12247                 }
12248                 for (i = 0; i < ret; i++) {
12249                     target_grouplist[i] = tswap32(grouplist[i]);
12250                 }
12251                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12252             }
12253             return ret;
12254         }
12255 #endif
12256 #ifdef TARGET_NR_setgroups32
12257     case TARGET_NR_setgroups32:
12258         { /* the same code as for TARGET_NR_setgroups */
12259             int gidsetsize = arg1;
12260             uint32_t *target_grouplist;
12261             g_autofree gid_t *grouplist = NULL;
12262             int i;
12263 
12264             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12265                 return -TARGET_EINVAL;
12266             }
12267             if (gidsetsize > 0) {
12268                 grouplist = g_try_new(gid_t, gidsetsize);
12269                 if (!grouplist) {
12270                     return -TARGET_ENOMEM;
12271                 }
12272                 target_grouplist = lock_user(VERIFY_READ, arg2,
12273                                              gidsetsize * 4, 1);
12274                 if (!target_grouplist) {
12275                     return -TARGET_EFAULT;
12276                 }
12277                 for (i = 0; i < gidsetsize; i++) {
12278                     grouplist[i] = tswap32(target_grouplist[i]);
12279                 }
12280                 unlock_user(target_grouplist, arg2, 0);
12281             }
12282             return get_errno(setgroups(gidsetsize, grouplist));
12283         }
12284 #endif
12285 #ifdef TARGET_NR_fchown32
12286     case TARGET_NR_fchown32:
12287         return get_errno(fchown(arg1, arg2, arg3));
12288 #endif
12289 #ifdef TARGET_NR_setresuid32
12290     case TARGET_NR_setresuid32:
12291         return get_errno(sys_setresuid(arg1, arg2, arg3));
12292 #endif
12293 #ifdef TARGET_NR_getresuid32
12294     case TARGET_NR_getresuid32:
12295         {
12296             uid_t ruid, euid, suid;
12297             ret = get_errno(getresuid(&ruid, &euid, &suid));
12298             if (!is_error(ret)) {
12299                 if (put_user_u32(ruid, arg1)
12300                     || put_user_u32(euid, arg2)
12301                     || put_user_u32(suid, arg3))
12302                     return -TARGET_EFAULT;
12303             }
12304         }
12305         return ret;
12306 #endif
12307 #ifdef TARGET_NR_setresgid32
12308     case TARGET_NR_setresgid32:
12309         return get_errno(sys_setresgid(arg1, arg2, arg3));
12310 #endif
12311 #ifdef TARGET_NR_getresgid32
12312     case TARGET_NR_getresgid32:
12313         {
12314             gid_t rgid, egid, sgid;
12315             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12316             if (!is_error(ret)) {
12317                 if (put_user_u32(rgid, arg1)
12318                     || put_user_u32(egid, arg2)
12319                     || put_user_u32(sgid, arg3))
12320                     return -TARGET_EFAULT;
12321             }
12322         }
12323         return ret;
12324 #endif
12325 #ifdef TARGET_NR_chown32
12326     case TARGET_NR_chown32:
12327         if (!(p = lock_user_string(arg1))) {
12328             return -TARGET_EFAULT;
              }
12329         ret = get_errno(chown(p, arg2, arg3));
12330         unlock_user(p, arg1, 0);
12331         return ret;
12332 #endif
12333 #ifdef TARGET_NR_setuid32
12334     case TARGET_NR_setuid32:
12335         return get_errno(sys_setuid(arg1));
12336 #endif
12337 #ifdef TARGET_NR_setgid32
12338     case TARGET_NR_setgid32:
12339         return get_errno(sys_setgid(arg1));
12340 #endif
12341 #ifdef TARGET_NR_setfsuid32
12342     case TARGET_NR_setfsuid32:
12343         return get_errno(setfsuid(arg1));
12344 #endif
12345 #ifdef TARGET_NR_setfsgid32
12346     case TARGET_NR_setfsgid32:
12347         return get_errno(setfsgid(arg1));
12348 #endif
12349 #ifdef TARGET_NR_mincore
12350     case TARGET_NR_mincore:
12351         {
12352             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12353             if (!a) {
12354                 return -TARGET_ENOMEM;
12355             }
12356             p = lock_user_string(arg3);
12357             if (!p) {
12358                 ret = -TARGET_EFAULT;
12359             } else {
12360                 ret = get_errno(mincore(a, arg2, p));
12361                 unlock_user(p, arg3, ret);
12362             }
12363             unlock_user(a, arg1, 0);
12364         }
12365         return ret;
12366 #endif
12367 #ifdef TARGET_NR_arm_fadvise64_64
12368     case TARGET_NR_arm_fadvise64_64:
12369         /* arm_fadvise64_64 looks like fadvise64_64 but
12370          * with different argument order: fd, advice, offset, len
12371          * rather than the usual fd, offset, len, advice.
12372          * Note that offset and len are both 64-bit so appear as
12373          * pairs of 32-bit registers.
12374          */
12375         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12376                             target_offset64(arg5, arg6), arg2);
12377         return -host_to_target_errno(ret);
12378 #endif
12379 
12380 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12381 
12382 #ifdef TARGET_NR_fadvise64_64
12383     case TARGET_NR_fadvise64_64:
12384 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12385         /* 6 args: fd, advice, offset (high, low), len (high, low) */
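              /* Rotate into the generic fd, offset, len, advice order below. */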
12386         ret = arg2;
12387         arg2 = arg3;
12388         arg3 = arg4;
12389         arg4 = arg5;
12390         arg5 = arg6;
12391         arg6 = ret;
12392 #else
12393         /* 6 args: fd, offset (high, low), len (high, low), advice */
12394         if (regpairs_aligned(cpu_env, num)) {
12395             /* offset is in (3,4), len in (5,6) and advice in 7 */
12396             arg2 = arg3;
12397             arg3 = arg4;
12398             arg4 = arg5;
12399             arg5 = arg6;
12400             arg6 = arg7;
12401         }
12402 #endif
12403         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12404                             target_offset64(arg4, arg5), arg6);
12405         return -host_to_target_errno(ret);
12406 #endif
12407 
12408 #ifdef TARGET_NR_fadvise64
12409     case TARGET_NR_fadvise64:
12410         /* 5 args: fd, offset (high, low), len, advice */
12411         if (regpairs_aligned(cpu_env, num)) {
12412             /* offset is in (3,4), len in 5 and advice in 6 */
12413             arg2 = arg3;
12414             arg3 = arg4;
12415             arg4 = arg5;
12416             arg5 = arg6;
12417         }
12418         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12419         return -host_to_target_errno(ret);
12420 #endif
12421 
12422 #else /* not a 32-bit ABI */
12423 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12424 #ifdef TARGET_NR_fadvise64_64
12425     case TARGET_NR_fadvise64_64:
12426 #endif
12427 #ifdef TARGET_NR_fadvise64
12428     case TARGET_NR_fadvise64:
12429 #endif
12430 #ifdef TARGET_S390X
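              /*
               * s390x uses 6 and 7 for POSIX_FADV_DONTNEED and
               * POSIX_FADV_NOREUSE, so convert those to the host constants
               * and make the (invalid on s390x) values 4 and 5 fail.
               */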
12431         switch (arg4) {
12432         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12433         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12434         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12435         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12436         default: break;
12437         }
12438 #endif
12439         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12440 #endif
12441 #endif /* end of 64-bit ABI fadvise handling */
12442 
12443 #ifdef TARGET_NR_madvise
12444     case TARGET_NR_madvise:
12445         return target_madvise(arg1, arg2, arg3);
12446 #endif
12447 #ifdef TARGET_NR_fcntl64
12448     case TARGET_NR_fcntl64:
12449     {
12450         int cmd;
12451         struct flock64 fl;
12452         from_flock64_fn *copyfrom = copy_from_user_flock64;
12453         to_flock64_fn *copyto = copy_to_user_flock64;
12454 
12455 #ifdef TARGET_ARM
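              /*
               * The old ARM OABI only aligns 64-bit fields to 4 bytes, so its
               * struct flock64 layout differs from EABI; non-EABI binaries
               * need the dedicated OABI conversion helpers.
               */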
12456         if (!cpu_env->eabi) {
12457             copyfrom = copy_from_user_oabi_flock64;
12458             copyto = copy_to_user_oabi_flock64;
12459         }
12460 #endif
12461 
12462         cmd = target_to_host_fcntl_cmd(arg2);
12463         if (cmd == -TARGET_EINVAL) {
12464             return cmd;
12465         }
12466 
12467         switch (arg2) {
12468         case TARGET_F_GETLK64:
12469             ret = copyfrom(&fl, arg3);
12470             if (ret) {
12471                 break;
12472             }
12473             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12474             if (ret == 0) {
12475                 ret = copyto(arg3, &fl);
12476             }
12477             break;
12478 
12479         case TARGET_F_SETLK64:
12480         case TARGET_F_SETLKW64:
12481             ret = copyfrom(&fl, arg3);
12482             if (ret) {
12483                 break;
12484             }
12485             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12486             break;
12487         default:
12488             ret = do_fcntl(arg1, arg2, arg3);
12489             break;
12490         }
12491         return ret;
12492     }
12493 #endif
12494 #ifdef TARGET_NR_cacheflush
12495     case TARGET_NR_cacheflush:
12496         /* self-modifying code is handled automatically, so nothing needed */
12497         return 0;
12498 #endif
12499 #ifdef TARGET_NR_getpagesize
12500     case TARGET_NR_getpagesize:
12501         return TARGET_PAGE_SIZE;
12502 #endif
12503     case TARGET_NR_gettid:
12504         return get_errno(sys_gettid());
12505 #ifdef TARGET_NR_readahead
12506     case TARGET_NR_readahead:
12507 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
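              /*
               * The 64-bit offset is split across a pair of 32-bit registers;
               * on ABIs that align such pairs to an even register, arg2 is
               * padding and the real arguments start at arg3.
               */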
12508         if (regpairs_aligned(cpu_env, num)) {
12509             arg2 = arg3;
12510             arg3 = arg4;
12511             arg4 = arg5;
12512         }
12513         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12514 #else
12515         ret = get_errno(readahead(arg1, arg2, arg3));
12516 #endif
12517         return ret;
12518 #endif
12519 #ifdef CONFIG_ATTR
12520 #ifdef TARGET_NR_setxattr
12521     case TARGET_NR_listxattr:
12522     case TARGET_NR_llistxattr:
12523     {
12524         void *p, *b = 0;
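              /*
               * The guest may pass a NULL buffer (with size 0) to query the
               * required size, so only map guest memory when arg2 is set.
               */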
12525         if (arg2) {
12526             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12527             if (!b) {
12528                 return -TARGET_EFAULT;
12529             }
12530         }
12531         p = lock_user_string(arg1);
12532         if (p) {
12533             if (num == TARGET_NR_listxattr) {
12534                 ret = get_errno(listxattr(p, b, arg3));
12535             } else {
12536                 ret = get_errno(llistxattr(p, b, arg3));
12537             }
12538         } else {
12539             ret = -TARGET_EFAULT;
12540         }
12541         unlock_user(p, arg1, 0);
12542         unlock_user(b, arg2, arg3);
12543         return ret;
12544     }
12545     case TARGET_NR_flistxattr:
12546     {
12547         void *b = 0;
12548         if (arg2) {
12549             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12550             if (!b) {
12551                 return -TARGET_EFAULT;
12552             }
12553         }
12554         ret = get_errno(flistxattr(arg1, b, arg3));
12555         unlock_user(b, arg2, arg3);
12556         return ret;
12557     }
12558     case TARGET_NR_setxattr:
12559     case TARGET_NR_lsetxattr:
12560         {
12561             void *p, *n, *v = 0;
12562             if (arg3) {
12563                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12564                 if (!v) {
12565                     return -TARGET_EFAULT;
12566                 }
12567             }
12568             p = lock_user_string(arg1);
12569             n = lock_user_string(arg2);
12570             if (p && n) {
12571                 if (num == TARGET_NR_setxattr) {
12572                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12573                 } else {
12574                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12575                 }
12576             } else {
12577                 ret = -TARGET_EFAULT;
12578             }
12579             unlock_user(p, arg1, 0);
12580             unlock_user(n, arg2, 0);
12581             unlock_user(v, arg3, 0);
12582         }
12583         return ret;
12584     case TARGET_NR_fsetxattr:
12585         {
12586             void *n, *v = 0;
12587             if (arg3) {
12588                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12589                 if (!v) {
12590                     return -TARGET_EFAULT;
12591                 }
12592             }
12593             n = lock_user_string(arg2);
12594             if (n) {
12595                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12596             } else {
12597                 ret = -TARGET_EFAULT;
12598             }
12599             unlock_user(n, arg2, 0);
12600             unlock_user(v, arg3, 0);
12601         }
12602         return ret;
12603     case TARGET_NR_getxattr:
12604     case TARGET_NR_lgetxattr:
12605         {
12606             void *p, *n, *v = 0;
12607             if (arg3) {
12608                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12609                 if (!v) {
12610                     return -TARGET_EFAULT;
12611                 }
12612             }
12613             p = lock_user_string(arg1);
12614             n = lock_user_string(arg2);
12615             if (p && n) {
12616                 if (num == TARGET_NR_getxattr) {
12617                     ret = get_errno(getxattr(p, n, v, arg4));
12618                 } else {
12619                     ret = get_errno(lgetxattr(p, n, v, arg4));
12620                 }
12621             } else {
12622                 ret = -TARGET_EFAULT;
12623             }
12624             unlock_user(p, arg1, 0);
12625             unlock_user(n, arg2, 0);
12626             unlock_user(v, arg3, arg4);
12627         }
12628         return ret;
12629     case TARGET_NR_fgetxattr:
12630         {
12631             void *n, *v = 0;
12632             if (arg3) {
12633                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12634                 if (!v) {
12635                     return -TARGET_EFAULT;
12636                 }
12637             }
12638             n = lock_user_string(arg2);
12639             if (n) {
12640                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12641             } else {
12642                 ret = -TARGET_EFAULT;
12643             }
12644             unlock_user(n, arg2, 0);
12645             unlock_user(v, arg3, arg4);
12646         }
12647         return ret;
12648     case TARGET_NR_removexattr:
12649     case TARGET_NR_lremovexattr:
12650         {
12651             void *p, *n;
12652             p = lock_user_string(arg1);
12653             n = lock_user_string(arg2);
12654             if (p && n) {
12655                 if (num == TARGET_NR_removexattr) {
12656                     ret = get_errno(removexattr(p, n));
12657                 } else {
12658                     ret = get_errno(lremovexattr(p, n));
12659                 }
12660             } else {
12661                 ret = -TARGET_EFAULT;
12662             }
12663             unlock_user(p, arg1, 0);
12664             unlock_user(n, arg2, 0);
12665         }
12666         return ret;
12667     case TARGET_NR_fremovexattr:
12668         {
12669             void *n;
12670             n = lock_user_string(arg2);
12671             if (n) {
12672                 ret = get_errno(fremovexattr(arg1, n));
12673             } else {
12674                 ret = -TARGET_EFAULT;
12675             }
12676             unlock_user(n, arg2, 0);
12677         }
12678         return ret;
12679 #endif
12680 #endif /* CONFIG_ATTR */
12681 #ifdef TARGET_NR_set_thread_area
12682     case TARGET_NR_set_thread_area:
12683 #if defined(TARGET_MIPS)
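            /*
             * MIPS keeps the TLS pointer in the CP0 UserLocal register, which
             * the guest reads back with the rdhwr instruction.
             */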
12684       cpu_env->active_tc.CP0_UserLocal = arg1;
12685       return 0;
12686 #elif defined(TARGET_CRIS)
12687       if (arg1 & 0xff) {
12688           ret = -TARGET_EINVAL;
12689       } else {
12690           cpu_env->pregs[PR_PID] = arg1;
12691           ret = 0;
12692       }
12693       return ret;
12694 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12695       return do_set_thread_area(cpu_env, arg1);
12696 #elif defined(TARGET_M68K)
12697       {
12698           TaskState *ts = cpu->opaque;
12699           ts->tp_value = arg1;
12700           return 0;
12701       }
12702 #else
12703       return -TARGET_ENOSYS;
12704 #endif
12705 #endif
12706 #ifdef TARGET_NR_get_thread_area
12707     case TARGET_NR_get_thread_area:
12708 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12709         return do_get_thread_area(cpu_env, arg1);
12710 #elif defined(TARGET_M68K)
12711         {
12712             TaskState *ts = cpu->opaque;
12713             return ts->tp_value;
12714         }
12715 #else
12716         return -TARGET_ENOSYS;
12717 #endif
12718 #endif
12719 #ifdef TARGET_NR_getdomainname
12720     case TARGET_NR_getdomainname:
12721         return -TARGET_ENOSYS;
12722 #endif
12723 
12724 #ifdef TARGET_NR_clock_settime
12725     case TARGET_NR_clock_settime:
12726     {
12727         struct timespec ts;
12728 
12729         ret = target_to_host_timespec(&ts, arg2);
12730         if (!is_error(ret)) {
12731             ret = get_errno(clock_settime(arg1, &ts));
12732         }
12733         return ret;
12734     }
12735 #endif
12736 #ifdef TARGET_NR_clock_settime64
12737     case TARGET_NR_clock_settime64:
12738     {
12739         struct timespec ts;
12740 
12741         ret = target_to_host_timespec64(&ts, arg2);
12742         if (!is_error(ret)) {
12743             ret = get_errno(clock_settime(arg1, &ts));
12744         }
12745         return ret;
12746     }
12747 #endif
12748 #ifdef TARGET_NR_clock_gettime
12749     case TARGET_NR_clock_gettime:
12750     {
12751         struct timespec ts;
12752         ret = get_errno(clock_gettime(arg1, &ts));
12753         if (!is_error(ret)) {
12754             ret = host_to_target_timespec(arg2, &ts);
12755         }
12756         return ret;
12757     }
12758 #endif
12759 #ifdef TARGET_NR_clock_gettime64
12760     case TARGET_NR_clock_gettime64:
12761     {
12762         struct timespec ts;
12763         ret = get_errno(clock_gettime(arg1, &ts));
12764         if (!is_error(ret)) {
12765             ret = host_to_target_timespec64(arg2, &ts);
12766         }
12767         return ret;
12768     }
12769 #endif
12770 #ifdef TARGET_NR_clock_getres
12771     case TARGET_NR_clock_getres:
12772     {
12773         struct timespec ts;
12774         ret = get_errno(clock_getres(arg1, &ts));
12775         if (!is_error(ret) && arg2 && host_to_target_timespec(arg2, &ts)) {
12776             return -TARGET_EFAULT;
12777         }
12778         return ret;
12779     }
12780 #endif
12781 #ifdef TARGET_NR_clock_getres_time64
12782     case TARGET_NR_clock_getres_time64:
12783     {
12784         struct timespec ts;
12785         ret = get_errno(clock_getres(arg1, &ts));
12786         if (!is_error(ret) && arg2 && host_to_target_timespec64(arg2, &ts)) {
12787             return -TARGET_EFAULT;
12788         }
12789         return ret;
12790     }
12791 #endif
12792 #ifdef TARGET_NR_clock_nanosleep
12793     case TARGET_NR_clock_nanosleep:
12794     {
12795         struct timespec ts;
12796         if (target_to_host_timespec(&ts, arg3)) {
12797             return -TARGET_EFAULT;
12798         }
12799         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12800                                              &ts, arg4 ? &ts : NULL));
12801         /*
12802          * If the call is interrupted by a signal handler, it fails with
12803          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12804          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12805          */
12806         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12807             host_to_target_timespec(arg4, &ts)) {
12808               return -TARGET_EFAULT;
12809         }
12810 
12811         return ret;
12812     }
12813 #endif
12814 #ifdef TARGET_NR_clock_nanosleep_time64
12815     case TARGET_NR_clock_nanosleep_time64:
12816     {
12817         struct timespec ts;
12818 
12819         if (target_to_host_timespec64(&ts, arg3)) {
12820             return -TARGET_EFAULT;
12821         }
12822 
12823         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12824                                              &ts, arg4 ? &ts : NULL));
12825 
12826         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12827             host_to_target_timespec64(arg4, &ts)) {
12828             return -TARGET_EFAULT;
12829         }
12830         return ret;
12831     }
12832 #endif
12833 
12834 #if defined(TARGET_NR_set_tid_address)
12835     case TARGET_NR_set_tid_address:
12836     {
12837         TaskState *ts = cpu->opaque;
12838         ts->child_tidptr = arg1;
12839         /* do not call host set_tid_address() syscall, instead return tid() */
12840         return get_errno(sys_gettid());
12841     }
12842 #endif
12843 
12844     case TARGET_NR_tkill:
12845         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12846 
12847     case TARGET_NR_tgkill:
12848         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12849                          target_to_host_signal(arg3)));
12850 
12851 #ifdef TARGET_NR_set_robust_list
12852     case TARGET_NR_set_robust_list:
12853     case TARGET_NR_get_robust_list:
12854         /* The ABI for supporting robust futexes has userspace pass
12855          * the kernel a pointer to a linked list which is updated by
12856          * userspace after the syscall; the list is walked by the kernel
12857          * when the thread exits. Since the linked list in QEMU guest
12858          * memory isn't a valid linked list for the host and we have
12859          * no way to reliably intercept the thread-death event, we can't
12860          * support these. Silently return ENOSYS so that guest userspace
12861          * falls back to a non-robust futex implementation (which should
12862          * be OK except in the corner case of the guest crashing while
12863          * holding a mutex that is shared with another process via
12864          * shared memory).
12865          */
12866         return -TARGET_ENOSYS;
12867 #endif
12868 
12869 #if defined(TARGET_NR_utimensat)
12870     case TARGET_NR_utimensat:
12871         {
12872             struct timespec *tsp, ts[2];
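                  /*
                   * ts[0] is the new access time and ts[1] the new
                   * modification time; a NULL times pointer (arg3 == 0)
                   * sets both to the current time.
                   */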
12873             if (!arg3) {
12874                 tsp = NULL;
12875             } else {
12876                 if (target_to_host_timespec(ts, arg3)) {
12877                     return -TARGET_EFAULT;
12878                 }
12879                 if (target_to_host_timespec(ts + 1, arg3 +
12880                                             sizeof(struct target_timespec))) {
12881                     return -TARGET_EFAULT;
12882                 }
12883                 tsp = ts;
12884             }
12885             if (!arg2) {
12886                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12887             } else {
12888                 if (!(p = lock_user_string(arg2))) {
12889                     return -TARGET_EFAULT;
12890                 }
12891                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12892                 unlock_user(p, arg2, 0);
12893             }
12894         }
12895         return ret;
12896 #endif
12897 #ifdef TARGET_NR_utimensat_time64
12898     case TARGET_NR_utimensat_time64:
12899         {
12900             struct timespec *tsp, ts[2];
12901             if (!arg3) {
12902                 tsp = NULL;
12903             } else {
12904                 if (target_to_host_timespec64(ts, arg3)) {
12905                     return -TARGET_EFAULT;
12906                 }
12907                 if (target_to_host_timespec64(ts + 1, arg3 +
12908                                      sizeof(struct target__kernel_timespec))) {
12909                     return -TARGET_EFAULT;
12910                 }
12911                 tsp = ts;
12912             }
12913             if (!arg2) {
12914                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12915             } else {
12916                 p = lock_user_string(arg2);
12917                 if (!p) {
12918                     return -TARGET_EFAULT;
12919                 }
12920                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12921                 unlock_user(p, arg2, 0);
12922             }
12923         }
12924         return ret;
12925 #endif
12926 #ifdef TARGET_NR_futex
12927     case TARGET_NR_futex:
12928         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12929 #endif
12930 #ifdef TARGET_NR_futex_time64
12931     case TARGET_NR_futex_time64:
12932         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12933 #endif
12934 #ifdef CONFIG_INOTIFY
12935 #if defined(TARGET_NR_inotify_init)
12936     case TARGET_NR_inotify_init:
12937         ret = get_errno(inotify_init());
12938         if (ret >= 0) {
12939             fd_trans_register(ret, &target_inotify_trans);
12940         }
12941         return ret;
12942 #endif
12943 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12944     case TARGET_NR_inotify_init1:
12945         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12946                                           fcntl_flags_tbl)));
12947         if (ret >= 0) {
12948             fd_trans_register(ret, &target_inotify_trans);
12949         }
12950         return ret;
12951 #endif
12952 #if defined(TARGET_NR_inotify_add_watch)
12953     case TARGET_NR_inotify_add_watch:
12954         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12955         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12956         unlock_user(p, arg2, 0);
12957         return ret;
12958 #endif
12959 #if defined(TARGET_NR_inotify_rm_watch)
12960     case TARGET_NR_inotify_rm_watch:
12961         return get_errno(inotify_rm_watch(arg1, arg2));
12962 #endif
12963 #endif
12964 
12965 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12966     case TARGET_NR_mq_open:
12967         {
12968             struct mq_attr posix_mq_attr;
12969             struct mq_attr *pposix_mq_attr;
12970             int host_flags;
12971 
12972             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12973             pposix_mq_attr = NULL;
12974             if (arg4) {
12975                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12976                     return -TARGET_EFAULT;
12977                 }
12978                 pposix_mq_attr = &posix_mq_attr;
12979             }
12980             p = lock_user_string(arg1 - 1);
12981             if (!p) {
12982                 return -TARGET_EFAULT;
12983             }
12984             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12985             unlock_user(p, arg1, 0);
12986         }
12987         return ret;
12988 
12989     case TARGET_NR_mq_unlink:
12990         p = lock_user_string(arg1 - 1);
12991         if (!p) {
12992             return -TARGET_EFAULT;
12993         }
12994         ret = get_errno(mq_unlink(p));
12995         unlock_user(p, arg1, 0);
12996         return ret;
12997 
12998 #ifdef TARGET_NR_mq_timedsend
12999     case TARGET_NR_mq_timedsend:
13000         {
13001             struct timespec ts;
13002 
13003             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13004             if (arg5 != 0) {
13005                 if (target_to_host_timespec(&ts, arg5)) {
13006                     return -TARGET_EFAULT;
13007                 }
13008                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13009                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13010                     return -TARGET_EFAULT;
13011                 }
13012             } else {
13013                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13014             }
13015             unlock_user(p, arg2, arg3);
13016         }
13017         return ret;
13018 #endif
13019 #ifdef TARGET_NR_mq_timedsend_time64
13020     case TARGET_NR_mq_timedsend_time64:
13021         {
13022             struct timespec ts;
13023 
13024             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13025             if (arg5 != 0) {
13026                 if (target_to_host_timespec64(&ts, arg5)) {
13027                     return -TARGET_EFAULT;
13028                 }
13029                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13030                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13031                     return -TARGET_EFAULT;
13032                 }
13033             } else {
13034                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13035             }
13036             unlock_user(p, arg2, arg3);
13037         }
13038         return ret;
13039 #endif
13040 
13041 #ifdef TARGET_NR_mq_timedreceive
13042     case TARGET_NR_mq_timedreceive:
13043         {
13044             struct timespec ts;
13045             unsigned int prio;
13046 
13047             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13048             if (arg5 != 0) {
13049                 if (target_to_host_timespec(&ts, arg5)) {
13050                     return -TARGET_EFAULT;
13051                 }
13052                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13053                                                      &prio, &ts));
13054                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13055                     return -TARGET_EFAULT;
13056                 }
13057             } else {
13058                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13059                                                      &prio, NULL));
13060             }
13061             unlock_user(p, arg2, arg3);
13062             if (arg4 != 0) {
13063                 put_user_u32(prio, arg4);
                  }
13064         }
13065         return ret;
13066 #endif
13067 #ifdef TARGET_NR_mq_timedreceive_time64
13068     case TARGET_NR_mq_timedreceive_time64:
13069         {
13070             struct timespec ts;
13071             unsigned int prio;
13072 
13073             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13074             if (arg5 != 0) {
13075                 if (target_to_host_timespec64(&ts, arg5)) {
13076                     return -TARGET_EFAULT;
13077                 }
13078                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13079                                                      &prio, &ts));
13080                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13081                     return -TARGET_EFAULT;
13082                 }
13083             } else {
13084                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13085                                                      &prio, NULL));
13086             }
13087             unlock_user(p, arg2, arg3);
13088             if (arg4 != 0) {
13089                 put_user_u32(prio, arg4);
13090             }
13091         }
13092         return ret;
13093 #endif
13094 
13095     /* case TARGET_NR_mq_notify:
13096        -- Not implemented for now.
13097     */
13098 
13099     case TARGET_NR_mq_getsetattr:
13100         {
13101             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
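                  /*
                   * mq_setattr() itself returns the previous attributes, so a
                   * single call serves both the set-and-return-old and the
                   * get-only paths; the result is copied out to arg3 below.
                   */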
13102             ret = 0;
13103             if (arg2 != 0) {
13104                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
13105                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13106                                            &posix_mq_attr_out));
13107             } else if (arg3 != 0) {
13108                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13109             }
13110             if (ret == 0 && arg3 != 0 &&
13111                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                      return -TARGET_EFAULT;
13112             }
13113         }
13114         return ret;
13115 #endif
13116 
13117 #ifdef CONFIG_SPLICE
13118 #ifdef TARGET_NR_tee
13119     case TARGET_NR_tee:
13120         {
13121             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13122         }
13123         return ret;
13124 #endif
13125 #ifdef TARGET_NR_splice
13126     case TARGET_NR_splice:
13127         {
13128             loff_t loff_in, loff_out;
13129             loff_t *ploff_in = NULL, *ploff_out = NULL;
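                  /*
                   * splice() advances *off_in / *off_out when they are given,
                   * so the updated offsets are copied back to the guest below.
                   */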
13130             if (arg2) {
13131                 if (get_user_u64(loff_in, arg2)) {
13132                     return -TARGET_EFAULT;
13133                 }
13134                 ploff_in = &loff_in;
13135             }
13136             if (arg4) {
13137                 if (get_user_u64(loff_out, arg4)) {
13138                     return -TARGET_EFAULT;
13139                 }
13140                 ploff_out = &loff_out;
13141             }
13142             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13143             if (arg2) {
13144                 if (put_user_u64(loff_in, arg2)) {
13145                     return -TARGET_EFAULT;
13146                 }
13147             }
13148             if (arg4) {
13149                 if (put_user_u64(loff_out, arg4)) {
13150                     return -TARGET_EFAULT;
13151                 }
13152             }
13153         }
13154         return ret;
13155 #endif
13156 #ifdef TARGET_NR_vmsplice
13157     case TARGET_NR_vmsplice:
13158         {
13159             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13160             if (vec != NULL) {
13161                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13162                 unlock_iovec(vec, arg2, arg3, 0);
13163             } else {
13164                 ret = -host_to_target_errno(errno);
13165             }
13166         }
13167         return ret;
13168 #endif
13169 #endif /* CONFIG_SPLICE */
13170 #ifdef CONFIG_EVENTFD
13171 #if defined(TARGET_NR_eventfd)
13172     case TARGET_NR_eventfd:
13173         ret = get_errno(eventfd(arg1, 0));
13174         if (ret >= 0) {
13175             fd_trans_register(ret, &target_eventfd_trans);
13176         }
13177         return ret;
13178 #endif
13179 #if defined(TARGET_NR_eventfd2)
13180     case TARGET_NR_eventfd2:
13181     {
13182         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
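              /*
               * EFD_NONBLOCK and EFD_CLOEXEC are defined in terms of the O_*
               * flags, whose values can differ between target and host, so
               * translate those two bits explicitly.
               */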
13183         if (arg2 & TARGET_O_NONBLOCK) {
13184             host_flags |= O_NONBLOCK;
13185         }
13186         if (arg2 & TARGET_O_CLOEXEC) {
13187             host_flags |= O_CLOEXEC;
13188         }
13189         ret = get_errno(eventfd(arg1, host_flags));
13190         if (ret >= 0) {
13191             fd_trans_register(ret, &target_eventfd_trans);
13192         }
13193         return ret;
13194     }
13195 #endif
13196 #endif /* CONFIG_EVENTFD  */
13197 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13198     case TARGET_NR_fallocate:
13199 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13200         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13201                                   target_offset64(arg5, arg6)));
13202 #else
13203         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13204 #endif
13205         return ret;
13206 #endif
13207 #if defined(CONFIG_SYNC_FILE_RANGE)
13208 #if defined(TARGET_NR_sync_file_range)
13209     case TARGET_NR_sync_file_range:
13210 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13211 #if defined(TARGET_MIPS)
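              /*
               * The MIPS o32 ABI pads the argument list so that the 64-bit
               * offset starts in an aligned register pair: offset and nbytes
               * arrive in arg3..arg6 and the flags in arg7.
               */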
13212         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13213                                         target_offset64(arg5, arg6), arg7));
13214 #else
13215         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13216                                         target_offset64(arg4, arg5), arg6));
13217 #endif /* !TARGET_MIPS */
13218 #else
13219         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13220 #endif
13221         return ret;
13222 #endif
13223 #if defined(TARGET_NR_sync_file_range2) || \
13224     defined(TARGET_NR_arm_sync_file_range)
13225 #if defined(TARGET_NR_sync_file_range2)
13226     case TARGET_NR_sync_file_range2:
13227 #endif
13228 #if defined(TARGET_NR_arm_sync_file_range)
13229     case TARGET_NR_arm_sync_file_range:
13230 #endif
13231         /* Like sync_file_range, but flags comes second so that the 64-bit
                 offset and nbytes values stay in aligned register pairs. */
13232 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13233         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13234                                         target_offset64(arg5, arg6), arg2));
13235 #else
13236         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13237 #endif
13238         return ret;
13239 #endif
13240 #endif
13241 #if defined(TARGET_NR_signalfd4)
13242     case TARGET_NR_signalfd4:
13243         return do_signalfd4(arg1, arg2, arg4);
13244 #endif
13245 #if defined(TARGET_NR_signalfd)
13246     case TARGET_NR_signalfd:
13247         return do_signalfd4(arg1, arg2, 0);
13248 #endif
13249 #if defined(CONFIG_EPOLL)
13250 #if defined(TARGET_NR_epoll_create)
13251     case TARGET_NR_epoll_create:
13252         return get_errno(epoll_create(arg1));
13253 #endif
13254 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13255     case TARGET_NR_epoll_create1:
13256         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13257 #endif
13258 #if defined(TARGET_NR_epoll_ctl)
13259     case TARGET_NR_epoll_ctl:
13260     {
13261         struct epoll_event ep;
13262         struct epoll_event *epp = 0;
13263         if (arg4) {
13264             if (arg2 != EPOLL_CTL_DEL) {
13265                 struct target_epoll_event *target_ep;
13266                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13267                     return -TARGET_EFAULT;
13268                 }
13269                 ep.events = tswap32(target_ep->events);
13270                 /*
13271                  * The epoll_data_t union is just opaque data to the kernel,
13272                  * so we transfer all 64 bits across and need not worry what
13273                  * actual data type it is.
13274                  */
13275                 ep.data.u64 = tswap64(target_ep->data.u64);
13276                 unlock_user_struct(target_ep, arg4, 0);
13277             }
13278             /*
13279             /*
13280              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13281              * non-null pointer, even though this argument is ignored.
13282              */
13284         }
13285         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13286     }
13287 #endif
13288 
13289 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13290 #if defined(TARGET_NR_epoll_wait)
13291     case TARGET_NR_epoll_wait:
13292 #endif
13293 #if defined(TARGET_NR_epoll_pwait)
13294     case TARGET_NR_epoll_pwait:
13295 #endif
13296     {
13297         struct target_epoll_event *target_ep;
13298         struct epoll_event *ep;
13299         int epfd = arg1;
13300         int maxevents = arg3;
13301         int timeout = arg4;
13302 
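              /*
               * Mirror the kernel's sanity check on maxevents before sizing
               * the temporary host event buffer.
               */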
13303         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13304             return -TARGET_EINVAL;
13305         }
13306 
13307         target_ep = lock_user(VERIFY_WRITE, arg2,
13308                               maxevents * sizeof(struct target_epoll_event), 1);
13309         if (!target_ep) {
13310             return -TARGET_EFAULT;
13311         }
13312 
13313         ep = g_try_new(struct epoll_event, maxevents);
13314         if (!ep) {
13315             unlock_user(target_ep, arg2, 0);
13316             return -TARGET_ENOMEM;
13317         }
13318 
13319         switch (num) {
13320 #if defined(TARGET_NR_epoll_pwait)
13321         case TARGET_NR_epoll_pwait:
13322         {
13323             sigset_t *set = NULL;
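                  /*
                   * Install the guest-supplied signal mask just for the
                   * duration of the wait; finish_sigsuspend_mask() restores
                   * the previous mask once the syscall completes.
                   */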
13324 
13325             if (arg5) {
13326                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13327                 if (ret != 0) {
13328                     break;
13329                 }
13330             }
13331 
13332             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13333                                              set, SIGSET_T_SIZE));
13334 
13335             if (set) {
13336                 finish_sigsuspend_mask(ret);
13337             }
13338             break;
13339         }
13340 #endif
13341 #if defined(TARGET_NR_epoll_wait)
13342         case TARGET_NR_epoll_wait:
13343             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13344                                              NULL, 0));
13345             break;
13346 #endif
13347         default:
13348             ret = -TARGET_ENOSYS;
13349         }
13350         if (!is_error(ret)) {
13351             int i;
13352             for (i = 0; i < ret; i++) {
13353                 target_ep[i].events = tswap32(ep[i].events);
13354                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13355             }
13356             unlock_user(target_ep, arg2,
13357                         ret * sizeof(struct target_epoll_event));
13358         } else {
13359             unlock_user(target_ep, arg2, 0);
13360         }
13361         g_free(ep);
13362         return ret;
13363     }
13364 #endif
13365 #endif
13366 #ifdef TARGET_NR_prlimit64
13367     case TARGET_NR_prlimit64:
13368     {
13369         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13370         struct target_rlimit64 *target_rnew, *target_rold;
13371         struct host_rlimit64 rnew, rold, *rnewp = 0;
13372         int resource = target_to_host_resource(arg2);
13373 
13374         if (arg3 && (resource != RLIMIT_AS &&
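              /*
               * As with setrlimit, new limits for RLIMIT_AS, RLIMIT_DATA and
               * RLIMIT_STACK are quietly ignored: they would also constrain
               * the QEMU process itself.
               */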
13375                      resource != RLIMIT_DATA &&
13376                      resource != RLIMIT_STACK)) {
13377             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13378                 return -TARGET_EFAULT;
13379             }
13380             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13381             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13382             unlock_user_struct(target_rnew, arg3, 0);
13383             rnewp = &rnew;
13384         }
13385 
13386         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13387         if (!is_error(ret) && arg4) {
13388             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13389                 return -TARGET_EFAULT;
13390             }
13391             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13392             __put_user(rold.rlim_max, &target_rold->rlim_max);
13393             unlock_user_struct(target_rold, arg4, 1);
13394         }
13395         return ret;
13396     }
13397 #endif
13398 #ifdef TARGET_NR_gethostname
13399     case TARGET_NR_gethostname:
13400     {
13401         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13402         if (name) {
13403             ret = get_errno(gethostname(name, arg2));
13404             unlock_user(name, arg1, arg2);
13405         } else {
13406             ret = -TARGET_EFAULT;
13407         }
13408         return ret;
13409     }
13410 #endif
13411 #ifdef TARGET_NR_atomic_cmpxchg_32
13412     case TARGET_NR_atomic_cmpxchg_32:
13413     {
13414         /* should use start_exclusive from main.c */
13415         abi_ulong mem_value;
13416         if (get_user_u32(mem_value, arg6)) {
13417             target_siginfo_t info;
13418             info.si_signo = SIGSEGV;
13419             info.si_errno = 0;
13420             info.si_code = TARGET_SEGV_MAPERR;
13421             info._sifields._sigfault._addr = arg6;
13422             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13423             /* Don't fall through and compare an uninitialized mem_value. */
13424             return 0xdeadbeef;
13425         }
13426         if (mem_value == arg2) {
13427             put_user_u32(arg1, arg6);
              }
13428         return mem_value;
13429     }
13430 #endif
13431 #ifdef TARGET_NR_atomic_barrier
13432     case TARGET_NR_atomic_barrier:
13433         /* Like the kernel implementation and the QEMU Arm barrier,
13434            treat this as a no-op. */
13435         return 0;
13436 #endif
13437 
13438 #ifdef TARGET_NR_timer_create
13439     case TARGET_NR_timer_create:
13440     {
13441         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13442 
13443         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13444 
13445         int clkid = arg1;
13446         int timer_index = next_free_host_timer();
13447 
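              /*
               * Host timers live in the g_posix_timers[] table; the guest is
               * handed TIMER_MAGIC | index, which get_timer_id() validates
               * and converts back for the other timer_* syscalls.
               */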
13448         if (timer_index < 0) {
13449             ret = -TARGET_EAGAIN;
13450         } else {
13451             timer_t *phtimer = g_posix_timers + timer_index;
13452 
13453             if (arg2) {
13454                 phost_sevp = &host_sevp;
13455                 ret = target_to_host_sigevent(phost_sevp, arg2);
13456                 if (ret != 0) {
13457                     free_host_timer_slot(timer_index);
13458                     return ret;
13459                 }
13460             }
13461 
13462             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13463             if (ret) {
13464                 free_host_timer_slot(timer_index);
13465             } else {
13466                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13467                     timer_delete(*phtimer);
13468                     free_host_timer_slot(timer_index);
13469                     return -TARGET_EFAULT;
13470                 }
13471             }
13472         }
13473         return ret;
13474     }
13475 #endif
13476 
13477 #ifdef TARGET_NR_timer_settime
13478     case TARGET_NR_timer_settime:
13479     {
13480         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13481          * struct itimerspec * old_value */
13482         target_timer_t timerid = get_timer_id(arg1);
13483 
13484         if (timerid < 0) {
13485             ret = timerid;
13486         } else if (arg3 == 0) {
13487             ret = -TARGET_EINVAL;
13488         } else {
13489             timer_t htimer = g_posix_timers[timerid];
13490             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13491 
13492             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13493                 return -TARGET_EFAULT;
13494             }
13495             ret = get_errno(
13496                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13497             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13498                 return -TARGET_EFAULT;
13499             }
13500         }
13501         return ret;
13502     }
13503 #endif
13504 
13505 #ifdef TARGET_NR_timer_settime64
13506     case TARGET_NR_timer_settime64:
13507     {
13508         target_timer_t timerid = get_timer_id(arg1);
13509 
13510         if (timerid < 0) {
13511             ret = timerid;
13512         } else if (arg3 == 0) {
13513             ret = -TARGET_EINVAL;
13514         } else {
13515             timer_t htimer = g_posix_timers[timerid];
13516             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13517 
13518             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13519                 return -TARGET_EFAULT;
13520             }
13521             ret = get_errno(
13522                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13523             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13524                 return -TARGET_EFAULT;
13525             }
13526         }
13527         return ret;
13528     }
13529 #endif
13530 
13531 #ifdef TARGET_NR_timer_gettime
13532     case TARGET_NR_timer_gettime:
13533     {
13534         /* args: timer_t timerid, struct itimerspec *curr_value */
13535         target_timer_t timerid = get_timer_id(arg1);
13536 
13537         if (timerid < 0) {
13538             ret = timerid;
13539         } else if (!arg2) {
13540             ret = -TARGET_EFAULT;
13541         } else {
13542             timer_t htimer = g_posix_timers[timerid];
13543             struct itimerspec hspec;
13544             ret = get_errno(timer_gettime(htimer, &hspec));
13545 
13546             if (host_to_target_itimerspec(arg2, &hspec)) {
13547                 ret = -TARGET_EFAULT;
13548             }
13549         }
13550         return ret;
13551     }
13552 #endif
13553 
13554 #ifdef TARGET_NR_timer_gettime64
13555     case TARGET_NR_timer_gettime64:
13556     {
13557         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13558         target_timer_t timerid = get_timer_id(arg1);
13559 
13560         if (timerid < 0) {
13561             ret = timerid;
13562         } else if (!arg2) {
13563             ret = -TARGET_EFAULT;
13564         } else {
13565             timer_t htimer = g_posix_timers[timerid];
13566             struct itimerspec hspec;
13567             ret = get_errno(timer_gettime(htimer, &hspec));
13568 
13569             if (host_to_target_itimerspec64(arg2, &hspec)) {
13570                 ret = -TARGET_EFAULT;
13571             }
13572         }
13573         return ret;
13574     }
13575 #endif
13576 
13577 #ifdef TARGET_NR_timer_getoverrun
13578     case TARGET_NR_timer_getoverrun:
13579     {
13580         /* args: timer_t timerid */
13581         target_timer_t timerid = get_timer_id(arg1);
13582 
13583         if (timerid < 0) {
13584             ret = timerid;
13585         } else {
13586             timer_t htimer = g_posix_timers[timerid];
13587             ret = get_errno(timer_getoverrun(htimer));
13588         }
13589         return ret;
13590     }
13591 #endif
13592 
13593 #ifdef TARGET_NR_timer_delete
13594     case TARGET_NR_timer_delete:
13595     {
13596         /* args: timer_t timerid */
13597         target_timer_t timerid = get_timer_id(arg1);
13598 
13599         if (timerid < 0) {
13600             ret = timerid;
13601         } else {
13602             timer_t htimer = g_posix_timers[timerid];
13603             ret = get_errno(timer_delete(htimer));
13604             free_host_timer_slot(timerid);
13605         }
13606         return ret;
13607     }
13608 #endif
13609 
13610 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13611     case TARGET_NR_timerfd_create:
13612         ret = get_errno(timerfd_create(arg1,
13613                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13614         if (ret >= 0) {
13615             fd_trans_register(ret, &target_timerfd_trans);
13616         }
13617         return ret;
13618 #endif
13619 
13620 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13621     case TARGET_NR_timerfd_gettime:
13622         {
13623             struct itimerspec its_curr;
13624 
13625             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13626 
13627             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13628                 return -TARGET_EFAULT;
13629             }
13630         }
13631         return ret;
13632 #endif
13633 
13634 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13635     case TARGET_NR_timerfd_gettime64:
13636         {
13637             struct itimerspec its_curr;
13638 
13639             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13640 
13641             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13642                 return -TARGET_EFAULT;
13643             }
13644         }
13645         return ret;
13646 #endif
13647 
13648 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13649     case TARGET_NR_timerfd_settime:
13650         {
13651             struct itimerspec its_new, its_old, *p_new;
13652 
13653             if (arg3) {
13654                 if (target_to_host_itimerspec(&its_new, arg3)) {
13655                     return -TARGET_EFAULT;
13656                 }
13657                 p_new = &its_new;
13658             } else {
13659                 p_new = NULL;
13660             }
13661 
13662             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13663 
13664             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13665                 return -TARGET_EFAULT;
13666             }
13667         }
13668         return ret;
13669 #endif
13670 
13671 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13672     case TARGET_NR_timerfd_settime64:
13673         {
13674             struct itimerspec its_new, its_old, *p_new;
13675 
13676             if (arg3) {
13677                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13678                     return -TARGET_EFAULT;
13679                 }
13680                 p_new = &its_new;
13681             } else {
13682                 p_new = NULL;
13683             }
13684 
13685             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13686 
13687             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13688                 return -TARGET_EFAULT;
13689             }
13690         }
13691         return ret;
13692 #endif
13693 
13694 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13695     case TARGET_NR_ioprio_get:
13696         return get_errno(ioprio_get(arg1, arg2));
13697 #endif
13698 
13699 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13700     case TARGET_NR_ioprio_set:
13701         return get_errno(ioprio_set(arg1, arg2, arg3));
13702 #endif
13703 
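    /*
     * setns, unshare and kcmp likewise take only integer (fd/flag/pid)
     * arguments and need no conversion beyond errno handling.
     */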
13704 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13705     case TARGET_NR_setns:
13706         return get_errno(setns(arg1, arg2));
13707 #endif
13708 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13709     case TARGET_NR_unshare:
13710         return get_errno(unshare(arg1));
13711 #endif
13712 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13713     case TARGET_NR_kcmp:
13714         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13715 #endif
13716 #ifdef TARGET_NR_swapcontext
13717     case TARGET_NR_swapcontext:
13718         /* PowerPC specific.  */
13719         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13720 #endif
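    /*
     * memfd_create: the name is copied in from guest memory; any stale fd
     * translator left over from a previously closed fd with the same
     * number is dropped before the new fd is returned.
     */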
13721 #ifdef TARGET_NR_memfd_create
13722     case TARGET_NR_memfd_create:
13723         p = lock_user_string(arg1);
13724         if (!p) {
13725             return -TARGET_EFAULT;
13726         }
13727         ret = get_errno(memfd_create(p, arg2));
13728         fd_trans_unregister(ret);
13729         unlock_user(p, arg1, 0);
13730         return ret;
13731 #endif
13732 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13733     case TARGET_NR_membarrier:
13734         return get_errno(membarrier(arg1, arg2));
13735 #endif
13736 
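    /*
     * copy_file_range: the optional 64-bit file offsets are read from
     * guest memory, the length is kept unsigned to avoid sign-extension
     * on 32-bit targets, and the (possibly updated) offsets are written
     * back only when the host call actually copied some data.
     */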
13737 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13738     case TARGET_NR_copy_file_range:
13739         {
13740             loff_t inoff, outoff;
13741             loff_t *pinoff = NULL, *poutoff = NULL;
13742 
13743             if (arg2) {
13744                 if (get_user_u64(inoff, arg2)) {
13745                     return -TARGET_EFAULT;
13746                 }
13747                 pinoff = &inoff;
13748             }
13749             if (arg4) {
13750                 if (get_user_u64(outoff, arg4)) {
13751                     return -TARGET_EFAULT;
13752                 }
13753                 poutoff = &outoff;
13754             }
13755             /* Do not sign-extend the count parameter. */
13756             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13757                                                  (abi_ulong)arg5, arg6));
13758             if (!is_error(ret) && ret > 0) {
13759                 if (arg2) {
13760                     if (put_user_u64(inoff, arg2)) {
13761                         return -TARGET_EFAULT;
13762                     }
13763                 }
13764                 if (arg4) {
13765                     if (put_user_u64(outoff, arg4)) {
13766                         return -TARGET_EFAULT;
13767                     }
13768                 }
13769             }
13770         }
13771         return ret;
13772 #endif
13773 
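    /*
     * pivot_root: both path strings are locked from guest memory; if
     * either lock fails the syscall returns EFAULT, and the strings are
     * unlocked unmodified in all cases.
     */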
13774 #if defined(TARGET_NR_pivot_root)
13775     case TARGET_NR_pivot_root:
13776         {
13777             void *p2;
13778             p = lock_user_string(arg1); /* new_root */
13779             p2 = lock_user_string(arg2); /* put_old */
13780             if (!p || !p2) {
13781                 ret = -TARGET_EFAULT;
13782             } else {
13783                 ret = get_errno(pivot_root(p, p2));
13784             }
13785             unlock_user(p2, arg2, 0);
13786             unlock_user(p, arg1, 0);
13787         }
13788         return ret;
13789 #endif
13790 
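    /* RISC-V specific: query hardware capabilities (riscv_hwprobe). */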
13791 #if defined(TARGET_NR_riscv_hwprobe)
13792     case TARGET_NR_riscv_hwprobe:
13793         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13794 #endif
13795 
13796     default:
13797         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13798         return -TARGET_ENOSYS;
13799     }
13800     return ret;
13801 }
13802 
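/*
 * Entry point for guest syscalls: wraps do_syscall1() with plugin
 * instrumentation, optional strace-style logging and the
 * DEBUG_ERESTARTSYS restart-testing hack.
 */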
13803 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13804                     abi_long arg2, abi_long arg3, abi_long arg4,
13805                     abi_long arg5, abi_long arg6, abi_long arg7,
13806                     abi_long arg8)
13807 {
13808     CPUState *cpu = env_cpu(cpu_env);
13809     abi_long ret;
13810 
13811 #ifdef DEBUG_ERESTARTSYS
13812     /* Debug-only code for exercising the syscall-restart code paths
13813      * in the per-architecture cpu main loops: restart every syscall
13814      * the guest makes once before letting it through.
13815      */
13816     {
13817         static bool flag;
13818         flag = !flag;
13819         if (flag) {
13820             return -QEMU_ERESTARTSYS;
13821         }
13822     }
13823 #endif
13824 
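    /* Notify any TCG plugins that subscribed to syscall events. */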
13825     record_syscall_start(cpu, num, arg1,
13826                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13827 
13828     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13829         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13830     }
13831 
13832     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13833                       arg5, arg6, arg7, arg8);
13834 
13835     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13836         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13837                           arg3, arg4, arg5, arg6);
13838     }
13839 
13840     record_syscall_return(cpu, num, ret);
13841     return ret;
13842 }
13843