xref: /qemu/linux-user/syscall.c (revision cb88b7c2)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 #include "cpu_loop-common.h"
144 
145 #ifndef CLONE_IO
146 #define CLONE_IO                0x80000000      /* Clone io context */
147 #endif
148 
149 /* We can't directly call the host clone syscall, because this will
150  * badly confuse libc (breaking mutexes, for example). So we must
151  * divide clone flags into:
152  *  * flag combinations that look like pthread_create()
153  *  * flag combinations that look like fork()
154  *  * flags we can implement within QEMU itself
155  *  * flags we can't support and will return an error for
156  */
157 /* For thread creation, all these flags must be present; for
158  * fork, none must be present.
159  */
160 #define CLONE_THREAD_FLAGS                              \
161     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
162      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 
164 /* These flags are ignored:
165  * CLONE_DETACHED is now ignored by the kernel;
166  * CLONE_IO is just an optimisation hint to the I/O scheduler
167  */
168 #define CLONE_IGNORED_FLAGS                     \
169     (CLONE_DETACHED | CLONE_IO)
170 
171 /* Flags for fork which we can implement within QEMU itself */
172 #define CLONE_OPTIONAL_FORK_FLAGS               \
173     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
174      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 
176 /* Flags for thread creation which we can implement within QEMU itself */
177 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
178     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
179      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 
181 #define CLONE_INVALID_FORK_FLAGS                                        \
182     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 
184 #define CLONE_INVALID_THREAD_FLAGS                                      \
185     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
186        CLONE_IGNORED_FLAGS))
187 
188 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
189  * have almost all been allocated. We cannot support any of
190  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
191  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
192  * The checks against the invalid thread masks above will catch these.
193  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
194  */
195 
196 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
197  * once. This exercises the codepaths for restart.
198  */
199 //#define DEBUG_ERESTARTSYS
200 
201 //#include <linux/msdos_fs.h>
202 #define VFAT_IOCTL_READDIR_BOTH \
203     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
204 #define VFAT_IOCTL_READDIR_SHORT \
205     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
206 
207 #undef _syscall0
208 #undef _syscall1
209 #undef _syscall2
210 #undef _syscall3
211 #undef _syscall4
212 #undef _syscall5
213 #undef _syscall6
214 
215 #define _syscall0(type,name)		\
216 static type name (void)			\
217 {					\
218 	return syscall(__NR_##name);	\
219 }
220 
221 #define _syscall1(type,name,type1,arg1)		\
222 static type name (type1 arg1)			\
223 {						\
224 	return syscall(__NR_##name, arg1);	\
225 }
226 
227 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
228 static type name (type1 arg1,type2 arg2)		\
229 {							\
230 	return syscall(__NR_##name, arg1, arg2);	\
231 }
232 
233 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
234 static type name (type1 arg1,type2 arg2,type3 arg3)		\
235 {								\
236 	return syscall(__NR_##name, arg1, arg2, arg3);		\
237 }
238 
239 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
241 {										\
242 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
243 }
244 
245 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
246 		  type5,arg5)							\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
250 }
251 
252 
253 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
254 		  type5,arg5,type6,arg6)					\
255 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
256                   type6 arg6)							\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
259 }
260 
261 
262 #define __NR_sys_uname __NR_uname
263 #define __NR_sys_getcwd1 __NR_getcwd
264 #define __NR_sys_getdents __NR_getdents
265 #define __NR_sys_getdents64 __NR_getdents64
266 #define __NR_sys_getpriority __NR_getpriority
267 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
268 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
269 #define __NR_sys_syslog __NR_syslog
270 #if defined(__NR_futex)
271 # define __NR_sys_futex __NR_futex
272 #endif
273 #if defined(__NR_futex_time64)
274 # define __NR_sys_futex_time64 __NR_futex_time64
275 #endif
276 #define __NR_sys_statx __NR_statx
277 
278 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
279 #define __NR__llseek __NR_lseek
280 #endif
281 
282 /* Newer kernel ports have llseek() instead of _llseek() */
283 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
284 #define TARGET_NR__llseek TARGET_NR_llseek
285 #endif
286 
287 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
288 #ifndef TARGET_O_NONBLOCK_MASK
289 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
290 #endif
291 
292 #define __NR_sys_gettid __NR_gettid
293 _syscall0(int, sys_gettid)
294 
295 /* For the 64-bit guest on 32-bit host case we must emulate
296  * getdents using getdents64, because otherwise the host
297  * might hand us back more dirent records than we can fit
298  * into the guest buffer after structure format conversion.
299  * Otherwise we emulate getdents with getdents if the host has it.
300  */
301 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
302 #define EMULATE_GETDENTS_WITH_GETDENTS
303 #endif
304 
305 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
306 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
307 #endif
308 #if (defined(TARGET_NR_getdents) && \
309       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
310     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
311 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
312 #endif
313 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
314 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
315           loff_t *, res, uint, wh);
316 #endif
317 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
318 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
319           siginfo_t *, uinfo)
320 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
321 #ifdef __NR_exit_group
322 _syscall1(int,exit_group,int,error_code)
323 #endif
324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
325 #define __NR_sys_close_range __NR_close_range
326 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
327 #ifndef CLOSE_RANGE_CLOEXEC
328 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
329 #endif
330 #endif
331 #if defined(__NR_futex)
332 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
333           const struct timespec *,timeout,int *,uaddr2,int,val3)
334 #endif
335 #if defined(__NR_futex_time64)
336 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
337           const struct timespec *,timeout,int *,uaddr2,int,val3)
338 #endif
339 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
340 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
341 #endif
342 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
343 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
344                              unsigned int, flags);
345 #endif
346 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
347 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351           unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354           unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
356 struct sched_attr {
357     uint32_t size;
358     uint32_t sched_policy;
359     uint64_t sched_flags;
360     int32_t sched_nice;
361     uint32_t sched_priority;
362     uint64_t sched_runtime;
363     uint64_t sched_deadline;
364     uint64_t sched_period;
365     uint32_t sched_util_min;
366     uint32_t sched_util_max;
367 };
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370           unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373           unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378           const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381           struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384           const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388           void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390           struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392           struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
402 
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405           unsigned long, idx1, unsigned long, idx2)
406 #endif
407 
408 /*
409  * It is assumed that struct statx is architecture independent.
410  */
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413           unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
418 
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
421   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
422   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
423   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
424   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
425   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
426   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
427   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
428   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
429   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
430   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
431   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
433 #if defined(O_DIRECT)
434   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
435 #endif
436 #if defined(O_NOATIME)
437   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
438 #endif
439 #if defined(O_CLOEXEC)
440   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
441 #endif
442 #if defined(O_PATH)
443   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
444 #endif
445 #if defined(O_TMPFILE)
446   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
447 #endif
448   /* Don't terminate the list prematurely on 64-bit host+guest.  */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452   { 0, 0, 0, 0 }
453 };
454 
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
456 
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461           const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464                          const struct timespec times[2], int flags)
465 {
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_utimensat */
471 
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476           const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479                          int newfd, const char *new, int flags)
480 {
481     if (flags == 0) {
482         return renameat(oldfd, old, newfd, new);
483     }
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_renameat2 */
489 
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive runtime without inotify */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY  */
499 
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not be that used by the underlying syscall */
506 struct host_rlimit64 {
507     uint64_t rlim_cur;
508     uint64_t rlim_max;
509 };
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511           const struct host_rlimit64 *, new_limit,
512           struct host_rlimit64 *, old_limit)
513 #endif
514 
515 
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 #define GUEST_TIMER_MAX 32
519 static timer_t g_posix_timers[GUEST_TIMER_MAX];
520 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
521 
522 static inline int next_free_host_timer(void)
523 {
524     int k;
525     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
526         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
527             return k;
528         }
529     }
530     return -1;
531 }
532 
533 static inline void free_host_timer_slot(int id)
534 {
535     qatomic_store_release(g_posix_timer_allocated + id, 0);
536 }
537 #endif
538 
539 static inline int host_to_target_errno(int host_errno)
540 {
541     switch (host_errno) {
542 #define E(X)  case X: return TARGET_##X;
543 #include "errnos.c.inc"
544 #undef E
545     default:
546         return host_errno;
547     }
548 }
549 
550 static inline int target_to_host_errno(int target_errno)
551 {
552     switch (target_errno) {
553 #define E(X)  case TARGET_##X: return X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return target_errno;
558     }
559 }
560 
561 abi_long get_errno(abi_long ret)
562 {
563     if (ret == -1)
564         return -host_to_target_errno(errno);
565     else
566         return ret;
567 }
568 
569 const char *target_strerror(int err)
570 {
571     if (err == QEMU_ERESTARTSYS) {
572         return "To be restarted";
573     }
574     if (err == QEMU_ESIGRETURN) {
575         return "Successful exit from sigreturn";
576     }
577 
578     return strerror(target_to_host_errno(err));
579 }
580 
581 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
582 {
583     int i;
584     uint8_t b;
585     if (usize <= ksize) {
586         return 1;
587     }
588     for (i = ksize; i < usize; i++) {
589         if (get_user_u8(b, addr + i)) {
590             return -TARGET_EFAULT;
591         }
592         if (b != 0) {
593             return 0;
594         }
595     }
596     return 1;
597 }
598 
599 #define safe_syscall0(type, name) \
600 static type safe_##name(void) \
601 { \
602     return safe_syscall(__NR_##name); \
603 }
604 
605 #define safe_syscall1(type, name, type1, arg1) \
606 static type safe_##name(type1 arg1) \
607 { \
608     return safe_syscall(__NR_##name, arg1); \
609 }
610 
611 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
612 static type safe_##name(type1 arg1, type2 arg2) \
613 { \
614     return safe_syscall(__NR_##name, arg1, arg2); \
615 }
616 
617 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
618 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
621 }
622 
623 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
624     type4, arg4) \
625 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
626 { \
627     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
628 }
629 
630 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
631     type4, arg4, type5, arg5) \
632 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
633     type5 arg5) \
634 { \
635     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
636 }
637 
638 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
639     type4, arg4, type5, arg5, type6, arg6) \
640 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
641     type5 arg5, type6 arg6) \
642 { \
643     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
644 }
645 
646 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
647 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
648 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
649               int, flags, mode_t, mode)
650 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
651 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
652               struct rusage *, rusage)
653 #endif
654 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
655               int, options, struct rusage *, rusage)
656 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
657               char **, argv, char **, envp, int, flags)
658 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
659     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
660 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
661               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
662 #endif
663 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
664 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
665               struct timespec *, tsp, const sigset_t *, sigmask,
666               size_t, sigsetsize)
667 #endif
668 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
669               int, maxevents, int, timeout, const sigset_t *, sigmask,
670               size_t, sigsetsize)
671 #if defined(__NR_futex)
672 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
673               const struct timespec *,timeout,int *,uaddr2,int,val3)
674 #endif
675 #if defined(__NR_futex_time64)
676 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
677               const struct timespec *,timeout,int *,uaddr2,int,val3)
678 #endif
679 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
680 safe_syscall2(int, kill, pid_t, pid, int, sig)
681 safe_syscall2(int, tkill, int, tid, int, sig)
682 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
683 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
684 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
685 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
686               unsigned long, pos_l, unsigned long, pos_h)
687 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
688               unsigned long, pos_l, unsigned long, pos_h)
689 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
690               socklen_t, addrlen)
691 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
692               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
693 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
694               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
695 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
696 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
697 safe_syscall2(int, flock, int, fd, int, operation)
698 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
699 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
700               const struct timespec *, uts, size_t, sigsetsize)
701 #endif
702 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
703               int, flags)
704 #if defined(TARGET_NR_nanosleep)
705 safe_syscall2(int, nanosleep, const struct timespec *, req,
706               struct timespec *, rem)
707 #endif
708 #if defined(TARGET_NR_clock_nanosleep) || \
709     defined(TARGET_NR_clock_nanosleep_time64)
710 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
711               const struct timespec *, req, struct timespec *, rem)
712 #endif
713 #ifdef __NR_ipc
714 #ifdef __s390x__
715 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
716               void *, ptr)
717 #else
718 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
719               void *, ptr, long, fifth)
720 #endif
721 #endif
722 #ifdef __NR_msgsnd
723 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
724               int, flags)
725 #endif
726 #ifdef __NR_msgrcv
727 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
728               long, msgtype, int, flags)
729 #endif
730 #ifdef __NR_semtimedop
731 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
732               unsigned, nsops, const struct timespec *, timeout)
733 #endif
734 #if defined(TARGET_NR_mq_timedsend) || \
735     defined(TARGET_NR_mq_timedsend_time64)
736 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
737               size_t, len, unsigned, prio, const struct timespec *, timeout)
738 #endif
739 #if defined(TARGET_NR_mq_timedreceive) || \
740     defined(TARGET_NR_mq_timedreceive_time64)
741 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
742               size_t, len, unsigned *, prio, const struct timespec *, timeout)
743 #endif
744 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
745 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
746               int, outfd, loff_t *, poutoff, size_t, length,
747               unsigned int, flags)
748 #endif
749 
750 /* We do ioctl like this rather than via safe_syscall3 to preserve the
751  * "third argument might be integer or pointer or not present" behaviour of
752  * the libc function.
753  */
754 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
755 /* Similarly for fcntl. Note that callers must always:
756  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
757  *  use the flock64 struct rather than unsuffixed flock
758  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
759  */
760 #ifdef __NR_fcntl64
761 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
762 #else
763 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
764 #endif
765 
766 static inline int host_to_target_sock_type(int host_type)
767 {
768     int target_type;
769 
770     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
771     case SOCK_DGRAM:
772         target_type = TARGET_SOCK_DGRAM;
773         break;
774     case SOCK_STREAM:
775         target_type = TARGET_SOCK_STREAM;
776         break;
777     default:
778         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
779         break;
780     }
781 
782 #if defined(SOCK_CLOEXEC)
783     if (host_type & SOCK_CLOEXEC) {
784         target_type |= TARGET_SOCK_CLOEXEC;
785     }
786 #endif
787 
788 #if defined(SOCK_NONBLOCK)
789     if (host_type & SOCK_NONBLOCK) {
790         target_type |= TARGET_SOCK_NONBLOCK;
791     }
792 #endif
793 
794     return target_type;
795 }
796 
797 static abi_ulong target_brk;
798 static abi_ulong target_original_brk;
799 static abi_ulong brk_page;
800 
801 void target_set_brk(abi_ulong new_brk)
802 {
803     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
804     brk_page = HOST_PAGE_ALIGN(target_brk);
805 }
806 
807 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
808 #define DEBUGF_BRK(message, args...)
809 
810 /* do_brk() must return target values and target errnos. */
811 abi_long do_brk(abi_ulong new_brk)
812 {
813     abi_long mapped_addr;
814     abi_ulong new_alloc_size;
815 
816     /* brk pointers are always untagged */
817 
818     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
819 
820     if (!new_brk) {
821         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
822         return target_brk;
823     }
824     if (new_brk < target_original_brk) {
825         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
826                    target_brk);
827         return target_brk;
828     }
829 
830     /* If the new brk is less than the highest page reserved to the
831      * target heap allocation, set it and we're almost done...  */
832     if (new_brk <= brk_page) {
833         /* Heap contents are initialized to zero, as for anonymous
834          * mapped pages.  */
835         if (new_brk > target_brk) {
836             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
837         }
838 	target_brk = new_brk;
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
840 	return target_brk;
841     }
842 
843     /* We need to allocate more memory after the brk... Note that
844      * we don't use MAP_FIXED because that will map over the top of
845      * any existing mapping (like the one with the host libc or qemu
846      * itself); instead we treat "mapped but at wrong address" as
847      * a failure and unmap again.
848      */
849     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
850     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
851                                         PROT_READ|PROT_WRITE,
852                                         MAP_ANON|MAP_PRIVATE, 0, 0));
853 
854     if (mapped_addr == brk_page) {
855         /* Heap contents are initialized to zero, as for anonymous
856          * mapped pages.  Technically the new pages are already
857          * initialized to zero since they *are* anonymous mapped
858          * pages, however we have to take care with the contents that
859          * come from the remaining part of the previous page: it may
860          * contains garbage data due to a previous heap usage (grown
861          * then shrunken).  */
862         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
863 
864         target_brk = new_brk;
865         brk_page = HOST_PAGE_ALIGN(target_brk);
866         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
867             target_brk);
868         return target_brk;
869     } else if (mapped_addr != -1) {
870         /* Mapped but at wrong address, meaning there wasn't actually
871          * enough space for this brk.
872          */
873         target_munmap(mapped_addr, new_alloc_size);
874         mapped_addr = -1;
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
876     }
877     else {
878         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
879     }
880 
881 #if defined(TARGET_ALPHA)
882     /* We (partially) emulate OSF/1 on Alpha, which requires we
883        return a proper errno, not an unchanged brk value.  */
884     return -TARGET_ENOMEM;
885 #endif
886     /* For everything else, return the previous break. */
887     return target_brk;
888 }
889 
890 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
892 static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                             abi_ulong target_fds_addr,
894                                             int n)
895 {
896     int i, nw, j, k;
897     abi_ulong b, *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_READ,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  1)))
904         return -TARGET_EFAULT;
905 
906     FD_ZERO(fds);
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         /* grab the abi_ulong */
910         __get_user(b, &target_fds[i]);
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             /* check the bit inside the abi_ulong */
913             if ((b >> j) & 1)
914                 FD_SET(k, fds);
915             k++;
916         }
917     }
918 
919     unlock_user(target_fds, target_fds_addr, 0);
920 
921     return 0;
922 }
923 
924 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
925                                                  abi_ulong target_fds_addr,
926                                                  int n)
927 {
928     if (target_fds_addr) {
929         if (copy_from_user_fdset(fds, target_fds_addr, n))
930             return -TARGET_EFAULT;
931         *fds_ptr = fds;
932     } else {
933         *fds_ptr = NULL;
934     }
935     return 0;
936 }
937 
938 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                           const fd_set *fds,
940                                           int n)
941 {
942     int i, nw, j, k;
943     abi_long v;
944     abi_ulong *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_WRITE,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  0)))
951         return -TARGET_EFAULT;
952 
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         v = 0;
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958             k++;
959         }
960         __put_user(v, &target_fds[i]);
961     }
962 
963     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964 
965     return 0;
966 }
967 #endif
968 
969 #if defined(__alpha__)
970 #define HOST_HZ 1024
971 #else
972 #define HOST_HZ 100
973 #endif
974 
975 static inline abi_long host_to_target_clock_t(long ticks)
976 {
977 #if HOST_HZ == TARGET_HZ
978     return ticks;
979 #else
980     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981 #endif
982 }
983 
984 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
985                                              const struct rusage *rusage)
986 {
987     struct target_rusage *target_rusage;
988 
989     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
990         return -TARGET_EFAULT;
991     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
992     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
993     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
994     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
995     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
996     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
997     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
998     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
999     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1000     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1001     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1002     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1003     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1004     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1005     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1006     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1007     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1008     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1009     unlock_user_struct(target_rusage, target_addr, 1);
1010 
1011     return 0;
1012 }
1013 
1014 #ifdef TARGET_NR_setrlimit
1015 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016 {
1017     abi_ulong target_rlim_swap;
1018     rlim_t result;
1019 
1020     target_rlim_swap = tswapal(target_rlim);
1021     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022         return RLIM_INFINITY;
1023 
1024     result = target_rlim_swap;
1025     if (target_rlim_swap != (rlim_t)result)
1026         return RLIM_INFINITY;
1027 
1028     return result;
1029 }
1030 #endif
1031 
1032 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1033 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     abi_ulong result;
1037 
1038     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039         target_rlim_swap = TARGET_RLIM_INFINITY;
1040     else
1041         target_rlim_swap = rlim;
1042     result = tswapal(target_rlim_swap);
1043 
1044     return result;
1045 }
1046 #endif
1047 
1048 static inline int target_to_host_resource(int code)
1049 {
1050     switch (code) {
1051     case TARGET_RLIMIT_AS:
1052         return RLIMIT_AS;
1053     case TARGET_RLIMIT_CORE:
1054         return RLIMIT_CORE;
1055     case TARGET_RLIMIT_CPU:
1056         return RLIMIT_CPU;
1057     case TARGET_RLIMIT_DATA:
1058         return RLIMIT_DATA;
1059     case TARGET_RLIMIT_FSIZE:
1060         return RLIMIT_FSIZE;
1061     case TARGET_RLIMIT_LOCKS:
1062         return RLIMIT_LOCKS;
1063     case TARGET_RLIMIT_MEMLOCK:
1064         return RLIMIT_MEMLOCK;
1065     case TARGET_RLIMIT_MSGQUEUE:
1066         return RLIMIT_MSGQUEUE;
1067     case TARGET_RLIMIT_NICE:
1068         return RLIMIT_NICE;
1069     case TARGET_RLIMIT_NOFILE:
1070         return RLIMIT_NOFILE;
1071     case TARGET_RLIMIT_NPROC:
1072         return RLIMIT_NPROC;
1073     case TARGET_RLIMIT_RSS:
1074         return RLIMIT_RSS;
1075     case TARGET_RLIMIT_RTPRIO:
1076         return RLIMIT_RTPRIO;
1077 #ifdef RLIMIT_RTTIME
1078     case TARGET_RLIMIT_RTTIME:
1079         return RLIMIT_RTTIME;
1080 #endif
1081     case TARGET_RLIMIT_SIGPENDING:
1082         return RLIMIT_SIGPENDING;
1083     case TARGET_RLIMIT_STACK:
1084         return RLIMIT_STACK;
1085     default:
1086         return code;
1087     }
1088 }
1089 
1090 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                               abi_ulong target_tv_addr)
1092 {
1093     struct target_timeval *target_tv;
1094 
1095     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096         return -TARGET_EFAULT;
1097     }
1098 
1099     __get_user(tv->tv_sec, &target_tv->tv_sec);
1100     __get_user(tv->tv_usec, &target_tv->tv_usec);
1101 
1102     unlock_user_struct(target_tv, target_tv_addr, 0);
1103 
1104     return 0;
1105 }
1106 
1107 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                             const struct timeval *tv)
1109 {
1110     struct target_timeval *target_tv;
1111 
1112     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113         return -TARGET_EFAULT;
1114     }
1115 
1116     __put_user(tv->tv_sec, &target_tv->tv_sec);
1117     __put_user(tv->tv_usec, &target_tv->tv_usec);
1118 
1119     unlock_user_struct(target_tv, target_tv_addr, 1);
1120 
1121     return 0;
1122 }
1123 
1124 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1125 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1126                                                 abi_ulong target_tv_addr)
1127 {
1128     struct target__kernel_sock_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 #endif
1142 
1143 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                               const struct timeval *tv)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
1160 #if defined(TARGET_NR_futex) || \
1161     defined(TARGET_NR_rt_sigtimedwait) || \
1162     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1163     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1164     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1165     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1166     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1167     defined(TARGET_NR_timer_settime) || \
1168     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1169 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1170                                                abi_ulong target_addr)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 0);
1180     return 0;
1181 }
1182 #endif
1183 
1184 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185     defined(TARGET_NR_timer_settime64) || \
1186     defined(TARGET_NR_mq_timedsend_time64) || \
1187     defined(TARGET_NR_mq_timedreceive_time64) || \
1188     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189     defined(TARGET_NR_clock_nanosleep_time64) || \
1190     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191     defined(TARGET_NR_utimensat) || \
1192     defined(TARGET_NR_utimensat_time64) || \
1193     defined(TARGET_NR_semtimedop_time64) || \
1194     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1195 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                  abi_ulong target_addr)
1197 {
1198     struct target__kernel_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     /* in 32bit mode, this drops the padding */
1206     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207     unlock_user_struct(target_ts, target_addr, 0);
1208     return 0;
1209 }
1210 #endif
1211 
1212 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                struct timespec *host_ts)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 1);
1223     return 0;
1224 }
1225 
1226 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                  struct timespec *host_ts)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 1);
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_gettimeofday)
1241 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                              struct timezone *tz)
1243 {
1244     struct target_timezone *target_tz;
1245 
1246     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247         return -TARGET_EFAULT;
1248     }
1249 
1250     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252 
1253     unlock_user_struct(target_tz, target_tz_addr, 1);
1254 
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_settimeofday)
1260 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                abi_ulong target_tz_addr)
1262 {
1263     struct target_timezone *target_tz;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266         return -TARGET_EFAULT;
1267     }
1268 
1269     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271 
1272     unlock_user_struct(target_tz, target_tz_addr, 0);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279 #include <mqueue.h>
1280 
1281 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                               abi_ulong target_mq_attr_addr)
1283 {
1284     struct target_mq_attr *target_mq_attr;
1285 
1286     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                           target_mq_attr_addr, 1))
1288         return -TARGET_EFAULT;
1289 
1290     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294 
1295     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296 
1297     return 0;
1298 }
1299 
1300 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                             const struct mq_attr *attr)
1302 {
1303     struct target_mq_attr *target_mq_attr;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                           target_mq_attr_addr, 0))
1307         return -TARGET_EFAULT;
1308 
1309     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313 
1314     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1321 /* do_select() must return target values and target errnos. */
1322 static abi_long do_select(int n,
1323                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1324                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1325 {
1326     fd_set rfds, wfds, efds;
1327     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1328     struct timeval tv;
1329     struct timespec ts, *ts_ptr;
1330     abi_long ret;
1331 
1332     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1333     if (ret) {
1334         return ret;
1335     }
1336     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1337     if (ret) {
1338         return ret;
1339     }
1340     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1341     if (ret) {
1342         return ret;
1343     }
1344 
1345     if (target_tv_addr) {
1346         if (copy_from_user_timeval(&tv, target_tv_addr))
1347             return -TARGET_EFAULT;
1348         ts.tv_sec = tv.tv_sec;
1349         ts.tv_nsec = tv.tv_usec * 1000;
1350         ts_ptr = &ts;
1351     } else {
1352         ts_ptr = NULL;
1353     }
1354 
1355     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1356                                   ts_ptr, NULL));
1357 
1358     if (!is_error(ret)) {
1359         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1360             return -TARGET_EFAULT;
1361         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1362             return -TARGET_EFAULT;
1363         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1364             return -TARGET_EFAULT;
1365 
1366         if (target_tv_addr) {
1367             tv.tv_sec = ts.tv_sec;
1368             tv.tv_usec = ts.tv_nsec / 1000;
1369             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1370                 return -TARGET_EFAULT;
1371             }
1372         }
1373     }
1374 
1375     return ret;
1376 }
1377 
1378 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1379 static abi_long do_old_select(abi_ulong arg1)
1380 {
1381     struct target_sel_arg_struct *sel;
1382     abi_ulong inp, outp, exp, tvp;
1383     long nsel;
1384 
1385     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1386         return -TARGET_EFAULT;
1387     }
1388 
1389     nsel = tswapal(sel->n);
1390     inp = tswapal(sel->inp);
1391     outp = tswapal(sel->outp);
1392     exp = tswapal(sel->exp);
1393     tvp = tswapal(sel->tvp);
1394 
1395     unlock_user_struct(sel, arg1, 0);
1396 
1397     return do_select(nsel, inp, outp, exp, tvp);
1398 }
1399 #endif
1400 #endif
1401 
1402 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1403 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1404                             abi_long arg4, abi_long arg5, abi_long arg6,
1405                             bool time64)
1406 {
1407     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1408     fd_set rfds, wfds, efds;
1409     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1410     struct timespec ts, *ts_ptr;
1411     abi_long ret;
1412 
1413     /*
1414      * The 6th arg is actually two args smashed together,
1415      * so we cannot use the C library.
1416      */
1417     struct {
1418         sigset_t *set;
1419         size_t size;
1420     } sig, *sig_ptr;
1421 
1422     abi_ulong arg_sigset, arg_sigsize, *arg7;
1423 
1424     n = arg1;
1425     rfd_addr = arg2;
1426     wfd_addr = arg3;
1427     efd_addr = arg4;
1428     ts_addr = arg5;
1429 
1430     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1431     if (ret) {
1432         return ret;
1433     }
1434     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1435     if (ret) {
1436         return ret;
1437     }
1438     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1439     if (ret) {
1440         return ret;
1441     }
1442 
1443     /*
1444      * This takes a timespec, and not a timeval, so we cannot
1445      * use the do_select() helper ...
1446      */
1447     if (ts_addr) {
1448         if (time64) {
1449             if (target_to_host_timespec64(&ts, ts_addr)) {
1450                 return -TARGET_EFAULT;
1451             }
1452         } else {
1453             if (target_to_host_timespec(&ts, ts_addr)) {
1454                 return -TARGET_EFAULT;
1455             }
1456         }
1457             ts_ptr = &ts;
1458     } else {
1459         ts_ptr = NULL;
1460     }
1461 
1462     /* Extract the two packed args for the sigset */
1463     sig_ptr = NULL;
1464     if (arg6) {
1465         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1466         if (!arg7) {
1467             return -TARGET_EFAULT;
1468         }
1469         arg_sigset = tswapal(arg7[0]);
1470         arg_sigsize = tswapal(arg7[1]);
1471         unlock_user(arg7, arg6, 0);
1472 
1473         if (arg_sigset) {
1474             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1475             if (ret != 0) {
1476                 return ret;
1477             }
1478             sig_ptr = &sig;
1479             sig.size = SIGSET_T_SIZE;
1480         }
1481     }
1482 
1483     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1484                                   ts_ptr, sig_ptr));
1485 
1486     if (sig_ptr) {
1487         finish_sigsuspend_mask(ret);
1488     }
1489 
1490     if (!is_error(ret)) {
1491         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1492             return -TARGET_EFAULT;
1493         }
1494         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1495             return -TARGET_EFAULT;
1496         }
1497         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (time64) {
1501             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1502                 return -TARGET_EFAULT;
1503             }
1504         } else {
1505             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1506                 return -TARGET_EFAULT;
1507             }
1508         }
1509     }
1510     return ret;
1511 }
1512 #endif
1513 
1514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515     defined(TARGET_NR_ppoll_time64)
1516 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1517                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1518 {
1519     struct target_pollfd *target_pfd;
1520     unsigned int nfds = arg2;
1521     struct pollfd *pfd;
1522     unsigned int i;
1523     abi_long ret;
1524 
1525     pfd = NULL;
1526     target_pfd = NULL;
1527     if (nfds) {
1528         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1529             return -TARGET_EINVAL;
1530         }
1531         target_pfd = lock_user(VERIFY_WRITE, arg1,
1532                                sizeof(struct target_pollfd) * nfds, 1);
1533         if (!target_pfd) {
1534             return -TARGET_EFAULT;
1535         }
1536 
1537         pfd = alloca(sizeof(struct pollfd) * nfds);
1538         for (i = 0; i < nfds; i++) {
1539             pfd[i].fd = tswap32(target_pfd[i].fd);
1540             pfd[i].events = tswap16(target_pfd[i].events);
1541         }
1542     }
1543     if (ppoll) {
1544         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1545         sigset_t *set = NULL;
1546 
1547         if (arg3) {
1548             if (time64) {
1549                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1550                     unlock_user(target_pfd, arg1, 0);
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (target_to_host_timespec(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             }
1559         } else {
1560             timeout_ts = NULL;
1561         }
1562 
1563         if (arg4) {
1564             ret = process_sigsuspend_mask(&set, arg4, arg5);
1565             if (ret != 0) {
1566                 unlock_user(target_pfd, arg1, 0);
1567                 return ret;
1568             }
1569         }
1570 
1571         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1572                                    set, SIGSET_T_SIZE));
1573 
1574         if (set) {
1575             finish_sigsuspend_mask(ret);
1576         }
1577         if (!is_error(ret) && arg3) {
1578             if (time64) {
1579                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1580                     return -TARGET_EFAULT;
1581                 }
1582             } else {
1583                 if (host_to_target_timespec(arg3, timeout_ts)) {
1584                     return -TARGET_EFAULT;
1585                 }
1586             }
1587         }
1588     } else {
1589           struct timespec ts, *pts;
1590 
1591           if (arg3 >= 0) {
1592               /* Convert ms to secs, ns */
1593               ts.tv_sec = arg3 / 1000;
1594               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1595               pts = &ts;
1596           } else {
1597               /* -ve poll() timeout means "infinite" */
1598               pts = NULL;
1599           }
1600           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1601     }
1602 
1603     if (!is_error(ret)) {
1604         for (i = 0; i < nfds; i++) {
1605             target_pfd[i].revents = tswap16(pfd[i].revents);
1606         }
1607     }
1608     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1609     return ret;
1610 }
1611 #endif
1612 
1613 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1614                         int flags, int is_pipe2)
1615 {
1616     int host_pipe[2];
1617     abi_long ret;
1618     ret = pipe2(host_pipe, flags);
1619 
1620     if (is_error(ret))
1621         return get_errno(ret);
1622 
1623     /* Several targets have special calling conventions for the original
1624        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1625     if (!is_pipe2) {
1626 #if defined(TARGET_ALPHA)
1627         cpu_env->ir[IR_A4] = host_pipe[1];
1628         return host_pipe[0];
1629 #elif defined(TARGET_MIPS)
1630         cpu_env->active_tc.gpr[3] = host_pipe[1];
1631         return host_pipe[0];
1632 #elif defined(TARGET_SH4)
1633         cpu_env->gregs[1] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_SPARC)
1636         cpu_env->regwptr[1] = host_pipe[1];
1637         return host_pipe[0];
1638 #endif
1639     }
1640 
1641     if (put_user_s32(host_pipe[0], pipedes)
1642         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1643         return -TARGET_EFAULT;
1644     return get_errno(ret);
1645 }
1646 
1647 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1648                                               abi_ulong target_addr,
1649                                               socklen_t len)
1650 {
1651     struct target_ip_mreqn *target_smreqn;
1652 
1653     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1654     if (!target_smreqn)
1655         return -TARGET_EFAULT;
1656     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1657     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1658     if (len == sizeof(struct target_ip_mreqn))
1659         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1660     unlock_user(target_smreqn, target_addr, 0);
1661 
1662     return 0;
1663 }
1664 
1665 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1666                                                abi_ulong target_addr,
1667                                                socklen_t len)
1668 {
1669     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1670     sa_family_t sa_family;
1671     struct target_sockaddr *target_saddr;
1672 
1673     if (fd_trans_target_to_host_addr(fd)) {
1674         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1675     }
1676 
1677     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1678     if (!target_saddr)
1679         return -TARGET_EFAULT;
1680 
1681     sa_family = tswap16(target_saddr->sa_family);
1682 
1683     /* Oops. The caller might send a incomplete sun_path; sun_path
1684      * must be terminated by \0 (see the manual page), but
1685      * unfortunately it is quite common to specify sockaddr_un
1686      * length as "strlen(x->sun_path)" while it should be
1687      * "strlen(...) + 1". We'll fix that here if needed.
1688      * Linux kernel has a similar feature.
1689      */
1690 
1691     if (sa_family == AF_UNIX) {
1692         if (len < unix_maxlen && len > 0) {
1693             char *cp = (char*)target_saddr;
1694 
1695             if ( cp[len-1] && !cp[len] )
1696                 len++;
1697         }
1698         if (len > unix_maxlen)
1699             len = unix_maxlen;
1700     }
1701 
1702     memcpy(addr, target_saddr, len);
1703     addr->sa_family = sa_family;
1704     if (sa_family == AF_NETLINK) {
1705         struct sockaddr_nl *nladdr;
1706 
1707         nladdr = (struct sockaddr_nl *)addr;
1708         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1709         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1710     } else if (sa_family == AF_PACKET) {
1711 	struct target_sockaddr_ll *lladdr;
1712 
1713 	lladdr = (struct target_sockaddr_ll *)addr;
1714 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1715 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1716     }
1717     unlock_user(target_saddr, target_addr, 0);
1718 
1719     return 0;
1720 }
1721 
1722 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1723                                                struct sockaddr *addr,
1724                                                socklen_t len)
1725 {
1726     struct target_sockaddr *target_saddr;
1727 
1728     if (len == 0) {
1729         return 0;
1730     }
1731     assert(addr);
1732 
1733     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1734     if (!target_saddr)
1735         return -TARGET_EFAULT;
1736     memcpy(target_saddr, addr, len);
1737     if (len >= offsetof(struct target_sockaddr, sa_family) +
1738         sizeof(target_saddr->sa_family)) {
1739         target_saddr->sa_family = tswap16(addr->sa_family);
1740     }
1741     if (addr->sa_family == AF_NETLINK &&
1742         len >= sizeof(struct target_sockaddr_nl)) {
1743         struct target_sockaddr_nl *target_nl =
1744                (struct target_sockaddr_nl *)target_saddr;
1745         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1746         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1747     } else if (addr->sa_family == AF_PACKET) {
1748         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1749         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1750         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1751     } else if (addr->sa_family == AF_INET6 &&
1752                len >= sizeof(struct target_sockaddr_in6)) {
1753         struct target_sockaddr_in6 *target_in6 =
1754                (struct target_sockaddr_in6 *)target_saddr;
1755         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1756     }
1757     unlock_user(target_saddr, target_addr, len);
1758 
1759     return 0;
1760 }
1761 
1762 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1763                                            struct target_msghdr *target_msgh)
1764 {
1765     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1766     abi_long msg_controllen;
1767     abi_ulong target_cmsg_addr;
1768     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1769     socklen_t space = 0;
1770 
1771     msg_controllen = tswapal(target_msgh->msg_controllen);
1772     if (msg_controllen < sizeof (struct target_cmsghdr))
1773         goto the_end;
1774     target_cmsg_addr = tswapal(target_msgh->msg_control);
1775     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1776     target_cmsg_start = target_cmsg;
1777     if (!target_cmsg)
1778         return -TARGET_EFAULT;
1779 
1780     while (cmsg && target_cmsg) {
1781         void *data = CMSG_DATA(cmsg);
1782         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1783 
1784         int len = tswapal(target_cmsg->cmsg_len)
1785             - sizeof(struct target_cmsghdr);
1786 
1787         space += CMSG_SPACE(len);
1788         if (space > msgh->msg_controllen) {
1789             space -= CMSG_SPACE(len);
1790             /* This is a QEMU bug, since we allocated the payload
1791              * area ourselves (unlike overflow in host-to-target
1792              * conversion, which is just the guest giving us a buffer
1793              * that's too small). It can't happen for the payload types
1794              * we currently support; if it becomes an issue in future
1795              * we would need to improve our allocation strategy to
1796              * something more intelligent than "twice the size of the
1797              * target buffer we're reading from".
1798              */
1799             qemu_log_mask(LOG_UNIMP,
1800                           ("Unsupported ancillary data %d/%d: "
1801                            "unhandled msg size\n"),
1802                           tswap32(target_cmsg->cmsg_level),
1803                           tswap32(target_cmsg->cmsg_type));
1804             break;
1805         }
1806 
1807         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1808             cmsg->cmsg_level = SOL_SOCKET;
1809         } else {
1810             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1811         }
1812         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1813         cmsg->cmsg_len = CMSG_LEN(len);
1814 
1815         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1816             int *fd = (int *)data;
1817             int *target_fd = (int *)target_data;
1818             int i, numfds = len / sizeof(int);
1819 
1820             for (i = 0; i < numfds; i++) {
1821                 __get_user(fd[i], target_fd + i);
1822             }
1823         } else if (cmsg->cmsg_level == SOL_SOCKET
1824                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1825             struct ucred *cred = (struct ucred *)data;
1826             struct target_ucred *target_cred =
1827                 (struct target_ucred *)target_data;
1828 
1829             __get_user(cred->pid, &target_cred->pid);
1830             __get_user(cred->uid, &target_cred->uid);
1831             __get_user(cred->gid, &target_cred->gid);
1832         } else {
1833             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1834                           cmsg->cmsg_level, cmsg->cmsg_type);
1835             memcpy(data, target_data, len);
1836         }
1837 
1838         cmsg = CMSG_NXTHDR(msgh, cmsg);
1839         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1840                                          target_cmsg_start);
1841     }
1842     unlock_user(target_cmsg, target_cmsg_addr, 0);
1843  the_end:
1844     msgh->msg_controllen = space;
1845     return 0;
1846 }
1847 
1848 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1849                                            struct msghdr *msgh)
1850 {
1851     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1852     abi_long msg_controllen;
1853     abi_ulong target_cmsg_addr;
1854     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1855     socklen_t space = 0;
1856 
1857     msg_controllen = tswapal(target_msgh->msg_controllen);
1858     if (msg_controllen < sizeof (struct target_cmsghdr))
1859         goto the_end;
1860     target_cmsg_addr = tswapal(target_msgh->msg_control);
1861     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1862     target_cmsg_start = target_cmsg;
1863     if (!target_cmsg)
1864         return -TARGET_EFAULT;
1865 
1866     while (cmsg && target_cmsg) {
1867         void *data = CMSG_DATA(cmsg);
1868         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1869 
1870         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1871         int tgt_len, tgt_space;
1872 
1873         /* We never copy a half-header but may copy half-data;
1874          * this is Linux's behaviour in put_cmsg(). Note that
1875          * truncation here is a guest problem (which we report
1876          * to the guest via the CTRUNC bit), unlike truncation
1877          * in target_to_host_cmsg, which is a QEMU bug.
1878          */
1879         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1880             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1881             break;
1882         }
1883 
1884         if (cmsg->cmsg_level == SOL_SOCKET) {
1885             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1886         } else {
1887             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1888         }
1889         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1890 
1891         /* Payload types which need a different size of payload on
1892          * the target must adjust tgt_len here.
1893          */
1894         tgt_len = len;
1895         switch (cmsg->cmsg_level) {
1896         case SOL_SOCKET:
1897             switch (cmsg->cmsg_type) {
1898             case SO_TIMESTAMP:
1899                 tgt_len = sizeof(struct target_timeval);
1900                 break;
1901             default:
1902                 break;
1903             }
1904             break;
1905         default:
1906             break;
1907         }
1908 
1909         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1910             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1911             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1912         }
1913 
1914         /* We must now copy-and-convert len bytes of payload
1915          * into tgt_len bytes of destination space. Bear in mind
1916          * that in both source and destination we may be dealing
1917          * with a truncated value!
1918          */
1919         switch (cmsg->cmsg_level) {
1920         case SOL_SOCKET:
1921             switch (cmsg->cmsg_type) {
1922             case SCM_RIGHTS:
1923             {
1924                 int *fd = (int *)data;
1925                 int *target_fd = (int *)target_data;
1926                 int i, numfds = tgt_len / sizeof(int);
1927 
1928                 for (i = 0; i < numfds; i++) {
1929                     __put_user(fd[i], target_fd + i);
1930                 }
1931                 break;
1932             }
1933             case SO_TIMESTAMP:
1934             {
1935                 struct timeval *tv = (struct timeval *)data;
1936                 struct target_timeval *target_tv =
1937                     (struct target_timeval *)target_data;
1938 
1939                 if (len != sizeof(struct timeval) ||
1940                     tgt_len != sizeof(struct target_timeval)) {
1941                     goto unimplemented;
1942                 }
1943 
1944                 /* copy struct timeval to target */
1945                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1946                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1947                 break;
1948             }
1949             case SCM_CREDENTIALS:
1950             {
1951                 struct ucred *cred = (struct ucred *)data;
1952                 struct target_ucred *target_cred =
1953                     (struct target_ucred *)target_data;
1954 
1955                 __put_user(cred->pid, &target_cred->pid);
1956                 __put_user(cred->uid, &target_cred->uid);
1957                 __put_user(cred->gid, &target_cred->gid);
1958                 break;
1959             }
1960             default:
1961                 goto unimplemented;
1962             }
1963             break;
1964 
1965         case SOL_IP:
1966             switch (cmsg->cmsg_type) {
1967             case IP_TTL:
1968             {
1969                 uint32_t *v = (uint32_t *)data;
1970                 uint32_t *t_int = (uint32_t *)target_data;
1971 
1972                 if (len != sizeof(uint32_t) ||
1973                     tgt_len != sizeof(uint32_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(*v, t_int);
1977                 break;
1978             }
1979             case IP_RECVERR:
1980             {
1981                 struct errhdr_t {
1982                    struct sock_extended_err ee;
1983                    struct sockaddr_in offender;
1984                 };
1985                 struct errhdr_t *errh = (struct errhdr_t *)data;
1986                 struct errhdr_t *target_errh =
1987                     (struct errhdr_t *)target_data;
1988 
1989                 if (len != sizeof(struct errhdr_t) ||
1990                     tgt_len != sizeof(struct errhdr_t)) {
1991                     goto unimplemented;
1992                 }
1993                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1994                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1995                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1996                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1997                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1998                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1999                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2000                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2001                     (void *) &errh->offender, sizeof(errh->offender));
2002                 break;
2003             }
2004             default:
2005                 goto unimplemented;
2006             }
2007             break;
2008 
2009         case SOL_IPV6:
2010             switch (cmsg->cmsg_type) {
2011             case IPV6_HOPLIMIT:
2012             {
2013                 uint32_t *v = (uint32_t *)data;
2014                 uint32_t *t_int = (uint32_t *)target_data;
2015 
2016                 if (len != sizeof(uint32_t) ||
2017                     tgt_len != sizeof(uint32_t)) {
2018                     goto unimplemented;
2019                 }
2020                 __put_user(*v, t_int);
2021                 break;
2022             }
2023             case IPV6_RECVERR:
2024             {
2025                 struct errhdr6_t {
2026                    struct sock_extended_err ee;
2027                    struct sockaddr_in6 offender;
2028                 };
2029                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2030                 struct errhdr6_t *target_errh =
2031                     (struct errhdr6_t *)target_data;
2032 
2033                 if (len != sizeof(struct errhdr6_t) ||
2034                     tgt_len != sizeof(struct errhdr6_t)) {
2035                     goto unimplemented;
2036                 }
2037                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2038                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2039                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2040                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2041                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2042                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2043                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2044                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2045                     (void *) &errh->offender, sizeof(errh->offender));
2046                 break;
2047             }
2048             default:
2049                 goto unimplemented;
2050             }
2051             break;
2052 
2053         default:
2054         unimplemented:
2055             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2056                           cmsg->cmsg_level, cmsg->cmsg_type);
2057             memcpy(target_data, data, MIN(len, tgt_len));
2058             if (tgt_len > len) {
2059                 memset(target_data + len, 0, tgt_len - len);
2060             }
2061         }
2062 
2063         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2064         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2065         if (msg_controllen < tgt_space) {
2066             tgt_space = msg_controllen;
2067         }
2068         msg_controllen -= tgt_space;
2069         space += tgt_space;
2070         cmsg = CMSG_NXTHDR(msgh, cmsg);
2071         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2072                                          target_cmsg_start);
2073     }
2074     unlock_user(target_cmsg, target_cmsg_addr, space);
2075  the_end:
2076     target_msgh->msg_controllen = tswapal(space);
2077     return 0;
2078 }
2079 
2080 /* do_setsockopt() Must return target values and target errnos. */
2081 static abi_long do_setsockopt(int sockfd, int level, int optname,
2082                               abi_ulong optval_addr, socklen_t optlen)
2083 {
2084     abi_long ret;
2085     int val;
2086     struct ip_mreqn *ip_mreq;
2087     struct ip_mreq_source *ip_mreq_source;
2088 
2089     switch(level) {
2090     case SOL_TCP:
2091     case SOL_UDP:
2092         /* TCP and UDP options all take an 'int' value.  */
2093         if (optlen < sizeof(uint32_t))
2094             return -TARGET_EINVAL;
2095 
2096         if (get_user_u32(val, optval_addr))
2097             return -TARGET_EFAULT;
2098         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2099         break;
2100     case SOL_IP:
2101         switch(optname) {
2102         case IP_TOS:
2103         case IP_TTL:
2104         case IP_HDRINCL:
2105         case IP_ROUTER_ALERT:
2106         case IP_RECVOPTS:
2107         case IP_RETOPTS:
2108         case IP_PKTINFO:
2109         case IP_MTU_DISCOVER:
2110         case IP_RECVERR:
2111         case IP_RECVTTL:
2112         case IP_RECVTOS:
2113 #ifdef IP_FREEBIND
2114         case IP_FREEBIND:
2115 #endif
2116         case IP_MULTICAST_TTL:
2117         case IP_MULTICAST_LOOP:
2118             val = 0;
2119             if (optlen >= sizeof(uint32_t)) {
2120                 if (get_user_u32(val, optval_addr))
2121                     return -TARGET_EFAULT;
2122             } else if (optlen >= 1) {
2123                 if (get_user_u8(val, optval_addr))
2124                     return -TARGET_EFAULT;
2125             }
2126             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2127             break;
2128         case IP_ADD_MEMBERSHIP:
2129         case IP_DROP_MEMBERSHIP:
2130             if (optlen < sizeof (struct target_ip_mreq) ||
2131                 optlen > sizeof (struct target_ip_mreqn))
2132                 return -TARGET_EINVAL;
2133 
2134             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2135             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2136             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2137             break;
2138 
2139         case IP_BLOCK_SOURCE:
2140         case IP_UNBLOCK_SOURCE:
2141         case IP_ADD_SOURCE_MEMBERSHIP:
2142         case IP_DROP_SOURCE_MEMBERSHIP:
2143             if (optlen != sizeof (struct target_ip_mreq_source))
2144                 return -TARGET_EINVAL;
2145 
2146             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2147             if (!ip_mreq_source) {
2148                 return -TARGET_EFAULT;
2149             }
2150             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2151             unlock_user (ip_mreq_source, optval_addr, 0);
2152             break;
2153 
2154         default:
2155             goto unimplemented;
2156         }
2157         break;
2158     case SOL_IPV6:
2159         switch (optname) {
2160         case IPV6_MTU_DISCOVER:
2161         case IPV6_MTU:
2162         case IPV6_V6ONLY:
2163         case IPV6_RECVPKTINFO:
2164         case IPV6_UNICAST_HOPS:
2165         case IPV6_MULTICAST_HOPS:
2166         case IPV6_MULTICAST_LOOP:
2167         case IPV6_RECVERR:
2168         case IPV6_RECVHOPLIMIT:
2169         case IPV6_2292HOPLIMIT:
2170         case IPV6_CHECKSUM:
2171         case IPV6_ADDRFORM:
2172         case IPV6_2292PKTINFO:
2173         case IPV6_RECVTCLASS:
2174         case IPV6_RECVRTHDR:
2175         case IPV6_2292RTHDR:
2176         case IPV6_RECVHOPOPTS:
2177         case IPV6_2292HOPOPTS:
2178         case IPV6_RECVDSTOPTS:
2179         case IPV6_2292DSTOPTS:
2180         case IPV6_TCLASS:
2181         case IPV6_ADDR_PREFERENCES:
2182 #ifdef IPV6_RECVPATHMTU
2183         case IPV6_RECVPATHMTU:
2184 #endif
2185 #ifdef IPV6_TRANSPARENT
2186         case IPV6_TRANSPARENT:
2187 #endif
2188 #ifdef IPV6_FREEBIND
2189         case IPV6_FREEBIND:
2190 #endif
2191 #ifdef IPV6_RECVORIGDSTADDR
2192         case IPV6_RECVORIGDSTADDR:
2193 #endif
2194             val = 0;
2195             if (optlen < sizeof(uint32_t)) {
2196                 return -TARGET_EINVAL;
2197             }
2198             if (get_user_u32(val, optval_addr)) {
2199                 return -TARGET_EFAULT;
2200             }
2201             ret = get_errno(setsockopt(sockfd, level, optname,
2202                                        &val, sizeof(val)));
2203             break;
2204         case IPV6_PKTINFO:
2205         {
2206             struct in6_pktinfo pki;
2207 
2208             if (optlen < sizeof(pki)) {
2209                 return -TARGET_EINVAL;
2210             }
2211 
2212             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2213                 return -TARGET_EFAULT;
2214             }
2215 
2216             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2217 
2218             ret = get_errno(setsockopt(sockfd, level, optname,
2219                                        &pki, sizeof(pki)));
2220             break;
2221         }
2222         case IPV6_ADD_MEMBERSHIP:
2223         case IPV6_DROP_MEMBERSHIP:
2224         {
2225             struct ipv6_mreq ipv6mreq;
2226 
2227             if (optlen < sizeof(ipv6mreq)) {
2228                 return -TARGET_EINVAL;
2229             }
2230 
2231             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2232                 return -TARGET_EFAULT;
2233             }
2234 
2235             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2236 
2237             ret = get_errno(setsockopt(sockfd, level, optname,
2238                                        &ipv6mreq, sizeof(ipv6mreq)));
2239             break;
2240         }
2241         default:
2242             goto unimplemented;
2243         }
2244         break;
2245     case SOL_ICMPV6:
2246         switch (optname) {
2247         case ICMPV6_FILTER:
2248         {
2249             struct icmp6_filter icmp6f;
2250 
2251             if (optlen > sizeof(icmp6f)) {
2252                 optlen = sizeof(icmp6f);
2253             }
2254 
2255             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2256                 return -TARGET_EFAULT;
2257             }
2258 
2259             for (val = 0; val < 8; val++) {
2260                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2261             }
2262 
2263             ret = get_errno(setsockopt(sockfd, level, optname,
2264                                        &icmp6f, optlen));
2265             break;
2266         }
2267         default:
2268             goto unimplemented;
2269         }
2270         break;
2271     case SOL_RAW:
2272         switch (optname) {
2273         case ICMP_FILTER:
2274         case IPV6_CHECKSUM:
2275             /* those take an u32 value */
2276             if (optlen < sizeof(uint32_t)) {
2277                 return -TARGET_EINVAL;
2278             }
2279 
2280             if (get_user_u32(val, optval_addr)) {
2281                 return -TARGET_EFAULT;
2282             }
2283             ret = get_errno(setsockopt(sockfd, level, optname,
2284                                        &val, sizeof(val)));
2285             break;
2286 
2287         default:
2288             goto unimplemented;
2289         }
2290         break;
2291 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2292     case SOL_ALG:
2293         switch (optname) {
2294         case ALG_SET_KEY:
2295         {
2296             char *alg_key = g_malloc(optlen);
2297 
2298             if (!alg_key) {
2299                 return -TARGET_ENOMEM;
2300             }
2301             if (copy_from_user(alg_key, optval_addr, optlen)) {
2302                 g_free(alg_key);
2303                 return -TARGET_EFAULT;
2304             }
2305             ret = get_errno(setsockopt(sockfd, level, optname,
2306                                        alg_key, optlen));
2307             g_free(alg_key);
2308             break;
2309         }
2310         case ALG_SET_AEAD_AUTHSIZE:
2311         {
2312             ret = get_errno(setsockopt(sockfd, level, optname,
2313                                        NULL, optlen));
2314             break;
2315         }
2316         default:
2317             goto unimplemented;
2318         }
2319         break;
2320 #endif
2321     case TARGET_SOL_SOCKET:
2322         switch (optname) {
2323         case TARGET_SO_RCVTIMEO:
2324         {
2325                 struct timeval tv;
2326 
2327                 optname = SO_RCVTIMEO;
2328 
2329 set_timeout:
2330                 if (optlen != sizeof(struct target_timeval)) {
2331                     return -TARGET_EINVAL;
2332                 }
2333 
2334                 if (copy_from_user_timeval(&tv, optval_addr)) {
2335                     return -TARGET_EFAULT;
2336                 }
2337 
2338                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2339                                 &tv, sizeof(tv)));
2340                 return ret;
2341         }
2342         case TARGET_SO_SNDTIMEO:
2343                 optname = SO_SNDTIMEO;
2344                 goto set_timeout;
2345         case TARGET_SO_ATTACH_FILTER:
2346         {
2347                 struct target_sock_fprog *tfprog;
2348                 struct target_sock_filter *tfilter;
2349                 struct sock_fprog fprog;
2350                 struct sock_filter *filter;
2351                 int i;
2352 
2353                 if (optlen != sizeof(*tfprog)) {
2354                     return -TARGET_EINVAL;
2355                 }
2356                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2357                     return -TARGET_EFAULT;
2358                 }
2359                 if (!lock_user_struct(VERIFY_READ, tfilter,
2360                                       tswapal(tfprog->filter), 0)) {
2361                     unlock_user_struct(tfprog, optval_addr, 1);
2362                     return -TARGET_EFAULT;
2363                 }
2364 
2365                 fprog.len = tswap16(tfprog->len);
2366                 filter = g_try_new(struct sock_filter, fprog.len);
2367                 if (filter == NULL) {
2368                     unlock_user_struct(tfilter, tfprog->filter, 1);
2369                     unlock_user_struct(tfprog, optval_addr, 1);
2370                     return -TARGET_ENOMEM;
2371                 }
2372                 for (i = 0; i < fprog.len; i++) {
2373                     filter[i].code = tswap16(tfilter[i].code);
2374                     filter[i].jt = tfilter[i].jt;
2375                     filter[i].jf = tfilter[i].jf;
2376                     filter[i].k = tswap32(tfilter[i].k);
2377                 }
2378                 fprog.filter = filter;
2379 
2380                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2381                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2382                 g_free(filter);
2383 
2384                 unlock_user_struct(tfilter, tfprog->filter, 1);
2385                 unlock_user_struct(tfprog, optval_addr, 1);
2386                 return ret;
2387         }
2388 	case TARGET_SO_BINDTODEVICE:
2389 	{
2390 		char *dev_ifname, *addr_ifname;
2391 
2392 		if (optlen > IFNAMSIZ - 1) {
2393 		    optlen = IFNAMSIZ - 1;
2394 		}
2395 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2396 		if (!dev_ifname) {
2397 		    return -TARGET_EFAULT;
2398 		}
2399 		optname = SO_BINDTODEVICE;
2400 		addr_ifname = alloca(IFNAMSIZ);
2401 		memcpy(addr_ifname, dev_ifname, optlen);
2402 		addr_ifname[optlen] = 0;
2403 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2404                                            addr_ifname, optlen));
2405 		unlock_user (dev_ifname, optval_addr, 0);
2406 		return ret;
2407 	}
2408         case TARGET_SO_LINGER:
2409         {
2410                 struct linger lg;
2411                 struct target_linger *tlg;
2412 
2413                 if (optlen != sizeof(struct target_linger)) {
2414                     return -TARGET_EINVAL;
2415                 }
2416                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2417                     return -TARGET_EFAULT;
2418                 }
2419                 __get_user(lg.l_onoff, &tlg->l_onoff);
2420                 __get_user(lg.l_linger, &tlg->l_linger);
2421                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2422                                 &lg, sizeof(lg)));
2423                 unlock_user_struct(tlg, optval_addr, 0);
2424                 return ret;
2425         }
2426             /* Options with 'int' argument.  */
2427         case TARGET_SO_DEBUG:
2428 		optname = SO_DEBUG;
2429 		break;
2430         case TARGET_SO_REUSEADDR:
2431 		optname = SO_REUSEADDR;
2432 		break;
2433 #ifdef SO_REUSEPORT
2434         case TARGET_SO_REUSEPORT:
2435                 optname = SO_REUSEPORT;
2436                 break;
2437 #endif
2438         case TARGET_SO_TYPE:
2439 		optname = SO_TYPE;
2440 		break;
2441         case TARGET_SO_ERROR:
2442 		optname = SO_ERROR;
2443 		break;
2444         case TARGET_SO_DONTROUTE:
2445 		optname = SO_DONTROUTE;
2446 		break;
2447         case TARGET_SO_BROADCAST:
2448 		optname = SO_BROADCAST;
2449 		break;
2450         case TARGET_SO_SNDBUF:
2451 		optname = SO_SNDBUF;
2452 		break;
2453         case TARGET_SO_SNDBUFFORCE:
2454                 optname = SO_SNDBUFFORCE;
2455                 break;
2456         case TARGET_SO_RCVBUF:
2457 		optname = SO_RCVBUF;
2458 		break;
2459         case TARGET_SO_RCVBUFFORCE:
2460                 optname = SO_RCVBUFFORCE;
2461                 break;
2462         case TARGET_SO_KEEPALIVE:
2463 		optname = SO_KEEPALIVE;
2464 		break;
2465         case TARGET_SO_OOBINLINE:
2466 		optname = SO_OOBINLINE;
2467 		break;
2468         case TARGET_SO_NO_CHECK:
2469 		optname = SO_NO_CHECK;
2470 		break;
2471         case TARGET_SO_PRIORITY:
2472 		optname = SO_PRIORITY;
2473 		break;
2474 #ifdef SO_BSDCOMPAT
2475         case TARGET_SO_BSDCOMPAT:
2476 		optname = SO_BSDCOMPAT;
2477 		break;
2478 #endif
2479         case TARGET_SO_PASSCRED:
2480 		optname = SO_PASSCRED;
2481 		break;
2482         case TARGET_SO_PASSSEC:
2483                 optname = SO_PASSSEC;
2484                 break;
2485         case TARGET_SO_TIMESTAMP:
2486 		optname = SO_TIMESTAMP;
2487 		break;
2488         case TARGET_SO_RCVLOWAT:
2489 		optname = SO_RCVLOWAT;
2490 		break;
2491         default:
2492             goto unimplemented;
2493         }
2494 	if (optlen < sizeof(uint32_t))
2495             return -TARGET_EINVAL;
2496 
2497 	if (get_user_u32(val, optval_addr))
2498             return -TARGET_EFAULT;
2499 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2500         break;
2501 #ifdef SOL_NETLINK
2502     case SOL_NETLINK:
2503         switch (optname) {
2504         case NETLINK_PKTINFO:
2505         case NETLINK_ADD_MEMBERSHIP:
2506         case NETLINK_DROP_MEMBERSHIP:
2507         case NETLINK_BROADCAST_ERROR:
2508         case NETLINK_NO_ENOBUFS:
2509 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2510         case NETLINK_LISTEN_ALL_NSID:
2511         case NETLINK_CAP_ACK:
2512 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2513 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2514         case NETLINK_EXT_ACK:
2515 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2516 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2517         case NETLINK_GET_STRICT_CHK:
2518 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2519             break;
2520         default:
2521             goto unimplemented;
2522         }
2523         val = 0;
2524         if (optlen < sizeof(uint32_t)) {
2525             return -TARGET_EINVAL;
2526         }
2527         if (get_user_u32(val, optval_addr)) {
2528             return -TARGET_EFAULT;
2529         }
2530         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2531                                    sizeof(val)));
2532         break;
2533 #endif /* SOL_NETLINK */
2534     default:
2535     unimplemented:
2536         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2537                       level, optname);
2538         ret = -TARGET_ENOPROTOOPT;
2539     }
2540     return ret;
2541 }
2542 
2543 /* do_getsockopt() Must return target values and target errnos. */
2544 static abi_long do_getsockopt(int sockfd, int level, int optname,
2545                               abi_ulong optval_addr, abi_ulong optlen)
2546 {
2547     abi_long ret;
2548     int len, val;
2549     socklen_t lv;
2550 
2551     switch(level) {
2552     case TARGET_SOL_SOCKET:
2553         level = SOL_SOCKET;
2554         switch (optname) {
2555         /* These don't just return a single integer */
2556         case TARGET_SO_PEERNAME:
2557             goto unimplemented;
2558         case TARGET_SO_RCVTIMEO: {
2559             struct timeval tv;
2560             socklen_t tvlen;
2561 
2562             optname = SO_RCVTIMEO;
2563 
2564 get_timeout:
2565             if (get_user_u32(len, optlen)) {
2566                 return -TARGET_EFAULT;
2567             }
2568             if (len < 0) {
2569                 return -TARGET_EINVAL;
2570             }
2571 
2572             tvlen = sizeof(tv);
2573             ret = get_errno(getsockopt(sockfd, level, optname,
2574                                        &tv, &tvlen));
2575             if (ret < 0) {
2576                 return ret;
2577             }
2578             if (len > sizeof(struct target_timeval)) {
2579                 len = sizeof(struct target_timeval);
2580             }
2581             if (copy_to_user_timeval(optval_addr, &tv)) {
2582                 return -TARGET_EFAULT;
2583             }
2584             if (put_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             break;
2588         }
2589         case TARGET_SO_SNDTIMEO:
2590             optname = SO_SNDTIMEO;
2591             goto get_timeout;
2592         case TARGET_SO_PEERCRED: {
2593             struct ucred cr;
2594             socklen_t crlen;
2595             struct target_ucred *tcr;
2596 
2597             if (get_user_u32(len, optlen)) {
2598                 return -TARGET_EFAULT;
2599             }
2600             if (len < 0) {
2601                 return -TARGET_EINVAL;
2602             }
2603 
2604             crlen = sizeof(cr);
2605             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2606                                        &cr, &crlen));
2607             if (ret < 0) {
2608                 return ret;
2609             }
2610             if (len > crlen) {
2611                 len = crlen;
2612             }
2613             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             __put_user(cr.pid, &tcr->pid);
2617             __put_user(cr.uid, &tcr->uid);
2618             __put_user(cr.gid, &tcr->gid);
2619             unlock_user_struct(tcr, optval_addr, 1);
2620             if (put_user_u32(len, optlen)) {
2621                 return -TARGET_EFAULT;
2622             }
2623             break;
2624         }
2625         case TARGET_SO_PEERSEC: {
2626             char *name;
2627 
2628             if (get_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             if (len < 0) {
2632                 return -TARGET_EINVAL;
2633             }
2634             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2635             if (!name) {
2636                 return -TARGET_EFAULT;
2637             }
2638             lv = len;
2639             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2640                                        name, &lv));
2641             if (put_user_u32(lv, optlen)) {
2642                 ret = -TARGET_EFAULT;
2643             }
2644             unlock_user(name, optval_addr, lv);
2645             break;
2646         }
2647         case TARGET_SO_LINGER:
2648         {
2649             struct linger lg;
2650             socklen_t lglen;
2651             struct target_linger *tlg;
2652 
2653             if (get_user_u32(len, optlen)) {
2654                 return -TARGET_EFAULT;
2655             }
2656             if (len < 0) {
2657                 return -TARGET_EINVAL;
2658             }
2659 
2660             lglen = sizeof(lg);
2661             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2662                                        &lg, &lglen));
2663             if (ret < 0) {
2664                 return ret;
2665             }
2666             if (len > lglen) {
2667                 len = lglen;
2668             }
2669             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2670                 return -TARGET_EFAULT;
2671             }
2672             __put_user(lg.l_onoff, &tlg->l_onoff);
2673             __put_user(lg.l_linger, &tlg->l_linger);
2674             unlock_user_struct(tlg, optval_addr, 1);
2675             if (put_user_u32(len, optlen)) {
2676                 return -TARGET_EFAULT;
2677             }
2678             break;
2679         }
2680         /* Options with 'int' argument.  */
2681         case TARGET_SO_DEBUG:
2682             optname = SO_DEBUG;
2683             goto int_case;
2684         case TARGET_SO_REUSEADDR:
2685             optname = SO_REUSEADDR;
2686             goto int_case;
2687 #ifdef SO_REUSEPORT
2688         case TARGET_SO_REUSEPORT:
2689             optname = SO_REUSEPORT;
2690             goto int_case;
2691 #endif
2692         case TARGET_SO_TYPE:
2693             optname = SO_TYPE;
2694             goto int_case;
2695         case TARGET_SO_ERROR:
2696             optname = SO_ERROR;
2697             goto int_case;
2698         case TARGET_SO_DONTROUTE:
2699             optname = SO_DONTROUTE;
2700             goto int_case;
2701         case TARGET_SO_BROADCAST:
2702             optname = SO_BROADCAST;
2703             goto int_case;
2704         case TARGET_SO_SNDBUF:
2705             optname = SO_SNDBUF;
2706             goto int_case;
2707         case TARGET_SO_RCVBUF:
2708             optname = SO_RCVBUF;
2709             goto int_case;
2710         case TARGET_SO_KEEPALIVE:
2711             optname = SO_KEEPALIVE;
2712             goto int_case;
2713         case TARGET_SO_OOBINLINE:
2714             optname = SO_OOBINLINE;
2715             goto int_case;
2716         case TARGET_SO_NO_CHECK:
2717             optname = SO_NO_CHECK;
2718             goto int_case;
2719         case TARGET_SO_PRIORITY:
2720             optname = SO_PRIORITY;
2721             goto int_case;
2722 #ifdef SO_BSDCOMPAT
2723         case TARGET_SO_BSDCOMPAT:
2724             optname = SO_BSDCOMPAT;
2725             goto int_case;
2726 #endif
2727         case TARGET_SO_PASSCRED:
2728             optname = SO_PASSCRED;
2729             goto int_case;
2730         case TARGET_SO_TIMESTAMP:
2731             optname = SO_TIMESTAMP;
2732             goto int_case;
2733         case TARGET_SO_RCVLOWAT:
2734             optname = SO_RCVLOWAT;
2735             goto int_case;
2736         case TARGET_SO_ACCEPTCONN:
2737             optname = SO_ACCEPTCONN;
2738             goto int_case;
2739         case TARGET_SO_PROTOCOL:
2740             optname = SO_PROTOCOL;
2741             goto int_case;
2742         case TARGET_SO_DOMAIN:
2743             optname = SO_DOMAIN;
2744             goto int_case;
2745         default:
2746             goto int_case;
2747         }
2748         break;
2749     case SOL_TCP:
2750     case SOL_UDP:
2751         /* TCP and UDP options all take an 'int' value.  */
2752     int_case:
2753         if (get_user_u32(len, optlen))
2754             return -TARGET_EFAULT;
2755         if (len < 0)
2756             return -TARGET_EINVAL;
2757         lv = sizeof(lv);
2758         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2759         if (ret < 0)
2760             return ret;
2761         switch (optname) {
2762         case SO_TYPE:
2763             val = host_to_target_sock_type(val);
2764             break;
2765         case SO_ERROR:
2766             val = host_to_target_errno(val);
2767             break;
2768         }
2769         if (len > lv)
2770             len = lv;
2771         if (len == 4) {
2772             if (put_user_u32(val, optval_addr))
2773                 return -TARGET_EFAULT;
2774         } else {
2775             if (put_user_u8(val, optval_addr))
2776                 return -TARGET_EFAULT;
2777         }
2778         if (put_user_u32(len, optlen))
2779             return -TARGET_EFAULT;
2780         break;
2781     case SOL_IP:
2782         switch(optname) {
2783         case IP_TOS:
2784         case IP_TTL:
2785         case IP_HDRINCL:
2786         case IP_ROUTER_ALERT:
2787         case IP_RECVOPTS:
2788         case IP_RETOPTS:
2789         case IP_PKTINFO:
2790         case IP_MTU_DISCOVER:
2791         case IP_RECVERR:
2792         case IP_RECVTOS:
2793 #ifdef IP_FREEBIND
2794         case IP_FREEBIND:
2795 #endif
2796         case IP_MULTICAST_TTL:
2797         case IP_MULTICAST_LOOP:
2798             if (get_user_u32(len, optlen))
2799                 return -TARGET_EFAULT;
2800             if (len < 0)
2801                 return -TARGET_EINVAL;
2802             lv = sizeof(lv);
2803             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2804             if (ret < 0)
2805                 return ret;
2806             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2807                 len = 1;
2808                 if (put_user_u32(len, optlen)
2809                     || put_user_u8(val, optval_addr))
2810                     return -TARGET_EFAULT;
2811             } else {
2812                 if (len > sizeof(int))
2813                     len = sizeof(int);
2814                 if (put_user_u32(len, optlen)
2815                     || put_user_u32(val, optval_addr))
2816                     return -TARGET_EFAULT;
2817             }
2818             break;
2819         default:
2820             ret = -TARGET_ENOPROTOOPT;
2821             break;
2822         }
2823         break;
2824     case SOL_IPV6:
2825         switch (optname) {
2826         case IPV6_MTU_DISCOVER:
2827         case IPV6_MTU:
2828         case IPV6_V6ONLY:
2829         case IPV6_RECVPKTINFO:
2830         case IPV6_UNICAST_HOPS:
2831         case IPV6_MULTICAST_HOPS:
2832         case IPV6_MULTICAST_LOOP:
2833         case IPV6_RECVERR:
2834         case IPV6_RECVHOPLIMIT:
2835         case IPV6_2292HOPLIMIT:
2836         case IPV6_CHECKSUM:
2837         case IPV6_ADDRFORM:
2838         case IPV6_2292PKTINFO:
2839         case IPV6_RECVTCLASS:
2840         case IPV6_RECVRTHDR:
2841         case IPV6_2292RTHDR:
2842         case IPV6_RECVHOPOPTS:
2843         case IPV6_2292HOPOPTS:
2844         case IPV6_RECVDSTOPTS:
2845         case IPV6_2292DSTOPTS:
2846         case IPV6_TCLASS:
2847         case IPV6_ADDR_PREFERENCES:
2848 #ifdef IPV6_RECVPATHMTU
2849         case IPV6_RECVPATHMTU:
2850 #endif
2851 #ifdef IPV6_TRANSPARENT
2852         case IPV6_TRANSPARENT:
2853 #endif
2854 #ifdef IPV6_FREEBIND
2855         case IPV6_FREEBIND:
2856 #endif
2857 #ifdef IPV6_RECVORIGDSTADDR
2858         case IPV6_RECVORIGDSTADDR:
2859 #endif
2860             if (get_user_u32(len, optlen))
2861                 return -TARGET_EFAULT;
2862             if (len < 0)
2863                 return -TARGET_EINVAL;
2864             lv = sizeof(lv);
2865             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2866             if (ret < 0)
2867                 return ret;
2868             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2869                 len = 1;
2870                 if (put_user_u32(len, optlen)
2871                     || put_user_u8(val, optval_addr))
2872                     return -TARGET_EFAULT;
2873             } else {
2874                 if (len > sizeof(int))
2875                     len = sizeof(int);
2876                 if (put_user_u32(len, optlen)
2877                     || put_user_u32(val, optval_addr))
2878                     return -TARGET_EFAULT;
2879             }
2880             break;
2881         default:
2882             ret = -TARGET_ENOPROTOOPT;
2883             break;
2884         }
2885         break;
2886 #ifdef SOL_NETLINK
2887     case SOL_NETLINK:
2888         switch (optname) {
2889         case NETLINK_PKTINFO:
2890         case NETLINK_BROADCAST_ERROR:
2891         case NETLINK_NO_ENOBUFS:
2892 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2893         case NETLINK_LISTEN_ALL_NSID:
2894         case NETLINK_CAP_ACK:
2895 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2897         case NETLINK_EXT_ACK:
2898 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2900         case NETLINK_GET_STRICT_CHK:
2901 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2902             if (get_user_u32(len, optlen)) {
2903                 return -TARGET_EFAULT;
2904             }
2905             if (len != sizeof(val)) {
2906                 return -TARGET_EINVAL;
2907             }
2908             lv = len;
2909             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2910             if (ret < 0) {
2911                 return ret;
2912             }
2913             if (put_user_u32(lv, optlen)
2914                 || put_user_u32(val, optval_addr)) {
2915                 return -TARGET_EFAULT;
2916             }
2917             break;
2918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2919         case NETLINK_LIST_MEMBERSHIPS:
2920         {
2921             uint32_t *results;
2922             int i;
2923             if (get_user_u32(len, optlen)) {
2924                 return -TARGET_EFAULT;
2925             }
2926             if (len < 0) {
2927                 return -TARGET_EINVAL;
2928             }
2929             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2930             if (!results && len > 0) {
2931                 return -TARGET_EFAULT;
2932             }
2933             lv = len;
2934             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2935             if (ret < 0) {
2936                 unlock_user(results, optval_addr, 0);
2937                 return ret;
2938             }
2939             /* swap host endianess to target endianess. */
2940             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2941                 results[i] = tswap32(results[i]);
2942             }
2943             if (put_user_u32(lv, optlen)) {
2944                 return -TARGET_EFAULT;
2945             }
2946             unlock_user(results, optval_addr, 0);
2947             break;
2948         }
2949 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2950         default:
2951             goto unimplemented;
2952         }
2953         break;
2954 #endif /* SOL_NETLINK */
2955     default:
2956     unimplemented:
2957         qemu_log_mask(LOG_UNIMP,
2958                       "getsockopt level=%d optname=%d not yet supported\n",
2959                       level, optname);
2960         ret = -TARGET_EOPNOTSUPP;
2961         break;
2962     }
2963     return ret;
2964 }
2965 
2966 /* Convert target low/high pair representing file offset into the host
2967  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2968  * as the kernel doesn't handle them either.
2969  */
2970 static void target_to_host_low_high(abi_ulong tlow,
2971                                     abi_ulong thigh,
2972                                     unsigned long *hlow,
2973                                     unsigned long *hhigh)
2974 {
2975     uint64_t off = tlow |
2976         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2977         TARGET_LONG_BITS / 2;
2978 
2979     *hlow = off;
2980     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2981 }
2982 
2983 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2984                                 abi_ulong count, int copy)
2985 {
2986     struct target_iovec *target_vec;
2987     struct iovec *vec;
2988     abi_ulong total_len, max_len;
2989     int i;
2990     int err = 0;
2991     bool bad_address = false;
2992 
2993     if (count == 0) {
2994         errno = 0;
2995         return NULL;
2996     }
2997     if (count > IOV_MAX) {
2998         errno = EINVAL;
2999         return NULL;
3000     }
3001 
3002     vec = g_try_new0(struct iovec, count);
3003     if (vec == NULL) {
3004         errno = ENOMEM;
3005         return NULL;
3006     }
3007 
3008     target_vec = lock_user(VERIFY_READ, target_addr,
3009                            count * sizeof(struct target_iovec), 1);
3010     if (target_vec == NULL) {
3011         err = EFAULT;
3012         goto fail2;
3013     }
3014 
3015     /* ??? If host page size > target page size, this will result in a
3016        value larger than what we can actually support.  */
3017     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3018     total_len = 0;
3019 
3020     for (i = 0; i < count; i++) {
3021         abi_ulong base = tswapal(target_vec[i].iov_base);
3022         abi_long len = tswapal(target_vec[i].iov_len);
3023 
3024         if (len < 0) {
3025             err = EINVAL;
3026             goto fail;
3027         } else if (len == 0) {
3028             /* Zero length pointer is ignored.  */
3029             vec[i].iov_base = 0;
3030         } else {
3031             vec[i].iov_base = lock_user(type, base, len, copy);
3032             /* If the first buffer pointer is bad, this is a fault.  But
3033              * subsequent bad buffers will result in a partial write; this
3034              * is realized by filling the vector with null pointers and
3035              * zero lengths. */
3036             if (!vec[i].iov_base) {
3037                 if (i == 0) {
3038                     err = EFAULT;
3039                     goto fail;
3040                 } else {
3041                     bad_address = true;
3042                 }
3043             }
3044             if (bad_address) {
3045                 len = 0;
3046             }
3047             if (len > max_len - total_len) {
3048                 len = max_len - total_len;
3049             }
3050         }
3051         vec[i].iov_len = len;
3052         total_len += len;
3053     }
3054 
3055     unlock_user(target_vec, target_addr, 0);
3056     return vec;
3057 
3058  fail:
3059     while (--i >= 0) {
3060         if (tswapal(target_vec[i].iov_len) > 0) {
3061             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3062         }
3063     }
3064     unlock_user(target_vec, target_addr, 0);
3065  fail2:
3066     g_free(vec);
3067     errno = err;
3068     return NULL;
3069 }
3070 
3071 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3072                          abi_ulong count, int copy)
3073 {
3074     struct target_iovec *target_vec;
3075     int i;
3076 
3077     target_vec = lock_user(VERIFY_READ, target_addr,
3078                            count * sizeof(struct target_iovec), 1);
3079     if (target_vec) {
3080         for (i = 0; i < count; i++) {
3081             abi_ulong base = tswapal(target_vec[i].iov_base);
3082             abi_long len = tswapal(target_vec[i].iov_len);
3083             if (len < 0) {
3084                 break;
3085             }
3086             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3087         }
3088         unlock_user(target_vec, target_addr, 0);
3089     }
3090 
3091     g_free(vec);
3092 }
3093 
3094 static inline int target_to_host_sock_type(int *type)
3095 {
3096     int host_type = 0;
3097     int target_type = *type;
3098 
3099     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3100     case TARGET_SOCK_DGRAM:
3101         host_type = SOCK_DGRAM;
3102         break;
3103     case TARGET_SOCK_STREAM:
3104         host_type = SOCK_STREAM;
3105         break;
3106     default:
3107         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3108         break;
3109     }
3110     if (target_type & TARGET_SOCK_CLOEXEC) {
3111 #if defined(SOCK_CLOEXEC)
3112         host_type |= SOCK_CLOEXEC;
3113 #else
3114         return -TARGET_EINVAL;
3115 #endif
3116     }
3117     if (target_type & TARGET_SOCK_NONBLOCK) {
3118 #if defined(SOCK_NONBLOCK)
3119         host_type |= SOCK_NONBLOCK;
3120 #elif !defined(O_NONBLOCK)
3121         return -TARGET_EINVAL;
3122 #endif
3123     }
3124     *type = host_type;
3125     return 0;
3126 }
3127 
3128 /* Try to emulate socket type flags after socket creation.  */
3129 static int sock_flags_fixup(int fd, int target_type)
3130 {
3131 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3132     if (target_type & TARGET_SOCK_NONBLOCK) {
3133         int flags = fcntl(fd, F_GETFL);
3134         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3135             close(fd);
3136             return -TARGET_EINVAL;
3137         }
3138     }
3139 #endif
3140     return fd;
3141 }
3142 
3143 /* do_socket() Must return target values and target errnos. */
3144 static abi_long do_socket(int domain, int type, int protocol)
3145 {
3146     int target_type = type;
3147     int ret;
3148 
3149     ret = target_to_host_sock_type(&type);
3150     if (ret) {
3151         return ret;
3152     }
3153 
3154     if (domain == PF_NETLINK && !(
3155 #ifdef CONFIG_RTNETLINK
3156          protocol == NETLINK_ROUTE ||
3157 #endif
3158          protocol == NETLINK_KOBJECT_UEVENT ||
3159          protocol == NETLINK_AUDIT)) {
3160         return -TARGET_EPROTONOSUPPORT;
3161     }
3162 
3163     if (domain == AF_PACKET ||
3164         (domain == AF_INET && type == SOCK_PACKET)) {
3165         protocol = tswap16(protocol);
3166     }
3167 
3168     ret = get_errno(socket(domain, type, protocol));
3169     if (ret >= 0) {
3170         ret = sock_flags_fixup(ret, target_type);
3171         if (type == SOCK_PACKET) {
3172             /* Manage an obsolete case :
3173              * if socket type is SOCK_PACKET, bind by name
3174              */
3175             fd_trans_register(ret, &target_packet_trans);
3176         } else if (domain == PF_NETLINK) {
3177             switch (protocol) {
3178 #ifdef CONFIG_RTNETLINK
3179             case NETLINK_ROUTE:
3180                 fd_trans_register(ret, &target_netlink_route_trans);
3181                 break;
3182 #endif
3183             case NETLINK_KOBJECT_UEVENT:
3184                 /* nothing to do: messages are strings */
3185                 break;
3186             case NETLINK_AUDIT:
3187                 fd_trans_register(ret, &target_netlink_audit_trans);
3188                 break;
3189             default:
3190                 g_assert_not_reached();
3191             }
3192         }
3193     }
3194     return ret;
3195 }
3196 
3197 /* do_bind() Must return target values and target errnos. */
3198 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3199                         socklen_t addrlen)
3200 {
3201     void *addr;
3202     abi_long ret;
3203 
3204     if ((int)addrlen < 0) {
3205         return -TARGET_EINVAL;
3206     }
3207 
3208     addr = alloca(addrlen+1);
3209 
3210     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3211     if (ret)
3212         return ret;
3213 
3214     return get_errno(bind(sockfd, addr, addrlen));
3215 }
3216 
3217 /* do_connect() Must return target values and target errnos. */
3218 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3219                            socklen_t addrlen)
3220 {
3221     void *addr;
3222     abi_long ret;
3223 
3224     if ((int)addrlen < 0) {
3225         return -TARGET_EINVAL;
3226     }
3227 
3228     addr = alloca(addrlen+1);
3229 
3230     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3231     if (ret)
3232         return ret;
3233 
3234     return get_errno(safe_connect(sockfd, addr, addrlen));
3235 }
3236 
3237 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3238 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3239                                       int flags, int send)
3240 {
3241     abi_long ret, len;
3242     struct msghdr msg;
3243     abi_ulong count;
3244     struct iovec *vec;
3245     abi_ulong target_vec;
3246 
3247     if (msgp->msg_name) {
3248         msg.msg_namelen = tswap32(msgp->msg_namelen);
3249         msg.msg_name = alloca(msg.msg_namelen+1);
3250         ret = target_to_host_sockaddr(fd, msg.msg_name,
3251                                       tswapal(msgp->msg_name),
3252                                       msg.msg_namelen);
3253         if (ret == -TARGET_EFAULT) {
3254             /* For connected sockets msg_name and msg_namelen must
3255              * be ignored, so returning EFAULT immediately is wrong.
3256              * Instead, pass a bad msg_name to the host kernel, and
3257              * let it decide whether to return EFAULT or not.
3258              */
3259             msg.msg_name = (void *)-1;
3260         } else if (ret) {
3261             goto out2;
3262         }
3263     } else {
3264         msg.msg_name = NULL;
3265         msg.msg_namelen = 0;
3266     }
3267     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3268     msg.msg_control = alloca(msg.msg_controllen);
3269     memset(msg.msg_control, 0, msg.msg_controllen);
3270 
3271     msg.msg_flags = tswap32(msgp->msg_flags);
3272 
3273     count = tswapal(msgp->msg_iovlen);
3274     target_vec = tswapal(msgp->msg_iov);
3275 
3276     if (count > IOV_MAX) {
3277         /* sendrcvmsg returns a different errno for this condition than
3278          * readv/writev, so we must catch it here before lock_iovec() does.
3279          */
3280         ret = -TARGET_EMSGSIZE;
3281         goto out2;
3282     }
3283 
3284     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3285                      target_vec, count, send);
3286     if (vec == NULL) {
3287         ret = -host_to_target_errno(errno);
3288         goto out2;
3289     }
3290     msg.msg_iovlen = count;
3291     msg.msg_iov = vec;
3292 
3293     if (send) {
3294         if (fd_trans_target_to_host_data(fd)) {
3295             void *host_msg;
3296 
3297             host_msg = g_malloc(msg.msg_iov->iov_len);
3298             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3299             ret = fd_trans_target_to_host_data(fd)(host_msg,
3300                                                    msg.msg_iov->iov_len);
3301             if (ret >= 0) {
3302                 msg.msg_iov->iov_base = host_msg;
3303                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3304             }
3305             g_free(host_msg);
3306         } else {
3307             ret = target_to_host_cmsg(&msg, msgp);
3308             if (ret == 0) {
3309                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3310             }
3311         }
3312     } else {
3313         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3314         if (!is_error(ret)) {
3315             len = ret;
3316             if (fd_trans_host_to_target_data(fd)) {
3317                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3318                                                MIN(msg.msg_iov->iov_len, len));
3319             }
3320             if (!is_error(ret)) {
3321                 ret = host_to_target_cmsg(msgp, &msg);
3322             }
3323             if (!is_error(ret)) {
3324                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3325                 msgp->msg_flags = tswap32(msg.msg_flags);
3326                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3327                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3328                                     msg.msg_name, msg.msg_namelen);
3329                     if (ret) {
3330                         goto out;
3331                     }
3332                 }
3333 
3334                 ret = len;
3335             }
3336         }
3337     }
3338 
3339 out:
3340     unlock_iovec(vec, target_vec, count, !send);
3341 out2:
3342     return ret;
3343 }
3344 
3345 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3346                                int flags, int send)
3347 {
3348     abi_long ret;
3349     struct target_msghdr *msgp;
3350 
3351     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3352                           msgp,
3353                           target_msg,
3354                           send ? 1 : 0)) {
3355         return -TARGET_EFAULT;
3356     }
3357     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3358     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3359     return ret;
3360 }
3361 
3362 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3363  * so it might not have this *mmsg-specific flag either.
3364  */
3365 #ifndef MSG_WAITFORONE
3366 #define MSG_WAITFORONE 0x10000
3367 #endif
3368 
3369 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3370                                 unsigned int vlen, unsigned int flags,
3371                                 int send)
3372 {
3373     struct target_mmsghdr *mmsgp;
3374     abi_long ret = 0;
3375     int i;
3376 
3377     if (vlen > UIO_MAXIOV) {
3378         vlen = UIO_MAXIOV;
3379     }
3380 
3381     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3382     if (!mmsgp) {
3383         return -TARGET_EFAULT;
3384     }
3385 
3386     for (i = 0; i < vlen; i++) {
3387         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3388         if (is_error(ret)) {
3389             break;
3390         }
3391         mmsgp[i].msg_len = tswap32(ret);
3392         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3393         if (flags & MSG_WAITFORONE) {
3394             flags |= MSG_DONTWAIT;
3395         }
3396     }
3397 
3398     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3399 
3400     /* Return number of datagrams sent if we sent any at all;
3401      * otherwise return the error.
3402      */
3403     if (i) {
3404         return i;
3405     }
3406     return ret;
3407 }
3408 
3409 /* do_accept4() Must return target values and target errnos. */
3410 static abi_long do_accept4(int fd, abi_ulong target_addr,
3411                            abi_ulong target_addrlen_addr, int flags)
3412 {
3413     socklen_t addrlen, ret_addrlen;
3414     void *addr;
3415     abi_long ret;
3416     int host_flags;
3417 
3418     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3419 
3420     if (target_addr == 0) {
3421         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3422     }
3423 
3424     /* linux returns EFAULT if addrlen pointer is invalid */
3425     if (get_user_u32(addrlen, target_addrlen_addr))
3426         return -TARGET_EFAULT;
3427 
3428     if ((int)addrlen < 0) {
3429         return -TARGET_EINVAL;
3430     }
3431 
3432     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433         return -TARGET_EFAULT;
3434     }
3435 
3436     addr = alloca(addrlen);
3437 
3438     ret_addrlen = addrlen;
3439     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3440     if (!is_error(ret)) {
3441         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443             ret = -TARGET_EFAULT;
3444         }
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3451                                abi_ulong target_addrlen_addr)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456 
3457     if (get_user_u32(addrlen, target_addrlen_addr))
3458         return -TARGET_EFAULT;
3459 
3460     if ((int)addrlen < 0) {
3461         return -TARGET_EINVAL;
3462     }
3463 
3464     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465         return -TARGET_EFAULT;
3466     }
3467 
3468     addr = alloca(addrlen);
3469 
3470     ret_addrlen = addrlen;
3471     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3472     if (!is_error(ret)) {
3473         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475             ret = -TARGET_EFAULT;
3476         }
3477     }
3478     return ret;
3479 }
3480 
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3483                                abi_ulong target_addrlen_addr)
3484 {
3485     socklen_t addrlen, ret_addrlen;
3486     void *addr;
3487     abi_long ret;
3488 
3489     if (get_user_u32(addrlen, target_addrlen_addr))
3490         return -TARGET_EFAULT;
3491 
3492     if ((int)addrlen < 0) {
3493         return -TARGET_EINVAL;
3494     }
3495 
3496     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497         return -TARGET_EFAULT;
3498     }
3499 
3500     addr = alloca(addrlen);
3501 
3502     ret_addrlen = addrlen;
3503     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3504     if (!is_error(ret)) {
3505         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507             ret = -TARGET_EFAULT;
3508         }
3509     }
3510     return ret;
3511 }
3512 
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long do_socketpair(int domain, int type, int protocol,
3515                               abi_ulong target_tab_addr)
3516 {
3517     int tab[2];
3518     abi_long ret;
3519 
3520     target_to_host_sock_type(&type);
3521 
3522     ret = get_errno(socketpair(domain, type, protocol, tab));
3523     if (!is_error(ret)) {
3524         if (put_user_s32(tab[0], target_tab_addr)
3525             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3526             ret = -TARGET_EFAULT;
3527     }
3528     return ret;
3529 }
3530 
3531 /* do_sendto() Must return target values and target errnos. */
3532 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3533                           abi_ulong target_addr, socklen_t addrlen)
3534 {
3535     void *addr;
3536     void *host_msg;
3537     void *copy_msg = NULL;
3538     abi_long ret;
3539 
3540     if ((int)addrlen < 0) {
3541         return -TARGET_EINVAL;
3542     }
3543 
3544     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3545     if (!host_msg)
3546         return -TARGET_EFAULT;
3547     if (fd_trans_target_to_host_data(fd)) {
3548         copy_msg = host_msg;
3549         host_msg = g_malloc(len);
3550         memcpy(host_msg, copy_msg, len);
3551         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3552         if (ret < 0) {
3553             goto fail;
3554         }
3555     }
3556     if (target_addr) {
3557         addr = alloca(addrlen+1);
3558         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3559         if (ret) {
3560             goto fail;
3561         }
3562         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3563     } else {
3564         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3565     }
3566 fail:
3567     if (copy_msg) {
3568         g_free(host_msg);
3569         host_msg = copy_msg;
3570     }
3571     unlock_user(host_msg, msg, 0);
3572     return ret;
3573 }
3574 
3575 /* do_recvfrom() Must return target values and target errnos. */
3576 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3577                             abi_ulong target_addr,
3578                             abi_ulong target_addrlen)
3579 {
3580     socklen_t addrlen, ret_addrlen;
3581     void *addr;
3582     void *host_msg;
3583     abi_long ret;
3584 
3585     if (!msg) {
3586         host_msg = NULL;
3587     } else {
3588         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3589         if (!host_msg) {
3590             return -TARGET_EFAULT;
3591         }
3592     }
3593     if (target_addr) {
3594         if (get_user_u32(addrlen, target_addrlen)) {
3595             ret = -TARGET_EFAULT;
3596             goto fail;
3597         }
3598         if ((int)addrlen < 0) {
3599             ret = -TARGET_EINVAL;
3600             goto fail;
3601         }
3602         addr = alloca(addrlen);
3603         ret_addrlen = addrlen;
3604         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3605                                       addr, &ret_addrlen));
3606     } else {
3607         addr = NULL; /* To keep compiler quiet.  */
3608         addrlen = 0; /* To keep compiler quiet.  */
3609         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3610     }
3611     if (!is_error(ret)) {
3612         if (fd_trans_host_to_target_data(fd)) {
3613             abi_long trans;
3614             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3615             if (is_error(trans)) {
3616                 ret = trans;
3617                 goto fail;
3618             }
3619         }
3620         if (target_addr) {
3621             host_to_target_sockaddr(target_addr, addr,
3622                                     MIN(addrlen, ret_addrlen));
3623             if (put_user_u32(ret_addrlen, target_addrlen)) {
3624                 ret = -TARGET_EFAULT;
3625                 goto fail;
3626             }
3627         }
3628         unlock_user(host_msg, msg, len);
3629     } else {
3630 fail:
3631         unlock_user(host_msg, msg, 0);
3632     }
3633     return ret;
3634 }
3635 
3636 #ifdef TARGET_NR_socketcall
3637 /* do_socketcall() must return target values and target errnos. */
3638 static abi_long do_socketcall(int num, abi_ulong vptr)
3639 {
3640     static const unsigned nargs[] = { /* number of arguments per operation */
3641         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3642         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3643         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3644         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3645         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3646         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3647         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3648         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3649         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3650         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3651         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3652         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3653         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3654         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3655         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3656         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3657         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3658         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3659         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3660         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3661     };
3662     abi_long a[6]; /* max 6 args */
3663     unsigned i;
3664 
3665     /* check the range of the first argument num */
3666     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3667     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3668         return -TARGET_EINVAL;
3669     }
3670     /* ensure we have space for args */
3671     if (nargs[num] > ARRAY_SIZE(a)) {
3672         return -TARGET_EINVAL;
3673     }
3674     /* collect the arguments in a[] according to nargs[] */
3675     for (i = 0; i < nargs[num]; ++i) {
3676         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3677             return -TARGET_EFAULT;
3678         }
3679     }
3680     /* now when we have the args, invoke the appropriate underlying function */
3681     switch (num) {
3682     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3683         return do_socket(a[0], a[1], a[2]);
3684     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3685         return do_bind(a[0], a[1], a[2]);
3686     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3687         return do_connect(a[0], a[1], a[2]);
3688     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3689         return get_errno(listen(a[0], a[1]));
3690     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3691         return do_accept4(a[0], a[1], a[2], 0);
3692     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3693         return do_getsockname(a[0], a[1], a[2]);
3694     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3695         return do_getpeername(a[0], a[1], a[2]);
3696     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3697         return do_socketpair(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3699         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3700     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3701         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3702     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3703         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3704     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3705         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3706     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3707         return get_errno(shutdown(a[0], a[1]));
3708     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3709         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3710     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3711         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3712     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3713         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3714     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3715         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3716     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3717         return do_accept4(a[0], a[1], a[2], a[3]);
3718     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3719         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3720     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3721         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3722     default:
3723         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3724         return -TARGET_EINVAL;
3725     }
3726 }
3727 #endif
3728 
3729 #define N_SHM_REGIONS	32
3730 
3731 static struct shm_region {
3732     abi_ulong start;
3733     abi_ulong size;
3734     bool in_use;
3735 } shm_regions[N_SHM_REGIONS];
3736 
3737 #ifndef TARGET_SEMID64_DS
3738 /* asm-generic version of this struct */
3739 struct target_semid64_ds
3740 {
3741   struct target_ipc_perm sem_perm;
3742   abi_ulong sem_otime;
3743 #if TARGET_ABI_BITS == 32
3744   abi_ulong __unused1;
3745 #endif
3746   abi_ulong sem_ctime;
3747 #if TARGET_ABI_BITS == 32
3748   abi_ulong __unused2;
3749 #endif
3750   abi_ulong sem_nsems;
3751   abi_ulong __unused3;
3752   abi_ulong __unused4;
3753 };
3754 #endif
3755 
3756 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3757                                                abi_ulong target_addr)
3758 {
3759     struct target_ipc_perm *target_ip;
3760     struct target_semid64_ds *target_sd;
3761 
3762     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3763         return -TARGET_EFAULT;
3764     target_ip = &(target_sd->sem_perm);
3765     host_ip->__key = tswap32(target_ip->__key);
3766     host_ip->uid = tswap32(target_ip->uid);
3767     host_ip->gid = tswap32(target_ip->gid);
3768     host_ip->cuid = tswap32(target_ip->cuid);
3769     host_ip->cgid = tswap32(target_ip->cgid);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771     host_ip->mode = tswap32(target_ip->mode);
3772 #else
3773     host_ip->mode = tswap16(target_ip->mode);
3774 #endif
3775 #if defined(TARGET_PPC)
3776     host_ip->__seq = tswap32(target_ip->__seq);
3777 #else
3778     host_ip->__seq = tswap16(target_ip->__seq);
3779 #endif
3780     unlock_user_struct(target_sd, target_addr, 0);
3781     return 0;
3782 }
3783 
3784 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3785                                                struct ipc_perm *host_ip)
3786 {
3787     struct target_ipc_perm *target_ip;
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3791         return -TARGET_EFAULT;
3792     target_ip = &(target_sd->sem_perm);
3793     target_ip->__key = tswap32(host_ip->__key);
3794     target_ip->uid = tswap32(host_ip->uid);
3795     target_ip->gid = tswap32(host_ip->gid);
3796     target_ip->cuid = tswap32(host_ip->cuid);
3797     target_ip->cgid = tswap32(host_ip->cgid);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799     target_ip->mode = tswap32(host_ip->mode);
3800 #else
3801     target_ip->mode = tswap16(host_ip->mode);
3802 #endif
3803 #if defined(TARGET_PPC)
3804     target_ip->__seq = tswap32(host_ip->__seq);
3805 #else
3806     target_ip->__seq = tswap16(host_ip->__seq);
3807 #endif
3808     unlock_user_struct(target_sd, target_addr, 1);
3809     return 0;
3810 }
3811 
3812 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3813                                                abi_ulong target_addr)
3814 {
3815     struct target_semid64_ds *target_sd;
3816 
3817     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3818         return -TARGET_EFAULT;
3819     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3820         return -TARGET_EFAULT;
3821     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3822     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3823     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3824     unlock_user_struct(target_sd, target_addr, 0);
3825     return 0;
3826 }
3827 
3828 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3829                                                struct semid_ds *host_sd)
3830 {
3831     struct target_semid64_ds *target_sd;
3832 
3833     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834         return -TARGET_EFAULT;
3835     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3836         return -TARGET_EFAULT;
3837     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3838     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3839     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3840     unlock_user_struct(target_sd, target_addr, 1);
3841     return 0;
3842 }
3843 
3844 struct target_seminfo {
3845     int semmap;
3846     int semmni;
3847     int semmns;
3848     int semmnu;
3849     int semmsl;
3850     int semopm;
3851     int semume;
3852     int semusz;
3853     int semvmx;
3854     int semaem;
3855 };
3856 
3857 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3858                                               struct seminfo *host_seminfo)
3859 {
3860     struct target_seminfo *target_seminfo;
3861     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3862         return -TARGET_EFAULT;
3863     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3864     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3865     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3866     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3867     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3868     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3869     __put_user(host_seminfo->semume, &target_seminfo->semume);
3870     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3871     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3872     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3873     unlock_user_struct(target_seminfo, target_addr, 1);
3874     return 0;
3875 }
3876 
3877 union semun {
3878 	int val;
3879 	struct semid_ds *buf;
3880 	unsigned short *array;
3881 	struct seminfo *__buf;
3882 };
3883 
3884 union target_semun {
3885 	int val;
3886 	abi_ulong buf;
3887 	abi_ulong array;
3888 	abi_ulong __buf;
3889 };
3890 
3891 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3892                                                abi_ulong target_addr)
3893 {
3894     int nsems;
3895     unsigned short *array;
3896     union semun semun;
3897     struct semid_ds semid_ds;
3898     int i, ret;
3899 
3900     semun.buf = &semid_ds;
3901 
3902     ret = semctl(semid, 0, IPC_STAT, semun);
3903     if (ret == -1)
3904         return get_errno(ret);
3905 
3906     nsems = semid_ds.sem_nsems;
3907 
3908     *host_array = g_try_new(unsigned short, nsems);
3909     if (!*host_array) {
3910         return -TARGET_ENOMEM;
3911     }
3912     array = lock_user(VERIFY_READ, target_addr,
3913                       nsems*sizeof(unsigned short), 1);
3914     if (!array) {
3915         g_free(*host_array);
3916         return -TARGET_EFAULT;
3917     }
3918 
3919     for(i=0; i<nsems; i++) {
3920         __get_user((*host_array)[i], &array[i]);
3921     }
3922     unlock_user(array, target_addr, 0);
3923 
3924     return 0;
3925 }
3926 
3927 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3928                                                unsigned short **host_array)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     array = lock_user(VERIFY_WRITE, target_addr,
3945                       nsems*sizeof(unsigned short), 0);
3946     if (!array)
3947         return -TARGET_EFAULT;
3948 
3949     for(i=0; i<nsems; i++) {
3950         __put_user((*host_array)[i], &array[i]);
3951     }
3952     g_free(*host_array);
3953     unlock_user(array, target_addr, 1);
3954 
3955     return 0;
3956 }
3957 
3958 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3959                                  abi_ulong target_arg)
3960 {
3961     union target_semun target_su = { .buf = target_arg };
3962     union semun arg;
3963     struct semid_ds dsarg;
3964     unsigned short *array = NULL;
3965     struct seminfo seminfo;
3966     abi_long ret = -TARGET_EINVAL;
3967     abi_long err;
3968     cmd &= 0xff;
3969 
3970     switch( cmd ) {
3971 	case GETVAL:
3972 	case SETVAL:
3973             /* In 64 bit cross-endian situations, we will erroneously pick up
3974              * the wrong half of the union for the "val" element.  To rectify
3975              * this, the entire 8-byte structure is byteswapped, followed by
3976 	     * a swap of the 4 byte val field. In other cases, the data is
3977 	     * already in proper host byte order. */
3978 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3979 		target_su.buf = tswapal(target_su.buf);
3980 		arg.val = tswap32(target_su.val);
3981 	    } else {
3982 		arg.val = target_su.val;
3983 	    }
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             break;
3986 	case GETALL:
3987 	case SETALL:
3988             err = target_to_host_semarray(semid, &array, target_su.array);
3989             if (err)
3990                 return err;
3991             arg.array = array;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_semarray(semid, target_su.array, &array);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_STAT:
3998 	case IPC_SET:
3999 	case SEM_STAT:
4000             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4001             if (err)
4002                 return err;
4003             arg.buf = &dsarg;
4004             ret = get_errno(semctl(semid, semnum, cmd, arg));
4005             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4006             if (err)
4007                 return err;
4008             break;
4009 	case IPC_INFO:
4010 	case SEM_INFO:
4011             arg.__buf = &seminfo;
4012             ret = get_errno(semctl(semid, semnum, cmd, arg));
4013             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4014             if (err)
4015                 return err;
4016             break;
4017 	case IPC_RMID:
4018 	case GETPID:
4019 	case GETNCNT:
4020 	case GETZCNT:
4021             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4022             break;
4023     }
4024 
4025     return ret;
4026 }
4027 
4028 struct target_sembuf {
4029     unsigned short sem_num;
4030     short sem_op;
4031     short sem_flg;
4032 };
4033 
4034 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4035                                              abi_ulong target_addr,
4036                                              unsigned nsops)
4037 {
4038     struct target_sembuf *target_sembuf;
4039     int i;
4040 
4041     target_sembuf = lock_user(VERIFY_READ, target_addr,
4042                               nsops*sizeof(struct target_sembuf), 1);
4043     if (!target_sembuf)
4044         return -TARGET_EFAULT;
4045 
4046     for(i=0; i<nsops; i++) {
4047         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4048         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4049         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050     }
4051 
4052     unlock_user(target_sembuf, target_addr, 0);
4053 
4054     return 0;
4055 }
4056 
4057 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4058     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4059 
4060 /*
4061  * This macro is required to handle the s390 variants, which passes the
4062  * arguments in a different order than default.
4063  */
4064 #ifdef __s390x__
4065 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4066   (__nsops), (__timeout), (__sops)
4067 #else
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069   (__nsops), 0, (__sops), (__timeout)
4070 #endif
4071 
4072 static inline abi_long do_semtimedop(int semid,
4073                                      abi_long ptr,
4074                                      unsigned nsops,
4075                                      abi_long timeout, bool time64)
4076 {
4077     struct sembuf *sops;
4078     struct timespec ts, *pts = NULL;
4079     abi_long ret;
4080 
4081     if (timeout) {
4082         pts = &ts;
4083         if (time64) {
4084             if (target_to_host_timespec64(pts, timeout)) {
4085                 return -TARGET_EFAULT;
4086             }
4087         } else {
4088             if (target_to_host_timespec(pts, timeout)) {
4089                 return -TARGET_EFAULT;
4090             }
4091         }
4092     }
4093 
4094     if (nsops > TARGET_SEMOPM) {
4095         return -TARGET_E2BIG;
4096     }
4097 
4098     sops = g_new(struct sembuf, nsops);
4099 
4100     if (target_to_host_sembuf(sops, ptr, nsops)) {
4101         g_free(sops);
4102         return -TARGET_EFAULT;
4103     }
4104 
4105     ret = -TARGET_ENOSYS;
4106 #ifdef __NR_semtimedop
4107     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4108 #endif
4109 #ifdef __NR_ipc
4110     if (ret == -TARGET_ENOSYS) {
4111         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4112                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4113     }
4114 #endif
4115     g_free(sops);
4116     return ret;
4117 }
4118 #endif
4119 
4120 struct target_msqid_ds
4121 {
4122     struct target_ipc_perm msg_perm;
4123     abi_ulong msg_stime;
4124 #if TARGET_ABI_BITS == 32
4125     abi_ulong __unused1;
4126 #endif
4127     abi_ulong msg_rtime;
4128 #if TARGET_ABI_BITS == 32
4129     abi_ulong __unused2;
4130 #endif
4131     abi_ulong msg_ctime;
4132 #if TARGET_ABI_BITS == 32
4133     abi_ulong __unused3;
4134 #endif
4135     abi_ulong __msg_cbytes;
4136     abi_ulong msg_qnum;
4137     abi_ulong msg_qbytes;
4138     abi_ulong msg_lspid;
4139     abi_ulong msg_lrpid;
4140     abi_ulong __unused4;
4141     abi_ulong __unused5;
4142 };
4143 
4144 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4145                                                abi_ulong target_addr)
4146 {
4147     struct target_msqid_ds *target_md;
4148 
4149     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4150         return -TARGET_EFAULT;
4151     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4152         return -TARGET_EFAULT;
4153     host_md->msg_stime = tswapal(target_md->msg_stime);
4154     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4155     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4156     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4157     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4158     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4159     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4160     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4161     unlock_user_struct(target_md, target_addr, 0);
4162     return 0;
4163 }
4164 
4165 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4166                                                struct msqid_ds *host_md)
4167 {
4168     struct target_msqid_ds *target_md;
4169 
4170     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4171         return -TARGET_EFAULT;
4172     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4173         return -TARGET_EFAULT;
4174     target_md->msg_stime = tswapal(host_md->msg_stime);
4175     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4176     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4177     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4178     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4179     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4180     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4181     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4182     unlock_user_struct(target_md, target_addr, 1);
4183     return 0;
4184 }
4185 
4186 struct target_msginfo {
4187     int msgpool;
4188     int msgmap;
4189     int msgmax;
4190     int msgmnb;
4191     int msgmni;
4192     int msgssz;
4193     int msgtql;
4194     unsigned short int msgseg;
4195 };
4196 
4197 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4198                                               struct msginfo *host_msginfo)
4199 {
4200     struct target_msginfo *target_msginfo;
4201     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4202         return -TARGET_EFAULT;
4203     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4204     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4205     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4206     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4207     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4208     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4209     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4210     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4211     unlock_user_struct(target_msginfo, target_addr, 1);
4212     return 0;
4213 }
4214 
4215 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4216 {
4217     struct msqid_ds dsarg;
4218     struct msginfo msginfo;
4219     abi_long ret = -TARGET_EINVAL;
4220 
4221     cmd &= 0xff;
4222 
4223     switch (cmd) {
4224     case IPC_STAT:
4225     case IPC_SET:
4226     case MSG_STAT:
4227         if (target_to_host_msqid_ds(&dsarg,ptr))
4228             return -TARGET_EFAULT;
4229         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4230         if (host_to_target_msqid_ds(ptr,&dsarg))
4231             return -TARGET_EFAULT;
4232         break;
4233     case IPC_RMID:
4234         ret = get_errno(msgctl(msgid, cmd, NULL));
4235         break;
4236     case IPC_INFO:
4237     case MSG_INFO:
4238         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4239         if (host_to_target_msginfo(ptr, &msginfo))
4240             return -TARGET_EFAULT;
4241         break;
4242     }
4243 
4244     return ret;
4245 }
4246 
4247 struct target_msgbuf {
4248     abi_long mtype;
4249     char	mtext[1];
4250 };
4251 
4252 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4253                                  ssize_t msgsz, int msgflg)
4254 {
4255     struct target_msgbuf *target_mb;
4256     struct msgbuf *host_mb;
4257     abi_long ret = 0;
4258 
4259     if (msgsz < 0) {
4260         return -TARGET_EINVAL;
4261     }
4262 
4263     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4264         return -TARGET_EFAULT;
4265     host_mb = g_try_malloc(msgsz + sizeof(long));
4266     if (!host_mb) {
4267         unlock_user_struct(target_mb, msgp, 0);
4268         return -TARGET_ENOMEM;
4269     }
4270     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4271     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4272     ret = -TARGET_ENOSYS;
4273 #ifdef __NR_msgsnd
4274     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4275 #endif
4276 #ifdef __NR_ipc
4277     if (ret == -TARGET_ENOSYS) {
4278 #ifdef __s390x__
4279         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4280                                  host_mb));
4281 #else
4282         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4283                                  host_mb, 0));
4284 #endif
4285     }
4286 #endif
4287     g_free(host_mb);
4288     unlock_user_struct(target_mb, msgp, 0);
4289 
4290     return ret;
4291 }
4292 
4293 #ifdef __NR_ipc
4294 #if defined(__sparc__)
4295 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4296 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4297 #elif defined(__s390x__)
4298 /* The s390 sys_ipc variant has only five parameters.  */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4300     ((long int[]){(long int)__msgp, __msgtyp})
4301 #else
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303     ((long int[]){(long int)__msgp, __msgtyp}), 0
4304 #endif
4305 #endif
4306 
4307 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4308                                  ssize_t msgsz, abi_long msgtyp,
4309                                  int msgflg)
4310 {
4311     struct target_msgbuf *target_mb;
4312     char *target_mtext;
4313     struct msgbuf *host_mb;
4314     abi_long ret = 0;
4315 
4316     if (msgsz < 0) {
4317         return -TARGET_EINVAL;
4318     }
4319 
4320     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4321         return -TARGET_EFAULT;
4322 
4323     host_mb = g_try_malloc(msgsz + sizeof(long));
4324     if (!host_mb) {
4325         ret = -TARGET_ENOMEM;
4326         goto end;
4327     }
4328     ret = -TARGET_ENOSYS;
4329 #ifdef __NR_msgrcv
4330     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4331 #endif
4332 #ifdef __NR_ipc
4333     if (ret == -TARGET_ENOSYS) {
4334         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4335                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4336     }
4337 #endif
4338 
4339     if (ret > 0) {
4340         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4341         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4342         if (!target_mtext) {
4343             ret = -TARGET_EFAULT;
4344             goto end;
4345         }
4346         memcpy(target_mb->mtext, host_mb->mtext, ret);
4347         unlock_user(target_mtext, target_mtext_addr, ret);
4348     }
4349 
4350     target_mb->mtype = tswapal(host_mb->mtype);
4351 
4352 end:
4353     if (target_mb)
4354         unlock_user_struct(target_mb, msgp, 1);
4355     g_free(host_mb);
4356     return ret;
4357 }
4358 
4359 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4360                                                abi_ulong target_addr)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365         return -TARGET_EFAULT;
4366     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4367         return -TARGET_EFAULT;
4368     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 0);
4376     return 0;
4377 }
4378 
4379 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4380                                                struct shmid_ds *host_sd)
4381 {
4382     struct target_shmid_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4387         return -TARGET_EFAULT;
4388     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4390     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395     unlock_user_struct(target_sd, target_addr, 1);
4396     return 0;
4397 }
4398 
4399 struct  target_shminfo {
4400     abi_ulong shmmax;
4401     abi_ulong shmmin;
4402     abi_ulong shmmni;
4403     abi_ulong shmseg;
4404     abi_ulong shmall;
4405 };
4406 
4407 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4408                                               struct shminfo *host_shminfo)
4409 {
4410     struct target_shminfo *target_shminfo;
4411     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4412         return -TARGET_EFAULT;
4413     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4414     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4415     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4416     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4417     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4418     unlock_user_struct(target_shminfo, target_addr, 1);
4419     return 0;
4420 }
4421 
4422 struct target_shm_info {
4423     int used_ids;
4424     abi_ulong shm_tot;
4425     abi_ulong shm_rss;
4426     abi_ulong shm_swp;
4427     abi_ulong swap_attempts;
4428     abi_ulong swap_successes;
4429 };
4430 
4431 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4432                                                struct shm_info *host_shm_info)
4433 {
4434     struct target_shm_info *target_shm_info;
4435     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4436         return -TARGET_EFAULT;
4437     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4438     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4439     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4440     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4441     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4442     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4443     unlock_user_struct(target_shm_info, target_addr, 1);
4444     return 0;
4445 }
4446 
4447 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4448 {
4449     struct shmid_ds dsarg;
4450     struct shminfo shminfo;
4451     struct shm_info shm_info;
4452     abi_long ret = -TARGET_EINVAL;
4453 
4454     cmd &= 0xff;
4455 
4456     switch(cmd) {
4457     case IPC_STAT:
4458     case IPC_SET:
4459     case SHM_STAT:
4460         if (target_to_host_shmid_ds(&dsarg, buf))
4461             return -TARGET_EFAULT;
4462         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4463         if (host_to_target_shmid_ds(buf, &dsarg))
4464             return -TARGET_EFAULT;
4465         break;
4466     case IPC_INFO:
4467         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4468         if (host_to_target_shminfo(buf, &shminfo))
4469             return -TARGET_EFAULT;
4470         break;
4471     case SHM_INFO:
4472         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4473         if (host_to_target_shm_info(buf, &shm_info))
4474             return -TARGET_EFAULT;
4475         break;
4476     case IPC_RMID:
4477     case SHM_LOCK:
4478     case SHM_UNLOCK:
4479         ret = get_errno(shmctl(shmid, cmd, NULL));
4480         break;
4481     }
4482 
4483     return ret;
4484 }
4485 
4486 #ifndef TARGET_FORCE_SHMLBA
4487 /* For most architectures, SHMLBA is the same as the page size;
4488  * some architectures have larger values, in which case they should
4489  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4490  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4491  * and defining its own value for SHMLBA.
4492  *
4493  * The kernel also permits SHMLBA to be set by the architecture to a
4494  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4495  * this means that addresses are rounded to the large size if
4496  * SHM_RND is set but addresses not aligned to that size are not rejected
4497  * as long as they are at least page-aligned. Since the only architecture
4498  * which uses this is ia64 this code doesn't provide for that oddity.
4499  */
4500 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4501 {
4502     return TARGET_PAGE_SIZE;
4503 }
4504 #endif
4505 
4506 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4507                                  int shmid, abi_ulong shmaddr, int shmflg)
4508 {
4509     CPUState *cpu = env_cpu(cpu_env);
4510     abi_long raddr;
4511     void *host_raddr;
4512     struct shmid_ds shm_info;
4513     int i,ret;
4514     abi_ulong shmlba;
4515 
4516     /* shmat pointers are always untagged */
4517 
4518     /* find out the length of the shared memory segment */
4519     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4520     if (is_error(ret)) {
4521         /* can't get length, bail out */
4522         return ret;
4523     }
4524 
4525     shmlba = target_shmlba(cpu_env);
4526 
4527     if (shmaddr & (shmlba - 1)) {
4528         if (shmflg & SHM_RND) {
4529             shmaddr &= ~(shmlba - 1);
4530         } else {
4531             return -TARGET_EINVAL;
4532         }
4533     }
4534     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4535         return -TARGET_EINVAL;
4536     }
4537 
4538     mmap_lock();
4539 
4540     /*
4541      * We're mapping shared memory, so ensure we generate code for parallel
4542      * execution and flush old translations.  This will work up to the level
4543      * supported by the host -- anything that requires EXCP_ATOMIC will not
4544      * be atomic with respect to an external process.
4545      */
4546     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4547         cpu->tcg_cflags |= CF_PARALLEL;
4548         tb_flush(cpu);
4549     }
4550 
4551     if (shmaddr)
4552         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4553     else {
4554         abi_ulong mmap_start;
4555 
4556         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4557         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4558 
4559         if (mmap_start == -1) {
4560             errno = ENOMEM;
4561             host_raddr = (void *)-1;
4562         } else
4563             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4564                                shmflg | SHM_REMAP);
4565     }
4566 
4567     if (host_raddr == (void *)-1) {
4568         mmap_unlock();
4569         return get_errno((long)host_raddr);
4570     }
4571     raddr=h2g((unsigned long)host_raddr);
4572 
4573     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4574                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4575                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4576 
4577     for (i = 0; i < N_SHM_REGIONS; i++) {
4578         if (!shm_regions[i].in_use) {
4579             shm_regions[i].in_use = true;
4580             shm_regions[i].start = raddr;
4581             shm_regions[i].size = shm_info.shm_segsz;
4582             break;
4583         }
4584     }
4585 
4586     mmap_unlock();
4587     return raddr;
4588 
4589 }
4590 
4591 static inline abi_long do_shmdt(abi_ulong shmaddr)
4592 {
4593     int i;
4594     abi_long rv;
4595 
4596     /* shmdt pointers are always untagged */
4597 
4598     mmap_lock();
4599 
4600     for (i = 0; i < N_SHM_REGIONS; ++i) {
4601         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4602             shm_regions[i].in_use = false;
4603             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4604             break;
4605         }
4606     }
4607     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4608 
4609     mmap_unlock();
4610 
4611     return rv;
4612 }
4613 
4614 #ifdef TARGET_NR_ipc
4615 /* ??? This only works with linear mappings.  */
4616 /* do_ipc() must return target values and target errnos. */
4617 static abi_long do_ipc(CPUArchState *cpu_env,
4618                        unsigned int call, abi_long first,
4619                        abi_long second, abi_long third,
4620                        abi_long ptr, abi_long fifth)
4621 {
4622     int version;
4623     abi_long ret = 0;
4624 
4625     version = call >> 16;
4626     call &= 0xffff;
4627 
4628     switch (call) {
4629     case IPCOP_semop:
4630         ret = do_semtimedop(first, ptr, second, 0, false);
4631         break;
4632     case IPCOP_semtimedop:
4633     /*
4634      * The s390 sys_ipc variant has only five parameters instead of six
4635      * (as for default variant) and the only difference is the handling of
4636      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4637      * to a struct timespec where the generic variant uses fifth parameter.
4638      */
4639 #if defined(TARGET_S390X)
4640         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4641 #else
4642         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4643 #endif
4644         break;
4645 
4646     case IPCOP_semget:
4647         ret = get_errno(semget(first, second, third));
4648         break;
4649 
4650     case IPCOP_semctl: {
4651         /* The semun argument to semctl is passed by value, so dereference the
4652          * ptr argument. */
4653         abi_ulong atptr;
4654         get_user_ual(atptr, ptr);
4655         ret = do_semctl(first, second, third, atptr);
4656         break;
4657     }
4658 
4659     case IPCOP_msgget:
4660         ret = get_errno(msgget(first, second));
4661         break;
4662 
4663     case IPCOP_msgsnd:
4664         ret = do_msgsnd(first, ptr, second, third);
4665         break;
4666 
4667     case IPCOP_msgctl:
4668         ret = do_msgctl(first, second, ptr);
4669         break;
4670 
4671     case IPCOP_msgrcv:
4672         switch (version) {
4673         case 0:
4674             {
4675                 struct target_ipc_kludge {
4676                     abi_long msgp;
4677                     abi_long msgtyp;
4678                 } *tmp;
4679 
4680                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4681                     ret = -TARGET_EFAULT;
4682                     break;
4683                 }
4684 
4685                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4686 
4687                 unlock_user_struct(tmp, ptr, 0);
4688                 break;
4689             }
4690         default:
4691             ret = do_msgrcv(first, ptr, second, fifth, third);
4692         }
4693         break;
4694 
4695     case IPCOP_shmat:
4696         switch (version) {
4697         default:
4698         {
4699             abi_ulong raddr;
4700             raddr = do_shmat(cpu_env, first, ptr, second);
4701             if (is_error(raddr))
4702                 return get_errno(raddr);
4703             if (put_user_ual(raddr, third))
4704                 return -TARGET_EFAULT;
4705             break;
4706         }
4707         case 1:
4708             ret = -TARGET_EINVAL;
4709             break;
4710         }
4711 	break;
4712     case IPCOP_shmdt:
4713         ret = do_shmdt(ptr);
4714 	break;
4715 
4716     case IPCOP_shmget:
4717 	/* IPC_* flag values are the same on all linux platforms */
4718 	ret = get_errno(shmget(first, second, third));
4719 	break;
4720 
4721 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4722     case IPCOP_shmctl:
4723         ret = do_shmctl(first, second, ptr);
4724         break;
4725     default:
4726         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4727                       call, version);
4728 	ret = -TARGET_ENOSYS;
4729 	break;
4730     }
4731     return ret;
4732 }
4733 #endif
4734 
4735 /* kernel structure types definitions */
4736 
4737 #define STRUCT(name, ...) STRUCT_ ## name,
4738 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4739 enum {
4740 #include "syscall_types.h"
4741 STRUCT_MAX
4742 };
4743 #undef STRUCT
4744 #undef STRUCT_SPECIAL
4745 
4746 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4747 #define STRUCT_SPECIAL(name)
4748 #include "syscall_types.h"
4749 #undef STRUCT
4750 #undef STRUCT_SPECIAL
4751 
4752 #define MAX_STRUCT_SIZE 4096
4753 
4754 #ifdef CONFIG_FIEMAP
4755 /* So fiemap access checks don't overflow on 32 bit systems.
4756  * This is very slightly smaller than the limit imposed by
4757  * the underlying kernel.
4758  */
4759 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4760                             / sizeof(struct fiemap_extent))
4761 
4762 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4763                                        int fd, int cmd, abi_long arg)
4764 {
4765     /* The parameter for this ioctl is a struct fiemap followed
4766      * by an array of struct fiemap_extent whose size is set
4767      * in fiemap->fm_extent_count. The array is filled in by the
4768      * ioctl.
4769      */
4770     int target_size_in, target_size_out;
4771     struct fiemap *fm;
4772     const argtype *arg_type = ie->arg_type;
4773     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4774     void *argptr, *p;
4775     abi_long ret;
4776     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4777     uint32_t outbufsz;
4778     int free_fm = 0;
4779 
4780     assert(arg_type[0] == TYPE_PTR);
4781     assert(ie->access == IOC_RW);
4782     arg_type++;
4783     target_size_in = thunk_type_size(arg_type, 0);
4784     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4785     if (!argptr) {
4786         return -TARGET_EFAULT;
4787     }
4788     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4789     unlock_user(argptr, arg, 0);
4790     fm = (struct fiemap *)buf_temp;
4791     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4792         return -TARGET_EINVAL;
4793     }
4794 
4795     outbufsz = sizeof (*fm) +
4796         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4797 
4798     if (outbufsz > MAX_STRUCT_SIZE) {
4799         /* We can't fit all the extents into the fixed size buffer.
4800          * Allocate one that is large enough and use it instead.
4801          */
4802         fm = g_try_malloc(outbufsz);
4803         if (!fm) {
4804             return -TARGET_ENOMEM;
4805         }
4806         memcpy(fm, buf_temp, sizeof(struct fiemap));
4807         free_fm = 1;
4808     }
4809     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4810     if (!is_error(ret)) {
4811         target_size_out = target_size_in;
4812         /* An extent_count of 0 means we were only counting the extents
4813          * so there are no structs to copy
4814          */
4815         if (fm->fm_extent_count != 0) {
4816             target_size_out += fm->fm_mapped_extents * extent_size;
4817         }
4818         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4819         if (!argptr) {
4820             ret = -TARGET_EFAULT;
4821         } else {
4822             /* Convert the struct fiemap */
4823             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4824             if (fm->fm_extent_count != 0) {
4825                 p = argptr + target_size_in;
4826                 /* ...and then all the struct fiemap_extents */
4827                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4828                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4829                                   THUNK_TARGET);
4830                     p += extent_size;
4831                 }
4832             }
4833             unlock_user(argptr, arg, target_size_out);
4834         }
4835     }
4836     if (free_fm) {
4837         g_free(fm);
4838     }
4839     return ret;
4840 }
4841 #endif
4842 
4843 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4844                                 int fd, int cmd, abi_long arg)
4845 {
4846     const argtype *arg_type = ie->arg_type;
4847     int target_size;
4848     void *argptr;
4849     int ret;
4850     struct ifconf *host_ifconf;
4851     uint32_t outbufsz;
4852     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4853     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4854     int target_ifreq_size;
4855     int nb_ifreq;
4856     int free_buf = 0;
4857     int i;
4858     int target_ifc_len;
4859     abi_long target_ifc_buf;
4860     int host_ifc_len;
4861     char *host_ifc_buf;
4862 
4863     assert(arg_type[0] == TYPE_PTR);
4864     assert(ie->access == IOC_RW);
4865 
4866     arg_type++;
4867     target_size = thunk_type_size(arg_type, 0);
4868 
4869     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4870     if (!argptr)
4871         return -TARGET_EFAULT;
4872     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4873     unlock_user(argptr, arg, 0);
4874 
4875     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4876     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4877     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4878 
4879     if (target_ifc_buf != 0) {
4880         target_ifc_len = host_ifconf->ifc_len;
4881         nb_ifreq = target_ifc_len / target_ifreq_size;
4882         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4883 
4884         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4885         if (outbufsz > MAX_STRUCT_SIZE) {
4886             /*
4887              * We can't fit all the extents into the fixed size buffer.
4888              * Allocate one that is large enough and use it instead.
4889              */
4890             host_ifconf = g_try_malloc(outbufsz);
4891             if (!host_ifconf) {
4892                 return -TARGET_ENOMEM;
4893             }
4894             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4895             free_buf = 1;
4896         }
4897         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4898 
4899         host_ifconf->ifc_len = host_ifc_len;
4900     } else {
4901       host_ifc_buf = NULL;
4902     }
4903     host_ifconf->ifc_buf = host_ifc_buf;
4904 
4905     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4906     if (!is_error(ret)) {
4907 	/* convert host ifc_len to target ifc_len */
4908 
4909         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4910         target_ifc_len = nb_ifreq * target_ifreq_size;
4911         host_ifconf->ifc_len = target_ifc_len;
4912 
4913 	/* restore target ifc_buf */
4914 
4915         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4916 
4917 	/* copy struct ifconf to target user */
4918 
4919         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4920         if (!argptr)
4921             return -TARGET_EFAULT;
4922         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4923         unlock_user(argptr, arg, target_size);
4924 
4925         if (target_ifc_buf != 0) {
4926             /* copy ifreq[] to target user */
4927             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4928             for (i = 0; i < nb_ifreq ; i++) {
4929                 thunk_convert(argptr + i * target_ifreq_size,
4930                               host_ifc_buf + i * sizeof(struct ifreq),
4931                               ifreq_arg_type, THUNK_TARGET);
4932             }
4933             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4934         }
4935     }
4936 
4937     if (free_buf) {
4938         g_free(host_ifconf);
4939     }
4940 
4941     return ret;
4942 }
4943 
4944 #if defined(CONFIG_USBFS)
4945 #if HOST_LONG_BITS > 64
4946 #error USBDEVFS thunks do not support >64 bit hosts yet.
4947 #endif
4948 struct live_urb {
4949     uint64_t target_urb_adr;
4950     uint64_t target_buf_adr;
4951     char *target_buf_ptr;
4952     struct usbdevfs_urb host_urb;
4953 };
4954 
4955 static GHashTable *usbdevfs_urb_hashtable(void)
4956 {
4957     static GHashTable *urb_hashtable;
4958 
4959     if (!urb_hashtable) {
4960         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4961     }
4962     return urb_hashtable;
4963 }
4964 
4965 static void urb_hashtable_insert(struct live_urb *urb)
4966 {
4967     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4968     g_hash_table_insert(urb_hashtable, urb, urb);
4969 }
4970 
4971 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4972 {
4973     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4974     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4975 }
4976 
4977 static void urb_hashtable_remove(struct live_urb *urb)
4978 {
4979     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4980     g_hash_table_remove(urb_hashtable, urb);
4981 }
4982 
4983 static abi_long
4984 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4985                           int fd, int cmd, abi_long arg)
4986 {
4987     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4988     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4989     struct live_urb *lurb;
4990     void *argptr;
4991     uint64_t hurb;
4992     int target_size;
4993     uintptr_t target_urb_adr;
4994     abi_long ret;
4995 
4996     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4997 
4998     memset(buf_temp, 0, sizeof(uint64_t));
4999     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5000     if (is_error(ret)) {
5001         return ret;
5002     }
5003 
5004     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5005     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5006     if (!lurb->target_urb_adr) {
5007         return -TARGET_EFAULT;
5008     }
5009     urb_hashtable_remove(lurb);
5010     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5011         lurb->host_urb.buffer_length);
5012     lurb->target_buf_ptr = NULL;
5013 
5014     /* restore the guest buffer pointer */
5015     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5016 
5017     /* update the guest urb struct */
5018     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5019     if (!argptr) {
5020         g_free(lurb);
5021         return -TARGET_EFAULT;
5022     }
5023     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5024     unlock_user(argptr, lurb->target_urb_adr, target_size);
5025 
5026     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5027     /* write back the urb handle */
5028     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5029     if (!argptr) {
5030         g_free(lurb);
5031         return -TARGET_EFAULT;
5032     }
5033 
5034     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5035     target_urb_adr = lurb->target_urb_adr;
5036     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5037     unlock_user(argptr, arg, target_size);
5038 
5039     g_free(lurb);
5040     return ret;
5041 }
5042 
5043 static abi_long
5044 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5045                              uint8_t *buf_temp __attribute__((unused)),
5046                              int fd, int cmd, abi_long arg)
5047 {
5048     struct live_urb *lurb;
5049 
5050     /* map target address back to host URB with metadata. */
5051     lurb = urb_hashtable_lookup(arg);
5052     if (!lurb) {
5053         return -TARGET_EFAULT;
5054     }
5055     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5056 }
5057 
5058 static abi_long
5059 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5060                             int fd, int cmd, abi_long arg)
5061 {
5062     const argtype *arg_type = ie->arg_type;
5063     int target_size;
5064     abi_long ret;
5065     void *argptr;
5066     int rw_dir;
5067     struct live_urb *lurb;
5068 
5069     /*
5070      * each submitted URB needs to map to a unique ID for the
5071      * kernel, and that unique ID needs to be a pointer to
5072      * host memory.  hence, we need to malloc for each URB.
5073      * isochronous transfers have a variable length struct.
5074      */
5075     arg_type++;
5076     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5077 
5078     /* construct host copy of urb and metadata */
5079     lurb = g_try_new0(struct live_urb, 1);
5080     if (!lurb) {
5081         return -TARGET_ENOMEM;
5082     }
5083 
5084     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5085     if (!argptr) {
5086         g_free(lurb);
5087         return -TARGET_EFAULT;
5088     }
5089     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5090     unlock_user(argptr, arg, 0);
5091 
5092     lurb->target_urb_adr = arg;
5093     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5094 
5095     /* buffer space used depends on endpoint type so lock the entire buffer */
5096     /* control type urbs should check the buffer contents for true direction */
5097     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5098     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5099         lurb->host_urb.buffer_length, 1);
5100     if (lurb->target_buf_ptr == NULL) {
5101         g_free(lurb);
5102         return -TARGET_EFAULT;
5103     }
5104 
5105     /* update buffer pointer in host copy */
5106     lurb->host_urb.buffer = lurb->target_buf_ptr;
5107 
5108     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5109     if (is_error(ret)) {
5110         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5111         g_free(lurb);
5112     } else {
5113         urb_hashtable_insert(lurb);
5114     }
5115 
5116     return ret;
5117 }
5118 #endif /* CONFIG_USBFS */
5119 
5120 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5121                             int cmd, abi_long arg)
5122 {
5123     void *argptr;
5124     struct dm_ioctl *host_dm;
5125     abi_long guest_data;
5126     uint32_t guest_data_size;
5127     int target_size;
5128     const argtype *arg_type = ie->arg_type;
5129     abi_long ret;
5130     void *big_buf = NULL;
5131     char *host_data;
5132 
5133     arg_type++;
5134     target_size = thunk_type_size(arg_type, 0);
5135     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5136     if (!argptr) {
5137         ret = -TARGET_EFAULT;
5138         goto out;
5139     }
5140     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5141     unlock_user(argptr, arg, 0);
5142 
5143     /* buf_temp is too small, so fetch things into a bigger buffer */
5144     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5145     memcpy(big_buf, buf_temp, target_size);
5146     buf_temp = big_buf;
5147     host_dm = big_buf;
5148 
5149     guest_data = arg + host_dm->data_start;
5150     if ((guest_data - arg) < 0) {
5151         ret = -TARGET_EINVAL;
5152         goto out;
5153     }
5154     guest_data_size = host_dm->data_size - host_dm->data_start;
5155     host_data = (char*)host_dm + host_dm->data_start;
5156 
5157     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5158     if (!argptr) {
5159         ret = -TARGET_EFAULT;
5160         goto out;
5161     }
5162 
5163     switch (ie->host_cmd) {
5164     case DM_REMOVE_ALL:
5165     case DM_LIST_DEVICES:
5166     case DM_DEV_CREATE:
5167     case DM_DEV_REMOVE:
5168     case DM_DEV_SUSPEND:
5169     case DM_DEV_STATUS:
5170     case DM_DEV_WAIT:
5171     case DM_TABLE_STATUS:
5172     case DM_TABLE_CLEAR:
5173     case DM_TABLE_DEPS:
5174     case DM_LIST_VERSIONS:
5175         /* no input data */
5176         break;
5177     case DM_DEV_RENAME:
5178     case DM_DEV_SET_GEOMETRY:
5179         /* data contains only strings */
5180         memcpy(host_data, argptr, guest_data_size);
5181         break;
5182     case DM_TARGET_MSG:
5183         memcpy(host_data, argptr, guest_data_size);
5184         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5185         break;
5186     case DM_TABLE_LOAD:
5187     {
5188         void *gspec = argptr;
5189         void *cur_data = host_data;
5190         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5191         int spec_size = thunk_type_size(arg_type, 0);
5192         int i;
5193 
5194         for (i = 0; i < host_dm->target_count; i++) {
5195             struct dm_target_spec *spec = cur_data;
5196             uint32_t next;
5197             int slen;
5198 
5199             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5200             slen = strlen((char*)gspec + spec_size) + 1;
5201             next = spec->next;
5202             spec->next = sizeof(*spec) + slen;
5203             strcpy((char*)&spec[1], gspec + spec_size);
5204             gspec += next;
5205             cur_data += spec->next;
5206         }
5207         break;
5208     }
5209     default:
5210         ret = -TARGET_EINVAL;
5211         unlock_user(argptr, guest_data, 0);
5212         goto out;
5213     }
5214     unlock_user(argptr, guest_data, 0);
5215 
5216     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5217     if (!is_error(ret)) {
5218         guest_data = arg + host_dm->data_start;
5219         guest_data_size = host_dm->data_size - host_dm->data_start;
5220         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5221         switch (ie->host_cmd) {
5222         case DM_REMOVE_ALL:
5223         case DM_DEV_CREATE:
5224         case DM_DEV_REMOVE:
5225         case DM_DEV_RENAME:
5226         case DM_DEV_SUSPEND:
5227         case DM_DEV_STATUS:
5228         case DM_TABLE_LOAD:
5229         case DM_TABLE_CLEAR:
5230         case DM_TARGET_MSG:
5231         case DM_DEV_SET_GEOMETRY:
5232             /* no return data */
5233             break;
5234         case DM_LIST_DEVICES:
5235         {
5236             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5237             uint32_t remaining_data = guest_data_size;
5238             void *cur_data = argptr;
5239             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5240             int nl_size = 12; /* can't use thunk_size due to alignment */
5241 
5242             while (1) {
5243                 uint32_t next = nl->next;
5244                 if (next) {
5245                     nl->next = nl_size + (strlen(nl->name) + 1);
5246                 }
5247                 if (remaining_data < nl->next) {
5248                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5249                     break;
5250                 }
5251                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5252                 strcpy(cur_data + nl_size, nl->name);
5253                 cur_data += nl->next;
5254                 remaining_data -= nl->next;
5255                 if (!next) {
5256                     break;
5257                 }
5258                 nl = (void*)nl + next;
5259             }
5260             break;
5261         }
5262         case DM_DEV_WAIT:
5263         case DM_TABLE_STATUS:
5264         {
5265             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5266             void *cur_data = argptr;
5267             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5268             int spec_size = thunk_type_size(arg_type, 0);
5269             int i;
5270 
5271             for (i = 0; i < host_dm->target_count; i++) {
5272                 uint32_t next = spec->next;
5273                 int slen = strlen((char*)&spec[1]) + 1;
5274                 spec->next = (cur_data - argptr) + spec_size + slen;
5275                 if (guest_data_size < spec->next) {
5276                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5277                     break;
5278                 }
5279                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5280                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5281                 cur_data = argptr + spec->next;
5282                 spec = (void*)host_dm + host_dm->data_start + next;
5283             }
5284             break;
5285         }
5286         case DM_TABLE_DEPS:
5287         {
5288             void *hdata = (void*)host_dm + host_dm->data_start;
5289             int count = *(uint32_t*)hdata;
5290             uint64_t *hdev = hdata + 8;
5291             uint64_t *gdev = argptr + 8;
5292             int i;
5293 
5294             *(uint32_t*)argptr = tswap32(count);
5295             for (i = 0; i < count; i++) {
5296                 *gdev = tswap64(*hdev);
5297                 gdev++;
5298                 hdev++;
5299             }
5300             break;
5301         }
5302         case DM_LIST_VERSIONS:
5303         {
5304             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5305             uint32_t remaining_data = guest_data_size;
5306             void *cur_data = argptr;
5307             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5308             int vers_size = thunk_type_size(arg_type, 0);
5309 
5310             while (1) {
5311                 uint32_t next = vers->next;
5312                 if (next) {
5313                     vers->next = vers_size + (strlen(vers->name) + 1);
5314                 }
5315                 if (remaining_data < vers->next) {
5316                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5317                     break;
5318                 }
5319                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5320                 strcpy(cur_data + vers_size, vers->name);
5321                 cur_data += vers->next;
5322                 remaining_data -= vers->next;
5323                 if (!next) {
5324                     break;
5325                 }
5326                 vers = (void*)vers + next;
5327             }
5328             break;
5329         }
5330         default:
5331             unlock_user(argptr, guest_data, 0);
5332             ret = -TARGET_EINVAL;
5333             goto out;
5334         }
5335         unlock_user(argptr, guest_data, guest_data_size);
5336 
5337         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5338         if (!argptr) {
5339             ret = -TARGET_EFAULT;
5340             goto out;
5341         }
5342         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5343         unlock_user(argptr, arg, target_size);
5344     }
5345 out:
5346     g_free(big_buf);
5347     return ret;
5348 }
5349 
5350 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5351                                int cmd, abi_long arg)
5352 {
5353     void *argptr;
5354     int target_size;
5355     const argtype *arg_type = ie->arg_type;
5356     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5357     abi_long ret;
5358 
5359     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5360     struct blkpg_partition host_part;
5361 
5362     /* Read and convert blkpg */
5363     arg_type++;
5364     target_size = thunk_type_size(arg_type, 0);
5365     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5366     if (!argptr) {
5367         ret = -TARGET_EFAULT;
5368         goto out;
5369     }
5370     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5371     unlock_user(argptr, arg, 0);
5372 
5373     switch (host_blkpg->op) {
5374     case BLKPG_ADD_PARTITION:
5375     case BLKPG_DEL_PARTITION:
5376         /* payload is struct blkpg_partition */
5377         break;
5378     default:
5379         /* Unknown opcode */
5380         ret = -TARGET_EINVAL;
5381         goto out;
5382     }
5383 
5384     /* Read and convert blkpg->data */
5385     arg = (abi_long)(uintptr_t)host_blkpg->data;
5386     target_size = thunk_type_size(part_arg_type, 0);
5387     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5388     if (!argptr) {
5389         ret = -TARGET_EFAULT;
5390         goto out;
5391     }
5392     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5393     unlock_user(argptr, arg, 0);
5394 
5395     /* Swizzle the data pointer to our local copy and call! */
5396     host_blkpg->data = &host_part;
5397     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5398 
5399 out:
5400     return ret;
5401 }
5402 
5403 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5404                                 int fd, int cmd, abi_long arg)
5405 {
5406     const argtype *arg_type = ie->arg_type;
5407     const StructEntry *se;
5408     const argtype *field_types;
5409     const int *dst_offsets, *src_offsets;
5410     int target_size;
5411     void *argptr;
5412     abi_ulong *target_rt_dev_ptr = NULL;
5413     unsigned long *host_rt_dev_ptr = NULL;
5414     abi_long ret;
5415     int i;
5416 
5417     assert(ie->access == IOC_W);
5418     assert(*arg_type == TYPE_PTR);
5419     arg_type++;
5420     assert(*arg_type == TYPE_STRUCT);
5421     target_size = thunk_type_size(arg_type, 0);
5422     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5423     if (!argptr) {
5424         return -TARGET_EFAULT;
5425     }
5426     arg_type++;
5427     assert(*arg_type == (int)STRUCT_rtentry);
5428     se = struct_entries + *arg_type++;
5429     assert(se->convert[0] == NULL);
5430     /* convert struct here to be able to catch rt_dev string */
5431     field_types = se->field_types;
5432     dst_offsets = se->field_offsets[THUNK_HOST];
5433     src_offsets = se->field_offsets[THUNK_TARGET];
5434     for (i = 0; i < se->nb_fields; i++) {
5435         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5436             assert(*field_types == TYPE_PTRVOID);
5437             target_rt_dev_ptr = argptr + src_offsets[i];
5438             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5439             if (*target_rt_dev_ptr != 0) {
5440                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5441                                                   tswapal(*target_rt_dev_ptr));
5442                 if (!*host_rt_dev_ptr) {
5443                     unlock_user(argptr, arg, 0);
5444                     return -TARGET_EFAULT;
5445                 }
5446             } else {
5447                 *host_rt_dev_ptr = 0;
5448             }
5449             field_types++;
5450             continue;
5451         }
5452         field_types = thunk_convert(buf_temp + dst_offsets[i],
5453                                     argptr + src_offsets[i],
5454                                     field_types, THUNK_HOST);
5455     }
5456     unlock_user(argptr, arg, 0);
5457 
5458     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5459 
5460     assert(host_rt_dev_ptr != NULL);
5461     assert(target_rt_dev_ptr != NULL);
5462     if (*host_rt_dev_ptr != 0) {
5463         unlock_user((void *)*host_rt_dev_ptr,
5464                     *target_rt_dev_ptr, 0);
5465     }
5466     return ret;
5467 }
5468 
5469 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5470                                      int fd, int cmd, abi_long arg)
5471 {
5472     int sig = target_to_host_signal(arg);
5473     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5474 }
5475 
5476 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5477                                     int fd, int cmd, abi_long arg)
5478 {
5479     struct timeval tv;
5480     abi_long ret;
5481 
5482     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5483     if (is_error(ret)) {
5484         return ret;
5485     }
5486 
5487     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5488         if (copy_to_user_timeval(arg, &tv)) {
5489             return -TARGET_EFAULT;
5490         }
5491     } else {
5492         if (copy_to_user_timeval64(arg, &tv)) {
5493             return -TARGET_EFAULT;
5494         }
5495     }
5496 
5497     return ret;
5498 }
5499 
5500 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5501                                       int fd, int cmd, abi_long arg)
5502 {
5503     struct timespec ts;
5504     abi_long ret;
5505 
5506     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5507     if (is_error(ret)) {
5508         return ret;
5509     }
5510 
5511     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5512         if (host_to_target_timespec(arg, &ts)) {
5513             return -TARGET_EFAULT;
5514         }
5515     } else{
5516         if (host_to_target_timespec64(arg, &ts)) {
5517             return -TARGET_EFAULT;
5518         }
5519     }
5520 
5521     return ret;
5522 }
5523 
5524 #ifdef TIOCGPTPEER
5525 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5526                                      int fd, int cmd, abi_long arg)
5527 {
5528     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5529     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5530 }
5531 #endif
5532 
5533 #ifdef HAVE_DRM_H
5534 
5535 static void unlock_drm_version(struct drm_version *host_ver,
5536                                struct target_drm_version *target_ver,
5537                                bool copy)
5538 {
5539     unlock_user(host_ver->name, target_ver->name,
5540                                 copy ? host_ver->name_len : 0);
5541     unlock_user(host_ver->date, target_ver->date,
5542                                 copy ? host_ver->date_len : 0);
5543     unlock_user(host_ver->desc, target_ver->desc,
5544                                 copy ? host_ver->desc_len : 0);
5545 }
5546 
5547 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5548                                           struct target_drm_version *target_ver)
5549 {
5550     memset(host_ver, 0, sizeof(*host_ver));
5551 
5552     __get_user(host_ver->name_len, &target_ver->name_len);
5553     if (host_ver->name_len) {
5554         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5555                                    target_ver->name_len, 0);
5556         if (!host_ver->name) {
5557             return -EFAULT;
5558         }
5559     }
5560 
5561     __get_user(host_ver->date_len, &target_ver->date_len);
5562     if (host_ver->date_len) {
5563         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5564                                    target_ver->date_len, 0);
5565         if (!host_ver->date) {
5566             goto err;
5567         }
5568     }
5569 
5570     __get_user(host_ver->desc_len, &target_ver->desc_len);
5571     if (host_ver->desc_len) {
5572         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5573                                    target_ver->desc_len, 0);
5574         if (!host_ver->desc) {
5575             goto err;
5576         }
5577     }
5578 
5579     return 0;
5580 err:
5581     unlock_drm_version(host_ver, target_ver, false);
5582     return -EFAULT;
5583 }
5584 
5585 static inline void host_to_target_drmversion(
5586                                           struct target_drm_version *target_ver,
5587                                           struct drm_version *host_ver)
5588 {
5589     __put_user(host_ver->version_major, &target_ver->version_major);
5590     __put_user(host_ver->version_minor, &target_ver->version_minor);
5591     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5592     __put_user(host_ver->name_len, &target_ver->name_len);
5593     __put_user(host_ver->date_len, &target_ver->date_len);
5594     __put_user(host_ver->desc_len, &target_ver->desc_len);
5595     unlock_drm_version(host_ver, target_ver, true);
5596 }
5597 
5598 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5599                              int fd, int cmd, abi_long arg)
5600 {
5601     struct drm_version *ver;
5602     struct target_drm_version *target_ver;
5603     abi_long ret;
5604 
5605     switch (ie->host_cmd) {
5606     case DRM_IOCTL_VERSION:
5607         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5608             return -TARGET_EFAULT;
5609         }
5610         ver = (struct drm_version *)buf_temp;
5611         ret = target_to_host_drmversion(ver, target_ver);
5612         if (!is_error(ret)) {
5613             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5614             if (is_error(ret)) {
5615                 unlock_drm_version(ver, target_ver, false);
5616             } else {
5617                 host_to_target_drmversion(target_ver, ver);
5618             }
5619         }
5620         unlock_user_struct(target_ver, arg, 0);
5621         return ret;
5622     }
5623     return -TARGET_ENOSYS;
5624 }
5625 
5626 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5627                                            struct drm_i915_getparam *gparam,
5628                                            int fd, abi_long arg)
5629 {
5630     abi_long ret;
5631     int value;
5632     struct target_drm_i915_getparam *target_gparam;
5633 
5634     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5635         return -TARGET_EFAULT;
5636     }
5637 
5638     __get_user(gparam->param, &target_gparam->param);
5639     gparam->value = &value;
5640     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5641     put_user_s32(value, target_gparam->value);
5642 
5643     unlock_user_struct(target_gparam, arg, 0);
5644     return ret;
5645 }
5646 
5647 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5648                                   int fd, int cmd, abi_long arg)
5649 {
5650     switch (ie->host_cmd) {
5651     case DRM_IOCTL_I915_GETPARAM:
5652         return do_ioctl_drm_i915_getparam(ie,
5653                                           (struct drm_i915_getparam *)buf_temp,
5654                                           fd, arg);
5655     default:
5656         return -TARGET_ENOSYS;
5657     }
5658 }
5659 
5660 #endif
5661 
5662 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5663                                         int fd, int cmd, abi_long arg)
5664 {
5665     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5666     struct tun_filter *target_filter;
5667     char *target_addr;
5668 
5669     assert(ie->access == IOC_W);
5670 
5671     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5672     if (!target_filter) {
5673         return -TARGET_EFAULT;
5674     }
5675     filter->flags = tswap16(target_filter->flags);
5676     filter->count = tswap16(target_filter->count);
5677     unlock_user(target_filter, arg, 0);
5678 
5679     if (filter->count) {
5680         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5681             MAX_STRUCT_SIZE) {
5682             return -TARGET_EFAULT;
5683         }
5684 
5685         target_addr = lock_user(VERIFY_READ,
5686                                 arg + offsetof(struct tun_filter, addr),
5687                                 filter->count * ETH_ALEN, 1);
5688         if (!target_addr) {
5689             return -TARGET_EFAULT;
5690         }
5691         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5692         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5693     }
5694 
5695     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5696 }
5697 
5698 IOCTLEntry ioctl_entries[] = {
5699 #define IOCTL(cmd, access, ...) \
5700     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5701 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5702     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5703 #define IOCTL_IGNORE(cmd) \
5704     { TARGET_ ## cmd, 0, #cmd },
5705 #include "ioctls.h"
5706     { 0, 0, },
5707 };
5708 
5709 /* ??? Implement proper locking for ioctls.  */
5710 /* do_ioctl() Must return target values and target errnos. */
5711 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5712 {
5713     const IOCTLEntry *ie;
5714     const argtype *arg_type;
5715     abi_long ret;
5716     uint8_t buf_temp[MAX_STRUCT_SIZE];
5717     int target_size;
5718     void *argptr;
5719 
5720     ie = ioctl_entries;
5721     for(;;) {
5722         if (ie->target_cmd == 0) {
5723             qemu_log_mask(
5724                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5725             return -TARGET_ENOSYS;
5726         }
5727         if (ie->target_cmd == cmd)
5728             break;
5729         ie++;
5730     }
5731     arg_type = ie->arg_type;
5732     if (ie->do_ioctl) {
5733         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5734     } else if (!ie->host_cmd) {
5735         /* Some architectures define BSD ioctls in their headers
5736            that are not implemented in Linux.  */
5737         return -TARGET_ENOSYS;
5738     }
5739 
5740     switch(arg_type[0]) {
5741     case TYPE_NULL:
5742         /* no argument */
5743         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5744         break;
5745     case TYPE_PTRVOID:
5746     case TYPE_INT:
5747     case TYPE_LONG:
5748     case TYPE_ULONG:
5749         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5750         break;
5751     case TYPE_PTR:
5752         arg_type++;
5753         target_size = thunk_type_size(arg_type, 0);
5754         switch(ie->access) {
5755         case IOC_R:
5756             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5757             if (!is_error(ret)) {
5758                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5759                 if (!argptr)
5760                     return -TARGET_EFAULT;
5761                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5762                 unlock_user(argptr, arg, target_size);
5763             }
5764             break;
5765         case IOC_W:
5766             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5767             if (!argptr)
5768                 return -TARGET_EFAULT;
5769             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5770             unlock_user(argptr, arg, 0);
5771             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5772             break;
5773         default:
5774         case IOC_RW:
5775             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5776             if (!argptr)
5777                 return -TARGET_EFAULT;
5778             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5779             unlock_user(argptr, arg, 0);
5780             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5781             if (!is_error(ret)) {
5782                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5783                 if (!argptr)
5784                     return -TARGET_EFAULT;
5785                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5786                 unlock_user(argptr, arg, target_size);
5787             }
5788             break;
5789         }
5790         break;
5791     default:
5792         qemu_log_mask(LOG_UNIMP,
5793                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5794                       (long)cmd, arg_type[0]);
5795         ret = -TARGET_ENOSYS;
5796         break;
5797     }
5798     return ret;
5799 }
5800 
5801 static const bitmask_transtbl iflag_tbl[] = {
5802         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5803         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5804         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5805         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5806         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5807         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5808         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5809         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5810         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5811         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5812         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5813         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5814         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5815         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5816         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5817         { 0, 0, 0, 0 }
5818 };
5819 
5820 static const bitmask_transtbl oflag_tbl[] = {
5821 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5822 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5823 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5824 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5825 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5826 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5827 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5828 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5829 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5830 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5831 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5832 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5833 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5834 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5835 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5836 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5837 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5838 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5839 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5840 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5841 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5842 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5843 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5844 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5845 	{ 0, 0, 0, 0 }
5846 };
5847 
5848 static const bitmask_transtbl cflag_tbl[] = {
5849 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5850 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5851 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5852 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5853 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5854 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5855 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5856 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5857 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5858 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5859 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5860 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5861 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5862 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5863 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5864 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5865 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5866 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5867 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5868 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5869 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5870 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5871 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5872 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5873 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5874 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5875 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5876 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5877 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5878 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5879 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5880 	{ 0, 0, 0, 0 }
5881 };
5882 
5883 static const bitmask_transtbl lflag_tbl[] = {
5884   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5885   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5886   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5887   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5888   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5889   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5890   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5891   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5892   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5893   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5894   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5895   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5896   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5897   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5898   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5899   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5900   { 0, 0, 0, 0 }
5901 };
5902 
5903 static void target_to_host_termios (void *dst, const void *src)
5904 {
5905     struct host_termios *host = dst;
5906     const struct target_termios *target = src;
5907 
5908     host->c_iflag =
5909         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5910     host->c_oflag =
5911         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5912     host->c_cflag =
5913         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5914     host->c_lflag =
5915         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5916     host->c_line = target->c_line;
5917 
5918     memset(host->c_cc, 0, sizeof(host->c_cc));
5919     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5920     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5921     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5922     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5923     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5924     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5925     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5926     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5927     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5928     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5929     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5930     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5931     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5932     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5933     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5934     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5935     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5936 }
5937 
5938 static void host_to_target_termios (void *dst, const void *src)
5939 {
5940     struct target_termios *target = dst;
5941     const struct host_termios *host = src;
5942 
5943     target->c_iflag =
5944         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5945     target->c_oflag =
5946         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5947     target->c_cflag =
5948         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5949     target->c_lflag =
5950         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5951     target->c_line = host->c_line;
5952 
5953     memset(target->c_cc, 0, sizeof(target->c_cc));
5954     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5955     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5956     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5957     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5958     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5959     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5960     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5961     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5962     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5963     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5964     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5965     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5966     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5967     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5968     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5969     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5970     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5971 }
5972 
5973 static const StructEntry struct_termios_def = {
5974     .convert = { host_to_target_termios, target_to_host_termios },
5975     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5976     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5977     .print = print_termios,
5978 };
5979 
5980 static const bitmask_transtbl mmap_flags_tbl[] = {
5981     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5982     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5983     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5984     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5985       MAP_ANONYMOUS, MAP_ANONYMOUS },
5986     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5987       MAP_GROWSDOWN, MAP_GROWSDOWN },
5988     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5989       MAP_DENYWRITE, MAP_DENYWRITE },
5990     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5991       MAP_EXECUTABLE, MAP_EXECUTABLE },
5992     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5993     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5994       MAP_NORESERVE, MAP_NORESERVE },
5995     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5996     /* MAP_STACK had been ignored by the kernel for quite some time.
5997        Recognize it for the target insofar as we do not want to pass
5998        it through to the host.  */
5999     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6000     { 0, 0, 0, 0 }
6001 };
6002 
6003 /*
6004  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6005  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6006  */
6007 #if defined(TARGET_I386)
6008 
6009 /* NOTE: there is really one LDT for all the threads */
6010 static uint8_t *ldt_table;
6011 
6012 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6013 {
6014     int size;
6015     void *p;
6016 
6017     if (!ldt_table)
6018         return 0;
6019     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6020     if (size > bytecount)
6021         size = bytecount;
6022     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6023     if (!p)
6024         return -TARGET_EFAULT;
6025     /* ??? Should this by byteswapped?  */
6026     memcpy(p, ldt_table, size);
6027     unlock_user(p, ptr, size);
6028     return size;
6029 }
6030 
6031 /* XXX: add locking support */
6032 static abi_long write_ldt(CPUX86State *env,
6033                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6034 {
6035     struct target_modify_ldt_ldt_s ldt_info;
6036     struct target_modify_ldt_ldt_s *target_ldt_info;
6037     int seg_32bit, contents, read_exec_only, limit_in_pages;
6038     int seg_not_present, useable, lm;
6039     uint32_t *lp, entry_1, entry_2;
6040 
6041     if (bytecount != sizeof(ldt_info))
6042         return -TARGET_EINVAL;
6043     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6044         return -TARGET_EFAULT;
6045     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6046     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6047     ldt_info.limit = tswap32(target_ldt_info->limit);
6048     ldt_info.flags = tswap32(target_ldt_info->flags);
6049     unlock_user_struct(target_ldt_info, ptr, 0);
6050 
6051     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6052         return -TARGET_EINVAL;
6053     seg_32bit = ldt_info.flags & 1;
6054     contents = (ldt_info.flags >> 1) & 3;
6055     read_exec_only = (ldt_info.flags >> 3) & 1;
6056     limit_in_pages = (ldt_info.flags >> 4) & 1;
6057     seg_not_present = (ldt_info.flags >> 5) & 1;
6058     useable = (ldt_info.flags >> 6) & 1;
6059 #ifdef TARGET_ABI32
6060     lm = 0;
6061 #else
6062     lm = (ldt_info.flags >> 7) & 1;
6063 #endif
6064     if (contents == 3) {
6065         if (oldmode)
6066             return -TARGET_EINVAL;
6067         if (seg_not_present == 0)
6068             return -TARGET_EINVAL;
6069     }
6070     /* allocate the LDT */
6071     if (!ldt_table) {
6072         env->ldt.base = target_mmap(0,
6073                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6074                                     PROT_READ|PROT_WRITE,
6075                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6076         if (env->ldt.base == -1)
6077             return -TARGET_ENOMEM;
6078         memset(g2h_untagged(env->ldt.base), 0,
6079                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6080         env->ldt.limit = 0xffff;
6081         ldt_table = g2h_untagged(env->ldt.base);
6082     }
6083 
6084     /* NOTE: same code as Linux kernel */
6085     /* Allow LDTs to be cleared by the user. */
6086     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6087         if (oldmode ||
6088             (contents == 0		&&
6089              read_exec_only == 1	&&
6090              seg_32bit == 0		&&
6091              limit_in_pages == 0	&&
6092              seg_not_present == 1	&&
6093              useable == 0 )) {
6094             entry_1 = 0;
6095             entry_2 = 0;
6096             goto install;
6097         }
6098     }
6099 
6100     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6101         (ldt_info.limit & 0x0ffff);
6102     entry_2 = (ldt_info.base_addr & 0xff000000) |
6103         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6104         (ldt_info.limit & 0xf0000) |
6105         ((read_exec_only ^ 1) << 9) |
6106         (contents << 10) |
6107         ((seg_not_present ^ 1) << 15) |
6108         (seg_32bit << 22) |
6109         (limit_in_pages << 23) |
6110         (lm << 21) |
6111         0x7000;
6112     if (!oldmode)
6113         entry_2 |= (useable << 20);
6114 
6115     /* Install the new entry ...  */
6116 install:
6117     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6118     lp[0] = tswap32(entry_1);
6119     lp[1] = tswap32(entry_2);
6120     return 0;
6121 }
6122 
6123 /* specific and weird i386 syscalls */
6124 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6125                               unsigned long bytecount)
6126 {
6127     abi_long ret;
6128 
6129     switch (func) {
6130     case 0:
6131         ret = read_ldt(ptr, bytecount);
6132         break;
6133     case 1:
6134         ret = write_ldt(env, ptr, bytecount, 1);
6135         break;
6136     case 0x11:
6137         ret = write_ldt(env, ptr, bytecount, 0);
6138         break;
6139     default:
6140         ret = -TARGET_ENOSYS;
6141         break;
6142     }
6143     return ret;
6144 }
6145 
6146 #if defined(TARGET_ABI32)
6147 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6148 {
6149     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6150     struct target_modify_ldt_ldt_s ldt_info;
6151     struct target_modify_ldt_ldt_s *target_ldt_info;
6152     int seg_32bit, contents, read_exec_only, limit_in_pages;
6153     int seg_not_present, useable, lm;
6154     uint32_t *lp, entry_1, entry_2;
6155     int i;
6156 
6157     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6158     if (!target_ldt_info)
6159         return -TARGET_EFAULT;
6160     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6161     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6162     ldt_info.limit = tswap32(target_ldt_info->limit);
6163     ldt_info.flags = tswap32(target_ldt_info->flags);
6164     if (ldt_info.entry_number == -1) {
6165         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6166             if (gdt_table[i] == 0) {
6167                 ldt_info.entry_number = i;
6168                 target_ldt_info->entry_number = tswap32(i);
6169                 break;
6170             }
6171         }
6172     }
6173     unlock_user_struct(target_ldt_info, ptr, 1);
6174 
6175     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6176         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6177            return -TARGET_EINVAL;
6178     seg_32bit = ldt_info.flags & 1;
6179     contents = (ldt_info.flags >> 1) & 3;
6180     read_exec_only = (ldt_info.flags >> 3) & 1;
6181     limit_in_pages = (ldt_info.flags >> 4) & 1;
6182     seg_not_present = (ldt_info.flags >> 5) & 1;
6183     useable = (ldt_info.flags >> 6) & 1;
6184 #ifdef TARGET_ABI32
6185     lm = 0;
6186 #else
6187     lm = (ldt_info.flags >> 7) & 1;
6188 #endif
6189 
6190     if (contents == 3) {
6191         if (seg_not_present == 0)
6192             return -TARGET_EINVAL;
6193     }
6194 
6195     /* NOTE: same code as Linux kernel */
6196     /* Allow LDTs to be cleared by the user. */
6197     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6198         if ((contents == 0             &&
6199              read_exec_only == 1       &&
6200              seg_32bit == 0            &&
6201              limit_in_pages == 0       &&
6202              seg_not_present == 1      &&
6203              useable == 0 )) {
6204             entry_1 = 0;
6205             entry_2 = 0;
6206             goto install;
6207         }
6208     }
6209 
6210     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6211         (ldt_info.limit & 0x0ffff);
6212     entry_2 = (ldt_info.base_addr & 0xff000000) |
6213         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6214         (ldt_info.limit & 0xf0000) |
6215         ((read_exec_only ^ 1) << 9) |
6216         (contents << 10) |
6217         ((seg_not_present ^ 1) << 15) |
6218         (seg_32bit << 22) |
6219         (limit_in_pages << 23) |
6220         (useable << 20) |
6221         (lm << 21) |
6222         0x7000;
6223 
6224     /* Install the new entry ...  */
6225 install:
6226     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6227     lp[0] = tswap32(entry_1);
6228     lp[1] = tswap32(entry_2);
6229     return 0;
6230 }
6231 
6232 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6233 {
6234     struct target_modify_ldt_ldt_s *target_ldt_info;
6235     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6236     uint32_t base_addr, limit, flags;
6237     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6238     int seg_not_present, useable, lm;
6239     uint32_t *lp, entry_1, entry_2;
6240 
6241     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6242     if (!target_ldt_info)
6243         return -TARGET_EFAULT;
6244     idx = tswap32(target_ldt_info->entry_number);
6245     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6246         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6247         unlock_user_struct(target_ldt_info, ptr, 1);
6248         return -TARGET_EINVAL;
6249     }
6250     lp = (uint32_t *)(gdt_table + idx);
6251     entry_1 = tswap32(lp[0]);
6252     entry_2 = tswap32(lp[1]);
6253 
6254     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6255     contents = (entry_2 >> 10) & 3;
6256     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6257     seg_32bit = (entry_2 >> 22) & 1;
6258     limit_in_pages = (entry_2 >> 23) & 1;
6259     useable = (entry_2 >> 20) & 1;
6260 #ifdef TARGET_ABI32
6261     lm = 0;
6262 #else
6263     lm = (entry_2 >> 21) & 1;
6264 #endif
6265     flags = (seg_32bit << 0) | (contents << 1) |
6266         (read_exec_only << 3) | (limit_in_pages << 4) |
6267         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6268     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6269     base_addr = (entry_1 >> 16) |
6270         (entry_2 & 0xff000000) |
6271         ((entry_2 & 0xff) << 16);
6272     target_ldt_info->base_addr = tswapal(base_addr);
6273     target_ldt_info->limit = tswap32(limit);
6274     target_ldt_info->flags = tswap32(flags);
6275     unlock_user_struct(target_ldt_info, ptr, 1);
6276     return 0;
6277 }
6278 
6279 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6280 {
6281     return -TARGET_ENOSYS;
6282 }
6283 #else
6284 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6285 {
6286     abi_long ret = 0;
6287     abi_ulong val;
6288     int idx;
6289 
6290     switch(code) {
6291     case TARGET_ARCH_SET_GS:
6292     case TARGET_ARCH_SET_FS:
6293         if (code == TARGET_ARCH_SET_GS)
6294             idx = R_GS;
6295         else
6296             idx = R_FS;
6297         cpu_x86_load_seg(env, idx, 0);
6298         env->segs[idx].base = addr;
6299         break;
6300     case TARGET_ARCH_GET_GS:
6301     case TARGET_ARCH_GET_FS:
6302         if (code == TARGET_ARCH_GET_GS)
6303             idx = R_GS;
6304         else
6305             idx = R_FS;
6306         val = env->segs[idx].base;
6307         if (put_user(val, addr, abi_ulong))
6308             ret = -TARGET_EFAULT;
6309         break;
6310     default:
6311         ret = -TARGET_EINVAL;
6312         break;
6313     }
6314     return ret;
6315 }
6316 #endif /* defined(TARGET_ABI32 */
6317 #endif /* defined(TARGET_I386) */
6318 
6319 /*
6320  * These constants are generic.  Supply any that are missing from the host.
6321  */
6322 #ifndef PR_SET_NAME
6323 # define PR_SET_NAME    15
6324 # define PR_GET_NAME    16
6325 #endif
6326 #ifndef PR_SET_FP_MODE
6327 # define PR_SET_FP_MODE 45
6328 # define PR_GET_FP_MODE 46
6329 # define PR_FP_MODE_FR   (1 << 0)
6330 # define PR_FP_MODE_FRE  (1 << 1)
6331 #endif
6332 #ifndef PR_SVE_SET_VL
6333 # define PR_SVE_SET_VL  50
6334 # define PR_SVE_GET_VL  51
6335 # define PR_SVE_VL_LEN_MASK  0xffff
6336 # define PR_SVE_VL_INHERIT   (1 << 17)
6337 #endif
6338 #ifndef PR_PAC_RESET_KEYS
6339 # define PR_PAC_RESET_KEYS  54
6340 # define PR_PAC_APIAKEY   (1 << 0)
6341 # define PR_PAC_APIBKEY   (1 << 1)
6342 # define PR_PAC_APDAKEY   (1 << 2)
6343 # define PR_PAC_APDBKEY   (1 << 3)
6344 # define PR_PAC_APGAKEY   (1 << 4)
6345 #endif
6346 #ifndef PR_SET_TAGGED_ADDR_CTRL
6347 # define PR_SET_TAGGED_ADDR_CTRL 55
6348 # define PR_GET_TAGGED_ADDR_CTRL 56
6349 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6350 #endif
6351 #ifndef PR_MTE_TCF_SHIFT
6352 # define PR_MTE_TCF_SHIFT       1
6353 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6354 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6355 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6356 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6357 # define PR_MTE_TAG_SHIFT       3
6358 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6359 #endif
6360 #ifndef PR_SET_IO_FLUSHER
6361 # define PR_SET_IO_FLUSHER 57
6362 # define PR_GET_IO_FLUSHER 58
6363 #endif
6364 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6365 # define PR_SET_SYSCALL_USER_DISPATCH 59
6366 #endif
6367 #ifndef PR_SME_SET_VL
6368 # define PR_SME_SET_VL  63
6369 # define PR_SME_GET_VL  64
6370 # define PR_SME_VL_LEN_MASK  0xffff
6371 # define PR_SME_VL_INHERIT   (1 << 17)
6372 #endif
6373 
6374 #include "target_prctl.h"
6375 
6376 static abi_long do_prctl_inval0(CPUArchState *env)
6377 {
6378     return -TARGET_EINVAL;
6379 }
6380 
6381 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6382 {
6383     return -TARGET_EINVAL;
6384 }
6385 
6386 #ifndef do_prctl_get_fp_mode
6387 #define do_prctl_get_fp_mode do_prctl_inval0
6388 #endif
6389 #ifndef do_prctl_set_fp_mode
6390 #define do_prctl_set_fp_mode do_prctl_inval1
6391 #endif
6392 #ifndef do_prctl_sve_get_vl
6393 #define do_prctl_sve_get_vl do_prctl_inval0
6394 #endif
6395 #ifndef do_prctl_sve_set_vl
6396 #define do_prctl_sve_set_vl do_prctl_inval1
6397 #endif
6398 #ifndef do_prctl_reset_keys
6399 #define do_prctl_reset_keys do_prctl_inval1
6400 #endif
6401 #ifndef do_prctl_set_tagged_addr_ctrl
6402 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6403 #endif
6404 #ifndef do_prctl_get_tagged_addr_ctrl
6405 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6406 #endif
6407 #ifndef do_prctl_get_unalign
6408 #define do_prctl_get_unalign do_prctl_inval1
6409 #endif
6410 #ifndef do_prctl_set_unalign
6411 #define do_prctl_set_unalign do_prctl_inval1
6412 #endif
6413 #ifndef do_prctl_sme_get_vl
6414 #define do_prctl_sme_get_vl do_prctl_inval0
6415 #endif
6416 #ifndef do_prctl_sme_set_vl
6417 #define do_prctl_sme_set_vl do_prctl_inval1
6418 #endif
6419 
6420 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6421                          abi_long arg3, abi_long arg4, abi_long arg5)
6422 {
6423     abi_long ret;
6424 
6425     switch (option) {
6426     case PR_GET_PDEATHSIG:
6427         {
6428             int deathsig;
6429             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6430                                   arg3, arg4, arg5));
6431             if (!is_error(ret) &&
6432                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6433                 return -TARGET_EFAULT;
6434             }
6435             return ret;
6436         }
6437     case PR_SET_PDEATHSIG:
6438         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6439                                arg3, arg4, arg5));
6440     case PR_GET_NAME:
6441         {
6442             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6443             if (!name) {
6444                 return -TARGET_EFAULT;
6445             }
6446             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6447                                   arg3, arg4, arg5));
6448             unlock_user(name, arg2, 16);
6449             return ret;
6450         }
6451     case PR_SET_NAME:
6452         {
6453             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6454             if (!name) {
6455                 return -TARGET_EFAULT;
6456             }
6457             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6458                                   arg3, arg4, arg5));
6459             unlock_user(name, arg2, 0);
6460             return ret;
6461         }
6462     case PR_GET_FP_MODE:
6463         return do_prctl_get_fp_mode(env);
6464     case PR_SET_FP_MODE:
6465         return do_prctl_set_fp_mode(env, arg2);
6466     case PR_SVE_GET_VL:
6467         return do_prctl_sve_get_vl(env);
6468     case PR_SVE_SET_VL:
6469         return do_prctl_sve_set_vl(env, arg2);
6470     case PR_SME_GET_VL:
6471         return do_prctl_sme_get_vl(env);
6472     case PR_SME_SET_VL:
6473         return do_prctl_sme_set_vl(env, arg2);
6474     case PR_PAC_RESET_KEYS:
6475         if (arg3 || arg4 || arg5) {
6476             return -TARGET_EINVAL;
6477         }
6478         return do_prctl_reset_keys(env, arg2);
6479     case PR_SET_TAGGED_ADDR_CTRL:
6480         if (arg3 || arg4 || arg5) {
6481             return -TARGET_EINVAL;
6482         }
6483         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6484     case PR_GET_TAGGED_ADDR_CTRL:
6485         if (arg2 || arg3 || arg4 || arg5) {
6486             return -TARGET_EINVAL;
6487         }
6488         return do_prctl_get_tagged_addr_ctrl(env);
6489 
6490     case PR_GET_UNALIGN:
6491         return do_prctl_get_unalign(env, arg2);
6492     case PR_SET_UNALIGN:
6493         return do_prctl_set_unalign(env, arg2);
6494 
6495     case PR_CAP_AMBIENT:
6496     case PR_CAPBSET_READ:
6497     case PR_CAPBSET_DROP:
6498     case PR_GET_DUMPABLE:
6499     case PR_SET_DUMPABLE:
6500     case PR_GET_KEEPCAPS:
6501     case PR_SET_KEEPCAPS:
6502     case PR_GET_SECUREBITS:
6503     case PR_SET_SECUREBITS:
6504     case PR_GET_TIMING:
6505     case PR_SET_TIMING:
6506     case PR_GET_TIMERSLACK:
6507     case PR_SET_TIMERSLACK:
6508     case PR_MCE_KILL:
6509     case PR_MCE_KILL_GET:
6510     case PR_GET_NO_NEW_PRIVS:
6511     case PR_SET_NO_NEW_PRIVS:
6512     case PR_GET_IO_FLUSHER:
6513     case PR_SET_IO_FLUSHER:
6514         /* Some prctl options have no pointer arguments and we can pass on. */
6515         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6516 
6517     case PR_GET_CHILD_SUBREAPER:
6518     case PR_SET_CHILD_SUBREAPER:
6519     case PR_GET_SPECULATION_CTRL:
6520     case PR_SET_SPECULATION_CTRL:
6521     case PR_GET_TID_ADDRESS:
6522         /* TODO */
6523         return -TARGET_EINVAL;
6524 
6525     case PR_GET_FPEXC:
6526     case PR_SET_FPEXC:
6527         /* Was used for SPE on PowerPC. */
6528         return -TARGET_EINVAL;
6529 
6530     case PR_GET_ENDIAN:
6531     case PR_SET_ENDIAN:
6532     case PR_GET_FPEMU:
6533     case PR_SET_FPEMU:
6534     case PR_SET_MM:
6535     case PR_GET_SECCOMP:
6536     case PR_SET_SECCOMP:
6537     case PR_SET_SYSCALL_USER_DISPATCH:
6538     case PR_GET_THP_DISABLE:
6539     case PR_SET_THP_DISABLE:
6540     case PR_GET_TSC:
6541     case PR_SET_TSC:
6542         /* Disable to prevent the target disabling stuff we need. */
6543         return -TARGET_EINVAL;
6544 
6545     default:
6546         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6547                       option);
6548         return -TARGET_EINVAL;
6549     }
6550 }
6551 
6552 #define NEW_STACK_SIZE 0x40000
6553 
6554 
6555 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6556 typedef struct {
6557     CPUArchState *env;
6558     pthread_mutex_t mutex;
6559     pthread_cond_t cond;
6560     pthread_t thread;
6561     uint32_t tid;
6562     abi_ulong child_tidptr;
6563     abi_ulong parent_tidptr;
6564     sigset_t sigmask;
6565 } new_thread_info;
6566 
6567 static void *clone_func(void *arg)
6568 {
6569     new_thread_info *info = arg;
6570     CPUArchState *env;
6571     CPUState *cpu;
6572     TaskState *ts;
6573 
6574     rcu_register_thread();
6575     tcg_register_thread();
6576     env = info->env;
6577     cpu = env_cpu(env);
6578     thread_cpu = cpu;
6579     ts = (TaskState *)cpu->opaque;
6580     info->tid = sys_gettid();
6581     task_settid(ts);
6582     if (info->child_tidptr)
6583         put_user_u32(info->tid, info->child_tidptr);
6584     if (info->parent_tidptr)
6585         put_user_u32(info->tid, info->parent_tidptr);
6586     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6587     /* Enable signals.  */
6588     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6589     /* Signal to the parent that we're ready.  */
6590     pthread_mutex_lock(&info->mutex);
6591     pthread_cond_broadcast(&info->cond);
6592     pthread_mutex_unlock(&info->mutex);
6593     /* Wait until the parent has finished initializing the tls state.  */
6594     pthread_mutex_lock(&clone_lock);
6595     pthread_mutex_unlock(&clone_lock);
6596     cpu_loop(env);
6597     /* never exits */
6598     return NULL;
6599 }
6600 
6601 /* do_fork() Must return host values and target errnos (unlike most
6602    do_*() functions). */
6603 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6604                    abi_ulong parent_tidptr, target_ulong newtls,
6605                    abi_ulong child_tidptr)
6606 {
6607     CPUState *cpu = env_cpu(env);
6608     int ret;
6609     TaskState *ts;
6610     CPUState *new_cpu;
6611     CPUArchState *new_env;
6612     sigset_t sigmask;
6613 
6614     flags &= ~CLONE_IGNORED_FLAGS;
6615 
6616     /* Emulate vfork() with fork() */
6617     if (flags & CLONE_VFORK)
6618         flags &= ~(CLONE_VFORK | CLONE_VM);
6619 
6620     if (flags & CLONE_VM) {
6621         TaskState *parent_ts = (TaskState *)cpu->opaque;
6622         new_thread_info info;
6623         pthread_attr_t attr;
6624 
6625         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6626             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6627             return -TARGET_EINVAL;
6628         }
6629 
6630         ts = g_new0(TaskState, 1);
6631         init_task_state(ts);
6632 
6633         /* Grab a mutex so that thread setup appears atomic.  */
6634         pthread_mutex_lock(&clone_lock);
6635 
6636         /*
6637          * If this is our first additional thread, we need to ensure we
6638          * generate code for parallel execution and flush old translations.
6639          * Do this now so that the copy gets CF_PARALLEL too.
6640          */
6641         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6642             cpu->tcg_cflags |= CF_PARALLEL;
6643             tb_flush(cpu);
6644         }
6645 
6646         /* we create a new CPU instance. */
6647         new_env = cpu_copy(env);
6648         /* Init regs that differ from the parent.  */
6649         cpu_clone_regs_child(new_env, newsp, flags);
6650         cpu_clone_regs_parent(env, flags);
6651         new_cpu = env_cpu(new_env);
6652         new_cpu->opaque = ts;
6653         ts->bprm = parent_ts->bprm;
6654         ts->info = parent_ts->info;
6655         ts->signal_mask = parent_ts->signal_mask;
6656 
6657         if (flags & CLONE_CHILD_CLEARTID) {
6658             ts->child_tidptr = child_tidptr;
6659         }
6660 
6661         if (flags & CLONE_SETTLS) {
6662             cpu_set_tls (new_env, newtls);
6663         }
6664 
6665         memset(&info, 0, sizeof(info));
6666         pthread_mutex_init(&info.mutex, NULL);
6667         pthread_mutex_lock(&info.mutex);
6668         pthread_cond_init(&info.cond, NULL);
6669         info.env = new_env;
6670         if (flags & CLONE_CHILD_SETTID) {
6671             info.child_tidptr = child_tidptr;
6672         }
6673         if (flags & CLONE_PARENT_SETTID) {
6674             info.parent_tidptr = parent_tidptr;
6675         }
6676 
6677         ret = pthread_attr_init(&attr);
6678         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6679         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6680         /* It is not safe to deliver signals until the child has finished
6681            initializing, so temporarily block all signals.  */
6682         sigfillset(&sigmask);
6683         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6684         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6685 
6686         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6687         /* TODO: Free new CPU state if thread creation failed.  */
6688 
6689         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6690         pthread_attr_destroy(&attr);
6691         if (ret == 0) {
6692             /* Wait for the child to initialize.  */
6693             pthread_cond_wait(&info.cond, &info.mutex);
6694             ret = info.tid;
6695         } else {
6696             ret = -1;
6697         }
6698         pthread_mutex_unlock(&info.mutex);
6699         pthread_cond_destroy(&info.cond);
6700         pthread_mutex_destroy(&info.mutex);
6701         pthread_mutex_unlock(&clone_lock);
6702     } else {
6703         /* if no CLONE_VM, we consider it is a fork */
6704         if (flags & CLONE_INVALID_FORK_FLAGS) {
6705             return -TARGET_EINVAL;
6706         }
6707 
6708         /* We can't support custom termination signals */
6709         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6710             return -TARGET_EINVAL;
6711         }
6712 
6713         if (block_signals()) {
6714             return -QEMU_ERESTARTSYS;
6715         }
6716 
6717         fork_start();
6718         ret = fork();
6719         if (ret == 0) {
6720             /* Child Process.  */
6721             cpu_clone_regs_child(env, newsp, flags);
6722             fork_end(1);
6723             /* There is a race condition here.  The parent process could
6724                theoretically read the TID in the child process before the child
6725                tid is set.  This would require using either ptrace
6726                (not implemented) or having *_tidptr to point at a shared memory
6727                mapping.  We can't repeat the spinlock hack used above because
6728                the child process gets its own copy of the lock.  */
6729             if (flags & CLONE_CHILD_SETTID)
6730                 put_user_u32(sys_gettid(), child_tidptr);
6731             if (flags & CLONE_PARENT_SETTID)
6732                 put_user_u32(sys_gettid(), parent_tidptr);
6733             ts = (TaskState *)cpu->opaque;
6734             if (flags & CLONE_SETTLS)
6735                 cpu_set_tls (env, newtls);
6736             if (flags & CLONE_CHILD_CLEARTID)
6737                 ts->child_tidptr = child_tidptr;
6738         } else {
6739             cpu_clone_regs_parent(env, flags);
6740             fork_end(0);
6741         }
6742     }
6743     return ret;
6744 }
6745 
6746 /* warning : doesn't handle linux specific flags... */
6747 static int target_to_host_fcntl_cmd(int cmd)
6748 {
6749     int ret;
6750 
6751     switch(cmd) {
6752     case TARGET_F_DUPFD:
6753     case TARGET_F_GETFD:
6754     case TARGET_F_SETFD:
6755     case TARGET_F_GETFL:
6756     case TARGET_F_SETFL:
6757     case TARGET_F_OFD_GETLK:
6758     case TARGET_F_OFD_SETLK:
6759     case TARGET_F_OFD_SETLKW:
6760         ret = cmd;
6761         break;
6762     case TARGET_F_GETLK:
6763         ret = F_GETLK64;
6764         break;
6765     case TARGET_F_SETLK:
6766         ret = F_SETLK64;
6767         break;
6768     case TARGET_F_SETLKW:
6769         ret = F_SETLKW64;
6770         break;
6771     case TARGET_F_GETOWN:
6772         ret = F_GETOWN;
6773         break;
6774     case TARGET_F_SETOWN:
6775         ret = F_SETOWN;
6776         break;
6777     case TARGET_F_GETSIG:
6778         ret = F_GETSIG;
6779         break;
6780     case TARGET_F_SETSIG:
6781         ret = F_SETSIG;
6782         break;
6783 #if TARGET_ABI_BITS == 32
6784     case TARGET_F_GETLK64:
6785         ret = F_GETLK64;
6786         break;
6787     case TARGET_F_SETLK64:
6788         ret = F_SETLK64;
6789         break;
6790     case TARGET_F_SETLKW64:
6791         ret = F_SETLKW64;
6792         break;
6793 #endif
6794     case TARGET_F_SETLEASE:
6795         ret = F_SETLEASE;
6796         break;
6797     case TARGET_F_GETLEASE:
6798         ret = F_GETLEASE;
6799         break;
6800 #ifdef F_DUPFD_CLOEXEC
6801     case TARGET_F_DUPFD_CLOEXEC:
6802         ret = F_DUPFD_CLOEXEC;
6803         break;
6804 #endif
6805     case TARGET_F_NOTIFY:
6806         ret = F_NOTIFY;
6807         break;
6808 #ifdef F_GETOWN_EX
6809     case TARGET_F_GETOWN_EX:
6810         ret = F_GETOWN_EX;
6811         break;
6812 #endif
6813 #ifdef F_SETOWN_EX
6814     case TARGET_F_SETOWN_EX:
6815         ret = F_SETOWN_EX;
6816         break;
6817 #endif
6818 #ifdef F_SETPIPE_SZ
6819     case TARGET_F_SETPIPE_SZ:
6820         ret = F_SETPIPE_SZ;
6821         break;
6822     case TARGET_F_GETPIPE_SZ:
6823         ret = F_GETPIPE_SZ;
6824         break;
6825 #endif
6826 #ifdef F_ADD_SEALS
6827     case TARGET_F_ADD_SEALS:
6828         ret = F_ADD_SEALS;
6829         break;
6830     case TARGET_F_GET_SEALS:
6831         ret = F_GET_SEALS;
6832         break;
6833 #endif
6834     default:
6835         ret = -TARGET_EINVAL;
6836         break;
6837     }
6838 
6839 #if defined(__powerpc64__)
6840     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6841      * is not supported by kernel. The glibc fcntl call actually adjusts
6842      * them to 5, 6 and 7 before making the syscall(). Since we make the
6843      * syscall directly, adjust to what is supported by the kernel.
6844      */
6845     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6846         ret -= F_GETLK64 - 5;
6847     }
6848 #endif
6849 
6850     return ret;
6851 }
6852 
6853 #define FLOCK_TRANSTBL \
6854     switch (type) { \
6855     TRANSTBL_CONVERT(F_RDLCK); \
6856     TRANSTBL_CONVERT(F_WRLCK); \
6857     TRANSTBL_CONVERT(F_UNLCK); \
6858     }
6859 
6860 static int target_to_host_flock(int type)
6861 {
6862 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6863     FLOCK_TRANSTBL
6864 #undef  TRANSTBL_CONVERT
6865     return -TARGET_EINVAL;
6866 }
6867 
6868 static int host_to_target_flock(int type)
6869 {
6870 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6871     FLOCK_TRANSTBL
6872 #undef  TRANSTBL_CONVERT
6873     /* if we don't know how to convert the value coming
6874      * from the host we copy to the target field as-is
6875      */
6876     return type;
6877 }
6878 
6879 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6880                                             abi_ulong target_flock_addr)
6881 {
6882     struct target_flock *target_fl;
6883     int l_type;
6884 
6885     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6886         return -TARGET_EFAULT;
6887     }
6888 
6889     __get_user(l_type, &target_fl->l_type);
6890     l_type = target_to_host_flock(l_type);
6891     if (l_type < 0) {
6892         return l_type;
6893     }
6894     fl->l_type = l_type;
6895     __get_user(fl->l_whence, &target_fl->l_whence);
6896     __get_user(fl->l_start, &target_fl->l_start);
6897     __get_user(fl->l_len, &target_fl->l_len);
6898     __get_user(fl->l_pid, &target_fl->l_pid);
6899     unlock_user_struct(target_fl, target_flock_addr, 0);
6900     return 0;
6901 }
6902 
6903 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6904                                           const struct flock64 *fl)
6905 {
6906     struct target_flock *target_fl;
6907     short l_type;
6908 
6909     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6910         return -TARGET_EFAULT;
6911     }
6912 
6913     l_type = host_to_target_flock(fl->l_type);
6914     __put_user(l_type, &target_fl->l_type);
6915     __put_user(fl->l_whence, &target_fl->l_whence);
6916     __put_user(fl->l_start, &target_fl->l_start);
6917     __put_user(fl->l_len, &target_fl->l_len);
6918     __put_user(fl->l_pid, &target_fl->l_pid);
6919     unlock_user_struct(target_fl, target_flock_addr, 1);
6920     return 0;
6921 }
6922 
6923 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6924 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6925 
6926 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6927 struct target_oabi_flock64 {
6928     abi_short l_type;
6929     abi_short l_whence;
6930     abi_llong l_start;
6931     abi_llong l_len;
6932     abi_int   l_pid;
6933 } QEMU_PACKED;
6934 
6935 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6936                                                    abi_ulong target_flock_addr)
6937 {
6938     struct target_oabi_flock64 *target_fl;
6939     int l_type;
6940 
6941     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6942         return -TARGET_EFAULT;
6943     }
6944 
6945     __get_user(l_type, &target_fl->l_type);
6946     l_type = target_to_host_flock(l_type);
6947     if (l_type < 0) {
6948         return l_type;
6949     }
6950     fl->l_type = l_type;
6951     __get_user(fl->l_whence, &target_fl->l_whence);
6952     __get_user(fl->l_start, &target_fl->l_start);
6953     __get_user(fl->l_len, &target_fl->l_len);
6954     __get_user(fl->l_pid, &target_fl->l_pid);
6955     unlock_user_struct(target_fl, target_flock_addr, 0);
6956     return 0;
6957 }
6958 
6959 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6960                                                  const struct flock64 *fl)
6961 {
6962     struct target_oabi_flock64 *target_fl;
6963     short l_type;
6964 
6965     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6966         return -TARGET_EFAULT;
6967     }
6968 
6969     l_type = host_to_target_flock(fl->l_type);
6970     __put_user(l_type, &target_fl->l_type);
6971     __put_user(fl->l_whence, &target_fl->l_whence);
6972     __put_user(fl->l_start, &target_fl->l_start);
6973     __put_user(fl->l_len, &target_fl->l_len);
6974     __put_user(fl->l_pid, &target_fl->l_pid);
6975     unlock_user_struct(target_fl, target_flock_addr, 1);
6976     return 0;
6977 }
6978 #endif
6979 
6980 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6981                                               abi_ulong target_flock_addr)
6982 {
6983     struct target_flock64 *target_fl;
6984     int l_type;
6985 
6986     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6987         return -TARGET_EFAULT;
6988     }
6989 
6990     __get_user(l_type, &target_fl->l_type);
6991     l_type = target_to_host_flock(l_type);
6992     if (l_type < 0) {
6993         return l_type;
6994     }
6995     fl->l_type = l_type;
6996     __get_user(fl->l_whence, &target_fl->l_whence);
6997     __get_user(fl->l_start, &target_fl->l_start);
6998     __get_user(fl->l_len, &target_fl->l_len);
6999     __get_user(fl->l_pid, &target_fl->l_pid);
7000     unlock_user_struct(target_fl, target_flock_addr, 0);
7001     return 0;
7002 }
7003 
7004 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7005                                             const struct flock64 *fl)
7006 {
7007     struct target_flock64 *target_fl;
7008     short l_type;
7009 
7010     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7011         return -TARGET_EFAULT;
7012     }
7013 
7014     l_type = host_to_target_flock(fl->l_type);
7015     __put_user(l_type, &target_fl->l_type);
7016     __put_user(fl->l_whence, &target_fl->l_whence);
7017     __put_user(fl->l_start, &target_fl->l_start);
7018     __put_user(fl->l_len, &target_fl->l_len);
7019     __put_user(fl->l_pid, &target_fl->l_pid);
7020     unlock_user_struct(target_fl, target_flock_addr, 1);
7021     return 0;
7022 }
7023 
7024 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7025 {
7026     struct flock64 fl64;
7027 #ifdef F_GETOWN_EX
7028     struct f_owner_ex fox;
7029     struct target_f_owner_ex *target_fox;
7030 #endif
7031     abi_long ret;
7032     int host_cmd = target_to_host_fcntl_cmd(cmd);
7033 
7034     if (host_cmd == -TARGET_EINVAL)
7035 	    return host_cmd;
7036 
7037     switch(cmd) {
7038     case TARGET_F_GETLK:
7039         ret = copy_from_user_flock(&fl64, arg);
7040         if (ret) {
7041             return ret;
7042         }
7043         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7044         if (ret == 0) {
7045             ret = copy_to_user_flock(arg, &fl64);
7046         }
7047         break;
7048 
7049     case TARGET_F_SETLK:
7050     case TARGET_F_SETLKW:
7051         ret = copy_from_user_flock(&fl64, arg);
7052         if (ret) {
7053             return ret;
7054         }
7055         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7056         break;
7057 
7058     case TARGET_F_GETLK64:
7059     case TARGET_F_OFD_GETLK:
7060         ret = copy_from_user_flock64(&fl64, arg);
7061         if (ret) {
7062             return ret;
7063         }
7064         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7065         if (ret == 0) {
7066             ret = copy_to_user_flock64(arg, &fl64);
7067         }
7068         break;
7069     case TARGET_F_SETLK64:
7070     case TARGET_F_SETLKW64:
7071     case TARGET_F_OFD_SETLK:
7072     case TARGET_F_OFD_SETLKW:
7073         ret = copy_from_user_flock64(&fl64, arg);
7074         if (ret) {
7075             return ret;
7076         }
7077         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7078         break;
7079 
7080     case TARGET_F_GETFL:
7081         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7082         if (ret >= 0) {
7083             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7084         }
7085         break;
7086 
7087     case TARGET_F_SETFL:
7088         ret = get_errno(safe_fcntl(fd, host_cmd,
7089                                    target_to_host_bitmask(arg,
7090                                                           fcntl_flags_tbl)));
7091         break;
7092 
7093 #ifdef F_GETOWN_EX
7094     case TARGET_F_GETOWN_EX:
7095         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7096         if (ret >= 0) {
7097             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7098                 return -TARGET_EFAULT;
7099             target_fox->type = tswap32(fox.type);
7100             target_fox->pid = tswap32(fox.pid);
7101             unlock_user_struct(target_fox, arg, 1);
7102         }
7103         break;
7104 #endif
7105 
7106 #ifdef F_SETOWN_EX
7107     case TARGET_F_SETOWN_EX:
7108         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7109             return -TARGET_EFAULT;
7110         fox.type = tswap32(target_fox->type);
7111         fox.pid = tswap32(target_fox->pid);
7112         unlock_user_struct(target_fox, arg, 0);
7113         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7114         break;
7115 #endif
7116 
7117     case TARGET_F_SETSIG:
7118         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7119         break;
7120 
7121     case TARGET_F_GETSIG:
7122         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7123         break;
7124 
7125     case TARGET_F_SETOWN:
7126     case TARGET_F_GETOWN:
7127     case TARGET_F_SETLEASE:
7128     case TARGET_F_GETLEASE:
7129     case TARGET_F_SETPIPE_SZ:
7130     case TARGET_F_GETPIPE_SZ:
7131     case TARGET_F_ADD_SEALS:
7132     case TARGET_F_GET_SEALS:
7133         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7134         break;
7135 
7136     default:
7137         ret = get_errno(safe_fcntl(fd, cmd, arg));
7138         break;
7139     }
7140     return ret;
7141 }
7142 
7143 #ifdef USE_UID16
7144 
7145 static inline int high2lowuid(int uid)
7146 {
7147     if (uid > 65535)
7148         return 65534;
7149     else
7150         return uid;
7151 }
7152 
7153 static inline int high2lowgid(int gid)
7154 {
7155     if (gid > 65535)
7156         return 65534;
7157     else
7158         return gid;
7159 }
7160 
7161 static inline int low2highuid(int uid)
7162 {
7163     if ((int16_t)uid == -1)
7164         return -1;
7165     else
7166         return uid;
7167 }
7168 
7169 static inline int low2highgid(int gid)
7170 {
7171     if ((int16_t)gid == -1)
7172         return -1;
7173     else
7174         return gid;
7175 }
7176 static inline int tswapid(int id)
7177 {
7178     return tswap16(id);
7179 }
7180 
7181 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7182 
7183 #else /* !USE_UID16 */
7184 static inline int high2lowuid(int uid)
7185 {
7186     return uid;
7187 }
7188 static inline int high2lowgid(int gid)
7189 {
7190     return gid;
7191 }
7192 static inline int low2highuid(int uid)
7193 {
7194     return uid;
7195 }
7196 static inline int low2highgid(int gid)
7197 {
7198     return gid;
7199 }
7200 static inline int tswapid(int id)
7201 {
7202     return tswap32(id);
7203 }
7204 
7205 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7206 
7207 #endif /* USE_UID16 */
7208 
7209 /* We must do direct syscalls for setting UID/GID, because we want to
7210  * implement the Linux system call semantics of "change only for this thread",
7211  * not the libc/POSIX semantics of "change for all threads in process".
7212  * (See http://ewontfix.com/17/ for more details.)
7213  * We use the 32-bit version of the syscalls if present; if it is not
7214  * then either the host architecture supports 32-bit UIDs natively with
7215  * the standard syscall, or the 16-bit UID is the best we can do.
7216  */
7217 #ifdef __NR_setuid32
7218 #define __NR_sys_setuid __NR_setuid32
7219 #else
7220 #define __NR_sys_setuid __NR_setuid
7221 #endif
7222 #ifdef __NR_setgid32
7223 #define __NR_sys_setgid __NR_setgid32
7224 #else
7225 #define __NR_sys_setgid __NR_setgid
7226 #endif
7227 #ifdef __NR_setresuid32
7228 #define __NR_sys_setresuid __NR_setresuid32
7229 #else
7230 #define __NR_sys_setresuid __NR_setresuid
7231 #endif
7232 #ifdef __NR_setresgid32
7233 #define __NR_sys_setresgid __NR_setresgid32
7234 #else
7235 #define __NR_sys_setresgid __NR_setresgid
7236 #endif
7237 
7238 _syscall1(int, sys_setuid, uid_t, uid)
7239 _syscall1(int, sys_setgid, gid_t, gid)
7240 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7241 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7242 
7243 void syscall_init(void)
7244 {
7245     IOCTLEntry *ie;
7246     const argtype *arg_type;
7247     int size;
7248 
7249     thunk_init(STRUCT_MAX);
7250 
7251 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7252 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7253 #include "syscall_types.h"
7254 #undef STRUCT
7255 #undef STRUCT_SPECIAL
7256 
7257     /* we patch the ioctl size if necessary. We rely on the fact that
7258        no ioctl has all the bits at '1' in the size field */
7259     ie = ioctl_entries;
7260     while (ie->target_cmd != 0) {
7261         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7262             TARGET_IOC_SIZEMASK) {
7263             arg_type = ie->arg_type;
7264             if (arg_type[0] != TYPE_PTR) {
7265                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7266                         ie->target_cmd);
7267                 exit(1);
7268             }
7269             arg_type++;
7270             size = thunk_type_size(arg_type, 0);
7271             ie->target_cmd = (ie->target_cmd &
7272                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7273                 (size << TARGET_IOC_SIZESHIFT);
7274         }
7275 
7276         /* automatic consistency check if same arch */
7277 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7278     (defined(__x86_64__) && defined(TARGET_X86_64))
7279         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7280             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7281                     ie->name, ie->target_cmd, ie->host_cmd);
7282         }
7283 #endif
7284         ie++;
7285     }
7286 }
7287 
7288 #ifdef TARGET_NR_truncate64
7289 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7290                                          abi_long arg2,
7291                                          abi_long arg3,
7292                                          abi_long arg4)
7293 {
7294     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7295         arg2 = arg3;
7296         arg3 = arg4;
7297     }
7298     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7299 }
7300 #endif
7301 
7302 #ifdef TARGET_NR_ftruncate64
7303 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7304                                           abi_long arg2,
7305                                           abi_long arg3,
7306                                           abi_long arg4)
7307 {
7308     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7309         arg2 = arg3;
7310         arg3 = arg4;
7311     }
7312     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7313 }
7314 #endif
7315 
7316 #if defined(TARGET_NR_timer_settime) || \
7317     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7318 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7319                                                  abi_ulong target_addr)
7320 {
7321     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7322                                 offsetof(struct target_itimerspec,
7323                                          it_interval)) ||
7324         target_to_host_timespec(&host_its->it_value, target_addr +
7325                                 offsetof(struct target_itimerspec,
7326                                          it_value))) {
7327         return -TARGET_EFAULT;
7328     }
7329 
7330     return 0;
7331 }
7332 #endif
7333 
7334 #if defined(TARGET_NR_timer_settime64) || \
7335     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7336 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7337                                                    abi_ulong target_addr)
7338 {
7339     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7340                                   offsetof(struct target__kernel_itimerspec,
7341                                            it_interval)) ||
7342         target_to_host_timespec64(&host_its->it_value, target_addr +
7343                                   offsetof(struct target__kernel_itimerspec,
7344                                            it_value))) {
7345         return -TARGET_EFAULT;
7346     }
7347 
7348     return 0;
7349 }
7350 #endif
7351 
7352 #if ((defined(TARGET_NR_timerfd_gettime) || \
7353       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7354       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7355 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7356                                                  struct itimerspec *host_its)
7357 {
7358     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7359                                                        it_interval),
7360                                 &host_its->it_interval) ||
7361         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7362                                                        it_value),
7363                                 &host_its->it_value)) {
7364         return -TARGET_EFAULT;
7365     }
7366     return 0;
7367 }
7368 #endif
7369 
7370 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7371       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7372       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7373 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7374                                                    struct itimerspec *host_its)
7375 {
7376     if (host_to_target_timespec64(target_addr +
7377                                   offsetof(struct target__kernel_itimerspec,
7378                                            it_interval),
7379                                   &host_its->it_interval) ||
7380         host_to_target_timespec64(target_addr +
7381                                   offsetof(struct target__kernel_itimerspec,
7382                                            it_value),
7383                                   &host_its->it_value)) {
7384         return -TARGET_EFAULT;
7385     }
7386     return 0;
7387 }
7388 #endif
7389 
7390 #if defined(TARGET_NR_adjtimex) || \
7391     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7392 static inline abi_long target_to_host_timex(struct timex *host_tx,
7393                                             abi_long target_addr)
7394 {
7395     struct target_timex *target_tx;
7396 
7397     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7398         return -TARGET_EFAULT;
7399     }
7400 
7401     __get_user(host_tx->modes, &target_tx->modes);
7402     __get_user(host_tx->offset, &target_tx->offset);
7403     __get_user(host_tx->freq, &target_tx->freq);
7404     __get_user(host_tx->maxerror, &target_tx->maxerror);
7405     __get_user(host_tx->esterror, &target_tx->esterror);
7406     __get_user(host_tx->status, &target_tx->status);
7407     __get_user(host_tx->constant, &target_tx->constant);
7408     __get_user(host_tx->precision, &target_tx->precision);
7409     __get_user(host_tx->tolerance, &target_tx->tolerance);
7410     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7411     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7412     __get_user(host_tx->tick, &target_tx->tick);
7413     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7414     __get_user(host_tx->jitter, &target_tx->jitter);
7415     __get_user(host_tx->shift, &target_tx->shift);
7416     __get_user(host_tx->stabil, &target_tx->stabil);
7417     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7418     __get_user(host_tx->calcnt, &target_tx->calcnt);
7419     __get_user(host_tx->errcnt, &target_tx->errcnt);
7420     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7421     __get_user(host_tx->tai, &target_tx->tai);
7422 
7423     unlock_user_struct(target_tx, target_addr, 0);
7424     return 0;
7425 }
7426 
7427 static inline abi_long host_to_target_timex(abi_long target_addr,
7428                                             struct timex *host_tx)
7429 {
7430     struct target_timex *target_tx;
7431 
7432     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7433         return -TARGET_EFAULT;
7434     }
7435 
7436     __put_user(host_tx->modes, &target_tx->modes);
7437     __put_user(host_tx->offset, &target_tx->offset);
7438     __put_user(host_tx->freq, &target_tx->freq);
7439     __put_user(host_tx->maxerror, &target_tx->maxerror);
7440     __put_user(host_tx->esterror, &target_tx->esterror);
7441     __put_user(host_tx->status, &target_tx->status);
7442     __put_user(host_tx->constant, &target_tx->constant);
7443     __put_user(host_tx->precision, &target_tx->precision);
7444     __put_user(host_tx->tolerance, &target_tx->tolerance);
7445     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7446     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7447     __put_user(host_tx->tick, &target_tx->tick);
7448     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7449     __put_user(host_tx->jitter, &target_tx->jitter);
7450     __put_user(host_tx->shift, &target_tx->shift);
7451     __put_user(host_tx->stabil, &target_tx->stabil);
7452     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7453     __put_user(host_tx->calcnt, &target_tx->calcnt);
7454     __put_user(host_tx->errcnt, &target_tx->errcnt);
7455     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7456     __put_user(host_tx->tai, &target_tx->tai);
7457 
7458     unlock_user_struct(target_tx, target_addr, 1);
7459     return 0;
7460 }
7461 #endif
7462 
7463 
7464 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7465 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7466                                               abi_long target_addr)
7467 {
7468     struct target__kernel_timex *target_tx;
7469 
7470     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7471                                  offsetof(struct target__kernel_timex,
7472                                           time))) {
7473         return -TARGET_EFAULT;
7474     }
7475 
7476     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7477         return -TARGET_EFAULT;
7478     }
7479 
7480     __get_user(host_tx->modes, &target_tx->modes);
7481     __get_user(host_tx->offset, &target_tx->offset);
7482     __get_user(host_tx->freq, &target_tx->freq);
7483     __get_user(host_tx->maxerror, &target_tx->maxerror);
7484     __get_user(host_tx->esterror, &target_tx->esterror);
7485     __get_user(host_tx->status, &target_tx->status);
7486     __get_user(host_tx->constant, &target_tx->constant);
7487     __get_user(host_tx->precision, &target_tx->precision);
7488     __get_user(host_tx->tolerance, &target_tx->tolerance);
7489     __get_user(host_tx->tick, &target_tx->tick);
7490     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7491     __get_user(host_tx->jitter, &target_tx->jitter);
7492     __get_user(host_tx->shift, &target_tx->shift);
7493     __get_user(host_tx->stabil, &target_tx->stabil);
7494     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7495     __get_user(host_tx->calcnt, &target_tx->calcnt);
7496     __get_user(host_tx->errcnt, &target_tx->errcnt);
7497     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7498     __get_user(host_tx->tai, &target_tx->tai);
7499 
7500     unlock_user_struct(target_tx, target_addr, 0);
7501     return 0;
7502 }
7503 
7504 static inline abi_long host_to_target_timex64(abi_long target_addr,
7505                                               struct timex *host_tx)
7506 {
7507     struct target__kernel_timex *target_tx;
7508 
7509    if (copy_to_user_timeval64(target_addr +
7510                               offsetof(struct target__kernel_timex, time),
7511                               &host_tx->time)) {
7512         return -TARGET_EFAULT;
7513     }
7514 
7515     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7516         return -TARGET_EFAULT;
7517     }
7518 
7519     __put_user(host_tx->modes, &target_tx->modes);
7520     __put_user(host_tx->offset, &target_tx->offset);
7521     __put_user(host_tx->freq, &target_tx->freq);
7522     __put_user(host_tx->maxerror, &target_tx->maxerror);
7523     __put_user(host_tx->esterror, &target_tx->esterror);
7524     __put_user(host_tx->status, &target_tx->status);
7525     __put_user(host_tx->constant, &target_tx->constant);
7526     __put_user(host_tx->precision, &target_tx->precision);
7527     __put_user(host_tx->tolerance, &target_tx->tolerance);
7528     __put_user(host_tx->tick, &target_tx->tick);
7529     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7530     __put_user(host_tx->jitter, &target_tx->jitter);
7531     __put_user(host_tx->shift, &target_tx->shift);
7532     __put_user(host_tx->stabil, &target_tx->stabil);
7533     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7534     __put_user(host_tx->calcnt, &target_tx->calcnt);
7535     __put_user(host_tx->errcnt, &target_tx->errcnt);
7536     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7537     __put_user(host_tx->tai, &target_tx->tai);
7538 
7539     unlock_user_struct(target_tx, target_addr, 1);
7540     return 0;
7541 }
7542 #endif
7543 
7544 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7545 #define sigev_notify_thread_id _sigev_un._tid
7546 #endif
7547 
7548 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7549                                                abi_ulong target_addr)
7550 {
7551     struct target_sigevent *target_sevp;
7552 
7553     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7554         return -TARGET_EFAULT;
7555     }
7556 
7557     /* This union is awkward on 64 bit systems because it has a 32 bit
7558      * integer and a pointer in it; we follow the conversion approach
7559      * used for handling sigval types in signal.c so the guest should get
7560      * the correct value back even if we did a 64 bit byteswap and it's
7561      * using the 32 bit integer.
7562      */
7563     host_sevp->sigev_value.sival_ptr =
7564         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7565     host_sevp->sigev_signo =
7566         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7567     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7568     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7569 
7570     unlock_user_struct(target_sevp, target_addr, 1);
7571     return 0;
7572 }
7573 
7574 #if defined(TARGET_NR_mlockall)
7575 static inline int target_to_host_mlockall_arg(int arg)
7576 {
7577     int result = 0;
7578 
7579     if (arg & TARGET_MCL_CURRENT) {
7580         result |= MCL_CURRENT;
7581     }
7582     if (arg & TARGET_MCL_FUTURE) {
7583         result |= MCL_FUTURE;
7584     }
7585 #ifdef MCL_ONFAULT
7586     if (arg & TARGET_MCL_ONFAULT) {
7587         result |= MCL_ONFAULT;
7588     }
7589 #endif
7590 
7591     return result;
7592 }
7593 #endif
7594 
7595 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7596      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7597      defined(TARGET_NR_newfstatat))
7598 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7599                                              abi_ulong target_addr,
7600                                              struct stat *host_st)
7601 {
7602 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7603     if (cpu_env->eabi) {
7604         struct target_eabi_stat64 *target_st;
7605 
7606         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7607             return -TARGET_EFAULT;
7608         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7609         __put_user(host_st->st_dev, &target_st->st_dev);
7610         __put_user(host_st->st_ino, &target_st->st_ino);
7611 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7612         __put_user(host_st->st_ino, &target_st->__st_ino);
7613 #endif
7614         __put_user(host_st->st_mode, &target_st->st_mode);
7615         __put_user(host_st->st_nlink, &target_st->st_nlink);
7616         __put_user(host_st->st_uid, &target_st->st_uid);
7617         __put_user(host_st->st_gid, &target_st->st_gid);
7618         __put_user(host_st->st_rdev, &target_st->st_rdev);
7619         __put_user(host_st->st_size, &target_st->st_size);
7620         __put_user(host_st->st_blksize, &target_st->st_blksize);
7621         __put_user(host_st->st_blocks, &target_st->st_blocks);
7622         __put_user(host_st->st_atime, &target_st->target_st_atime);
7623         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7624         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7625 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7626         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7627         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7628         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7629 #endif
7630         unlock_user_struct(target_st, target_addr, 1);
7631     } else
7632 #endif
7633     {
7634 #if defined(TARGET_HAS_STRUCT_STAT64)
7635         struct target_stat64 *target_st;
7636 #else
7637         struct target_stat *target_st;
7638 #endif
7639 
7640         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7641             return -TARGET_EFAULT;
7642         memset(target_st, 0, sizeof(*target_st));
7643         __put_user(host_st->st_dev, &target_st->st_dev);
7644         __put_user(host_st->st_ino, &target_st->st_ino);
7645 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7646         __put_user(host_st->st_ino, &target_st->__st_ino);
7647 #endif
7648         __put_user(host_st->st_mode, &target_st->st_mode);
7649         __put_user(host_st->st_nlink, &target_st->st_nlink);
7650         __put_user(host_st->st_uid, &target_st->st_uid);
7651         __put_user(host_st->st_gid, &target_st->st_gid);
7652         __put_user(host_st->st_rdev, &target_st->st_rdev);
7653         /* XXX: better use of kernel struct */
7654         __put_user(host_st->st_size, &target_st->st_size);
7655         __put_user(host_st->st_blksize, &target_st->st_blksize);
7656         __put_user(host_st->st_blocks, &target_st->st_blocks);
7657         __put_user(host_st->st_atime, &target_st->target_st_atime);
7658         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7659         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7660 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7661         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7662         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7663         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7664 #endif
7665         unlock_user_struct(target_st, target_addr, 1);
7666     }
7667 
7668     return 0;
7669 }
7670 #endif
7671 
7672 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7673 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7674                                             abi_ulong target_addr)
7675 {
7676     struct target_statx *target_stx;
7677 
7678     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7679         return -TARGET_EFAULT;
7680     }
7681     memset(target_stx, 0, sizeof(*target_stx));
7682 
7683     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7684     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7685     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7686     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7687     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7688     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7689     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7690     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7691     __put_user(host_stx->stx_size, &target_stx->stx_size);
7692     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7693     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7694     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7695     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7696     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7697     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7698     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7699     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7700     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7701     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7702     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7703     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7704     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7705     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7706 
7707     unlock_user_struct(target_stx, target_addr, 1);
7708 
7709     return 0;
7710 }
7711 #endif
7712 
7713 static int do_sys_futex(int *uaddr, int op, int val,
7714                          const struct timespec *timeout, int *uaddr2,
7715                          int val3)
7716 {
7717 #if HOST_LONG_BITS == 64
7718 #if defined(__NR_futex)
7719     /* always a 64-bit time_t, it doesn't define _time64 version  */
7720     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7721 
7722 #endif
7723 #else /* HOST_LONG_BITS == 64 */
7724 #if defined(__NR_futex_time64)
7725     if (sizeof(timeout->tv_sec) == 8) {
7726         /* _time64 function on 32bit arch */
7727         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7728     }
7729 #endif
7730 #if defined(__NR_futex)
7731     /* old function on 32bit arch */
7732     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7733 #endif
7734 #endif /* HOST_LONG_BITS == 64 */
7735     g_assert_not_reached();
7736 }
7737 
7738 static int do_safe_futex(int *uaddr, int op, int val,
7739                          const struct timespec *timeout, int *uaddr2,
7740                          int val3)
7741 {
7742 #if HOST_LONG_BITS == 64
7743 #if defined(__NR_futex)
7744     /* always a 64-bit time_t, it doesn't define _time64 version  */
7745     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7746 #endif
7747 #else /* HOST_LONG_BITS == 64 */
7748 #if defined(__NR_futex_time64)
7749     if (sizeof(timeout->tv_sec) == 8) {
7750         /* _time64 function on 32bit arch */
7751         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7752                                            val3));
7753     }
7754 #endif
7755 #if defined(__NR_futex)
7756     /* old function on 32bit arch */
7757     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7758 #endif
7759 #endif /* HOST_LONG_BITS == 64 */
7760     return -TARGET_ENOSYS;
7761 }
7762 
7763 /* ??? Using host futex calls even when target atomic operations
7764    are not really atomic probably breaks things.  However implementing
7765    futexes locally would make futexes shared between multiple processes
7766    tricky.  However they're probably useless because guest atomic
7767    operations won't work either.  */
7768 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7769 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7770                     int op, int val, target_ulong timeout,
7771                     target_ulong uaddr2, int val3)
7772 {
7773     struct timespec ts, *pts = NULL;
7774     void *haddr2 = NULL;
7775     int base_op;
7776 
7777     /* We assume FUTEX_* constants are the same on both host and target. */
7778 #ifdef FUTEX_CMD_MASK
7779     base_op = op & FUTEX_CMD_MASK;
7780 #else
7781     base_op = op;
7782 #endif
7783     switch (base_op) {
7784     case FUTEX_WAIT:
7785     case FUTEX_WAIT_BITSET:
7786         val = tswap32(val);
7787         break;
7788     case FUTEX_WAIT_REQUEUE_PI:
7789         val = tswap32(val);
7790         haddr2 = g2h(cpu, uaddr2);
7791         break;
7792     case FUTEX_LOCK_PI:
7793     case FUTEX_LOCK_PI2:
7794         break;
7795     case FUTEX_WAKE:
7796     case FUTEX_WAKE_BITSET:
7797     case FUTEX_TRYLOCK_PI:
7798     case FUTEX_UNLOCK_PI:
7799         timeout = 0;
7800         break;
7801     case FUTEX_FD:
7802         val = target_to_host_signal(val);
7803         timeout = 0;
7804         break;
7805     case FUTEX_CMP_REQUEUE:
7806     case FUTEX_CMP_REQUEUE_PI:
7807         val3 = tswap32(val3);
7808         /* fall through */
7809     case FUTEX_REQUEUE:
7810     case FUTEX_WAKE_OP:
7811         /*
7812          * For these, the 4th argument is not TIMEOUT, but VAL2.
7813          * But the prototype of do_safe_futex takes a pointer, so
7814          * insert casts to satisfy the compiler.  We do not need
7815          * to tswap VAL2 since it's not compared to guest memory.
7816           */
7817         pts = (struct timespec *)(uintptr_t)timeout;
7818         timeout = 0;
7819         haddr2 = g2h(cpu, uaddr2);
7820         break;
7821     default:
7822         return -TARGET_ENOSYS;
7823     }
7824     if (timeout) {
7825         pts = &ts;
7826         if (time64
7827             ? target_to_host_timespec64(pts, timeout)
7828             : target_to_host_timespec(pts, timeout)) {
7829             return -TARGET_EFAULT;
7830         }
7831     }
7832     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7833 }
7834 #endif
7835 
7836 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7837 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7838                                      abi_long handle, abi_long mount_id,
7839                                      abi_long flags)
7840 {
7841     struct file_handle *target_fh;
7842     struct file_handle *fh;
7843     int mid = 0;
7844     abi_long ret;
7845     char *name;
7846     unsigned int size, total_size;
7847 
7848     if (get_user_s32(size, handle)) {
7849         return -TARGET_EFAULT;
7850     }
7851 
7852     name = lock_user_string(pathname);
7853     if (!name) {
7854         return -TARGET_EFAULT;
7855     }
7856 
7857     total_size = sizeof(struct file_handle) + size;
7858     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7859     if (!target_fh) {
7860         unlock_user(name, pathname, 0);
7861         return -TARGET_EFAULT;
7862     }
7863 
7864     fh = g_malloc0(total_size);
7865     fh->handle_bytes = size;
7866 
7867     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7868     unlock_user(name, pathname, 0);
7869 
7870     /* man name_to_handle_at(2):
7871      * Other than the use of the handle_bytes field, the caller should treat
7872      * the file_handle structure as an opaque data type
7873      */
7874 
7875     memcpy(target_fh, fh, total_size);
7876     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7877     target_fh->handle_type = tswap32(fh->handle_type);
7878     g_free(fh);
7879     unlock_user(target_fh, handle, total_size);
7880 
7881     if (put_user_s32(mid, mount_id)) {
7882         return -TARGET_EFAULT;
7883     }
7884 
7885     return ret;
7886 
7887 }
7888 #endif
7889 
7890 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7891 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7892                                      abi_long flags)
7893 {
7894     struct file_handle *target_fh;
7895     struct file_handle *fh;
7896     unsigned int size, total_size;
7897     abi_long ret;
7898 
7899     if (get_user_s32(size, handle)) {
7900         return -TARGET_EFAULT;
7901     }
7902 
7903     total_size = sizeof(struct file_handle) + size;
7904     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7905     if (!target_fh) {
7906         return -TARGET_EFAULT;
7907     }
7908 
7909     fh = g_memdup(target_fh, total_size);
7910     fh->handle_bytes = size;
7911     fh->handle_type = tswap32(target_fh->handle_type);
7912 
7913     ret = get_errno(open_by_handle_at(mount_fd, fh,
7914                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7915 
7916     g_free(fh);
7917 
7918     unlock_user(target_fh, handle, total_size);
7919 
7920     return ret;
7921 }
7922 #endif
7923 
7924 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7925 
7926 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7927 {
7928     int host_flags;
7929     target_sigset_t *target_mask;
7930     sigset_t host_mask;
7931     abi_long ret;
7932 
7933     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7934         return -TARGET_EINVAL;
7935     }
7936     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7937         return -TARGET_EFAULT;
7938     }
7939 
7940     target_to_host_sigset(&host_mask, target_mask);
7941 
7942     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7943 
7944     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7945     if (ret >= 0) {
7946         fd_trans_register(ret, &target_signalfd_trans);
7947     }
7948 
7949     unlock_user_struct(target_mask, mask, 0);
7950 
7951     return ret;
7952 }
7953 #endif
7954 
7955 /* Map host to target signal numbers for the wait family of syscalls.
7956    Assume all other status bits are the same.  */
7957 int host_to_target_waitstatus(int status)
7958 {
7959     if (WIFSIGNALED(status)) {
7960         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7961     }
7962     if (WIFSTOPPED(status)) {
7963         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7964                | (status & 0xff);
7965     }
7966     return status;
7967 }
7968 
7969 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7970 {
7971     CPUState *cpu = env_cpu(cpu_env);
7972     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7973     int i;
7974 
7975     for (i = 0; i < bprm->argc; i++) {
7976         size_t len = strlen(bprm->argv[i]) + 1;
7977 
7978         if (write(fd, bprm->argv[i], len) != len) {
7979             return -1;
7980         }
7981     }
7982 
7983     return 0;
7984 }
7985 
7986 static int open_self_maps(CPUArchState *cpu_env, int fd)
7987 {
7988     CPUState *cpu = env_cpu(cpu_env);
7989     TaskState *ts = cpu->opaque;
7990     GSList *map_info = read_self_maps();
7991     GSList *s;
7992     int count;
7993 
7994     for (s = map_info; s; s = g_slist_next(s)) {
7995         MapInfo *e = (MapInfo *) s->data;
7996 
7997         if (h2g_valid(e->start)) {
7998             unsigned long min = e->start;
7999             unsigned long max = e->end;
8000             int flags = page_get_flags(h2g(min));
8001             const char *path;
8002 
8003             max = h2g_valid(max - 1) ?
8004                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8005 
8006             if (page_check_range(h2g(min), max - min, flags) == -1) {
8007                 continue;
8008             }
8009 
8010 #ifdef TARGET_HPPA
8011             if (h2g(max) == ts->info->stack_limit) {
8012 #else
8013             if (h2g(min) == ts->info->stack_limit) {
8014 #endif
8015                 path = "[stack]";
8016             } else {
8017                 path = e->path;
8018             }
8019 
8020             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8021                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8022                             h2g(min), h2g(max - 1) + 1,
8023                             (flags & PAGE_READ) ? 'r' : '-',
8024                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8025                             (flags & PAGE_EXEC) ? 'x' : '-',
8026                             e->is_priv ? 'p' : 's',
8027                             (uint64_t) e->offset, e->dev, e->inode);
8028             if (path) {
8029                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8030             } else {
8031                 dprintf(fd, "\n");
8032             }
8033         }
8034     }
8035 
8036     free_self_maps(map_info);
8037 
8038 #ifdef TARGET_VSYSCALL_PAGE
8039     /*
8040      * We only support execution from the vsyscall page.
8041      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8042      */
8043     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8044                     " --xp 00000000 00:00 0",
8045                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8046     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8047 #endif
8048 
8049     return 0;
8050 }
8051 
8052 static int open_self_stat(CPUArchState *cpu_env, int fd)
8053 {
8054     CPUState *cpu = env_cpu(cpu_env);
8055     TaskState *ts = cpu->opaque;
8056     g_autoptr(GString) buf = g_string_new(NULL);
8057     int i;
8058 
8059     for (i = 0; i < 44; i++) {
8060         if (i == 0) {
8061             /* pid */
8062             g_string_printf(buf, FMT_pid " ", getpid());
8063         } else if (i == 1) {
8064             /* app name */
8065             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8066             bin = bin ? bin + 1 : ts->bprm->argv[0];
8067             g_string_printf(buf, "(%.15s) ", bin);
8068         } else if (i == 3) {
8069             /* ppid */
8070             g_string_printf(buf, FMT_pid " ", getppid());
8071         } else if (i == 21) {
8072             /* starttime */
8073             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8074         } else if (i == 27) {
8075             /* stack bottom */
8076             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8077         } else {
8078             /* for the rest, there is MasterCard */
8079             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8080         }
8081 
8082         if (write(fd, buf->str, buf->len) != buf->len) {
8083             return -1;
8084         }
8085     }
8086 
8087     return 0;
8088 }
8089 
8090 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8091 {
8092     CPUState *cpu = env_cpu(cpu_env);
8093     TaskState *ts = cpu->opaque;
8094     abi_ulong auxv = ts->info->saved_auxv;
8095     abi_ulong len = ts->info->auxv_len;
8096     char *ptr;
8097 
8098     /*
8099      * Auxiliary vector is stored in target process stack.
8100      * read in whole auxv vector and copy it to file
8101      */
8102     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8103     if (ptr != NULL) {
8104         while (len > 0) {
8105             ssize_t r;
8106             r = write(fd, ptr, len);
8107             if (r <= 0) {
8108                 break;
8109             }
8110             len -= r;
8111             ptr += r;
8112         }
8113         lseek(fd, 0, SEEK_SET);
8114         unlock_user(ptr, auxv, len);
8115     }
8116 
8117     return 0;
8118 }
8119 
8120 static int is_proc_myself(const char *filename, const char *entry)
8121 {
8122     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8123         filename += strlen("/proc/");
8124         if (!strncmp(filename, "self/", strlen("self/"))) {
8125             filename += strlen("self/");
8126         } else if (*filename >= '1' && *filename <= '9') {
8127             char myself[80];
8128             snprintf(myself, sizeof(myself), "%d/", getpid());
8129             if (!strncmp(filename, myself, strlen(myself))) {
8130                 filename += strlen(myself);
8131             } else {
8132                 return 0;
8133             }
8134         } else {
8135             return 0;
8136         }
8137         if (!strcmp(filename, entry)) {
8138             return 1;
8139         }
8140     }
8141     return 0;
8142 }
8143 
8144 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8145                       const char *fmt, int code)
8146 {
8147     if (logfile) {
8148         CPUState *cs = env_cpu(env);
8149 
8150         fprintf(logfile, fmt, code);
8151         fprintf(logfile, "Failing executable: %s\n", exec_path);
8152         cpu_dump_state(cs, logfile, 0);
8153         open_self_maps(env, fileno(logfile));
8154     }
8155 }
8156 
8157 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8158 {
8159     /* dump to console */
8160     excp_dump_file(stderr, env, fmt, code);
8161 
8162     /* dump to log file */
8163     if (qemu_log_separate()) {
8164         FILE *logfile = qemu_log_trylock();
8165 
8166         excp_dump_file(logfile, env, fmt, code);
8167         qemu_log_unlock(logfile);
8168     }
8169 }
8170 
8171 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8172     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8173 static int is_proc(const char *filename, const char *entry)
8174 {
8175     return strcmp(filename, entry) == 0;
8176 }
8177 #endif
8178 
8179 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8180 static int open_net_route(CPUArchState *cpu_env, int fd)
8181 {
8182     FILE *fp;
8183     char *line = NULL;
8184     size_t len = 0;
8185     ssize_t read;
8186 
8187     fp = fopen("/proc/net/route", "r");
8188     if (fp == NULL) {
8189         return -1;
8190     }
8191 
8192     /* read header */
8193 
8194     read = getline(&line, &len, fp);
8195     dprintf(fd, "%s", line);
8196 
8197     /* read routes */
8198 
8199     while ((read = getline(&line, &len, fp)) != -1) {
8200         char iface[16];
8201         uint32_t dest, gw, mask;
8202         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8203         int fields;
8204 
8205         fields = sscanf(line,
8206                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8207                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8208                         &mask, &mtu, &window, &irtt);
8209         if (fields != 11) {
8210             continue;
8211         }
8212         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8213                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8214                 metric, tswap32(mask), mtu, window, irtt);
8215     }
8216 
8217     free(line);
8218     fclose(fp);
8219 
8220     return 0;
8221 }
8222 #endif
8223 
8224 #if defined(TARGET_SPARC)
8225 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8226 {
8227     dprintf(fd, "type\t\t: sun4u\n");
8228     return 0;
8229 }
8230 #endif
8231 
8232 #if defined(TARGET_HPPA)
8233 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8234 {
8235     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8236     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8237     dprintf(fd, "capabilities\t: os32\n");
8238     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8239     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8240     return 0;
8241 }
8242 #endif
8243 
8244 #if defined(TARGET_M68K)
8245 static int open_hardware(CPUArchState *cpu_env, int fd)
8246 {
8247     dprintf(fd, "Model:\t\tqemu-m68k\n");
8248     return 0;
8249 }
8250 #endif
8251 
8252 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8253 {
8254     struct fake_open {
8255         const char *filename;
8256         int (*fill)(CPUArchState *cpu_env, int fd);
8257         int (*cmp)(const char *s1, const char *s2);
8258     };
8259     const struct fake_open *fake_open;
8260     static const struct fake_open fakes[] = {
8261         { "maps", open_self_maps, is_proc_myself },
8262         { "stat", open_self_stat, is_proc_myself },
8263         { "auxv", open_self_auxv, is_proc_myself },
8264         { "cmdline", open_self_cmdline, is_proc_myself },
8265 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8266         { "/proc/net/route", open_net_route, is_proc },
8267 #endif
8268 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8269         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8270 #endif
8271 #if defined(TARGET_M68K)
8272         { "/proc/hardware", open_hardware, is_proc },
8273 #endif
8274         { NULL, NULL, NULL }
8275     };
8276 
8277     if (is_proc_myself(pathname, "exe")) {
8278         return safe_openat(dirfd, exec_path, flags, mode);
8279     }
8280 
8281     for (fake_open = fakes; fake_open->filename; fake_open++) {
8282         if (fake_open->cmp(pathname, fake_open->filename)) {
8283             break;
8284         }
8285     }
8286 
8287     if (fake_open->filename) {
8288         const char *tmpdir;
8289         char filename[PATH_MAX];
8290         int fd, r;
8291 
8292         fd = memfd_create("qemu-open", 0);
8293         if (fd < 0) {
8294             if (errno != ENOSYS) {
8295                 return fd;
8296             }
8297             /* create temporary file to map stat to */
8298             tmpdir = getenv("TMPDIR");
8299             if (!tmpdir)
8300                 tmpdir = "/tmp";
8301             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8302             fd = mkstemp(filename);
8303             if (fd < 0) {
8304                 return fd;
8305             }
8306             unlink(filename);
8307         }
8308 
8309         if ((r = fake_open->fill(cpu_env, fd))) {
8310             int e = errno;
8311             close(fd);
8312             errno = e;
8313             return r;
8314         }
8315         lseek(fd, 0, SEEK_SET);
8316 
8317         return fd;
8318     }
8319 
8320     return safe_openat(dirfd, path(pathname), flags, mode);
8321 }
8322 
8323 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8324                        abi_long pathname, abi_long guest_argp,
8325                        abi_long guest_envp, int flags)
8326 {
8327     int ret;
8328     char **argp, **envp;
8329     int argc, envc;
8330     abi_ulong gp;
8331     abi_ulong addr;
8332     char **q;
8333     void *p;
8334 
8335     argc = 0;
8336 
8337     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8338         if (get_user_ual(addr, gp)) {
8339             return -TARGET_EFAULT;
8340         }
8341         if (!addr) {
8342             break;
8343         }
8344         argc++;
8345     }
8346     envc = 0;
8347     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8348         if (get_user_ual(addr, gp)) {
8349             return -TARGET_EFAULT;
8350         }
8351         if (!addr) {
8352             break;
8353         }
8354         envc++;
8355     }
8356 
8357     argp = g_new0(char *, argc + 1);
8358     envp = g_new0(char *, envc + 1);
8359 
8360     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8361         if (get_user_ual(addr, gp)) {
8362             goto execve_efault;
8363         }
8364         if (!addr) {
8365             break;
8366         }
8367         *q = lock_user_string(addr);
8368         if (!*q) {
8369             goto execve_efault;
8370         }
8371     }
8372     *q = NULL;
8373 
8374     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8375         if (get_user_ual(addr, gp)) {
8376             goto execve_efault;
8377         }
8378         if (!addr) {
8379             break;
8380         }
8381         *q = lock_user_string(addr);
8382         if (!*q) {
8383             goto execve_efault;
8384         }
8385     }
8386     *q = NULL;
8387 
8388     /*
8389      * Although execve() is not an interruptible syscall it is
8390      * a special case where we must use the safe_syscall wrapper:
8391      * if we allow a signal to happen before we make the host
8392      * syscall then we will 'lose' it, because at the point of
8393      * execve the process leaves QEMU's control. So we use the
8394      * safe syscall wrapper to ensure that we either take the
8395      * signal as a guest signal, or else it does not happen
8396      * before the execve completes and makes it the other
8397      * program's problem.
8398      */
8399     p = lock_user_string(pathname);
8400     if (!p) {
8401         goto execve_efault;
8402     }
8403 
8404     if (is_proc_myself(p, "exe")) {
8405         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8406     } else {
8407         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8408     }
8409 
8410     unlock_user(p, pathname, 0);
8411 
8412     goto execve_end;
8413 
8414 execve_efault:
8415     ret = -TARGET_EFAULT;
8416 
8417 execve_end:
8418     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8419         if (get_user_ual(addr, gp) || !addr) {
8420             break;
8421         }
8422         unlock_user(*q, addr, 0);
8423     }
8424     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8425         if (get_user_ual(addr, gp) || !addr) {
8426             break;
8427         }
8428         unlock_user(*q, addr, 0);
8429     }
8430 
8431     g_free(argp);
8432     g_free(envp);
8433     return ret;
8434 }
8435 
8436 #define TIMER_MAGIC 0x0caf0000
8437 #define TIMER_MAGIC_MASK 0xffff0000
8438 
8439 /* Convert QEMU provided timer ID back to internal 16bit index format */
8440 static target_timer_t get_timer_id(abi_long arg)
8441 {
8442     target_timer_t timerid = arg;
8443 
8444     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8445         return -TARGET_EINVAL;
8446     }
8447 
8448     timerid &= 0xffff;
8449 
8450     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8451         return -TARGET_EINVAL;
8452     }
8453 
8454     return timerid;
8455 }
8456 
8457 static int target_to_host_cpu_mask(unsigned long *host_mask,
8458                                    size_t host_size,
8459                                    abi_ulong target_addr,
8460                                    size_t target_size)
8461 {
8462     unsigned target_bits = sizeof(abi_ulong) * 8;
8463     unsigned host_bits = sizeof(*host_mask) * 8;
8464     abi_ulong *target_mask;
8465     unsigned i, j;
8466 
8467     assert(host_size >= target_size);
8468 
8469     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8470     if (!target_mask) {
8471         return -TARGET_EFAULT;
8472     }
8473     memset(host_mask, 0, host_size);
8474 
8475     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8476         unsigned bit = i * target_bits;
8477         abi_ulong val;
8478 
8479         __get_user(val, &target_mask[i]);
8480         for (j = 0; j < target_bits; j++, bit++) {
8481             if (val & (1UL << j)) {
8482                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8483             }
8484         }
8485     }
8486 
8487     unlock_user(target_mask, target_addr, 0);
8488     return 0;
8489 }
8490 
8491 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8492                                    size_t host_size,
8493                                    abi_ulong target_addr,
8494                                    size_t target_size)
8495 {
8496     unsigned target_bits = sizeof(abi_ulong) * 8;
8497     unsigned host_bits = sizeof(*host_mask) * 8;
8498     abi_ulong *target_mask;
8499     unsigned i, j;
8500 
8501     assert(host_size >= target_size);
8502 
8503     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8504     if (!target_mask) {
8505         return -TARGET_EFAULT;
8506     }
8507 
8508     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8509         unsigned bit = i * target_bits;
8510         abi_ulong val = 0;
8511 
8512         for (j = 0; j < target_bits; j++, bit++) {
8513             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8514                 val |= 1UL << j;
8515             }
8516         }
8517         __put_user(val, &target_mask[i]);
8518     }
8519 
8520     unlock_user(target_mask, target_addr, target_size);
8521     return 0;
8522 }
8523 
8524 #ifdef TARGET_NR_getdents
8525 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8526 {
8527     g_autofree void *hdirp = NULL;
8528     void *tdirp;
8529     int hlen, hoff, toff;
8530     int hreclen, treclen;
8531     off64_t prev_diroff = 0;
8532 
8533     hdirp = g_try_malloc(count);
8534     if (!hdirp) {
8535         return -TARGET_ENOMEM;
8536     }
8537 
8538 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8539     hlen = sys_getdents(dirfd, hdirp, count);
8540 #else
8541     hlen = sys_getdents64(dirfd, hdirp, count);
8542 #endif
8543 
8544     hlen = get_errno(hlen);
8545     if (is_error(hlen)) {
8546         return hlen;
8547     }
8548 
8549     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8550     if (!tdirp) {
8551         return -TARGET_EFAULT;
8552     }
8553 
8554     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8555 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8556         struct linux_dirent *hde = hdirp + hoff;
8557 #else
8558         struct linux_dirent64 *hde = hdirp + hoff;
8559 #endif
8560         struct target_dirent *tde = tdirp + toff;
8561         int namelen;
8562         uint8_t type;
8563 
8564         namelen = strlen(hde->d_name);
8565         hreclen = hde->d_reclen;
8566         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8567         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8568 
8569         if (toff + treclen > count) {
8570             /*
8571              * If the host struct is smaller than the target struct, or
8572              * requires less alignment and thus packs into less space,
8573              * then the host can return more entries than we can pass
8574              * on to the guest.
8575              */
8576             if (toff == 0) {
8577                 toff = -TARGET_EINVAL; /* result buffer is too small */
8578                 break;
8579             }
8580             /*
8581              * Return what we have, resetting the file pointer to the
8582              * location of the first record not returned.
8583              */
8584             lseek64(dirfd, prev_diroff, SEEK_SET);
8585             break;
8586         }
8587 
8588         prev_diroff = hde->d_off;
8589         tde->d_ino = tswapal(hde->d_ino);
8590         tde->d_off = tswapal(hde->d_off);
8591         tde->d_reclen = tswap16(treclen);
8592         memcpy(tde->d_name, hde->d_name, namelen + 1);
8593 
8594         /*
8595          * The getdents type is in what was formerly a padding byte at the
8596          * end of the structure.
8597          */
8598 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8599         type = *((uint8_t *)hde + hreclen - 1);
8600 #else
8601         type = hde->d_type;
8602 #endif
8603         *((uint8_t *)tde + treclen - 1) = type;
8604     }
8605 
8606     unlock_user(tdirp, arg2, toff);
8607     return toff;
8608 }
8609 #endif /* TARGET_NR_getdents */
8610 
8611 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8612 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8613 {
8614     g_autofree void *hdirp = NULL;
8615     void *tdirp;
8616     int hlen, hoff, toff;
8617     int hreclen, treclen;
8618     off64_t prev_diroff = 0;
8619 
8620     hdirp = g_try_malloc(count);
8621     if (!hdirp) {
8622         return -TARGET_ENOMEM;
8623     }
8624 
8625     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8626     if (is_error(hlen)) {
8627         return hlen;
8628     }
8629 
8630     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8631     if (!tdirp) {
8632         return -TARGET_EFAULT;
8633     }
8634 
8635     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8636         struct linux_dirent64 *hde = hdirp + hoff;
8637         struct target_dirent64 *tde = tdirp + toff;
8638         int namelen;
8639 
8640         namelen = strlen(hde->d_name) + 1;
8641         hreclen = hde->d_reclen;
8642         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8643         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8644 
8645         if (toff + treclen > count) {
8646             /*
8647              * If the host struct is smaller than the target struct, or
8648              * requires less alignment and thus packs into less space,
8649              * then the host can return more entries than we can pass
8650              * on to the guest.
8651              */
8652             if (toff == 0) {
8653                 toff = -TARGET_EINVAL; /* result buffer is too small */
8654                 break;
8655             }
8656             /*
8657              * Return what we have, resetting the file pointer to the
8658              * location of the first record not returned.
8659              */
8660             lseek64(dirfd, prev_diroff, SEEK_SET);
8661             break;
8662         }
8663 
8664         prev_diroff = hde->d_off;
8665         tde->d_ino = tswap64(hde->d_ino);
8666         tde->d_off = tswap64(hde->d_off);
8667         tde->d_reclen = tswap16(treclen);
8668         tde->d_type = hde->d_type;
8669         memcpy(tde->d_name, hde->d_name, namelen);
8670     }
8671 
8672     unlock_user(tdirp, arg2, toff);
8673     return toff;
8674 }
8675 #endif /* TARGET_NR_getdents64 */
8676 
8677 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8678 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8679 #endif
8680 
8681 /* This is an internal helper for do_syscall so that it is easier
8682  * to have a single return point, so that actions, such as logging
8683  * of syscall results, can be performed.
8684  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8685  */
8686 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8687                             abi_long arg2, abi_long arg3, abi_long arg4,
8688                             abi_long arg5, abi_long arg6, abi_long arg7,
8689                             abi_long arg8)
8690 {
8691     CPUState *cpu = env_cpu(cpu_env);
8692     abi_long ret;
8693 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8694     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8695     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8696     || defined(TARGET_NR_statx)
8697     struct stat st;
8698 #endif
8699 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8700     || defined(TARGET_NR_fstatfs)
8701     struct statfs stfs;
8702 #endif
8703     void *p;
8704 
8705     switch(num) {
8706     case TARGET_NR_exit:
8707         /* In old applications this may be used to implement _exit(2).
8708            However in threaded applications it is used for thread termination,
8709            and _exit_group is used for application termination.
8710            Do thread termination if we have more then one thread.  */
8711 
8712         if (block_signals()) {
8713             return -QEMU_ERESTARTSYS;
8714         }
8715 
8716         pthread_mutex_lock(&clone_lock);
8717 
8718         if (CPU_NEXT(first_cpu)) {
8719             TaskState *ts = cpu->opaque;
8720 
8721             if (ts->child_tidptr) {
8722                 put_user_u32(0, ts->child_tidptr);
8723                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8724                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8725             }
8726 
8727             object_unparent(OBJECT(cpu));
8728             object_unref(OBJECT(cpu));
8729             /*
8730              * At this point the CPU should be unrealized and removed
8731              * from cpu lists. We can clean-up the rest of the thread
8732              * data without the lock held.
8733              */
8734 
8735             pthread_mutex_unlock(&clone_lock);
8736 
8737             thread_cpu = NULL;
8738             g_free(ts);
8739             rcu_unregister_thread();
8740             pthread_exit(NULL);
8741         }
8742 
8743         pthread_mutex_unlock(&clone_lock);
8744         preexit_cleanup(cpu_env, arg1);
8745         _exit(arg1);
8746         return 0; /* avoid warning */
8747     case TARGET_NR_read:
8748         if (arg2 == 0 && arg3 == 0) {
8749             return get_errno(safe_read(arg1, 0, 0));
8750         } else {
8751             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8752                 return -TARGET_EFAULT;
8753             ret = get_errno(safe_read(arg1, p, arg3));
8754             if (ret >= 0 &&
8755                 fd_trans_host_to_target_data(arg1)) {
8756                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8757             }
8758             unlock_user(p, arg2, ret);
8759         }
8760         return ret;
8761     case TARGET_NR_write:
8762         if (arg2 == 0 && arg3 == 0) {
8763             return get_errno(safe_write(arg1, 0, 0));
8764         }
8765         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8766             return -TARGET_EFAULT;
8767         if (fd_trans_target_to_host_data(arg1)) {
8768             void *copy = g_malloc(arg3);
8769             memcpy(copy, p, arg3);
8770             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8771             if (ret >= 0) {
8772                 ret = get_errno(safe_write(arg1, copy, ret));
8773             }
8774             g_free(copy);
8775         } else {
8776             ret = get_errno(safe_write(arg1, p, arg3));
8777         }
8778         unlock_user(p, arg2, 0);
8779         return ret;
8780 
8781 #ifdef TARGET_NR_open
8782     case TARGET_NR_open:
8783         if (!(p = lock_user_string(arg1)))
8784             return -TARGET_EFAULT;
8785         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8786                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8787                                   arg3));
8788         fd_trans_unregister(ret);
8789         unlock_user(p, arg1, 0);
8790         return ret;
8791 #endif
8792     case TARGET_NR_openat:
8793         if (!(p = lock_user_string(arg2)))
8794             return -TARGET_EFAULT;
8795         ret = get_errno(do_openat(cpu_env, arg1, p,
8796                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8797                                   arg4));
8798         fd_trans_unregister(ret);
8799         unlock_user(p, arg2, 0);
8800         return ret;
8801 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8802     case TARGET_NR_name_to_handle_at:
8803         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8804         return ret;
8805 #endif
8806 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8807     case TARGET_NR_open_by_handle_at:
8808         ret = do_open_by_handle_at(arg1, arg2, arg3);
8809         fd_trans_unregister(ret);
8810         return ret;
8811 #endif
8812 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8813     case TARGET_NR_pidfd_open:
8814         return get_errno(pidfd_open(arg1, arg2));
8815 #endif
8816 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8817     case TARGET_NR_pidfd_send_signal:
8818         {
8819             siginfo_t uinfo, *puinfo;
8820 
8821             if (arg3) {
8822                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8823                 if (!p) {
8824                     return -TARGET_EFAULT;
8825                  }
8826                  target_to_host_siginfo(&uinfo, p);
8827                  unlock_user(p, arg3, 0);
8828                  puinfo = &uinfo;
8829             } else {
8830                  puinfo = NULL;
8831             }
8832             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8833                                               puinfo, arg4));
8834         }
8835         return ret;
8836 #endif
8837 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8838     case TARGET_NR_pidfd_getfd:
8839         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8840 #endif
8841     case TARGET_NR_close:
8842         fd_trans_unregister(arg1);
8843         return get_errno(close(arg1));
8844 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8845     case TARGET_NR_close_range:
8846         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8847         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8848             abi_long fd, maxfd;
8849             maxfd = MIN(arg2, target_fd_max);
8850             for (fd = arg1; fd < maxfd; fd++) {
8851                 fd_trans_unregister(fd);
8852             }
8853         }
8854         return ret;
8855 #endif
8856 
8857     case TARGET_NR_brk:
8858         return do_brk(arg1);
8859 #ifdef TARGET_NR_fork
8860     case TARGET_NR_fork:
8861         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8862 #endif
8863 #ifdef TARGET_NR_waitpid
8864     case TARGET_NR_waitpid:
8865         {
8866             int status;
8867             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8868             if (!is_error(ret) && arg2 && ret
8869                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8870                 return -TARGET_EFAULT;
8871         }
8872         return ret;
8873 #endif
8874 #ifdef TARGET_NR_waitid
8875     case TARGET_NR_waitid:
8876         {
8877             siginfo_t info;
8878             info.si_pid = 0;
8879             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8880             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8881                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8882                     return -TARGET_EFAULT;
8883                 host_to_target_siginfo(p, &info);
8884                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8885             }
8886         }
8887         return ret;
8888 #endif
8889 #ifdef TARGET_NR_creat /* not on alpha */
8890     case TARGET_NR_creat:
8891         if (!(p = lock_user_string(arg1)))
8892             return -TARGET_EFAULT;
8893         ret = get_errno(creat(p, arg2));
8894         fd_trans_unregister(ret);
8895         unlock_user(p, arg1, 0);
8896         return ret;
8897 #endif
8898 #ifdef TARGET_NR_link
8899     case TARGET_NR_link:
8900         {
8901             void * p2;
8902             p = lock_user_string(arg1);
8903             p2 = lock_user_string(arg2);
8904             if (!p || !p2)
8905                 ret = -TARGET_EFAULT;
8906             else
8907                 ret = get_errno(link(p, p2));
8908             unlock_user(p2, arg2, 0);
8909             unlock_user(p, arg1, 0);
8910         }
8911         return ret;
8912 #endif
8913 #if defined(TARGET_NR_linkat)
8914     case TARGET_NR_linkat:
8915         {
8916             void * p2 = NULL;
8917             if (!arg2 || !arg4)
8918                 return -TARGET_EFAULT;
8919             p  = lock_user_string(arg2);
8920             p2 = lock_user_string(arg4);
8921             if (!p || !p2)
8922                 ret = -TARGET_EFAULT;
8923             else
8924                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8925             unlock_user(p, arg2, 0);
8926             unlock_user(p2, arg4, 0);
8927         }
8928         return ret;
8929 #endif
8930 #ifdef TARGET_NR_unlink
8931     case TARGET_NR_unlink:
8932         if (!(p = lock_user_string(arg1)))
8933             return -TARGET_EFAULT;
8934         ret = get_errno(unlink(p));
8935         unlock_user(p, arg1, 0);
8936         return ret;
8937 #endif
8938 #if defined(TARGET_NR_unlinkat)
8939     case TARGET_NR_unlinkat:
8940         if (!(p = lock_user_string(arg2)))
8941             return -TARGET_EFAULT;
8942         ret = get_errno(unlinkat(arg1, p, arg3));
8943         unlock_user(p, arg2, 0);
8944         return ret;
8945 #endif
8946     case TARGET_NR_execveat:
8947         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8948     case TARGET_NR_execve:
8949         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8950     case TARGET_NR_chdir:
8951         if (!(p = lock_user_string(arg1)))
8952             return -TARGET_EFAULT;
8953         ret = get_errno(chdir(p));
8954         unlock_user(p, arg1, 0);
8955         return ret;
8956 #ifdef TARGET_NR_time
8957     case TARGET_NR_time:
8958         {
8959             time_t host_time;
8960             ret = get_errno(time(&host_time));
8961             if (!is_error(ret)
8962                 && arg1
8963                 && put_user_sal(host_time, arg1))
8964                 return -TARGET_EFAULT;
8965         }
8966         return ret;
8967 #endif
8968 #ifdef TARGET_NR_mknod
8969     case TARGET_NR_mknod:
8970         if (!(p = lock_user_string(arg1)))
8971             return -TARGET_EFAULT;
8972         ret = get_errno(mknod(p, arg2, arg3));
8973         unlock_user(p, arg1, 0);
8974         return ret;
8975 #endif
8976 #if defined(TARGET_NR_mknodat)
8977     case TARGET_NR_mknodat:
8978         if (!(p = lock_user_string(arg2)))
8979             return -TARGET_EFAULT;
8980         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8981         unlock_user(p, arg2, 0);
8982         return ret;
8983 #endif
8984 #ifdef TARGET_NR_chmod
8985     case TARGET_NR_chmod:
8986         if (!(p = lock_user_string(arg1)))
8987             return -TARGET_EFAULT;
8988         ret = get_errno(chmod(p, arg2));
8989         unlock_user(p, arg1, 0);
8990         return ret;
8991 #endif
8992 #ifdef TARGET_NR_lseek
8993     case TARGET_NR_lseek:
8994         return get_errno(lseek(arg1, arg2, arg3));
8995 #endif
8996 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8997     /* Alpha specific */
8998     case TARGET_NR_getxpid:
8999         cpu_env->ir[IR_A4] = getppid();
9000         return get_errno(getpid());
9001 #endif
9002 #ifdef TARGET_NR_getpid
9003     case TARGET_NR_getpid:
9004         return get_errno(getpid());
9005 #endif
9006     case TARGET_NR_mount:
9007         {
9008             /* need to look at the data field */
9009             void *p2, *p3;
9010 
9011             if (arg1) {
9012                 p = lock_user_string(arg1);
9013                 if (!p) {
9014                     return -TARGET_EFAULT;
9015                 }
9016             } else {
9017                 p = NULL;
9018             }
9019 
9020             p2 = lock_user_string(arg2);
9021             if (!p2) {
9022                 if (arg1) {
9023                     unlock_user(p, arg1, 0);
9024                 }
9025                 return -TARGET_EFAULT;
9026             }
9027 
9028             if (arg3) {
9029                 p3 = lock_user_string(arg3);
9030                 if (!p3) {
9031                     if (arg1) {
9032                         unlock_user(p, arg1, 0);
9033                     }
9034                     unlock_user(p2, arg2, 0);
9035                     return -TARGET_EFAULT;
9036                 }
9037             } else {
9038                 p3 = NULL;
9039             }
9040 
9041             /* FIXME - arg5 should be locked, but it isn't clear how to
9042              * do that since it's not guaranteed to be a NULL-terminated
9043              * string.
9044              */
9045             if (!arg5) {
9046                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9047             } else {
9048                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9049             }
9050             ret = get_errno(ret);
9051 
9052             if (arg1) {
9053                 unlock_user(p, arg1, 0);
9054             }
9055             unlock_user(p2, arg2, 0);
9056             if (arg3) {
9057                 unlock_user(p3, arg3, 0);
9058             }
9059         }
9060         return ret;
9061 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9062 #if defined(TARGET_NR_umount)
9063     case TARGET_NR_umount:
9064 #endif
9065 #if defined(TARGET_NR_oldumount)
9066     case TARGET_NR_oldumount:
9067 #endif
9068         if (!(p = lock_user_string(arg1)))
9069             return -TARGET_EFAULT;
9070         ret = get_errno(umount(p));
9071         unlock_user(p, arg1, 0);
9072         return ret;
9073 #endif
9074 #ifdef TARGET_NR_stime /* not on alpha */
9075     case TARGET_NR_stime:
9076         {
9077             struct timespec ts;
9078             ts.tv_nsec = 0;
9079             if (get_user_sal(ts.tv_sec, arg1)) {
9080                 return -TARGET_EFAULT;
9081             }
9082             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9083         }
9084 #endif
9085 #ifdef TARGET_NR_alarm /* not on alpha */
9086     case TARGET_NR_alarm:
9087         return alarm(arg1);
9088 #endif
9089 #ifdef TARGET_NR_pause /* not on alpha */
9090     case TARGET_NR_pause:
9091         if (!block_signals()) {
9092             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9093         }
9094         return -TARGET_EINTR;
9095 #endif
9096 #ifdef TARGET_NR_utime
9097     case TARGET_NR_utime:
9098         {
9099             struct utimbuf tbuf, *host_tbuf;
9100             struct target_utimbuf *target_tbuf;
9101             if (arg2) {
9102                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9103                     return -TARGET_EFAULT;
9104                 tbuf.actime = tswapal(target_tbuf->actime);
9105                 tbuf.modtime = tswapal(target_tbuf->modtime);
9106                 unlock_user_struct(target_tbuf, arg2, 0);
9107                 host_tbuf = &tbuf;
9108             } else {
9109                 host_tbuf = NULL;
9110             }
9111             if (!(p = lock_user_string(arg1)))
9112                 return -TARGET_EFAULT;
9113             ret = get_errno(utime(p, host_tbuf));
9114             unlock_user(p, arg1, 0);
9115         }
9116         return ret;
9117 #endif
9118 #ifdef TARGET_NR_utimes
9119     case TARGET_NR_utimes:
9120         {
9121             struct timeval *tvp, tv[2];
9122             if (arg2) {
9123                 if (copy_from_user_timeval(&tv[0], arg2)
9124                     || copy_from_user_timeval(&tv[1],
9125                                               arg2 + sizeof(struct target_timeval)))
9126                     return -TARGET_EFAULT;
9127                 tvp = tv;
9128             } else {
9129                 tvp = NULL;
9130             }
9131             if (!(p = lock_user_string(arg1)))
9132                 return -TARGET_EFAULT;
9133             ret = get_errno(utimes(p, tvp));
9134             unlock_user(p, arg1, 0);
9135         }
9136         return ret;
9137 #endif
9138 #if defined(TARGET_NR_futimesat)
9139     case TARGET_NR_futimesat:
9140         {
9141             struct timeval *tvp, tv[2];
9142             if (arg3) {
9143                 if (copy_from_user_timeval(&tv[0], arg3)
9144                     || copy_from_user_timeval(&tv[1],
9145                                               arg3 + sizeof(struct target_timeval)))
9146                     return -TARGET_EFAULT;
9147                 tvp = tv;
9148             } else {
9149                 tvp = NULL;
9150             }
9151             if (!(p = lock_user_string(arg2))) {
9152                 return -TARGET_EFAULT;
9153             }
9154             ret = get_errno(futimesat(arg1, path(p), tvp));
9155             unlock_user(p, arg2, 0);
9156         }
9157         return ret;
9158 #endif
9159 #ifdef TARGET_NR_access
9160     case TARGET_NR_access:
9161         if (!(p = lock_user_string(arg1))) {
9162             return -TARGET_EFAULT;
9163         }
9164         ret = get_errno(access(path(p), arg2));
9165         unlock_user(p, arg1, 0);
9166         return ret;
9167 #endif
9168 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9169     case TARGET_NR_faccessat:
9170         if (!(p = lock_user_string(arg2))) {
9171             return -TARGET_EFAULT;
9172         }
9173         ret = get_errno(faccessat(arg1, p, arg3, 0));
9174         unlock_user(p, arg2, 0);
9175         return ret;
9176 #endif
9177 #if defined(TARGET_NR_faccessat2)
9178     case TARGET_NR_faccessat2:
9179         if (!(p = lock_user_string(arg2))) {
9180             return -TARGET_EFAULT;
9181         }
9182         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9183         unlock_user(p, arg2, 0);
9184         return ret;
9185 #endif
9186 #ifdef TARGET_NR_nice /* not on alpha */
9187     case TARGET_NR_nice:
9188         return get_errno(nice(arg1));
9189 #endif
9190     case TARGET_NR_sync:
9191         sync();
9192         return 0;
9193 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9194     case TARGET_NR_syncfs:
9195         return get_errno(syncfs(arg1));
9196 #endif
9197     case TARGET_NR_kill:
9198         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9199 #ifdef TARGET_NR_rename
9200     case TARGET_NR_rename:
9201         {
9202             void *p2;
9203             p = lock_user_string(arg1);
9204             p2 = lock_user_string(arg2);
9205             if (!p || !p2)
9206                 ret = -TARGET_EFAULT;
9207             else
9208                 ret = get_errno(rename(p, p2));
9209             unlock_user(p2, arg2, 0);
9210             unlock_user(p, arg1, 0);
9211         }
9212         return ret;
9213 #endif
9214 #if defined(TARGET_NR_renameat)
9215     case TARGET_NR_renameat:
9216         {
9217             void *p2;
9218             p  = lock_user_string(arg2);
9219             p2 = lock_user_string(arg4);
9220             if (!p || !p2)
9221                 ret = -TARGET_EFAULT;
9222             else
9223                 ret = get_errno(renameat(arg1, p, arg3, p2));
9224             unlock_user(p2, arg4, 0);
9225             unlock_user(p, arg2, 0);
9226         }
9227         return ret;
9228 #endif
9229 #if defined(TARGET_NR_renameat2)
9230     case TARGET_NR_renameat2:
9231         {
9232             void *p2;
9233             p  = lock_user_string(arg2);
9234             p2 = lock_user_string(arg4);
9235             if (!p || !p2) {
9236                 ret = -TARGET_EFAULT;
9237             } else {
9238                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9239             }
9240             unlock_user(p2, arg4, 0);
9241             unlock_user(p, arg2, 0);
9242         }
9243         return ret;
9244 #endif
9245 #ifdef TARGET_NR_mkdir
9246     case TARGET_NR_mkdir:
9247         if (!(p = lock_user_string(arg1)))
9248             return -TARGET_EFAULT;
9249         ret = get_errno(mkdir(p, arg2));
9250         unlock_user(p, arg1, 0);
9251         return ret;
9252 #endif
9253 #if defined(TARGET_NR_mkdirat)
9254     case TARGET_NR_mkdirat:
9255         if (!(p = lock_user_string(arg2)))
9256             return -TARGET_EFAULT;
9257         ret = get_errno(mkdirat(arg1, p, arg3));
9258         unlock_user(p, arg2, 0);
9259         return ret;
9260 #endif
9261 #ifdef TARGET_NR_rmdir
9262     case TARGET_NR_rmdir:
9263         if (!(p = lock_user_string(arg1)))
9264             return -TARGET_EFAULT;
9265         ret = get_errno(rmdir(p));
9266         unlock_user(p, arg1, 0);
9267         return ret;
9268 #endif
9269     case TARGET_NR_dup:
9270         ret = get_errno(dup(arg1));
9271         if (ret >= 0) {
9272             fd_trans_dup(arg1, ret);
9273         }
9274         return ret;
9275 #ifdef TARGET_NR_pipe
9276     case TARGET_NR_pipe:
9277         return do_pipe(cpu_env, arg1, 0, 0);
9278 #endif
9279 #ifdef TARGET_NR_pipe2
9280     case TARGET_NR_pipe2:
9281         return do_pipe(cpu_env, arg1,
9282                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9283 #endif
9284     case TARGET_NR_times:
9285         {
9286             struct target_tms *tmsp;
9287             struct tms tms;
9288             ret = get_errno(times(&tms));
9289             if (arg1) {
9290                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9291                 if (!tmsp)
9292                     return -TARGET_EFAULT;
9293                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9294                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9295                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9296                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9297             }
9298             if (!is_error(ret))
9299                 ret = host_to_target_clock_t(ret);
9300         }
9301         return ret;
9302     case TARGET_NR_acct:
9303         if (arg1 == 0) {
9304             ret = get_errno(acct(NULL));
9305         } else {
9306             if (!(p = lock_user_string(arg1))) {
9307                 return -TARGET_EFAULT;
9308             }
9309             ret = get_errno(acct(path(p)));
9310             unlock_user(p, arg1, 0);
9311         }
9312         return ret;
9313 #ifdef TARGET_NR_umount2
9314     case TARGET_NR_umount2:
9315         if (!(p = lock_user_string(arg1)))
9316             return -TARGET_EFAULT;
9317         ret = get_errno(umount2(p, arg2));
9318         unlock_user(p, arg1, 0);
9319         return ret;
9320 #endif
9321     case TARGET_NR_ioctl:
9322         return do_ioctl(arg1, arg2, arg3);
9323 #ifdef TARGET_NR_fcntl
9324     case TARGET_NR_fcntl:
9325         return do_fcntl(arg1, arg2, arg3);
9326 #endif
9327     case TARGET_NR_setpgid:
9328         return get_errno(setpgid(arg1, arg2));
9329     case TARGET_NR_umask:
9330         return get_errno(umask(arg1));
9331     case TARGET_NR_chroot:
9332         if (!(p = lock_user_string(arg1)))
9333             return -TARGET_EFAULT;
9334         ret = get_errno(chroot(p));
9335         unlock_user(p, arg1, 0);
9336         return ret;
9337 #ifdef TARGET_NR_dup2
9338     case TARGET_NR_dup2:
9339         ret = get_errno(dup2(arg1, arg2));
9340         if (ret >= 0) {
9341             fd_trans_dup(arg1, arg2);
9342         }
9343         return ret;
9344 #endif
9345 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9346     case TARGET_NR_dup3:
9347     {
9348         int host_flags;
9349 
9350         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9351             return -EINVAL;
9352         }
9353         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9354         ret = get_errno(dup3(arg1, arg2, host_flags));
9355         if (ret >= 0) {
9356             fd_trans_dup(arg1, arg2);
9357         }
9358         return ret;
9359     }
9360 #endif
9361 #ifdef TARGET_NR_getppid /* not on alpha */
9362     case TARGET_NR_getppid:
9363         return get_errno(getppid());
9364 #endif
9365 #ifdef TARGET_NR_getpgrp
9366     case TARGET_NR_getpgrp:
9367         return get_errno(getpgrp());
9368 #endif
9369     case TARGET_NR_setsid:
9370         return get_errno(setsid());
9371 #ifdef TARGET_NR_sigaction
9372     case TARGET_NR_sigaction:
9373         {
9374 #if defined(TARGET_MIPS)
9375 	    struct target_sigaction act, oact, *pact, *old_act;
9376 
9377 	    if (arg2) {
9378                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9379                     return -TARGET_EFAULT;
9380 		act._sa_handler = old_act->_sa_handler;
9381 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9382 		act.sa_flags = old_act->sa_flags;
9383 		unlock_user_struct(old_act, arg2, 0);
9384 		pact = &act;
9385 	    } else {
9386 		pact = NULL;
9387 	    }
9388 
9389         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9390 
9391 	    if (!is_error(ret) && arg3) {
9392                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9393                     return -TARGET_EFAULT;
9394 		old_act->_sa_handler = oact._sa_handler;
9395 		old_act->sa_flags = oact.sa_flags;
9396 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9397 		old_act->sa_mask.sig[1] = 0;
9398 		old_act->sa_mask.sig[2] = 0;
9399 		old_act->sa_mask.sig[3] = 0;
9400 		unlock_user_struct(old_act, arg3, 1);
9401 	    }
9402 #else
9403             struct target_old_sigaction *old_act;
9404             struct target_sigaction act, oact, *pact;
9405             if (arg2) {
9406                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9407                     return -TARGET_EFAULT;
9408                 act._sa_handler = old_act->_sa_handler;
9409                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9410                 act.sa_flags = old_act->sa_flags;
9411 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9412                 act.sa_restorer = old_act->sa_restorer;
9413 #endif
9414                 unlock_user_struct(old_act, arg2, 0);
9415                 pact = &act;
9416             } else {
9417                 pact = NULL;
9418             }
9419             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9420             if (!is_error(ret) && arg3) {
9421                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9422                     return -TARGET_EFAULT;
9423                 old_act->_sa_handler = oact._sa_handler;
9424                 old_act->sa_mask = oact.sa_mask.sig[0];
9425                 old_act->sa_flags = oact.sa_flags;
9426 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9427                 old_act->sa_restorer = oact.sa_restorer;
9428 #endif
9429                 unlock_user_struct(old_act, arg3, 1);
9430             }
9431 #endif
9432         }
9433         return ret;
9434 #endif
9435     case TARGET_NR_rt_sigaction:
9436         {
9437             /*
9438              * For Alpha and SPARC this is a 5 argument syscall, with
9439              * a 'restorer' parameter which must be copied into the
9440              * sa_restorer field of the sigaction struct.
9441              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9442              * and arg5 is the sigsetsize.
9443              */
9444 #if defined(TARGET_ALPHA)
9445             target_ulong sigsetsize = arg4;
9446             target_ulong restorer = arg5;
9447 #elif defined(TARGET_SPARC)
9448             target_ulong restorer = arg4;
9449             target_ulong sigsetsize = arg5;
9450 #else
9451             target_ulong sigsetsize = arg4;
9452             target_ulong restorer = 0;
9453 #endif
9454             struct target_sigaction *act = NULL;
9455             struct target_sigaction *oact = NULL;
9456 
9457             if (sigsetsize != sizeof(target_sigset_t)) {
9458                 return -TARGET_EINVAL;
9459             }
9460             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9461                 return -TARGET_EFAULT;
9462             }
9463             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9464                 ret = -TARGET_EFAULT;
9465             } else {
9466                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9467                 if (oact) {
9468                     unlock_user_struct(oact, arg3, 1);
9469                 }
9470             }
9471             if (act) {
9472                 unlock_user_struct(act, arg2, 0);
9473             }
9474         }
9475         return ret;
9476 #ifdef TARGET_NR_sgetmask /* not on alpha */
9477     case TARGET_NR_sgetmask:
9478         {
9479             sigset_t cur_set;
9480             abi_ulong target_set;
9481             ret = do_sigprocmask(0, NULL, &cur_set);
9482             if (!ret) {
9483                 host_to_target_old_sigset(&target_set, &cur_set);
9484                 ret = target_set;
9485             }
9486         }
9487         return ret;
9488 #endif
9489 #ifdef TARGET_NR_ssetmask /* not on alpha */
9490     case TARGET_NR_ssetmask:
9491         {
9492             sigset_t set, oset;
9493             abi_ulong target_set = arg1;
9494             target_to_host_old_sigset(&set, &target_set);
9495             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9496             if (!ret) {
9497                 host_to_target_old_sigset(&target_set, &oset);
9498                 ret = target_set;
9499             }
9500         }
9501         return ret;
9502 #endif
9503 #ifdef TARGET_NR_sigprocmask
9504     case TARGET_NR_sigprocmask:
9505         {
9506 #if defined(TARGET_ALPHA)
9507             sigset_t set, oldset;
9508             abi_ulong mask;
9509             int how;
9510 
9511             switch (arg1) {
9512             case TARGET_SIG_BLOCK:
9513                 how = SIG_BLOCK;
9514                 break;
9515             case TARGET_SIG_UNBLOCK:
9516                 how = SIG_UNBLOCK;
9517                 break;
9518             case TARGET_SIG_SETMASK:
9519                 how = SIG_SETMASK;
9520                 break;
9521             default:
9522                 return -TARGET_EINVAL;
9523             }
9524             mask = arg2;
9525             target_to_host_old_sigset(&set, &mask);
9526 
9527             ret = do_sigprocmask(how, &set, &oldset);
9528             if (!is_error(ret)) {
9529                 host_to_target_old_sigset(&mask, &oldset);
9530                 ret = mask;
9531                 cpu_env->ir[IR_V0] = 0; /* force no error */
9532             }
9533 #else
9534             sigset_t set, oldset, *set_ptr;
9535             int how;
9536 
9537             if (arg2) {
9538                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9539                 if (!p) {
9540                     return -TARGET_EFAULT;
9541                 }
9542                 target_to_host_old_sigset(&set, p);
9543                 unlock_user(p, arg2, 0);
9544                 set_ptr = &set;
9545                 switch (arg1) {
9546                 case TARGET_SIG_BLOCK:
9547                     how = SIG_BLOCK;
9548                     break;
9549                 case TARGET_SIG_UNBLOCK:
9550                     how = SIG_UNBLOCK;
9551                     break;
9552                 case TARGET_SIG_SETMASK:
9553                     how = SIG_SETMASK;
9554                     break;
9555                 default:
9556                     return -TARGET_EINVAL;
9557                 }
9558             } else {
9559                 how = 0;
9560                 set_ptr = NULL;
9561             }
9562             ret = do_sigprocmask(how, set_ptr, &oldset);
9563             if (!is_error(ret) && arg3) {
9564                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9565                     return -TARGET_EFAULT;
9566                 host_to_target_old_sigset(p, &oldset);
9567                 unlock_user(p, arg3, sizeof(target_sigset_t));
9568             }
9569 #endif
9570         }
9571         return ret;
9572 #endif
9573     case TARGET_NR_rt_sigprocmask:
9574         {
9575             int how = arg1;
9576             sigset_t set, oldset, *set_ptr;
9577 
9578             if (arg4 != sizeof(target_sigset_t)) {
9579                 return -TARGET_EINVAL;
9580             }
9581 
9582             if (arg2) {
9583                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9584                 if (!p) {
9585                     return -TARGET_EFAULT;
9586                 }
9587                 target_to_host_sigset(&set, p);
9588                 unlock_user(p, arg2, 0);
9589                 set_ptr = &set;
9590                 switch(how) {
9591                 case TARGET_SIG_BLOCK:
9592                     how = SIG_BLOCK;
9593                     break;
9594                 case TARGET_SIG_UNBLOCK:
9595                     how = SIG_UNBLOCK;
9596                     break;
9597                 case TARGET_SIG_SETMASK:
9598                     how = SIG_SETMASK;
9599                     break;
9600                 default:
9601                     return -TARGET_EINVAL;
9602                 }
9603             } else {
9604                 how = 0;
9605                 set_ptr = NULL;
9606             }
9607             ret = do_sigprocmask(how, set_ptr, &oldset);
9608             if (!is_error(ret) && arg3) {
9609                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9610                     return -TARGET_EFAULT;
9611                 host_to_target_sigset(p, &oldset);
9612                 unlock_user(p, arg3, sizeof(target_sigset_t));
9613             }
9614         }
9615         return ret;
9616 #ifdef TARGET_NR_sigpending
9617     case TARGET_NR_sigpending:
9618         {
9619             sigset_t set;
9620             ret = get_errno(sigpending(&set));
9621             if (!is_error(ret)) {
9622                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9623                     return -TARGET_EFAULT;
9624                 host_to_target_old_sigset(p, &set);
9625                 unlock_user(p, arg1, sizeof(target_sigset_t));
9626             }
9627         }
9628         return ret;
9629 #endif
9630     case TARGET_NR_rt_sigpending:
9631         {
9632             sigset_t set;
9633 
9634             /* Yes, this check is >, not != like most. We follow the kernel's
9635              * logic and it does it like this because it implements
9636              * NR_sigpending through the same code path, and in that case
9637              * the old_sigset_t is smaller in size.
9638              */
9639             if (arg2 > sizeof(target_sigset_t)) {
9640                 return -TARGET_EINVAL;
9641             }
9642 
9643             ret = get_errno(sigpending(&set));
9644             if (!is_error(ret)) {
9645                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9646                     return -TARGET_EFAULT;
9647                 host_to_target_sigset(p, &set);
9648                 unlock_user(p, arg1, sizeof(target_sigset_t));
9649             }
9650         }
9651         return ret;
9652 #ifdef TARGET_NR_sigsuspend
9653     case TARGET_NR_sigsuspend:
9654         {
9655             sigset_t *set;
9656 
9657 #if defined(TARGET_ALPHA)
9658             TaskState *ts = cpu->opaque;
9659             /* target_to_host_old_sigset will bswap back */
9660             abi_ulong mask = tswapal(arg1);
9661             set = &ts->sigsuspend_mask;
9662             target_to_host_old_sigset(set, &mask);
9663 #else
9664             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9665             if (ret != 0) {
9666                 return ret;
9667             }
9668 #endif
9669             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9670             finish_sigsuspend_mask(ret);
9671         }
9672         return ret;
9673 #endif
9674     case TARGET_NR_rt_sigsuspend:
9675         {
9676             sigset_t *set;
9677 
9678             ret = process_sigsuspend_mask(&set, arg1, arg2);
9679             if (ret != 0) {
9680                 return ret;
9681             }
9682             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9683             finish_sigsuspend_mask(ret);
9684         }
9685         return ret;
9686 #ifdef TARGET_NR_rt_sigtimedwait
9687     case TARGET_NR_rt_sigtimedwait:
9688         {
9689             sigset_t set;
9690             struct timespec uts, *puts;
9691             siginfo_t uinfo;
9692 
9693             if (arg4 != sizeof(target_sigset_t)) {
9694                 return -TARGET_EINVAL;
9695             }
9696 
9697             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9698                 return -TARGET_EFAULT;
9699             target_to_host_sigset(&set, p);
9700             unlock_user(p, arg1, 0);
9701             if (arg3) {
9702                 puts = &uts;
9703                 if (target_to_host_timespec(puts, arg3)) {
9704                     return -TARGET_EFAULT;
9705                 }
9706             } else {
9707                 puts = NULL;
9708             }
9709             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9710                                                  SIGSET_T_SIZE));
9711             if (!is_error(ret)) {
9712                 if (arg2) {
9713                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9714                                   0);
9715                     if (!p) {
9716                         return -TARGET_EFAULT;
9717                     }
9718                     host_to_target_siginfo(p, &uinfo);
9719                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9720                 }
9721                 ret = host_to_target_signal(ret);
9722             }
9723         }
9724         return ret;
9725 #endif
9726 #ifdef TARGET_NR_rt_sigtimedwait_time64
9727     case TARGET_NR_rt_sigtimedwait_time64:
9728         {
9729             sigset_t set;
9730             struct timespec uts, *puts;
9731             siginfo_t uinfo;
9732 
9733             if (arg4 != sizeof(target_sigset_t)) {
9734                 return -TARGET_EINVAL;
9735             }
9736 
9737             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9738             if (!p) {
9739                 return -TARGET_EFAULT;
9740             }
9741             target_to_host_sigset(&set, p);
9742             unlock_user(p, arg1, 0);
9743             if (arg3) {
9744                 puts = &uts;
9745                 if (target_to_host_timespec64(puts, arg3)) {
9746                     return -TARGET_EFAULT;
9747                 }
9748             } else {
9749                 puts = NULL;
9750             }
9751             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9752                                                  SIGSET_T_SIZE));
9753             if (!is_error(ret)) {
9754                 if (arg2) {
9755                     p = lock_user(VERIFY_WRITE, arg2,
9756                                   sizeof(target_siginfo_t), 0);
9757                     if (!p) {
9758                         return -TARGET_EFAULT;
9759                     }
9760                     host_to_target_siginfo(p, &uinfo);
9761                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9762                 }
9763                 ret = host_to_target_signal(ret);
9764             }
9765         }
9766         return ret;
9767 #endif
9768     case TARGET_NR_rt_sigqueueinfo:
9769         {
9770             siginfo_t uinfo;
9771 
9772             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9773             if (!p) {
9774                 return -TARGET_EFAULT;
9775             }
9776             target_to_host_siginfo(&uinfo, p);
9777             unlock_user(p, arg3, 0);
9778             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9779         }
9780         return ret;
9781     case TARGET_NR_rt_tgsigqueueinfo:
9782         {
9783             siginfo_t uinfo;
9784 
9785             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9786             if (!p) {
9787                 return -TARGET_EFAULT;
9788             }
9789             target_to_host_siginfo(&uinfo, p);
9790             unlock_user(p, arg4, 0);
9791             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9792         }
9793         return ret;
9794 #ifdef TARGET_NR_sigreturn
9795     case TARGET_NR_sigreturn:
9796         if (block_signals()) {
9797             return -QEMU_ERESTARTSYS;
9798         }
9799         return do_sigreturn(cpu_env);
9800 #endif
9801     case TARGET_NR_rt_sigreturn:
9802         if (block_signals()) {
9803             return -QEMU_ERESTARTSYS;
9804         }
9805         return do_rt_sigreturn(cpu_env);
9806     case TARGET_NR_sethostname:
9807         if (!(p = lock_user_string(arg1)))
9808             return -TARGET_EFAULT;
9809         ret = get_errno(sethostname(p, arg2));
9810         unlock_user(p, arg1, 0);
9811         return ret;
9812 #ifdef TARGET_NR_setrlimit
9813     case TARGET_NR_setrlimit:
9814         {
9815             int resource = target_to_host_resource(arg1);
9816             struct target_rlimit *target_rlim;
9817             struct rlimit rlim;
9818             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9819                 return -TARGET_EFAULT;
9820             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9821             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9822             unlock_user_struct(target_rlim, arg2, 0);
9823             /*
9824              * If we just passed through resource limit settings for memory then
9825              * they would also apply to QEMU's own allocations, and QEMU will
9826              * crash or hang or die if its allocations fail. Ideally we would
9827              * track the guest allocations in QEMU and apply the limits ourselves.
9828              * For now, just tell the guest the call succeeded but don't actually
9829              * limit anything.
9830              */
9831             if (resource != RLIMIT_AS &&
9832                 resource != RLIMIT_DATA &&
9833                 resource != RLIMIT_STACK) {
9834                 return get_errno(setrlimit(resource, &rlim));
9835             } else {
9836                 return 0;
9837             }
9838         }
9839 #endif
9840 #ifdef TARGET_NR_getrlimit
9841     case TARGET_NR_getrlimit:
9842         {
9843             int resource = target_to_host_resource(arg1);
9844             struct target_rlimit *target_rlim;
9845             struct rlimit rlim;
9846 
9847             ret = get_errno(getrlimit(resource, &rlim));
9848             if (!is_error(ret)) {
9849                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9850                     return -TARGET_EFAULT;
9851                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9852                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9853                 unlock_user_struct(target_rlim, arg2, 1);
9854             }
9855         }
9856         return ret;
9857 #endif
9858     case TARGET_NR_getrusage:
9859         {
9860             struct rusage rusage;
9861             ret = get_errno(getrusage(arg1, &rusage));
9862             if (!is_error(ret)) {
9863                 ret = host_to_target_rusage(arg2, &rusage);
9864             }
9865         }
9866         return ret;
9867 #if defined(TARGET_NR_gettimeofday)
9868     case TARGET_NR_gettimeofday:
9869         {
9870             struct timeval tv;
9871             struct timezone tz;
9872 
9873             ret = get_errno(gettimeofday(&tv, &tz));
9874             if (!is_error(ret)) {
9875                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9876                     return -TARGET_EFAULT;
9877                 }
9878                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9879                     return -TARGET_EFAULT;
9880                 }
9881             }
9882         }
9883         return ret;
9884 #endif
9885 #if defined(TARGET_NR_settimeofday)
9886     case TARGET_NR_settimeofday:
9887         {
9888             struct timeval tv, *ptv = NULL;
9889             struct timezone tz, *ptz = NULL;
9890 
9891             if (arg1) {
9892                 if (copy_from_user_timeval(&tv, arg1)) {
9893                     return -TARGET_EFAULT;
9894                 }
9895                 ptv = &tv;
9896             }
9897 
9898             if (arg2) {
9899                 if (copy_from_user_timezone(&tz, arg2)) {
9900                     return -TARGET_EFAULT;
9901                 }
9902                 ptz = &tz;
9903             }
9904 
9905             return get_errno(settimeofday(ptv, ptz));
9906         }
9907 #endif
9908 #if defined(TARGET_NR_select)
9909     case TARGET_NR_select:
9910 #if defined(TARGET_WANT_NI_OLD_SELECT)
9911         /* some architectures used to have old_select here
9912          * but now ENOSYS it.
9913          */
9914         ret = -TARGET_ENOSYS;
9915 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9916         ret = do_old_select(arg1);
9917 #else
9918         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9919 #endif
9920         return ret;
9921 #endif
9922 #ifdef TARGET_NR_pselect6
9923     case TARGET_NR_pselect6:
9924         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9925 #endif
9926 #ifdef TARGET_NR_pselect6_time64
9927     case TARGET_NR_pselect6_time64:
9928         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9929 #endif
9930 #ifdef TARGET_NR_symlink
9931     case TARGET_NR_symlink:
9932         {
9933             void *p2;
9934             p = lock_user_string(arg1);
9935             p2 = lock_user_string(arg2);
9936             if (!p || !p2)
9937                 ret = -TARGET_EFAULT;
9938             else
9939                 ret = get_errno(symlink(p, p2));
9940             unlock_user(p2, arg2, 0);
9941             unlock_user(p, arg1, 0);
9942         }
9943         return ret;
9944 #endif
9945 #if defined(TARGET_NR_symlinkat)
9946     case TARGET_NR_symlinkat:
9947         {
9948             void *p2;
9949             p  = lock_user_string(arg1);
9950             p2 = lock_user_string(arg3);
9951             if (!p || !p2)
9952                 ret = -TARGET_EFAULT;
9953             else
9954                 ret = get_errno(symlinkat(p, arg2, p2));
9955             unlock_user(p2, arg3, 0);
9956             unlock_user(p, arg1, 0);
9957         }
9958         return ret;
9959 #endif
9960 #ifdef TARGET_NR_readlink
9961     case TARGET_NR_readlink:
9962         {
9963             void *p2;
9964             p = lock_user_string(arg1);
9965             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9966             if (!p || !p2) {
9967                 ret = -TARGET_EFAULT;
9968             } else if (!arg3) {
9969                 /* Short circuit this for the magic exe check. */
9970                 ret = -TARGET_EINVAL;
9971             } else if (is_proc_myself((const char *)p, "exe")) {
9972                 char real[PATH_MAX], *temp;
9973                 temp = realpath(exec_path, real);
9974                 /* Return value is # of bytes that we wrote to the buffer. */
9975                 if (temp == NULL) {
9976                     ret = get_errno(-1);
9977                 } else {
9978                     /* Don't worry about sign mismatch as earlier mapping
9979                      * logic would have thrown a bad address error. */
9980                     ret = MIN(strlen(real), arg3);
9981                     /* We cannot NUL terminate the string. */
9982                     memcpy(p2, real, ret);
9983                 }
9984             } else {
9985                 ret = get_errno(readlink(path(p), p2, arg3));
9986             }
9987             unlock_user(p2, arg2, ret);
9988             unlock_user(p, arg1, 0);
9989         }
9990         return ret;
9991 #endif
9992 #if defined(TARGET_NR_readlinkat)
9993     case TARGET_NR_readlinkat:
9994         {
9995             void *p2;
9996             p  = lock_user_string(arg2);
9997             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9998             if (!p || !p2) {
9999                 ret = -TARGET_EFAULT;
10000             } else if (!arg4) {
10001                 /* Short circuit this for the magic exe check. */
10002                 ret = -TARGET_EINVAL;
10003             } else if (is_proc_myself((const char *)p, "exe")) {
10004                 char real[PATH_MAX], *temp;
10005                 temp = realpath(exec_path, real);
10006                 /* Return value is # of bytes that we wrote to the buffer. */
10007                 if (temp == NULL) {
10008                     ret = get_errno(-1);
10009                 } else {
10010                     /* Don't worry about sign mismatch as earlier mapping
10011                      * logic would have thrown a bad address error. */
10012                     ret = MIN(strlen(real), arg4);
10013                     /* We cannot NUL terminate the string. */
10014                     memcpy(p2, real, ret);
10015                 }
10016             } else {
10017                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10018             }
10019             unlock_user(p2, arg3, ret);
10020             unlock_user(p, arg2, 0);
10021         }
10022         return ret;
10023 #endif
10024 #ifdef TARGET_NR_swapon
10025     case TARGET_NR_swapon:
10026         if (!(p = lock_user_string(arg1)))
10027             return -TARGET_EFAULT;
10028         ret = get_errno(swapon(p, arg2));
10029         unlock_user(p, arg1, 0);
10030         return ret;
10031 #endif
10032     case TARGET_NR_reboot:
10033         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10034            /* arg4 must be ignored in all other cases */
10035            p = lock_user_string(arg4);
10036            if (!p) {
10037                return -TARGET_EFAULT;
10038            }
10039            ret = get_errno(reboot(arg1, arg2, arg3, p));
10040            unlock_user(p, arg4, 0);
10041         } else {
10042            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10043         }
10044         return ret;
10045 #ifdef TARGET_NR_mmap
10046     case TARGET_NR_mmap:
10047 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10048     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10049     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10050     || defined(TARGET_S390X)
10051         {
10052             abi_ulong *v;
10053             abi_ulong v1, v2, v3, v4, v5, v6;
10054             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10055                 return -TARGET_EFAULT;
10056             v1 = tswapal(v[0]);
10057             v2 = tswapal(v[1]);
10058             v3 = tswapal(v[2]);
10059             v4 = tswapal(v[3]);
10060             v5 = tswapal(v[4]);
10061             v6 = tswapal(v[5]);
10062             unlock_user(v, arg1, 0);
10063             ret = get_errno(target_mmap(v1, v2, v3,
10064                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10065                                         v5, v6));
10066         }
10067 #else
10068         /* mmap pointers are always untagged */
10069         ret = get_errno(target_mmap(arg1, arg2, arg3,
10070                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10071                                     arg5,
10072                                     arg6));
10073 #endif
10074         return ret;
10075 #endif
10076 #ifdef TARGET_NR_mmap2
10077     case TARGET_NR_mmap2:
10078 #ifndef MMAP_SHIFT
10079 #define MMAP_SHIFT 12
10080 #endif
10081         ret = target_mmap(arg1, arg2, arg3,
10082                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10083                           arg5, arg6 << MMAP_SHIFT);
10084         return get_errno(ret);
10085 #endif
10086     case TARGET_NR_munmap:
10087         arg1 = cpu_untagged_addr(cpu, arg1);
10088         return get_errno(target_munmap(arg1, arg2));
10089     case TARGET_NR_mprotect:
10090         arg1 = cpu_untagged_addr(cpu, arg1);
10091         {
10092             TaskState *ts = cpu->opaque;
10093             /* Special hack to detect libc making the stack executable.  */
10094             if ((arg3 & PROT_GROWSDOWN)
10095                 && arg1 >= ts->info->stack_limit
10096                 && arg1 <= ts->info->start_stack) {
10097                 arg3 &= ~PROT_GROWSDOWN;
10098                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10099                 arg1 = ts->info->stack_limit;
10100             }
10101         }
10102         return get_errno(target_mprotect(arg1, arg2, arg3));
10103 #ifdef TARGET_NR_mremap
10104     case TARGET_NR_mremap:
10105         arg1 = cpu_untagged_addr(cpu, arg1);
10106         /* mremap new_addr (arg5) is always untagged */
10107         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10108 #endif
10109         /* ??? msync/mlock/munlock are broken for softmmu.  */
10110 #ifdef TARGET_NR_msync
10111     case TARGET_NR_msync:
10112         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10113 #endif
10114 #ifdef TARGET_NR_mlock
10115     case TARGET_NR_mlock:
10116         return get_errno(mlock(g2h(cpu, arg1), arg2));
10117 #endif
10118 #ifdef TARGET_NR_munlock
10119     case TARGET_NR_munlock:
10120         return get_errno(munlock(g2h(cpu, arg1), arg2));
10121 #endif
10122 #ifdef TARGET_NR_mlockall
10123     case TARGET_NR_mlockall:
10124         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10125 #endif
10126 #ifdef TARGET_NR_munlockall
10127     case TARGET_NR_munlockall:
10128         return get_errno(munlockall());
10129 #endif
10130 #ifdef TARGET_NR_truncate
10131     case TARGET_NR_truncate:
10132         if (!(p = lock_user_string(arg1)))
10133             return -TARGET_EFAULT;
10134         ret = get_errno(truncate(p, arg2));
10135         unlock_user(p, arg1, 0);
10136         return ret;
10137 #endif
10138 #ifdef TARGET_NR_ftruncate
10139     case TARGET_NR_ftruncate:
10140         return get_errno(ftruncate(arg1, arg2));
10141 #endif
10142     case TARGET_NR_fchmod:
10143         return get_errno(fchmod(arg1, arg2));
10144 #if defined(TARGET_NR_fchmodat)
10145     case TARGET_NR_fchmodat:
10146         if (!(p = lock_user_string(arg2)))
10147             return -TARGET_EFAULT;
10148         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10149         unlock_user(p, arg2, 0);
10150         return ret;
10151 #endif
10152     case TARGET_NR_getpriority:
10153         /* Note that negative values are valid for getpriority, so we must
10154            differentiate based on errno settings.  */
10155         errno = 0;
10156         ret = getpriority(arg1, arg2);
10157         if (ret == -1 && errno != 0) {
10158             return -host_to_target_errno(errno);
10159         }
10160 #ifdef TARGET_ALPHA
10161         /* Return value is the unbiased priority.  Signal no error.  */
10162         cpu_env->ir[IR_V0] = 0;
10163 #else
10164         /* Return value is a biased priority to avoid negative numbers.  */
10165         ret = 20 - ret;
10166 #endif
10167         return ret;
10168     case TARGET_NR_setpriority:
10169         return get_errno(setpriority(arg1, arg2, arg3));
10170 #ifdef TARGET_NR_statfs
10171     case TARGET_NR_statfs:
10172         if (!(p = lock_user_string(arg1))) {
10173             return -TARGET_EFAULT;
10174         }
10175         ret = get_errno(statfs(path(p), &stfs));
10176         unlock_user(p, arg1, 0);
10177     convert_statfs:
10178         if (!is_error(ret)) {
10179             struct target_statfs *target_stfs;
10180 
10181             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10182                 return -TARGET_EFAULT;
10183             __put_user(stfs.f_type, &target_stfs->f_type);
10184             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10185             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10186             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10187             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10188             __put_user(stfs.f_files, &target_stfs->f_files);
10189             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10190             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10191             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10192             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10193             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10194 #ifdef _STATFS_F_FLAGS
10195             __put_user(stfs.f_flags, &target_stfs->f_flags);
10196 #else
10197             __put_user(0, &target_stfs->f_flags);
10198 #endif
10199             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10200             unlock_user_struct(target_stfs, arg2, 1);
10201         }
10202         return ret;
10203 #endif
10204 #ifdef TARGET_NR_fstatfs
10205     case TARGET_NR_fstatfs:
10206         ret = get_errno(fstatfs(arg1, &stfs));
10207         goto convert_statfs;
10208 #endif
10209 #ifdef TARGET_NR_statfs64
10210     case TARGET_NR_statfs64:
10211         if (!(p = lock_user_string(arg1))) {
10212             return -TARGET_EFAULT;
10213         }
10214         ret = get_errno(statfs(path(p), &stfs));
10215         unlock_user(p, arg1, 0);
10216     convert_statfs64:
10217         if (!is_error(ret)) {
10218             struct target_statfs64 *target_stfs;
10219 
10220             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10221                 return -TARGET_EFAULT;
10222             __put_user(stfs.f_type, &target_stfs->f_type);
10223             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10224             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10225             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10226             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10227             __put_user(stfs.f_files, &target_stfs->f_files);
10228             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10229             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10230             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10231             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10232             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10233 #ifdef _STATFS_F_FLAGS
10234             __put_user(stfs.f_flags, &target_stfs->f_flags);
10235 #else
10236             __put_user(0, &target_stfs->f_flags);
10237 #endif
10238             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10239             unlock_user_struct(target_stfs, arg3, 1);
10240         }
10241         return ret;
10242     case TARGET_NR_fstatfs64:
10243         ret = get_errno(fstatfs(arg1, &stfs));
10244         goto convert_statfs64;
10245 #endif
10246 #ifdef TARGET_NR_socketcall
10247     case TARGET_NR_socketcall:
10248         return do_socketcall(arg1, arg2);
10249 #endif
10250 #ifdef TARGET_NR_accept
10251     case TARGET_NR_accept:
10252         return do_accept4(arg1, arg2, arg3, 0);
10253 #endif
10254 #ifdef TARGET_NR_accept4
10255     case TARGET_NR_accept4:
10256         return do_accept4(arg1, arg2, arg3, arg4);
10257 #endif
10258 #ifdef TARGET_NR_bind
10259     case TARGET_NR_bind:
10260         return do_bind(arg1, arg2, arg3);
10261 #endif
10262 #ifdef TARGET_NR_connect
10263     case TARGET_NR_connect:
10264         return do_connect(arg1, arg2, arg3);
10265 #endif
10266 #ifdef TARGET_NR_getpeername
10267     case TARGET_NR_getpeername:
10268         return do_getpeername(arg1, arg2, arg3);
10269 #endif
10270 #ifdef TARGET_NR_getsockname
10271     case TARGET_NR_getsockname:
10272         return do_getsockname(arg1, arg2, arg3);
10273 #endif
10274 #ifdef TARGET_NR_getsockopt
10275     case TARGET_NR_getsockopt:
10276         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10277 #endif
10278 #ifdef TARGET_NR_listen
10279     case TARGET_NR_listen:
10280         return get_errno(listen(arg1, arg2));
10281 #endif
10282 #ifdef TARGET_NR_recv
10283     case TARGET_NR_recv:
10284         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10285 #endif
10286 #ifdef TARGET_NR_recvfrom
10287     case TARGET_NR_recvfrom:
10288         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10289 #endif
10290 #ifdef TARGET_NR_recvmsg
10291     case TARGET_NR_recvmsg:
10292         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10293 #endif
10294 #ifdef TARGET_NR_send
10295     case TARGET_NR_send:
10296         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10297 #endif
10298 #ifdef TARGET_NR_sendmsg
10299     case TARGET_NR_sendmsg:
10300         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10301 #endif
10302 #ifdef TARGET_NR_sendmmsg
10303     case TARGET_NR_sendmmsg:
10304         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10305 #endif
10306 #ifdef TARGET_NR_recvmmsg
10307     case TARGET_NR_recvmmsg:
10308         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10309 #endif
10310 #ifdef TARGET_NR_sendto
10311     case TARGET_NR_sendto:
10312         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10313 #endif
10314 #ifdef TARGET_NR_shutdown
10315     case TARGET_NR_shutdown:
10316         return get_errno(shutdown(arg1, arg2));
10317 #endif
10318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10319     case TARGET_NR_getrandom:
10320         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10321         if (!p) {
10322             return -TARGET_EFAULT;
10323         }
10324         ret = get_errno(getrandom(p, arg2, arg3));
10325         unlock_user(p, arg1, ret);
10326         return ret;
10327 #endif
10328 #ifdef TARGET_NR_socket
10329     case TARGET_NR_socket:
10330         return do_socket(arg1, arg2, arg3);
10331 #endif
10332 #ifdef TARGET_NR_socketpair
10333     case TARGET_NR_socketpair:
10334         return do_socketpair(arg1, arg2, arg3, arg4);
10335 #endif
10336 #ifdef TARGET_NR_setsockopt
10337     case TARGET_NR_setsockopt:
10338         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10339 #endif
10340 #if defined(TARGET_NR_syslog)
10341     case TARGET_NR_syslog:
10342         {
10343             int len = arg2;
10344 
10345             switch (arg1) {
10346             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10347             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10348             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10349             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10350             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10351             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10352             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10353             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10354                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10355             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10356             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10357             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10358                 {
10359                     if (len < 0) {
10360                         return -TARGET_EINVAL;
10361                     }
10362                     if (len == 0) {
10363                         return 0;
10364                     }
10365                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10366                     if (!p) {
10367                         return -TARGET_EFAULT;
10368                     }
10369                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10370                     unlock_user(p, arg2, arg3);
10371                 }
10372                 return ret;
10373             default:
10374                 return -TARGET_EINVAL;
10375             }
10376         }
10377         break;
10378 #endif
10379     case TARGET_NR_setitimer:
10380         {
10381             struct itimerval value, ovalue, *pvalue;
10382 
10383             if (arg2) {
10384                 pvalue = &value;
10385                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10386                     || copy_from_user_timeval(&pvalue->it_value,
10387                                               arg2 + sizeof(struct target_timeval)))
10388                     return -TARGET_EFAULT;
10389             } else {
10390                 pvalue = NULL;
10391             }
10392             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10393             if (!is_error(ret) && arg3) {
10394                 if (copy_to_user_timeval(arg3,
10395                                          &ovalue.it_interval)
10396                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10397                                             &ovalue.it_value))
10398                     return -TARGET_EFAULT;
10399             }
10400         }
10401         return ret;
10402     case TARGET_NR_getitimer:
10403         {
10404             struct itimerval value;
10405 
10406             ret = get_errno(getitimer(arg1, &value));
10407             if (!is_error(ret) && arg2) {
10408                 if (copy_to_user_timeval(arg2,
10409                                          &value.it_interval)
10410                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10411                                             &value.it_value))
10412                     return -TARGET_EFAULT;
10413             }
10414         }
10415         return ret;
10416 #ifdef TARGET_NR_stat
10417     case TARGET_NR_stat:
10418         if (!(p = lock_user_string(arg1))) {
10419             return -TARGET_EFAULT;
10420         }
10421         ret = get_errno(stat(path(p), &st));
10422         unlock_user(p, arg1, 0);
10423         goto do_stat;
10424 #endif
10425 #ifdef TARGET_NR_lstat
10426     case TARGET_NR_lstat:
10427         if (!(p = lock_user_string(arg1))) {
10428             return -TARGET_EFAULT;
10429         }
10430         ret = get_errno(lstat(path(p), &st));
10431         unlock_user(p, arg1, 0);
10432         goto do_stat;
10433 #endif
10434 #ifdef TARGET_NR_fstat
10435     case TARGET_NR_fstat:
10436         {
10437             ret = get_errno(fstat(arg1, &st));
10438 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10439         do_stat:
10440 #endif
10441             if (!is_error(ret)) {
10442                 struct target_stat *target_st;
10443 
10444                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10445                     return -TARGET_EFAULT;
10446                 memset(target_st, 0, sizeof(*target_st));
10447                 __put_user(st.st_dev, &target_st->st_dev);
10448                 __put_user(st.st_ino, &target_st->st_ino);
10449                 __put_user(st.st_mode, &target_st->st_mode);
10450                 __put_user(st.st_uid, &target_st->st_uid);
10451                 __put_user(st.st_gid, &target_st->st_gid);
10452                 __put_user(st.st_nlink, &target_st->st_nlink);
10453                 __put_user(st.st_rdev, &target_st->st_rdev);
10454                 __put_user(st.st_size, &target_st->st_size);
10455                 __put_user(st.st_blksize, &target_st->st_blksize);
10456                 __put_user(st.st_blocks, &target_st->st_blocks);
10457                 __put_user(st.st_atime, &target_st->target_st_atime);
10458                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10459                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10460 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10461                 __put_user(st.st_atim.tv_nsec,
10462                            &target_st->target_st_atime_nsec);
10463                 __put_user(st.st_mtim.tv_nsec,
10464                            &target_st->target_st_mtime_nsec);
10465                 __put_user(st.st_ctim.tv_nsec,
10466                            &target_st->target_st_ctime_nsec);
10467 #endif
10468                 unlock_user_struct(target_st, arg2, 1);
10469             }
10470         }
10471         return ret;
10472 #endif
10473     case TARGET_NR_vhangup:
10474         return get_errno(vhangup());
10475 #ifdef TARGET_NR_syscall
10476     case TARGET_NR_syscall:
10477         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10478                           arg6, arg7, arg8, 0);
10479 #endif
10480 #if defined(TARGET_NR_wait4)
10481     case TARGET_NR_wait4:
10482         {
10483             int status;
10484             abi_long status_ptr = arg2;
10485             struct rusage rusage, *rusage_ptr;
10486             abi_ulong target_rusage = arg4;
10487             abi_long rusage_err;
10488             if (target_rusage)
10489                 rusage_ptr = &rusage;
10490             else
10491                 rusage_ptr = NULL;
10492             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10493             if (!is_error(ret)) {
10494                 if (status_ptr && ret) {
10495                     status = host_to_target_waitstatus(status);
10496                     if (put_user_s32(status, status_ptr))
10497                         return -TARGET_EFAULT;
10498                 }
10499                 if (target_rusage) {
10500                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10501                     if (rusage_err) {
10502                         ret = rusage_err;
10503                     }
10504                 }
10505             }
10506         }
10507         return ret;
10508 #endif
10509 #ifdef TARGET_NR_swapoff
10510     case TARGET_NR_swapoff:
10511         if (!(p = lock_user_string(arg1)))
10512             return -TARGET_EFAULT;
10513         ret = get_errno(swapoff(p));
10514         unlock_user(p, arg1, 0);
10515         return ret;
10516 #endif
10517     case TARGET_NR_sysinfo:
10518         {
10519             struct target_sysinfo *target_value;
10520             struct sysinfo value;
10521             ret = get_errno(sysinfo(&value));
10522             if (!is_error(ret) && arg1)
10523             {
10524                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10525                     return -TARGET_EFAULT;
10526                 __put_user(value.uptime, &target_value->uptime);
10527                 __put_user(value.loads[0], &target_value->loads[0]);
10528                 __put_user(value.loads[1], &target_value->loads[1]);
10529                 __put_user(value.loads[2], &target_value->loads[2]);
10530                 __put_user(value.totalram, &target_value->totalram);
10531                 __put_user(value.freeram, &target_value->freeram);
10532                 __put_user(value.sharedram, &target_value->sharedram);
10533                 __put_user(value.bufferram, &target_value->bufferram);
10534                 __put_user(value.totalswap, &target_value->totalswap);
10535                 __put_user(value.freeswap, &target_value->freeswap);
10536                 __put_user(value.procs, &target_value->procs);
10537                 __put_user(value.totalhigh, &target_value->totalhigh);
10538                 __put_user(value.freehigh, &target_value->freehigh);
10539                 __put_user(value.mem_unit, &target_value->mem_unit);
10540                 unlock_user_struct(target_value, arg1, 1);
10541             }
10542         }
10543         return ret;
10544 #ifdef TARGET_NR_ipc
10545     case TARGET_NR_ipc:
10546         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10547 #endif
10548 #ifdef TARGET_NR_semget
10549     case TARGET_NR_semget:
10550         return get_errno(semget(arg1, arg2, arg3));
10551 #endif
10552 #ifdef TARGET_NR_semop
10553     case TARGET_NR_semop:
10554         return do_semtimedop(arg1, arg2, arg3, 0, false);
10555 #endif
10556 #ifdef TARGET_NR_semtimedop
10557     case TARGET_NR_semtimedop:
10558         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10559 #endif
10560 #ifdef TARGET_NR_semtimedop_time64
10561     case TARGET_NR_semtimedop_time64:
10562         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10563 #endif
10564 #ifdef TARGET_NR_semctl
10565     case TARGET_NR_semctl:
10566         return do_semctl(arg1, arg2, arg3, arg4);
10567 #endif
10568 #ifdef TARGET_NR_msgctl
10569     case TARGET_NR_msgctl:
10570         return do_msgctl(arg1, arg2, arg3);
10571 #endif
10572 #ifdef TARGET_NR_msgget
10573     case TARGET_NR_msgget:
10574         return get_errno(msgget(arg1, arg2));
10575 #endif
10576 #ifdef TARGET_NR_msgrcv
10577     case TARGET_NR_msgrcv:
10578         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10579 #endif
10580 #ifdef TARGET_NR_msgsnd
10581     case TARGET_NR_msgsnd:
10582         return do_msgsnd(arg1, arg2, arg3, arg4);
10583 #endif
10584 #ifdef TARGET_NR_shmget
10585     case TARGET_NR_shmget:
10586         return get_errno(shmget(arg1, arg2, arg3));
10587 #endif
10588 #ifdef TARGET_NR_shmctl
10589     case TARGET_NR_shmctl:
10590         return do_shmctl(arg1, arg2, arg3);
10591 #endif
10592 #ifdef TARGET_NR_shmat
10593     case TARGET_NR_shmat:
10594         return do_shmat(cpu_env, arg1, arg2, arg3);
10595 #endif
10596 #ifdef TARGET_NR_shmdt
10597     case TARGET_NR_shmdt:
10598         return do_shmdt(arg1);
10599 #endif
10600     case TARGET_NR_fsync:
10601         return get_errno(fsync(arg1));
10602     case TARGET_NR_clone:
10603         /* Linux manages to have three different orderings for its
10604          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10605          * match the kernel's CONFIG_CLONE_* settings.
10606          * Microblaze is further special in that it uses a sixth
10607          * implicit argument to clone for the TLS pointer.
10608          */
10609 #if defined(TARGET_MICROBLAZE)
10610         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10611 #elif defined(TARGET_CLONE_BACKWARDS)
10612         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10613 #elif defined(TARGET_CLONE_BACKWARDS2)
10614         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10615 #else
10616         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10617 #endif
10618         return ret;
10619 #ifdef __NR_exit_group
10620         /* new thread calls */
10621     case TARGET_NR_exit_group:
10622         preexit_cleanup(cpu_env, arg1);
10623         return get_errno(exit_group(arg1));
10624 #endif
10625     case TARGET_NR_setdomainname:
10626         if (!(p = lock_user_string(arg1)))
10627             return -TARGET_EFAULT;
10628         ret = get_errno(setdomainname(p, arg2));
10629         unlock_user(p, arg1, 0);
10630         return ret;
10631     case TARGET_NR_uname:
10632         /* no need to transcode because we use the linux syscall */
10633         {
10634             struct new_utsname * buf;
10635 
10636             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10637                 return -TARGET_EFAULT;
10638             ret = get_errno(sys_uname(buf));
10639             if (!is_error(ret)) {
10640                 /* Overwrite the native machine name with whatever is being
10641                    emulated. */
10642                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10643                           sizeof(buf->machine));
10644                 /* Allow the user to override the reported release.  */
10645                 if (qemu_uname_release && *qemu_uname_release) {
10646                     g_strlcpy(buf->release, qemu_uname_release,
10647                               sizeof(buf->release));
10648                 }
10649             }
10650             unlock_user_struct(buf, arg1, 1);
10651         }
10652         return ret;
10653 #ifdef TARGET_I386
10654     case TARGET_NR_modify_ldt:
10655         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10656 #if !defined(TARGET_X86_64)
10657     case TARGET_NR_vm86:
10658         return do_vm86(cpu_env, arg1, arg2);
10659 #endif
10660 #endif
10661 #if defined(TARGET_NR_adjtimex)
10662     case TARGET_NR_adjtimex:
10663         {
10664             struct timex host_buf;
10665 
10666             if (target_to_host_timex(&host_buf, arg1) != 0) {
10667                 return -TARGET_EFAULT;
10668             }
10669             ret = get_errno(adjtimex(&host_buf));
10670             if (!is_error(ret)) {
10671                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10672                     return -TARGET_EFAULT;
10673                 }
10674             }
10675         }
10676         return ret;
10677 #endif
10678 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10679     case TARGET_NR_clock_adjtime:
10680         {
10681             struct timex htx, *phtx = &htx;
10682 
10683             if (target_to_host_timex(phtx, arg2) != 0) {
10684                 return -TARGET_EFAULT;
10685             }
10686             ret = get_errno(clock_adjtime(arg1, phtx));
10687             if (!is_error(ret) && phtx) {
10688                 if (host_to_target_timex(arg2, phtx) != 0) {
10689                     return -TARGET_EFAULT;
10690                 }
10691             }
10692         }
10693         return ret;
10694 #endif
10695 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10696     case TARGET_NR_clock_adjtime64:
10697         {
10698             struct timex htx;
10699 
10700             if (target_to_host_timex64(&htx, arg2) != 0) {
10701                 return -TARGET_EFAULT;
10702             }
10703             ret = get_errno(clock_adjtime(arg1, &htx));
10704             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10705                     return -TARGET_EFAULT;
10706             }
10707         }
10708         return ret;
10709 #endif
10710     case TARGET_NR_getpgid:
10711         return get_errno(getpgid(arg1));
10712     case TARGET_NR_fchdir:
10713         return get_errno(fchdir(arg1));
10714     case TARGET_NR_personality:
10715         return get_errno(personality(arg1));
10716 #ifdef TARGET_NR__llseek /* Not on alpha */
10717     case TARGET_NR__llseek:
10718         {
10719             int64_t res;
10720 #if !defined(__NR_llseek)
10721             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10722             if (res == -1) {
10723                 ret = get_errno(res);
10724             } else {
10725                 ret = 0;
10726             }
10727 #else
10728             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10729 #endif
10730             if ((ret == 0) && put_user_s64(res, arg4)) {
10731                 return -TARGET_EFAULT;
10732             }
10733         }
10734         return ret;
10735 #endif
10736 #ifdef TARGET_NR_getdents
10737     case TARGET_NR_getdents:
10738         return do_getdents(arg1, arg2, arg3);
10739 #endif /* TARGET_NR_getdents */
10740 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10741     case TARGET_NR_getdents64:
10742         return do_getdents64(arg1, arg2, arg3);
10743 #endif /* TARGET_NR_getdents64 */
10744 #if defined(TARGET_NR__newselect)
10745     case TARGET_NR__newselect:
10746         return do_select(arg1, arg2, arg3, arg4, arg5);
10747 #endif
10748 #ifdef TARGET_NR_poll
10749     case TARGET_NR_poll:
10750         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10751 #endif
10752 #ifdef TARGET_NR_ppoll
10753     case TARGET_NR_ppoll:
10754         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10755 #endif
10756 #ifdef TARGET_NR_ppoll_time64
10757     case TARGET_NR_ppoll_time64:
10758         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10759 #endif
10760     case TARGET_NR_flock:
10761         /* NOTE: the flock constant seems to be the same for every
10762            Linux platform */
10763         return get_errno(safe_flock(arg1, arg2));
10764     case TARGET_NR_readv:
10765         {
10766             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10767             if (vec != NULL) {
10768                 ret = get_errno(safe_readv(arg1, vec, arg3));
10769                 unlock_iovec(vec, arg2, arg3, 1);
10770             } else {
10771                 ret = -host_to_target_errno(errno);
10772             }
10773         }
10774         return ret;
10775     case TARGET_NR_writev:
10776         {
10777             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10778             if (vec != NULL) {
10779                 ret = get_errno(safe_writev(arg1, vec, arg3));
10780                 unlock_iovec(vec, arg2, arg3, 0);
10781             } else {
10782                 ret = -host_to_target_errno(errno);
10783             }
10784         }
10785         return ret;
10786 #if defined(TARGET_NR_preadv)
10787     case TARGET_NR_preadv:
10788         {
10789             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10790             if (vec != NULL) {
10791                 unsigned long low, high;
10792 
10793                 target_to_host_low_high(arg4, arg5, &low, &high);
10794                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10795                 unlock_iovec(vec, arg2, arg3, 1);
10796             } else {
10797                 ret = -host_to_target_errno(errno);
10798            }
10799         }
10800         return ret;
10801 #endif
10802 #if defined(TARGET_NR_pwritev)
10803     case TARGET_NR_pwritev:
10804         {
10805             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10806             if (vec != NULL) {
10807                 unsigned long low, high;
10808 
10809                 target_to_host_low_high(arg4, arg5, &low, &high);
10810                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10811                 unlock_iovec(vec, arg2, arg3, 0);
10812             } else {
10813                 ret = -host_to_target_errno(errno);
10814            }
10815         }
10816         return ret;
10817 #endif
10818     case TARGET_NR_getsid:
10819         return get_errno(getsid(arg1));
10820 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10821     case TARGET_NR_fdatasync:
10822         return get_errno(fdatasync(arg1));
10823 #endif
10824     case TARGET_NR_sched_getaffinity:
10825         {
10826             unsigned int mask_size;
10827             unsigned long *mask;
10828 
10829             /*
10830              * sched_getaffinity needs multiples of ulong, so need to take
10831              * care of mismatches between target ulong and host ulong sizes.
10832              */
10833             if (arg2 & (sizeof(abi_ulong) - 1)) {
10834                 return -TARGET_EINVAL;
10835             }
10836             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10837 
10838             mask = alloca(mask_size);
10839             memset(mask, 0, mask_size);
10840             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10841 
10842             if (!is_error(ret)) {
10843                 if (ret > arg2) {
10844                     /* More data returned than the caller's buffer will fit.
10845                      * This only happens if sizeof(abi_long) < sizeof(long)
10846                      * and the caller passed us a buffer holding an odd number
10847                      * of abi_longs. If the host kernel is actually using the
10848                      * extra 4 bytes then fail EINVAL; otherwise we can just
10849                      * ignore them and only copy the interesting part.
10850                      */
10851                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10852                     if (numcpus > arg2 * 8) {
10853                         return -TARGET_EINVAL;
10854                     }
10855                     ret = arg2;
10856                 }
10857 
10858                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10859                     return -TARGET_EFAULT;
10860                 }
10861             }
10862         }
10863         return ret;
10864     case TARGET_NR_sched_setaffinity:
10865         {
10866             unsigned int mask_size;
10867             unsigned long *mask;
10868 
10869             /*
10870              * sched_setaffinity needs multiples of ulong, so need to take
10871              * care of mismatches between target ulong and host ulong sizes.
10872              */
10873             if (arg2 & (sizeof(abi_ulong) - 1)) {
10874                 return -TARGET_EINVAL;
10875             }
10876             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10877             mask = alloca(mask_size);
10878 
10879             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10880             if (ret) {
10881                 return ret;
10882             }
10883 
10884             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10885         }
10886     case TARGET_NR_getcpu:
10887         {
10888             unsigned cpu, node;
10889             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10890                                        arg2 ? &node : NULL,
10891                                        NULL));
10892             if (is_error(ret)) {
10893                 return ret;
10894             }
10895             if (arg1 && put_user_u32(cpu, arg1)) {
10896                 return -TARGET_EFAULT;
10897             }
10898             if (arg2 && put_user_u32(node, arg2)) {
10899                 return -TARGET_EFAULT;
10900             }
10901         }
10902         return ret;
10903     case TARGET_NR_sched_setparam:
10904         {
10905             struct target_sched_param *target_schp;
10906             struct sched_param schp;
10907 
10908             if (arg2 == 0) {
10909                 return -TARGET_EINVAL;
10910             }
10911             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10912                 return -TARGET_EFAULT;
10913             }
10914             schp.sched_priority = tswap32(target_schp->sched_priority);
10915             unlock_user_struct(target_schp, arg2, 0);
10916             return get_errno(sys_sched_setparam(arg1, &schp));
10917         }
10918     case TARGET_NR_sched_getparam:
10919         {
10920             struct target_sched_param *target_schp;
10921             struct sched_param schp;
10922 
10923             if (arg2 == 0) {
10924                 return -TARGET_EINVAL;
10925             }
10926             ret = get_errno(sys_sched_getparam(arg1, &schp));
10927             if (!is_error(ret)) {
10928                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10929                     return -TARGET_EFAULT;
10930                 }
10931                 target_schp->sched_priority = tswap32(schp.sched_priority);
10932                 unlock_user_struct(target_schp, arg2, 1);
10933             }
10934         }
10935         return ret;
10936     case TARGET_NR_sched_setscheduler:
10937         {
10938             struct target_sched_param *target_schp;
10939             struct sched_param schp;
10940             if (arg3 == 0) {
10941                 return -TARGET_EINVAL;
10942             }
10943             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10944                 return -TARGET_EFAULT;
10945             }
10946             schp.sched_priority = tswap32(target_schp->sched_priority);
10947             unlock_user_struct(target_schp, arg3, 0);
10948             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10949         }
10950     case TARGET_NR_sched_getscheduler:
10951         return get_errno(sys_sched_getscheduler(arg1));
10952     case TARGET_NR_sched_getattr:
10953         {
10954             struct target_sched_attr *target_scha;
10955             struct sched_attr scha;
10956             if (arg2 == 0) {
10957                 return -TARGET_EINVAL;
10958             }
10959             if (arg3 > sizeof(scha)) {
10960                 arg3 = sizeof(scha);
10961             }
10962             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10963             if (!is_error(ret)) {
10964                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10965                 if (!target_scha) {
10966                     return -TARGET_EFAULT;
10967                 }
10968                 target_scha->size = tswap32(scha.size);
10969                 target_scha->sched_policy = tswap32(scha.sched_policy);
10970                 target_scha->sched_flags = tswap64(scha.sched_flags);
10971                 target_scha->sched_nice = tswap32(scha.sched_nice);
10972                 target_scha->sched_priority = tswap32(scha.sched_priority);
10973                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10974                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10975                 target_scha->sched_period = tswap64(scha.sched_period);
10976                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10977                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10978                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10979                 }
10980                 unlock_user(target_scha, arg2, arg3);
10981             }
10982             return ret;
10983         }
10984     case TARGET_NR_sched_setattr:
10985         {
10986             struct target_sched_attr *target_scha;
10987             struct sched_attr scha;
10988             uint32_t size;
10989             int zeroed;
10990             if (arg2 == 0) {
10991                 return -TARGET_EINVAL;
10992             }
10993             if (get_user_u32(size, arg2)) {
10994                 return -TARGET_EFAULT;
10995             }
10996             if (!size) {
10997                 size = offsetof(struct target_sched_attr, sched_util_min);
10998             }
10999             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11000                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11001                     return -TARGET_EFAULT;
11002                 }
11003                 return -TARGET_E2BIG;
11004             }
11005 
11006             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11007             if (zeroed < 0) {
11008                 return zeroed;
11009             } else if (zeroed == 0) {
11010                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11011                     return -TARGET_EFAULT;
11012                 }
11013                 return -TARGET_E2BIG;
11014             }
11015             if (size > sizeof(struct target_sched_attr)) {
11016                 size = sizeof(struct target_sched_attr);
11017             }
11018 
11019             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11020             if (!target_scha) {
11021                 return -TARGET_EFAULT;
11022             }
11023             scha.size = size;
11024             scha.sched_policy = tswap32(target_scha->sched_policy);
11025             scha.sched_flags = tswap64(target_scha->sched_flags);
11026             scha.sched_nice = tswap32(target_scha->sched_nice);
11027             scha.sched_priority = tswap32(target_scha->sched_priority);
11028             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11029             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11030             scha.sched_period = tswap64(target_scha->sched_period);
11031             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11032                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11033                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11034             }
11035             unlock_user(target_scha, arg2, 0);
11036             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11037         }
11038     case TARGET_NR_sched_yield:
11039         return get_errno(sched_yield());
11040     case TARGET_NR_sched_get_priority_max:
11041         return get_errno(sched_get_priority_max(arg1));
11042     case TARGET_NR_sched_get_priority_min:
11043         return get_errno(sched_get_priority_min(arg1));
11044 #ifdef TARGET_NR_sched_rr_get_interval
11045     case TARGET_NR_sched_rr_get_interval:
11046         {
11047             struct timespec ts;
11048             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11049             if (!is_error(ret)) {
11050                 ret = host_to_target_timespec(arg2, &ts);
11051             }
11052         }
11053         return ret;
11054 #endif
11055 #ifdef TARGET_NR_sched_rr_get_interval_time64
11056     case TARGET_NR_sched_rr_get_interval_time64:
11057         {
11058             struct timespec ts;
11059             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11060             if (!is_error(ret)) {
11061                 ret = host_to_target_timespec64(arg2, &ts);
11062             }
11063         }
11064         return ret;
11065 #endif
11066 #if defined(TARGET_NR_nanosleep)
11067     case TARGET_NR_nanosleep:
11068         {
11069             struct timespec req, rem;
11070             target_to_host_timespec(&req, arg1);
11071             ret = get_errno(safe_nanosleep(&req, &rem));
11072             if (is_error(ret) && arg2) {
11073                 host_to_target_timespec(arg2, &rem);
11074             }
11075         }
11076         return ret;
11077 #endif
11078     case TARGET_NR_prctl:
11079         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11080         break;
11081 #ifdef TARGET_NR_arch_prctl
11082     case TARGET_NR_arch_prctl:
11083         return do_arch_prctl(cpu_env, arg1, arg2);
11084 #endif
11085 #ifdef TARGET_NR_pread64
11086     case TARGET_NR_pread64:
11087         if (regpairs_aligned(cpu_env, num)) {
11088             arg4 = arg5;
11089             arg5 = arg6;
11090         }
11091         if (arg2 == 0 && arg3 == 0) {
11092             /* Special-case NULL buffer and zero length, which should succeed */
11093             p = 0;
11094         } else {
11095             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11096             if (!p) {
11097                 return -TARGET_EFAULT;
11098             }
11099         }
11100         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11101         unlock_user(p, arg2, ret);
11102         return ret;
11103     case TARGET_NR_pwrite64:
11104         if (regpairs_aligned(cpu_env, num)) {
11105             arg4 = arg5;
11106             arg5 = arg6;
11107         }
11108         if (arg2 == 0 && arg3 == 0) {
11109             /* Special-case NULL buffer and zero length, which should succeed */
11110             p = 0;
11111         } else {
11112             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11113             if (!p) {
11114                 return -TARGET_EFAULT;
11115             }
11116         }
11117         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11118         unlock_user(p, arg2, 0);
11119         return ret;
11120 #endif
11121     case TARGET_NR_getcwd:
11122         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11123             return -TARGET_EFAULT;
11124         ret = get_errno(sys_getcwd1(p, arg2));
11125         unlock_user(p, arg1, ret);
11126         return ret;
11127     case TARGET_NR_capget:
11128     case TARGET_NR_capset:
11129     {
11130         struct target_user_cap_header *target_header;
11131         struct target_user_cap_data *target_data = NULL;
11132         struct __user_cap_header_struct header;
11133         struct __user_cap_data_struct data[2];
11134         struct __user_cap_data_struct *dataptr = NULL;
11135         int i, target_datalen;
11136         int data_items = 1;
11137 
11138         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11139             return -TARGET_EFAULT;
11140         }
11141         header.version = tswap32(target_header->version);
11142         header.pid = tswap32(target_header->pid);
11143 
11144         if (header.version != _LINUX_CAPABILITY_VERSION) {
11145             /* Version 2 and up takes pointer to two user_data structs */
11146             data_items = 2;
11147         }
11148 
11149         target_datalen = sizeof(*target_data) * data_items;
11150 
11151         if (arg2) {
11152             if (num == TARGET_NR_capget) {
11153                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11154             } else {
11155                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11156             }
11157             if (!target_data) {
11158                 unlock_user_struct(target_header, arg1, 0);
11159                 return -TARGET_EFAULT;
11160             }
11161 
11162             if (num == TARGET_NR_capset) {
11163                 for (i = 0; i < data_items; i++) {
11164                     data[i].effective = tswap32(target_data[i].effective);
11165                     data[i].permitted = tswap32(target_data[i].permitted);
11166                     data[i].inheritable = tswap32(target_data[i].inheritable);
11167                 }
11168             }
11169 
11170             dataptr = data;
11171         }
11172 
11173         if (num == TARGET_NR_capget) {
11174             ret = get_errno(capget(&header, dataptr));
11175         } else {
11176             ret = get_errno(capset(&header, dataptr));
11177         }
11178 
11179         /* The kernel always updates version for both capget and capset */
11180         target_header->version = tswap32(header.version);
11181         unlock_user_struct(target_header, arg1, 1);
11182 
11183         if (arg2) {
11184             if (num == TARGET_NR_capget) {
11185                 for (i = 0; i < data_items; i++) {
11186                     target_data[i].effective = tswap32(data[i].effective);
11187                     target_data[i].permitted = tswap32(data[i].permitted);
11188                     target_data[i].inheritable = tswap32(data[i].inheritable);
11189                 }
11190                 unlock_user(target_data, arg2, target_datalen);
11191             } else {
11192                 unlock_user(target_data, arg2, 0);
11193             }
11194         }
11195         return ret;
11196     }
11197     case TARGET_NR_sigaltstack:
11198         return do_sigaltstack(arg1, arg2, cpu_env);
11199 
11200 #ifdef CONFIG_SENDFILE
11201 #ifdef TARGET_NR_sendfile
11202     case TARGET_NR_sendfile:
11203     {
11204         off_t *offp = NULL;
11205         off_t off;
11206         if (arg3) {
11207             ret = get_user_sal(off, arg3);
11208             if (is_error(ret)) {
11209                 return ret;
11210             }
11211             offp = &off;
11212         }
11213         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11214         if (!is_error(ret) && arg3) {
11215             abi_long ret2 = put_user_sal(off, arg3);
11216             if (is_error(ret2)) {
11217                 ret = ret2;
11218             }
11219         }
11220         return ret;
11221     }
11222 #endif
11223 #ifdef TARGET_NR_sendfile64
11224     case TARGET_NR_sendfile64:
11225     {
11226         off_t *offp = NULL;
11227         off_t off;
11228         if (arg3) {
11229             ret = get_user_s64(off, arg3);
11230             if (is_error(ret)) {
11231                 return ret;
11232             }
11233             offp = &off;
11234         }
11235         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11236         if (!is_error(ret) && arg3) {
11237             abi_long ret2 = put_user_s64(off, arg3);
11238             if (is_error(ret2)) {
11239                 ret = ret2;
11240             }
11241         }
11242         return ret;
11243     }
11244 #endif
11245 #endif
11246 #ifdef TARGET_NR_vfork
11247     case TARGET_NR_vfork:
11248         return get_errno(do_fork(cpu_env,
11249                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11250                          0, 0, 0, 0));
11251 #endif
11252 #ifdef TARGET_NR_ugetrlimit
11253     case TARGET_NR_ugetrlimit:
11254     {
11255 	struct rlimit rlim;
11256 	int resource = target_to_host_resource(arg1);
11257 	ret = get_errno(getrlimit(resource, &rlim));
11258 	if (!is_error(ret)) {
11259 	    struct target_rlimit *target_rlim;
11260             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11261                 return -TARGET_EFAULT;
11262 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11263 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11264             unlock_user_struct(target_rlim, arg2, 1);
11265 	}
11266         return ret;
11267     }
11268 #endif
11269 #ifdef TARGET_NR_truncate64
11270     case TARGET_NR_truncate64:
11271         if (!(p = lock_user_string(arg1)))
11272             return -TARGET_EFAULT;
11273 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11274         unlock_user(p, arg1, 0);
11275         return ret;
11276 #endif
11277 #ifdef TARGET_NR_ftruncate64
11278     case TARGET_NR_ftruncate64:
11279         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11280 #endif
11281 #ifdef TARGET_NR_stat64
11282     case TARGET_NR_stat64:
11283         if (!(p = lock_user_string(arg1))) {
11284             return -TARGET_EFAULT;
11285         }
11286         ret = get_errno(stat(path(p), &st));
11287         unlock_user(p, arg1, 0);
11288         if (!is_error(ret))
11289             ret = host_to_target_stat64(cpu_env, arg2, &st);
11290         return ret;
11291 #endif
11292 #ifdef TARGET_NR_lstat64
11293     case TARGET_NR_lstat64:
11294         if (!(p = lock_user_string(arg1))) {
11295             return -TARGET_EFAULT;
11296         }
11297         ret = get_errno(lstat(path(p), &st));
11298         unlock_user(p, arg1, 0);
11299         if (!is_error(ret))
11300             ret = host_to_target_stat64(cpu_env, arg2, &st);
11301         return ret;
11302 #endif
11303 #ifdef TARGET_NR_fstat64
11304     case TARGET_NR_fstat64:
11305         ret = get_errno(fstat(arg1, &st));
11306         if (!is_error(ret))
11307             ret = host_to_target_stat64(cpu_env, arg2, &st);
11308         return ret;
11309 #endif
11310 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11311 #ifdef TARGET_NR_fstatat64
11312     case TARGET_NR_fstatat64:
11313 #endif
11314 #ifdef TARGET_NR_newfstatat
11315     case TARGET_NR_newfstatat:
11316 #endif
11317         if (!(p = lock_user_string(arg2))) {
11318             return -TARGET_EFAULT;
11319         }
11320         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11321         unlock_user(p, arg2, 0);
11322         if (!is_error(ret))
11323             ret = host_to_target_stat64(cpu_env, arg3, &st);
11324         return ret;
11325 #endif
11326 #if defined(TARGET_NR_statx)
11327     case TARGET_NR_statx:
11328         {
11329             struct target_statx *target_stx;
11330             int dirfd = arg1;
11331             int flags = arg3;
11332 
11333             p = lock_user_string(arg2);
11334             if (p == NULL) {
11335                 return -TARGET_EFAULT;
11336             }
11337 #if defined(__NR_statx)
11338             {
11339                 /*
11340                  * It is assumed that struct statx is architecture independent.
11341                  */
11342                 struct target_statx host_stx;
11343                 int mask = arg4;
11344 
11345                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11346                 if (!is_error(ret)) {
11347                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11348                         unlock_user(p, arg2, 0);
11349                         return -TARGET_EFAULT;
11350                     }
11351                 }
11352 
11353                 if (ret != -TARGET_ENOSYS) {
11354                     unlock_user(p, arg2, 0);
11355                     return ret;
11356                 }
11357             }
11358 #endif
11359             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11360             unlock_user(p, arg2, 0);
11361 
11362             if (!is_error(ret)) {
11363                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11364                     return -TARGET_EFAULT;
11365                 }
11366                 memset(target_stx, 0, sizeof(*target_stx));
11367                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11368                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11369                 __put_user(st.st_ino, &target_stx->stx_ino);
11370                 __put_user(st.st_mode, &target_stx->stx_mode);
11371                 __put_user(st.st_uid, &target_stx->stx_uid);
11372                 __put_user(st.st_gid, &target_stx->stx_gid);
11373                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11374                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11375                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11376                 __put_user(st.st_size, &target_stx->stx_size);
11377                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11378                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11379                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11380                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11381                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11382                 unlock_user_struct(target_stx, arg5, 1);
11383             }
11384         }
11385         return ret;
11386 #endif
11387 #ifdef TARGET_NR_lchown
11388     case TARGET_NR_lchown:
11389         if (!(p = lock_user_string(arg1)))
11390             return -TARGET_EFAULT;
11391         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11392         unlock_user(p, arg1, 0);
11393         return ret;
11394 #endif
11395 #ifdef TARGET_NR_getuid
11396     case TARGET_NR_getuid:
11397         return get_errno(high2lowuid(getuid()));
11398 #endif
11399 #ifdef TARGET_NR_getgid
11400     case TARGET_NR_getgid:
11401         return get_errno(high2lowgid(getgid()));
11402 #endif
11403 #ifdef TARGET_NR_geteuid
11404     case TARGET_NR_geteuid:
11405         return get_errno(high2lowuid(geteuid()));
11406 #endif
11407 #ifdef TARGET_NR_getegid
11408     case TARGET_NR_getegid:
11409         return get_errno(high2lowgid(getegid()));
11410 #endif
11411     case TARGET_NR_setreuid:
11412         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11413     case TARGET_NR_setregid:
11414         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11415     case TARGET_NR_getgroups:
11416         {
11417             int gidsetsize = arg1;
11418             target_id *target_grouplist;
11419             gid_t *grouplist;
11420             int i;
11421 
11422             grouplist = alloca(gidsetsize * sizeof(gid_t));
11423             ret = get_errno(getgroups(gidsetsize, grouplist));
11424             if (gidsetsize == 0)
11425                 return ret;
11426             if (!is_error(ret)) {
11427                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11428                 if (!target_grouplist)
11429                     return -TARGET_EFAULT;
11430                 for(i = 0;i < ret; i++)
11431                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11432                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11433             }
11434         }
11435         return ret;
11436     case TARGET_NR_setgroups:
11437         {
11438             int gidsetsize = arg1;
11439             target_id *target_grouplist;
11440             gid_t *grouplist = NULL;
11441             int i;
11442             if (gidsetsize) {
11443                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11444                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11445                 if (!target_grouplist) {
11446                     return -TARGET_EFAULT;
11447                 }
11448                 for (i = 0; i < gidsetsize; i++) {
11449                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11450                 }
11451                 unlock_user(target_grouplist, arg2, 0);
11452             }
11453             return get_errno(setgroups(gidsetsize, grouplist));
11454         }
11455     case TARGET_NR_fchown:
11456         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11457 #if defined(TARGET_NR_fchownat)
11458     case TARGET_NR_fchownat:
11459         if (!(p = lock_user_string(arg2)))
11460             return -TARGET_EFAULT;
11461         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11462                                  low2highgid(arg4), arg5));
11463         unlock_user(p, arg2, 0);
11464         return ret;
11465 #endif
11466 #ifdef TARGET_NR_setresuid
11467     case TARGET_NR_setresuid:
11468         return get_errno(sys_setresuid(low2highuid(arg1),
11469                                        low2highuid(arg2),
11470                                        low2highuid(arg3)));
11471 #endif
11472 #ifdef TARGET_NR_getresuid
11473     case TARGET_NR_getresuid:
11474         {
11475             uid_t ruid, euid, suid;
11476             ret = get_errno(getresuid(&ruid, &euid, &suid));
11477             if (!is_error(ret)) {
11478                 if (put_user_id(high2lowuid(ruid), arg1)
11479                     || put_user_id(high2lowuid(euid), arg2)
11480                     || put_user_id(high2lowuid(suid), arg3))
11481                     return -TARGET_EFAULT;
11482             }
11483         }
11484         return ret;
11485 #endif
11486 #ifdef TARGET_NR_getresgid
11487     case TARGET_NR_setresgid:
11488         return get_errno(sys_setresgid(low2highgid(arg1),
11489                                        low2highgid(arg2),
11490                                        low2highgid(arg3)));
11491 #endif
11492 #ifdef TARGET_NR_getresgid
11493     case TARGET_NR_getresgid:
11494         {
11495             gid_t rgid, egid, sgid;
11496             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11497             if (!is_error(ret)) {
11498                 if (put_user_id(high2lowgid(rgid), arg1)
11499                     || put_user_id(high2lowgid(egid), arg2)
11500                     || put_user_id(high2lowgid(sgid), arg3))
11501                     return -TARGET_EFAULT;
11502             }
11503         }
11504         return ret;
11505 #endif
11506 #ifdef TARGET_NR_chown
11507     case TARGET_NR_chown:
11508         if (!(p = lock_user_string(arg1)))
11509             return -TARGET_EFAULT;
11510         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11511         unlock_user(p, arg1, 0);
11512         return ret;
11513 #endif
11514     case TARGET_NR_setuid:
11515         return get_errno(sys_setuid(low2highuid(arg1)));
11516     case TARGET_NR_setgid:
11517         return get_errno(sys_setgid(low2highgid(arg1)));
11518     case TARGET_NR_setfsuid:
11519         return get_errno(setfsuid(arg1));
11520     case TARGET_NR_setfsgid:
11521         return get_errno(setfsgid(arg1));
11522 
11523 #ifdef TARGET_NR_lchown32
11524     case TARGET_NR_lchown32:
11525         if (!(p = lock_user_string(arg1)))
11526             return -TARGET_EFAULT;
11527         ret = get_errno(lchown(p, arg2, arg3));
11528         unlock_user(p, arg1, 0);
11529         return ret;
11530 #endif
11531 #ifdef TARGET_NR_getuid32
11532     case TARGET_NR_getuid32:
11533         return get_errno(getuid());
11534 #endif
11535 
11536 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11537    /* Alpha specific */
11538     case TARGET_NR_getxuid:
11539          {
11540             uid_t euid;
11541             euid=geteuid();
11542             cpu_env->ir[IR_A4]=euid;
11543          }
11544         return get_errno(getuid());
11545 #endif
11546 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11547    /* Alpha specific */
11548     case TARGET_NR_getxgid:
11549          {
11550             uid_t egid;
11551             egid=getegid();
11552             cpu_env->ir[IR_A4]=egid;
11553          }
11554         return get_errno(getgid());
11555 #endif
11556 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11557     /* Alpha specific */
11558     case TARGET_NR_osf_getsysinfo:
11559         ret = -TARGET_EOPNOTSUPP;
11560         switch (arg1) {
11561           case TARGET_GSI_IEEE_FP_CONTROL:
11562             {
11563                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11564                 uint64_t swcr = cpu_env->swcr;
11565 
11566                 swcr &= ~SWCR_STATUS_MASK;
11567                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11568 
11569                 if (put_user_u64 (swcr, arg2))
11570                         return -TARGET_EFAULT;
11571                 ret = 0;
11572             }
11573             break;
11574 
11575           /* case GSI_IEEE_STATE_AT_SIGNAL:
11576              -- Not implemented in linux kernel.
11577              case GSI_UACPROC:
11578              -- Retrieves current unaligned access state; not much used.
11579              case GSI_PROC_TYPE:
11580              -- Retrieves implver information; surely not used.
11581              case GSI_GET_HWRPB:
11582              -- Grabs a copy of the HWRPB; surely not used.
11583           */
11584         }
11585         return ret;
11586 #endif
11587 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11588     /* Alpha specific */
11589     case TARGET_NR_osf_setsysinfo:
11590         ret = -TARGET_EOPNOTSUPP;
11591         switch (arg1) {
11592           case TARGET_SSI_IEEE_FP_CONTROL:
11593             {
11594                 uint64_t swcr, fpcr;
11595 
11596                 if (get_user_u64 (swcr, arg2)) {
11597                     return -TARGET_EFAULT;
11598                 }
11599 
11600                 /*
11601                  * The kernel calls swcr_update_status to update the
11602                  * status bits from the fpcr at every point that it
11603                  * could be queried.  Therefore, we store the status
11604                  * bits only in FPCR.
11605                  */
11606                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11607 
11608                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11609                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11610                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11611                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11612                 ret = 0;
11613             }
11614             break;
11615 
11616           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11617             {
11618                 uint64_t exc, fpcr, fex;
11619 
11620                 if (get_user_u64(exc, arg2)) {
11621                     return -TARGET_EFAULT;
11622                 }
11623                 exc &= SWCR_STATUS_MASK;
11624                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11625 
11626                 /* Old exceptions are not signaled.  */
11627                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11628                 fex = exc & ~fex;
11629                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11630                 fex &= (cpu_env)->swcr;
11631 
11632                 /* Update the hardware fpcr.  */
11633                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11634                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11635 
11636                 if (fex) {
11637                     int si_code = TARGET_FPE_FLTUNK;
11638                     target_siginfo_t info;
11639 
11640                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11641                         si_code = TARGET_FPE_FLTUND;
11642                     }
11643                     if (fex & SWCR_TRAP_ENABLE_INE) {
11644                         si_code = TARGET_FPE_FLTRES;
11645                     }
11646                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11647                         si_code = TARGET_FPE_FLTUND;
11648                     }
11649                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11650                         si_code = TARGET_FPE_FLTOVF;
11651                     }
11652                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11653                         si_code = TARGET_FPE_FLTDIV;
11654                     }
11655                     if (fex & SWCR_TRAP_ENABLE_INV) {
11656                         si_code = TARGET_FPE_FLTINV;
11657                     }
11658 
11659                     info.si_signo = SIGFPE;
11660                     info.si_errno = 0;
11661                     info.si_code = si_code;
11662                     info._sifields._sigfault._addr = (cpu_env)->pc;
11663                     queue_signal(cpu_env, info.si_signo,
11664                                  QEMU_SI_FAULT, &info);
11665                 }
11666                 ret = 0;
11667             }
11668             break;
11669 
11670           /* case SSI_NVPAIRS:
11671              -- Used with SSIN_UACPROC to enable unaligned accesses.
11672              case SSI_IEEE_STATE_AT_SIGNAL:
11673              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11674              -- Not implemented in linux kernel
11675           */
11676         }
11677         return ret;
11678 #endif
11679 #ifdef TARGET_NR_osf_sigprocmask
11680     /* Alpha specific.  */
11681     case TARGET_NR_osf_sigprocmask:
11682         {
11683             abi_ulong mask;
11684             int how;
11685             sigset_t set, oldset;
11686 
11687             switch(arg1) {
11688             case TARGET_SIG_BLOCK:
11689                 how = SIG_BLOCK;
11690                 break;
11691             case TARGET_SIG_UNBLOCK:
11692                 how = SIG_UNBLOCK;
11693                 break;
11694             case TARGET_SIG_SETMASK:
11695                 how = SIG_SETMASK;
11696                 break;
11697             default:
11698                 return -TARGET_EINVAL;
11699             }
11700             mask = arg2;
11701             target_to_host_old_sigset(&set, &mask);
11702             ret = do_sigprocmask(how, &set, &oldset);
11703             if (!ret) {
11704                 host_to_target_old_sigset(&mask, &oldset);
11705                 ret = mask;
11706             }
11707         }
11708         return ret;
11709 #endif
11710 
11711 #ifdef TARGET_NR_getgid32
11712     case TARGET_NR_getgid32:
11713         return get_errno(getgid());
11714 #endif
11715 #ifdef TARGET_NR_geteuid32
11716     case TARGET_NR_geteuid32:
11717         return get_errno(geteuid());
11718 #endif
11719 #ifdef TARGET_NR_getegid32
11720     case TARGET_NR_getegid32:
11721         return get_errno(getegid());
11722 #endif
11723 #ifdef TARGET_NR_setreuid32
11724     case TARGET_NR_setreuid32:
11725         return get_errno(setreuid(arg1, arg2));
11726 #endif
11727 #ifdef TARGET_NR_setregid32
11728     case TARGET_NR_setregid32:
11729         return get_errno(setregid(arg1, arg2));
11730 #endif
11731 #ifdef TARGET_NR_getgroups32
11732     case TARGET_NR_getgroups32:
11733         {
11734             int gidsetsize = arg1;
11735             uint32_t *target_grouplist;
11736             gid_t *grouplist;
11737             int i;
11738 
11739             grouplist = alloca(gidsetsize * sizeof(gid_t));
11740             ret = get_errno(getgroups(gidsetsize, grouplist));
11741             if (gidsetsize == 0)
11742                 return ret;
11743             if (!is_error(ret)) {
11744                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11745                 if (!target_grouplist) {
11746                     return -TARGET_EFAULT;
11747                 }
11748                 for(i = 0;i < ret; i++)
11749                     target_grouplist[i] = tswap32(grouplist[i]);
11750                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11751             }
11752         }
11753         return ret;
11754 #endif
11755 #ifdef TARGET_NR_setgroups32
11756     case TARGET_NR_setgroups32:
11757         {
11758             int gidsetsize = arg1;
11759             uint32_t *target_grouplist;
11760             gid_t *grouplist;
11761             int i;
11762 
11763             grouplist = alloca(gidsetsize * sizeof(gid_t));
11764             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11765             if (!target_grouplist) {
11766                 return -TARGET_EFAULT;
11767             }
11768             for(i = 0;i < gidsetsize; i++)
11769                 grouplist[i] = tswap32(target_grouplist[i]);
11770             unlock_user(target_grouplist, arg2, 0);
11771             return get_errno(setgroups(gidsetsize, grouplist));
11772         }
11773 #endif
11774 #ifdef TARGET_NR_fchown32
11775     case TARGET_NR_fchown32:
11776         return get_errno(fchown(arg1, arg2, arg3));
11777 #endif
11778 #ifdef TARGET_NR_setresuid32
11779     case TARGET_NR_setresuid32:
11780         return get_errno(sys_setresuid(arg1, arg2, arg3));
11781 #endif
11782 #ifdef TARGET_NR_getresuid32
11783     case TARGET_NR_getresuid32:
11784         {
11785             uid_t ruid, euid, suid;
11786             ret = get_errno(getresuid(&ruid, &euid, &suid));
11787             if (!is_error(ret)) {
11788                 if (put_user_u32(ruid, arg1)
11789                     || put_user_u32(euid, arg2)
11790                     || put_user_u32(suid, arg3))
11791                     return -TARGET_EFAULT;
11792             }
11793         }
11794         return ret;
11795 #endif
11796 #ifdef TARGET_NR_setresgid32
11797     case TARGET_NR_setresgid32:
11798         return get_errno(sys_setresgid(arg1, arg2, arg3));
11799 #endif
11800 #ifdef TARGET_NR_getresgid32
11801     case TARGET_NR_getresgid32:
11802         {
11803             gid_t rgid, egid, sgid;
11804             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11805             if (!is_error(ret)) {
11806                 if (put_user_u32(rgid, arg1)
11807                     || put_user_u32(egid, arg2)
11808                     || put_user_u32(sgid, arg3))
11809                     return -TARGET_EFAULT;
11810             }
11811         }
11812         return ret;
11813 #endif
11814 #ifdef TARGET_NR_chown32
11815     case TARGET_NR_chown32:
11816         if (!(p = lock_user_string(arg1)))
11817             return -TARGET_EFAULT;
11818         ret = get_errno(chown(p, arg2, arg3));
11819         unlock_user(p, arg1, 0);
11820         return ret;
11821 #endif
11822 #ifdef TARGET_NR_setuid32
11823     case TARGET_NR_setuid32:
11824         return get_errno(sys_setuid(arg1));
11825 #endif
11826 #ifdef TARGET_NR_setgid32
11827     case TARGET_NR_setgid32:
11828         return get_errno(sys_setgid(arg1));
11829 #endif
11830 #ifdef TARGET_NR_setfsuid32
11831     case TARGET_NR_setfsuid32:
11832         return get_errno(setfsuid(arg1));
11833 #endif
11834 #ifdef TARGET_NR_setfsgid32
11835     case TARGET_NR_setfsgid32:
11836         return get_errno(setfsgid(arg1));
11837 #endif
11838 #ifdef TARGET_NR_mincore
11839     case TARGET_NR_mincore:
11840         {
11841             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11842             if (!a) {
11843                 return -TARGET_ENOMEM;
11844             }
11845             p = lock_user_string(arg3);
11846             if (!p) {
11847                 ret = -TARGET_EFAULT;
11848             } else {
11849                 ret = get_errno(mincore(a, arg2, p));
11850                 unlock_user(p, arg3, ret);
11851             }
11852             unlock_user(a, arg1, 0);
11853         }
11854         return ret;
11855 #endif
11856 #ifdef TARGET_NR_arm_fadvise64_64
11857     case TARGET_NR_arm_fadvise64_64:
11858         /* arm_fadvise64_64 looks like fadvise64_64 but
11859          * with different argument order: fd, advice, offset, len
11860          * rather than the usual fd, offset, len, advice.
11861          * Note that offset and len are both 64-bit so appear as
11862          * pairs of 32-bit registers.
11863          */
11864         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11865                             target_offset64(arg5, arg6), arg2);
11866         return -host_to_target_errno(ret);
11867 #endif
11868 
11869 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11870 
11871 #ifdef TARGET_NR_fadvise64_64
11872     case TARGET_NR_fadvise64_64:
11873 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11874         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11875         ret = arg2;
11876         arg2 = arg3;
11877         arg3 = arg4;
11878         arg4 = arg5;
11879         arg5 = arg6;
11880         arg6 = ret;
11881 #else
11882         /* 6 args: fd, offset (high, low), len (high, low), advice */
11883         if (regpairs_aligned(cpu_env, num)) {
11884             /* offset is in (3,4), len in (5,6) and advice in 7 */
11885             arg2 = arg3;
11886             arg3 = arg4;
11887             arg4 = arg5;
11888             arg5 = arg6;
11889             arg6 = arg7;
11890         }
11891 #endif
11892         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11893                             target_offset64(arg4, arg5), arg6);
11894         return -host_to_target_errno(ret);
11895 #endif
11896 
11897 #ifdef TARGET_NR_fadvise64
11898     case TARGET_NR_fadvise64:
11899         /* 5 args: fd, offset (high, low), len, advice */
11900         if (regpairs_aligned(cpu_env, num)) {
11901             /* offset is in (3,4), len in 5 and advice in 6 */
11902             arg2 = arg3;
11903             arg3 = arg4;
11904             arg4 = arg5;
11905             arg5 = arg6;
11906         }
11907         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11908         return -host_to_target_errno(ret);
11909 #endif
11910 
11911 #else /* not a 32-bit ABI */
11912 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11913 #ifdef TARGET_NR_fadvise64_64
11914     case TARGET_NR_fadvise64_64:
11915 #endif
11916 #ifdef TARGET_NR_fadvise64
11917     case TARGET_NR_fadvise64:
11918 #endif
11919 #ifdef TARGET_S390X
11920         switch (arg4) {
11921         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11922         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11923         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11924         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11925         default: break;
11926         }
11927 #endif
11928         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11929 #endif
11930 #endif /* end of 64-bit ABI fadvise handling */
11931 
11932 #ifdef TARGET_NR_madvise
11933     case TARGET_NR_madvise:
11934         return target_madvise(arg1, arg2, arg3);
11935 #endif
11936 #ifdef TARGET_NR_fcntl64
11937     case TARGET_NR_fcntl64:
11938     {
11939         int cmd;
11940         struct flock64 fl;
11941         from_flock64_fn *copyfrom = copy_from_user_flock64;
11942         to_flock64_fn *copyto = copy_to_user_flock64;
11943 
11944 #ifdef TARGET_ARM
11945         if (!cpu_env->eabi) {
11946             copyfrom = copy_from_user_oabi_flock64;
11947             copyto = copy_to_user_oabi_flock64;
11948         }
11949 #endif
11950 
11951         cmd = target_to_host_fcntl_cmd(arg2);
11952         if (cmd == -TARGET_EINVAL) {
11953             return cmd;
11954         }
11955 
11956         switch(arg2) {
11957         case TARGET_F_GETLK64:
11958             ret = copyfrom(&fl, arg3);
11959             if (ret) {
11960                 break;
11961             }
11962             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11963             if (ret == 0) {
11964                 ret = copyto(arg3, &fl);
11965             }
11966 	    break;
11967 
11968         case TARGET_F_SETLK64:
11969         case TARGET_F_SETLKW64:
11970             ret = copyfrom(&fl, arg3);
11971             if (ret) {
11972                 break;
11973             }
11974             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11975 	    break;
11976         default:
11977             ret = do_fcntl(arg1, arg2, arg3);
11978             break;
11979         }
11980         return ret;
11981     }
11982 #endif
11983 #ifdef TARGET_NR_cacheflush
11984     case TARGET_NR_cacheflush:
11985         /* self-modifying code is handled automatically, so nothing needed */
11986         return 0;
11987 #endif
11988 #ifdef TARGET_NR_getpagesize
11989     case TARGET_NR_getpagesize:
11990         return TARGET_PAGE_SIZE;
11991 #endif
11992     case TARGET_NR_gettid:
11993         return get_errno(sys_gettid());
11994 #ifdef TARGET_NR_readahead
11995     case TARGET_NR_readahead:
11996 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11997         if (regpairs_aligned(cpu_env, num)) {
11998             arg2 = arg3;
11999             arg3 = arg4;
12000             arg4 = arg5;
12001         }
12002         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12003 #else
12004         ret = get_errno(readahead(arg1, arg2, arg3));
12005 #endif
12006         return ret;
12007 #endif
12008 #ifdef CONFIG_ATTR
12009 #ifdef TARGET_NR_setxattr
12010     case TARGET_NR_listxattr:
12011     case TARGET_NR_llistxattr:
12012     {
12013         void *p, *b = 0;
12014         if (arg2) {
12015             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12016             if (!b) {
12017                 return -TARGET_EFAULT;
12018             }
12019         }
12020         p = lock_user_string(arg1);
12021         if (p) {
12022             if (num == TARGET_NR_listxattr) {
12023                 ret = get_errno(listxattr(p, b, arg3));
12024             } else {
12025                 ret = get_errno(llistxattr(p, b, arg3));
12026             }
12027         } else {
12028             ret = -TARGET_EFAULT;
12029         }
12030         unlock_user(p, arg1, 0);
12031         unlock_user(b, arg2, arg3);
12032         return ret;
12033     }
12034     case TARGET_NR_flistxattr:
12035     {
12036         void *b = 0;
12037         if (arg2) {
12038             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12039             if (!b) {
12040                 return -TARGET_EFAULT;
12041             }
12042         }
12043         ret = get_errno(flistxattr(arg1, b, arg3));
12044         unlock_user(b, arg2, arg3);
12045         return ret;
12046     }
12047     case TARGET_NR_setxattr:
12048     case TARGET_NR_lsetxattr:
12049         {
12050             void *p, *n, *v = 0;
12051             if (arg3) {
12052                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12053                 if (!v) {
12054                     return -TARGET_EFAULT;
12055                 }
12056             }
12057             p = lock_user_string(arg1);
12058             n = lock_user_string(arg2);
12059             if (p && n) {
12060                 if (num == TARGET_NR_setxattr) {
12061                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12062                 } else {
12063                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12064                 }
12065             } else {
12066                 ret = -TARGET_EFAULT;
12067             }
12068             unlock_user(p, arg1, 0);
12069             unlock_user(n, arg2, 0);
12070             unlock_user(v, arg3, 0);
12071         }
12072         return ret;
12073     case TARGET_NR_fsetxattr:
12074         {
12075             void *n, *v = 0;
12076             if (arg3) {
12077                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12078                 if (!v) {
12079                     return -TARGET_EFAULT;
12080                 }
12081             }
12082             n = lock_user_string(arg2);
12083             if (n) {
12084                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12085             } else {
12086                 ret = -TARGET_EFAULT;
12087             }
12088             unlock_user(n, arg2, 0);
12089             unlock_user(v, arg3, 0);
12090         }
12091         return ret;
12092     case TARGET_NR_getxattr:
12093     case TARGET_NR_lgetxattr:
12094         {
12095             void *p, *n, *v = 0;
12096             if (arg3) {
12097                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12098                 if (!v) {
12099                     return -TARGET_EFAULT;
12100                 }
12101             }
12102             p = lock_user_string(arg1);
12103             n = lock_user_string(arg2);
12104             if (p && n) {
12105                 if (num == TARGET_NR_getxattr) {
12106                     ret = get_errno(getxattr(p, n, v, arg4));
12107                 } else {
12108                     ret = get_errno(lgetxattr(p, n, v, arg4));
12109                 }
12110             } else {
12111                 ret = -TARGET_EFAULT;
12112             }
12113             unlock_user(p, arg1, 0);
12114             unlock_user(n, arg2, 0);
12115             unlock_user(v, arg3, arg4);
12116         }
12117         return ret;
12118     case TARGET_NR_fgetxattr:
12119         {
12120             void *n, *v = 0;
12121             if (arg3) {
12122                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12123                 if (!v) {
12124                     return -TARGET_EFAULT;
12125                 }
12126             }
12127             n = lock_user_string(arg2);
12128             if (n) {
12129                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12130             } else {
12131                 ret = -TARGET_EFAULT;
12132             }
12133             unlock_user(n, arg2, 0);
12134             unlock_user(v, arg3, arg4);
12135         }
12136         return ret;
12137     case TARGET_NR_removexattr:
12138     case TARGET_NR_lremovexattr:
12139         {
12140             void *p, *n;
12141             p = lock_user_string(arg1);
12142             n = lock_user_string(arg2);
12143             if (p && n) {
12144                 if (num == TARGET_NR_removexattr) {
12145                     ret = get_errno(removexattr(p, n));
12146                 } else {
12147                     ret = get_errno(lremovexattr(p, n));
12148                 }
12149             } else {
12150                 ret = -TARGET_EFAULT;
12151             }
12152             unlock_user(p, arg1, 0);
12153             unlock_user(n, arg2, 0);
12154         }
12155         return ret;
12156     case TARGET_NR_fremovexattr:
12157         {
12158             void *n;
12159             n = lock_user_string(arg2);
12160             if (n) {
12161                 ret = get_errno(fremovexattr(arg1, n));
12162             } else {
12163                 ret = -TARGET_EFAULT;
12164             }
12165             unlock_user(n, arg2, 0);
12166         }
12167         return ret;
12168 #endif
12169 #endif /* CONFIG_ATTR */
12170 #ifdef TARGET_NR_set_thread_area
12171     case TARGET_NR_set_thread_area:
12172 #if defined(TARGET_MIPS)
12173       cpu_env->active_tc.CP0_UserLocal = arg1;
12174       return 0;
12175 #elif defined(TARGET_CRIS)
12176       if (arg1 & 0xff)
12177           ret = -TARGET_EINVAL;
12178       else {
12179           cpu_env->pregs[PR_PID] = arg1;
12180           ret = 0;
12181       }
12182       return ret;
12183 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12184       return do_set_thread_area(cpu_env, arg1);
12185 #elif defined(TARGET_M68K)
12186       {
12187           TaskState *ts = cpu->opaque;
12188           ts->tp_value = arg1;
12189           return 0;
12190       }
12191 #else
12192       return -TARGET_ENOSYS;
12193 #endif
12194 #endif
12195 #ifdef TARGET_NR_get_thread_area
12196     case TARGET_NR_get_thread_area:
12197 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12198         return do_get_thread_area(cpu_env, arg1);
12199 #elif defined(TARGET_M68K)
12200         {
12201             TaskState *ts = cpu->opaque;
12202             return ts->tp_value;
12203         }
12204 #else
12205         return -TARGET_ENOSYS;
12206 #endif
12207 #endif
12208 #ifdef TARGET_NR_getdomainname
12209     case TARGET_NR_getdomainname:
12210         return -TARGET_ENOSYS;
12211 #endif
12212 
12213 #ifdef TARGET_NR_clock_settime
12214     case TARGET_NR_clock_settime:
12215     {
12216         struct timespec ts;
12217 
12218         ret = target_to_host_timespec(&ts, arg2);
12219         if (!is_error(ret)) {
12220             ret = get_errno(clock_settime(arg1, &ts));
12221         }
12222         return ret;
12223     }
12224 #endif
12225 #ifdef TARGET_NR_clock_settime64
12226     case TARGET_NR_clock_settime64:
12227     {
12228         struct timespec ts;
12229 
12230         ret = target_to_host_timespec64(&ts, arg2);
12231         if (!is_error(ret)) {
12232             ret = get_errno(clock_settime(arg1, &ts));
12233         }
12234         return ret;
12235     }
12236 #endif
12237 #ifdef TARGET_NR_clock_gettime
12238     case TARGET_NR_clock_gettime:
12239     {
12240         struct timespec ts;
12241         ret = get_errno(clock_gettime(arg1, &ts));
12242         if (!is_error(ret)) {
12243             ret = host_to_target_timespec(arg2, &ts);
12244         }
12245         return ret;
12246     }
12247 #endif
12248 #ifdef TARGET_NR_clock_gettime64
12249     case TARGET_NR_clock_gettime64:
12250     {
12251         struct timespec ts;
12252         ret = get_errno(clock_gettime(arg1, &ts));
12253         if (!is_error(ret)) {
12254             ret = host_to_target_timespec64(arg2, &ts);
12255         }
12256         return ret;
12257     }
12258 #endif
12259 #ifdef TARGET_NR_clock_getres
12260     case TARGET_NR_clock_getres:
12261     {
12262         struct timespec ts;
12263         ret = get_errno(clock_getres(arg1, &ts));
12264         if (!is_error(ret)) {
12265             host_to_target_timespec(arg2, &ts);
12266         }
12267         return ret;
12268     }
12269 #endif
12270 #ifdef TARGET_NR_clock_getres_time64
12271     case TARGET_NR_clock_getres_time64:
12272     {
12273         struct timespec ts;
12274         ret = get_errno(clock_getres(arg1, &ts));
12275         if (!is_error(ret)) {
12276             host_to_target_timespec64(arg2, &ts);
12277         }
12278         return ret;
12279     }
12280 #endif
12281 #ifdef TARGET_NR_clock_nanosleep
12282     case TARGET_NR_clock_nanosleep:
12283     {
12284         struct timespec ts;
12285         if (target_to_host_timespec(&ts, arg3)) {
12286             return -TARGET_EFAULT;
12287         }
12288         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12289                                              &ts, arg4 ? &ts : NULL));
12290         /*
12291          * if the call is interrupted by a signal handler, it fails
12292          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12293          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12294          */
12295         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12296             host_to_target_timespec(arg4, &ts)) {
12297               return -TARGET_EFAULT;
12298         }
12299 
12300         return ret;
12301     }
12302 #endif
12303 #ifdef TARGET_NR_clock_nanosleep_time64
12304     case TARGET_NR_clock_nanosleep_time64:
12305     {
12306         struct timespec ts;
12307 
12308         if (target_to_host_timespec64(&ts, arg3)) {
12309             return -TARGET_EFAULT;
12310         }
12311 
12312         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12313                                              &ts, arg4 ? &ts : NULL));
12314 
12315         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12316             host_to_target_timespec64(arg4, &ts)) {
12317             return -TARGET_EFAULT;
12318         }
12319         return ret;
12320     }
12321 #endif
12322 
12323 #if defined(TARGET_NR_set_tid_address)
12324     case TARGET_NR_set_tid_address:
12325     {
12326         TaskState *ts = cpu->opaque;
12327         ts->child_tidptr = arg1;
12328         /* do not call host set_tid_address() syscall, instead return tid() */
12329         return get_errno(sys_gettid());
12330     }
12331 #endif
12332 
12333     case TARGET_NR_tkill:
12334         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12335 
12336     case TARGET_NR_tgkill:
12337         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12338                          target_to_host_signal(arg3)));
12339 
12340 #ifdef TARGET_NR_set_robust_list
12341     case TARGET_NR_set_robust_list:
12342     case TARGET_NR_get_robust_list:
12343         /* The ABI for supporting robust futexes has userspace pass
12344          * the kernel a pointer to a linked list which is updated by
12345          * userspace after the syscall; the list is walked by the kernel
12346          * when the thread exits. Since the linked list in QEMU guest
12347          * memory isn't a valid linked list for the host and we have
12348          * no way to reliably intercept the thread-death event, we can't
12349          * support these. Silently return ENOSYS so that guest userspace
12350          * falls back to a non-robust futex implementation (which should
12351          * be OK except in the corner case of the guest crashing while
12352          * holding a mutex that is shared with another process via
12353          * shared memory).
12354          */
12355         return -TARGET_ENOSYS;
12356 #endif
12357 
12358 #if defined(TARGET_NR_utimensat)
12359     case TARGET_NR_utimensat:
12360         {
12361             struct timespec *tsp, ts[2];
12362             if (!arg3) {
12363                 tsp = NULL;
12364             } else {
12365                 if (target_to_host_timespec(ts, arg3)) {
12366                     return -TARGET_EFAULT;
12367                 }
12368                 if (target_to_host_timespec(ts + 1, arg3 +
12369                                             sizeof(struct target_timespec))) {
12370                     return -TARGET_EFAULT;
12371                 }
12372                 tsp = ts;
12373             }
12374             if (!arg2)
12375                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12376             else {
12377                 if (!(p = lock_user_string(arg2))) {
12378                     return -TARGET_EFAULT;
12379                 }
12380                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12381                 unlock_user(p, arg2, 0);
12382             }
12383         }
12384         return ret;
12385 #endif
12386 #ifdef TARGET_NR_utimensat_time64
12387     case TARGET_NR_utimensat_time64:
12388         {
12389             struct timespec *tsp, ts[2];
12390             if (!arg3) {
12391                 tsp = NULL;
12392             } else {
12393                 if (target_to_host_timespec64(ts, arg3)) {
12394                     return -TARGET_EFAULT;
12395                 }
12396                 if (target_to_host_timespec64(ts + 1, arg3 +
12397                                      sizeof(struct target__kernel_timespec))) {
12398                     return -TARGET_EFAULT;
12399                 }
12400                 tsp = ts;
12401             }
12402             if (!arg2)
12403                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12404             else {
12405                 p = lock_user_string(arg2);
12406                 if (!p) {
12407                     return -TARGET_EFAULT;
12408                 }
12409                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12410                 unlock_user(p, arg2, 0);
12411             }
12412         }
12413         return ret;
12414 #endif
12415 #ifdef TARGET_NR_futex
12416     case TARGET_NR_futex:
12417         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12418 #endif
12419 #ifdef TARGET_NR_futex_time64
12420     case TARGET_NR_futex_time64:
12421         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12422 #endif
12423 #ifdef CONFIG_INOTIFY
12424 #if defined(TARGET_NR_inotify_init)
12425     case TARGET_NR_inotify_init:
12426         ret = get_errno(inotify_init());
12427         if (ret >= 0) {
12428             fd_trans_register(ret, &target_inotify_trans);
12429         }
12430         return ret;
12431 #endif
12432 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12433     case TARGET_NR_inotify_init1:
12434         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12435                                           fcntl_flags_tbl)));
12436         if (ret >= 0) {
12437             fd_trans_register(ret, &target_inotify_trans);
12438         }
12439         return ret;
12440 #endif
12441 #if defined(TARGET_NR_inotify_add_watch)
12442     case TARGET_NR_inotify_add_watch:
12443         p = lock_user_string(arg2);
12444         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12445         unlock_user(p, arg2, 0);
12446         return ret;
12447 #endif
12448 #if defined(TARGET_NR_inotify_rm_watch)
12449     case TARGET_NR_inotify_rm_watch:
12450         return get_errno(inotify_rm_watch(arg1, arg2));
12451 #endif
12452 #endif
12453 
12454 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12455     case TARGET_NR_mq_open:
12456         {
12457             struct mq_attr posix_mq_attr;
12458             struct mq_attr *pposix_mq_attr;
12459             int host_flags;
12460 
12461             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12462             pposix_mq_attr = NULL;
12463             if (arg4) {
12464                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12465                     return -TARGET_EFAULT;
12466                 }
12467                 pposix_mq_attr = &posix_mq_attr;
12468             }
12469             p = lock_user_string(arg1 - 1);
12470             if (!p) {
12471                 return -TARGET_EFAULT;
12472             }
12473             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12474             unlock_user (p, arg1, 0);
12475         }
12476         return ret;
12477 
12478     case TARGET_NR_mq_unlink:
12479         p = lock_user_string(arg1 - 1);
12480         if (!p) {
12481             return -TARGET_EFAULT;
12482         }
12483         ret = get_errno(mq_unlink(p));
12484         unlock_user (p, arg1, 0);
12485         return ret;
12486 
12487 #ifdef TARGET_NR_mq_timedsend
12488     case TARGET_NR_mq_timedsend:
12489         {
12490             struct timespec ts;
12491 
12492             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12493             if (arg5 != 0) {
12494                 if (target_to_host_timespec(&ts, arg5)) {
12495                     return -TARGET_EFAULT;
12496                 }
12497                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12498                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12499                     return -TARGET_EFAULT;
12500                 }
12501             } else {
12502                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12503             }
12504             unlock_user (p, arg2, arg3);
12505         }
12506         return ret;
12507 #endif
12508 #ifdef TARGET_NR_mq_timedsend_time64
12509     case TARGET_NR_mq_timedsend_time64:
12510         {
12511             struct timespec ts;
12512 
12513             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12514             if (arg5 != 0) {
12515                 if (target_to_host_timespec64(&ts, arg5)) {
12516                     return -TARGET_EFAULT;
12517                 }
12518                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12519                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12520                     return -TARGET_EFAULT;
12521                 }
12522             } else {
12523                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12524             }
12525             unlock_user(p, arg2, arg3);
12526         }
12527         return ret;
12528 #endif
12529 
12530 #ifdef TARGET_NR_mq_timedreceive
12531     case TARGET_NR_mq_timedreceive:
12532         {
12533             struct timespec ts;
12534             unsigned int prio;
12535 
12536             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12537             if (arg5 != 0) {
12538                 if (target_to_host_timespec(&ts, arg5)) {
12539                     return -TARGET_EFAULT;
12540                 }
12541                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12542                                                      &prio, &ts));
12543                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12544                     return -TARGET_EFAULT;
12545                 }
12546             } else {
12547                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12548                                                      &prio, NULL));
12549             }
12550             unlock_user (p, arg2, arg3);
12551             if (arg4 != 0)
12552                 put_user_u32(prio, arg4);
12553         }
12554         return ret;
12555 #endif
12556 #ifdef TARGET_NR_mq_timedreceive_time64
12557     case TARGET_NR_mq_timedreceive_time64:
12558         {
12559             struct timespec ts;
12560             unsigned int prio;
12561 
12562             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12563             if (arg5 != 0) {
12564                 if (target_to_host_timespec64(&ts, arg5)) {
12565                     return -TARGET_EFAULT;
12566                 }
12567                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12568                                                      &prio, &ts));
12569                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12570                     return -TARGET_EFAULT;
12571                 }
12572             } else {
12573                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12574                                                      &prio, NULL));
12575             }
12576             unlock_user(p, arg2, arg3);
12577             if (arg4 != 0) {
12578                 put_user_u32(prio, arg4);
12579             }
12580         }
12581         return ret;
12582 #endif
12583 
12584     /* Not implemented for now... */
12585 /*     case TARGET_NR_mq_notify: */
12586 /*         break; */
12587 
12588     case TARGET_NR_mq_getsetattr:
12589         {
12590             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12591             ret = 0;
12592             if (arg2 != 0) {
12593                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12594                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12595                                            &posix_mq_attr_out));
12596             } else if (arg3 != 0) {
12597                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12598             }
12599             if (ret == 0 && arg3 != 0) {
12600                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12601             }
12602         }
12603         return ret;
12604 #endif
12605 
12606 #ifdef CONFIG_SPLICE
12607 #ifdef TARGET_NR_tee
12608     case TARGET_NR_tee:
12609         {
12610             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12611         }
12612         return ret;
12613 #endif
12614 #ifdef TARGET_NR_splice
12615     case TARGET_NR_splice:
12616         {
12617             loff_t loff_in, loff_out;
12618             loff_t *ploff_in = NULL, *ploff_out = NULL;
12619             if (arg2) {
12620                 if (get_user_u64(loff_in, arg2)) {
12621                     return -TARGET_EFAULT;
12622                 }
12623                 ploff_in = &loff_in;
12624             }
12625             if (arg4) {
12626                 if (get_user_u64(loff_out, arg4)) {
12627                     return -TARGET_EFAULT;
12628                 }
12629                 ploff_out = &loff_out;
12630             }
12631             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12632             if (arg2) {
12633                 if (put_user_u64(loff_in, arg2)) {
12634                     return -TARGET_EFAULT;
12635                 }
12636             }
12637             if (arg4) {
12638                 if (put_user_u64(loff_out, arg4)) {
12639                     return -TARGET_EFAULT;
12640                 }
12641             }
12642         }
12643         return ret;
12644 #endif
12645 #ifdef TARGET_NR_vmsplice
12646 	case TARGET_NR_vmsplice:
12647         {
12648             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12649             if (vec != NULL) {
12650                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12651                 unlock_iovec(vec, arg2, arg3, 0);
12652             } else {
12653                 ret = -host_to_target_errno(errno);
12654             }
12655         }
12656         return ret;
12657 #endif
12658 #endif /* CONFIG_SPLICE */
12659 #ifdef CONFIG_EVENTFD
12660 #if defined(TARGET_NR_eventfd)
12661     case TARGET_NR_eventfd:
12662         ret = get_errno(eventfd(arg1, 0));
12663         if (ret >= 0) {
12664             fd_trans_register(ret, &target_eventfd_trans);
12665         }
12666         return ret;
12667 #endif
12668 #if defined(TARGET_NR_eventfd2)
12669     case TARGET_NR_eventfd2:
12670     {
12671         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12672         if (arg2 & TARGET_O_NONBLOCK) {
12673             host_flags |= O_NONBLOCK;
12674         }
12675         if (arg2 & TARGET_O_CLOEXEC) {
12676             host_flags |= O_CLOEXEC;
12677         }
12678         ret = get_errno(eventfd(arg1, host_flags));
12679         if (ret >= 0) {
12680             fd_trans_register(ret, &target_eventfd_trans);
12681         }
12682         return ret;
12683     }
12684 #endif
12685 #endif /* CONFIG_EVENTFD  */
12686 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12687     case TARGET_NR_fallocate:
12688 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12689         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12690                                   target_offset64(arg5, arg6)));
12691 #else
12692         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12693 #endif
12694         return ret;
12695 #endif
12696 #if defined(CONFIG_SYNC_FILE_RANGE)
12697 #if defined(TARGET_NR_sync_file_range)
12698     case TARGET_NR_sync_file_range:
12699 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12700 #if defined(TARGET_MIPS)
12701         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12702                                         target_offset64(arg5, arg6), arg7));
12703 #else
12704         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12705                                         target_offset64(arg4, arg5), arg6));
12706 #endif /* !TARGET_MIPS */
12707 #else
12708         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12709 #endif
12710         return ret;
12711 #endif
12712 #if defined(TARGET_NR_sync_file_range2) || \
12713     defined(TARGET_NR_arm_sync_file_range)
12714 #if defined(TARGET_NR_sync_file_range2)
12715     case TARGET_NR_sync_file_range2:
12716 #endif
12717 #if defined(TARGET_NR_arm_sync_file_range)
12718     case TARGET_NR_arm_sync_file_range:
12719 #endif
12720         /* This is like sync_file_range but the arguments are reordered */
12721 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12722         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12723                                         target_offset64(arg5, arg6), arg2));
12724 #else
12725         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12726 #endif
12727         return ret;
12728 #endif
12729 #endif
12730 #if defined(TARGET_NR_signalfd4)
12731     case TARGET_NR_signalfd4:
12732         return do_signalfd4(arg1, arg2, arg4);
12733 #endif
12734 #if defined(TARGET_NR_signalfd)
12735     case TARGET_NR_signalfd:
12736         return do_signalfd4(arg1, arg2, 0);
12737 #endif
12738 #if defined(CONFIG_EPOLL)
12739 #if defined(TARGET_NR_epoll_create)
12740     case TARGET_NR_epoll_create:
12741         return get_errno(epoll_create(arg1));
12742 #endif
12743 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12744     case TARGET_NR_epoll_create1:
12745         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12746 #endif
12747 #if defined(TARGET_NR_epoll_ctl)
12748     case TARGET_NR_epoll_ctl:
12749     {
12750         struct epoll_event ep;
12751         struct epoll_event *epp = 0;
12752         if (arg4) {
12753             if (arg2 != EPOLL_CTL_DEL) {
12754                 struct target_epoll_event *target_ep;
12755                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12756                     return -TARGET_EFAULT;
12757                 }
12758                 ep.events = tswap32(target_ep->events);
12759                 /*
12760                  * The epoll_data_t union is just opaque data to the kernel,
12761                  * so we transfer all 64 bits across and need not worry what
12762                  * actual data type it is.
12763                  */
12764                 ep.data.u64 = tswap64(target_ep->data.u64);
12765                 unlock_user_struct(target_ep, arg4, 0);
12766             }
12767             /*
12768              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12769              * non-null pointer, even though this argument is ignored.
12770              *
12771              */
12772             epp = &ep;
12773         }
12774         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12775     }
12776 #endif
12777 
12778 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12779 #if defined(TARGET_NR_epoll_wait)
12780     case TARGET_NR_epoll_wait:
12781 #endif
12782 #if defined(TARGET_NR_epoll_pwait)
12783     case TARGET_NR_epoll_pwait:
12784 #endif
12785     {
12786         struct target_epoll_event *target_ep;
12787         struct epoll_event *ep;
12788         int epfd = arg1;
12789         int maxevents = arg3;
12790         int timeout = arg4;
12791 
12792         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12793             return -TARGET_EINVAL;
12794         }
12795 
12796         target_ep = lock_user(VERIFY_WRITE, arg2,
12797                               maxevents * sizeof(struct target_epoll_event), 1);
12798         if (!target_ep) {
12799             return -TARGET_EFAULT;
12800         }
12801 
12802         ep = g_try_new(struct epoll_event, maxevents);
12803         if (!ep) {
12804             unlock_user(target_ep, arg2, 0);
12805             return -TARGET_ENOMEM;
12806         }
12807 
12808         switch (num) {
12809 #if defined(TARGET_NR_epoll_pwait)
12810         case TARGET_NR_epoll_pwait:
12811         {
12812             sigset_t *set = NULL;
12813 
12814             if (arg5) {
12815                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12816                 if (ret != 0) {
12817                     break;
12818                 }
12819             }
12820 
12821             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12822                                              set, SIGSET_T_SIZE));
12823 
12824             if (set) {
12825                 finish_sigsuspend_mask(ret);
12826             }
12827             break;
12828         }
12829 #endif
12830 #if defined(TARGET_NR_epoll_wait)
12831         case TARGET_NR_epoll_wait:
12832             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12833                                              NULL, 0));
12834             break;
12835 #endif
12836         default:
12837             ret = -TARGET_ENOSYS;
12838         }
12839         if (!is_error(ret)) {
12840             int i;
12841             for (i = 0; i < ret; i++) {
12842                 target_ep[i].events = tswap32(ep[i].events);
12843                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12844             }
12845             unlock_user(target_ep, arg2,
12846                         ret * sizeof(struct target_epoll_event));
12847         } else {
12848             unlock_user(target_ep, arg2, 0);
12849         }
12850         g_free(ep);
12851         return ret;
12852     }
12853 #endif
12854 #endif
12855 #ifdef TARGET_NR_prlimit64
12856     case TARGET_NR_prlimit64:
12857     {
12858         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12859         struct target_rlimit64 *target_rnew, *target_rold;
12860         struct host_rlimit64 rnew, rold, *rnewp = 0;
12861         int resource = target_to_host_resource(arg2);
12862 
12863         if (arg3 && (resource != RLIMIT_AS &&
12864                      resource != RLIMIT_DATA &&
12865                      resource != RLIMIT_STACK)) {
12866             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12867                 return -TARGET_EFAULT;
12868             }
12869             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12870             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12871             unlock_user_struct(target_rnew, arg3, 0);
12872             rnewp = &rnew;
12873         }
12874 
12875         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12876         if (!is_error(ret) && arg4) {
12877             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12878                 return -TARGET_EFAULT;
12879             }
12880             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12881             target_rold->rlim_max = tswap64(rold.rlim_max);
12882             unlock_user_struct(target_rold, arg4, 1);
12883         }
12884         return ret;
12885     }
12886 #endif
12887 #ifdef TARGET_NR_gethostname
12888     case TARGET_NR_gethostname:
12889     {
12890         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12891         if (name) {
12892             ret = get_errno(gethostname(name, arg2));
12893             unlock_user(name, arg1, arg2);
12894         } else {
12895             ret = -TARGET_EFAULT;
12896         }
12897         return ret;
12898     }
12899 #endif
12900 #ifdef TARGET_NR_atomic_cmpxchg_32
12901     case TARGET_NR_atomic_cmpxchg_32:
12902     {
12903         /* should use start_exclusive from main.c */
12904         abi_ulong mem_value;
12905         if (get_user_u32(mem_value, arg6)) {
12906             target_siginfo_t info;
12907             info.si_signo = SIGSEGV;
12908             info.si_errno = 0;
12909             info.si_code = TARGET_SEGV_MAPERR;
12910             info._sifields._sigfault._addr = arg6;
12911             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12912             ret = 0xdeadbeef;
12913 
12914         }
12915         if (mem_value == arg2)
12916             put_user_u32(arg1, arg6);
12917         return mem_value;
12918     }
12919 #endif
12920 #ifdef TARGET_NR_atomic_barrier
12921     case TARGET_NR_atomic_barrier:
12922         /* Like the kernel implementation and the
12923            qemu arm barrier, no-op this? */
12924         return 0;
12925 #endif
12926 
12927 #ifdef TARGET_NR_timer_create
12928     case TARGET_NR_timer_create:
12929     {
12930         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12931 
12932         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12933 
12934         int clkid = arg1;
12935         int timer_index = next_free_host_timer();
12936 
12937         if (timer_index < 0) {
12938             ret = -TARGET_EAGAIN;
12939         } else {
12940             timer_t *phtimer = g_posix_timers  + timer_index;
12941 
12942             if (arg2) {
12943                 phost_sevp = &host_sevp;
12944                 ret = target_to_host_sigevent(phost_sevp, arg2);
12945                 if (ret != 0) {
12946                     free_host_timer_slot(timer_index);
12947                     return ret;
12948                 }
12949             }
12950 
12951             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12952             if (ret) {
12953                 free_host_timer_slot(timer_index);
12954             } else {
12955                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12956                     timer_delete(*phtimer);
12957                     free_host_timer_slot(timer_index);
12958                     return -TARGET_EFAULT;
12959                 }
12960             }
12961         }
12962         return ret;
12963     }
12964 #endif
12965 
12966 #ifdef TARGET_NR_timer_settime
12967     case TARGET_NR_timer_settime:
12968     {
12969         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12970          * struct itimerspec * old_value */
12971         target_timer_t timerid = get_timer_id(arg1);
12972 
12973         if (timerid < 0) {
12974             ret = timerid;
12975         } else if (arg3 == 0) {
12976             ret = -TARGET_EINVAL;
12977         } else {
12978             timer_t htimer = g_posix_timers[timerid];
12979             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12980 
12981             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12982                 return -TARGET_EFAULT;
12983             }
12984             ret = get_errno(
12985                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12986             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12987                 return -TARGET_EFAULT;
12988             }
12989         }
12990         return ret;
12991     }
12992 #endif
12993 
12994 #ifdef TARGET_NR_timer_settime64
12995     case TARGET_NR_timer_settime64:
12996     {
12997         target_timer_t timerid = get_timer_id(arg1);
12998 
12999         if (timerid < 0) {
13000             ret = timerid;
13001         } else if (arg3 == 0) {
13002             ret = -TARGET_EINVAL;
13003         } else {
13004             timer_t htimer = g_posix_timers[timerid];
13005             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13006 
13007             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13008                 return -TARGET_EFAULT;
13009             }
13010             ret = get_errno(
13011                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13012             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13013                 return -TARGET_EFAULT;
13014             }
13015         }
13016         return ret;
13017     }
13018 #endif
13019 
13020 #ifdef TARGET_NR_timer_gettime
13021     case TARGET_NR_timer_gettime:
13022     {
13023         /* args: timer_t timerid, struct itimerspec *curr_value */
13024         target_timer_t timerid = get_timer_id(arg1);
13025 
13026         if (timerid < 0) {
13027             ret = timerid;
13028         } else if (!arg2) {
13029             ret = -TARGET_EFAULT;
13030         } else {
13031             timer_t htimer = g_posix_timers[timerid];
13032             struct itimerspec hspec;
13033             ret = get_errno(timer_gettime(htimer, &hspec));
13034 
13035             if (host_to_target_itimerspec(arg2, &hspec)) {
13036                 ret = -TARGET_EFAULT;
13037             }
13038         }
13039         return ret;
13040     }
13041 #endif
13042 
13043 #ifdef TARGET_NR_timer_gettime64
13044     case TARGET_NR_timer_gettime64:
13045     {
13046         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13047         target_timer_t timerid = get_timer_id(arg1);
13048 
13049         if (timerid < 0) {
13050             ret = timerid;
13051         } else if (!arg2) {
13052             ret = -TARGET_EFAULT;
13053         } else {
13054             timer_t htimer = g_posix_timers[timerid];
13055             struct itimerspec hspec;
13056             ret = get_errno(timer_gettime(htimer, &hspec));
13057 
13058             if (host_to_target_itimerspec64(arg2, &hspec)) {
13059                 ret = -TARGET_EFAULT;
13060             }
13061         }
13062         return ret;
13063     }
13064 #endif
13065 
13066 #ifdef TARGET_NR_timer_getoverrun
13067     case TARGET_NR_timer_getoverrun:
13068     {
13069         /* args: timer_t timerid */
13070         target_timer_t timerid = get_timer_id(arg1);
13071 
13072         if (timerid < 0) {
13073             ret = timerid;
13074         } else {
13075             timer_t htimer = g_posix_timers[timerid];
13076             ret = get_errno(timer_getoverrun(htimer));
13077         }
13078         return ret;
13079     }
13080 #endif
13081 
13082 #ifdef TARGET_NR_timer_delete
13083     case TARGET_NR_timer_delete:
13084     {
13085         /* args: timer_t timerid */
13086         target_timer_t timerid = get_timer_id(arg1);
13087 
13088         if (timerid < 0) {
13089             ret = timerid;
13090         } else {
13091             timer_t htimer = g_posix_timers[timerid];
13092             ret = get_errno(timer_delete(htimer));
13093             free_host_timer_slot(timerid);
13094         }
13095         return ret;
13096     }
13097 #endif
13098 
13099 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13100     case TARGET_NR_timerfd_create:
13101         return get_errno(timerfd_create(arg1,
13102                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13103 #endif
13104 
13105 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13106     case TARGET_NR_timerfd_gettime:
13107         {
13108             struct itimerspec its_curr;
13109 
13110             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13111 
13112             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13113                 return -TARGET_EFAULT;
13114             }
13115         }
13116         return ret;
13117 #endif
13118 
13119 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13120     case TARGET_NR_timerfd_gettime64:
13121         {
13122             struct itimerspec its_curr;
13123 
13124             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13125 
13126             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13127                 return -TARGET_EFAULT;
13128             }
13129         }
13130         return ret;
13131 #endif
13132 
13133 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13134     case TARGET_NR_timerfd_settime:
13135         {
13136             struct itimerspec its_new, its_old, *p_new;
13137 
13138             if (arg3) {
13139                 if (target_to_host_itimerspec(&its_new, arg3)) {
13140                     return -TARGET_EFAULT;
13141                 }
13142                 p_new = &its_new;
13143             } else {
13144                 p_new = NULL;
13145             }
13146 
13147             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13148 
13149             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13150                 return -TARGET_EFAULT;
13151             }
13152         }
13153         return ret;
13154 #endif
13155 
13156 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13157     case TARGET_NR_timerfd_settime64:
13158         {
13159             struct itimerspec its_new, its_old, *p_new;
13160 
13161             if (arg3) {
13162                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13163                     return -TARGET_EFAULT;
13164                 }
13165                 p_new = &its_new;
13166             } else {
13167                 p_new = NULL;
13168             }
13169 
13170             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13171 
13172             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13173                 return -TARGET_EFAULT;
13174             }
13175         }
13176         return ret;
13177 #endif
13178 
13179 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13180     case TARGET_NR_ioprio_get:
13181         return get_errno(ioprio_get(arg1, arg2));
13182 #endif
13183 
13184 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13185     case TARGET_NR_ioprio_set:
13186         return get_errno(ioprio_set(arg1, arg2, arg3));
13187 #endif
13188 
13189 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13190     case TARGET_NR_setns:
13191         return get_errno(setns(arg1, arg2));
13192 #endif
13193 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13194     case TARGET_NR_unshare:
13195         return get_errno(unshare(arg1));
13196 #endif
13197 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13198     case TARGET_NR_kcmp:
13199         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13200 #endif
13201 #ifdef TARGET_NR_swapcontext
13202     case TARGET_NR_swapcontext:
13203         /* PowerPC specific.  */
13204         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13205 #endif
13206 #ifdef TARGET_NR_memfd_create
13207     case TARGET_NR_memfd_create:
13208         p = lock_user_string(arg1);
13209         if (!p) {
13210             return -TARGET_EFAULT;
13211         }
13212         ret = get_errno(memfd_create(p, arg2));
13213         fd_trans_unregister(ret);
13214         unlock_user(p, arg1, 0);
13215         return ret;
13216 #endif
13217 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13218     case TARGET_NR_membarrier:
13219         return get_errno(membarrier(arg1, arg2));
13220 #endif
13221 
13222 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13223     case TARGET_NR_copy_file_range:
13224         {
13225             loff_t inoff, outoff;
13226             loff_t *pinoff = NULL, *poutoff = NULL;
13227 
13228             if (arg2) {
13229                 if (get_user_u64(inoff, arg2)) {
13230                     return -TARGET_EFAULT;
13231                 }
13232                 pinoff = &inoff;
13233             }
13234             if (arg4) {
13235                 if (get_user_u64(outoff, arg4)) {
13236                     return -TARGET_EFAULT;
13237                 }
13238                 poutoff = &outoff;
13239             }
13240             /* Do not sign-extend the count parameter. */
13241             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13242                                                  (abi_ulong)arg5, arg6));
13243             if (!is_error(ret) && ret > 0) {
13244                 if (arg2) {
13245                     if (put_user_u64(inoff, arg2)) {
13246                         return -TARGET_EFAULT;
13247                     }
13248                 }
13249                 if (arg4) {
13250                     if (put_user_u64(outoff, arg4)) {
13251                         return -TARGET_EFAULT;
13252                     }
13253                 }
13254             }
13255         }
13256         return ret;
13257 #endif
13258 
13259 #if defined(TARGET_NR_pivot_root)
13260     case TARGET_NR_pivot_root:
13261         {
13262             void *p2;
13263             p = lock_user_string(arg1); /* new_root */
13264             p2 = lock_user_string(arg2); /* put_old */
13265             if (!p || !p2) {
13266                 ret = -TARGET_EFAULT;
13267             } else {
13268                 ret = get_errno(pivot_root(p, p2));
13269             }
13270             unlock_user(p2, arg2, 0);
13271             unlock_user(p, arg1, 0);
13272         }
13273         return ret;
13274 #endif
13275 
13276     default:
13277         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13278         return -TARGET_ENOSYS;
13279     }
13280     return ret;
13281 }
13282 
13283 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13284                     abi_long arg2, abi_long arg3, abi_long arg4,
13285                     abi_long arg5, abi_long arg6, abi_long arg7,
13286                     abi_long arg8)
13287 {
13288     CPUState *cpu = env_cpu(cpu_env);
13289     abi_long ret;
13290 
13291 #ifdef DEBUG_ERESTARTSYS
13292     /* Debug-only code for exercising the syscall-restart code paths
13293      * in the per-architecture cpu main loops: restart every syscall
13294      * the guest makes once before letting it through.
13295      */
13296     {
13297         static bool flag;
13298         flag = !flag;
13299         if (flag) {
13300             return -QEMU_ERESTARTSYS;
13301         }
13302     }
13303 #endif
13304 
13305     record_syscall_start(cpu, num, arg1,
13306                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13307 
13308     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13309         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13310     }
13311 
13312     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13313                       arg5, arg6, arg7, arg8);
13314 
13315     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13316         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13317                           arg3, arg4, arg5, arg6);
13318     }
13319 
13320     record_syscall_return(cpu, num, ret);
13321     return ret;
13322 }
13323