xref: /qemu/linux-user/syscall.c (revision 9f024653)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
100 /*
101  * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
102  * which in turn prevents use of linux/fs.h. So we have to
103  * define the constants ourselves for now.
104  */
105 #define FS_IOC_GETFLAGS                _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS                _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION              _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION              _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION            _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
114 #else
115 #include <linux/fs.h>
116 #endif
117 #include <linux/fd.h>
118 #if defined(CONFIG_FIEMAP)
119 #include <linux/fiemap.h>
120 #endif
121 #include <linux/fb.h>
122 #if defined(CONFIG_USBFS)
123 #include <linux/usbdevice_fs.h>
124 #include <linux/usb/ch9.h>
125 #endif
126 #include <linux/vt.h>
127 #include <linux/dm-ioctl.h>
128 #include <linux/reboot.h>
129 #include <linux/route.h>
130 #include <linux/filter.h>
131 #include <linux/blkpg.h>
132 #include <netpacket/packet.h>
133 #include <linux/netlink.h>
134 #include <linux/if_alg.h>
135 #include <linux/rtc.h>
136 #include <sound/asound.h>
137 #ifdef HAVE_BTRFS_H
138 #include <linux/btrfs.h>
139 #endif
140 #ifdef HAVE_DRM_H
141 #include <libdrm/drm.h>
142 #include <libdrm/i915_drm.h>
143 #endif
144 #include "linux_loop.h"
145 #include "uname.h"
146 
147 #include "qemu.h"
148 #include "user-internals.h"
149 #include "strace.h"
150 #include "signal-common.h"
151 #include "loader.h"
152 #include "user-mmap.h"
153 #include "user/safe-syscall.h"
154 #include "qemu/guest-random.h"
155 #include "qemu/selfmap.h"
156 #include "user/syscall-trace.h"
157 #include "special-errno.h"
158 #include "qapi/error.h"
159 #include "fd-trans.h"
160 #include "tcg/tcg.h"
161 #include "cpu_loop-common.h"
162 
163 #ifndef CLONE_IO
164 #define CLONE_IO                0x80000000      /* Clone io context */
165 #endif
166 
167 /* We can't directly call the host clone syscall, because this will
168  * badly confuse libc (breaking mutexes, for example). So we must
169  * divide clone flags into:
170  *  * flag combinations that look like pthread_create()
171  *  * flag combinations that look like fork()
172  *  * flags we can implement within QEMU itself
173  *  * flags we can't support and will return an error for
174  */
175 /* For thread creation, all these flags must be present; for
176  * fork, none must be present.
177  */
178 #define CLONE_THREAD_FLAGS                              \
179     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
180      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
181 
182 /* These flags are ignored:
183  * CLONE_DETACHED is now ignored by the kernel;
184  * CLONE_IO is just an optimisation hint to the I/O scheduler
185  */
186 #define CLONE_IGNORED_FLAGS                     \
187     (CLONE_DETACHED | CLONE_IO)
188 
189 /* Flags for fork which we can implement within QEMU itself */
190 #define CLONE_OPTIONAL_FORK_FLAGS               \
191     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
192      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
193 
194 /* Flags for thread creation which we can implement within QEMU itself */
195 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
196     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
197      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
198 
199 #define CLONE_INVALID_FORK_FLAGS                                        \
200     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
201 
202 #define CLONE_INVALID_THREAD_FLAGS                                      \
203     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
204        CLONE_IGNORED_FLAGS))
205 
206 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
207  * have almost all been allocated. We cannot support any of
208  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
209  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
210  * The checks against the invalid thread masks above will catch these.
211  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
212  */
213 
214 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
215  * once. This exercises the codepaths for restart.
216  */
217 //#define DEBUG_ERESTARTSYS
218 
219 //#include <linux/msdos_fs.h>
220 #define VFAT_IOCTL_READDIR_BOTH \
221     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
222 #define VFAT_IOCTL_READDIR_SHORT \
223     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
224 
225 #undef _syscall0
226 #undef _syscall1
227 #undef _syscall2
228 #undef _syscall3
229 #undef _syscall4
230 #undef _syscall5
231 #undef _syscall6
232 
233 #define _syscall0(type,name)		\
234 static type name (void)			\
235 {					\
236 	return syscall(__NR_##name);	\
237 }
238 
239 #define _syscall1(type,name,type1,arg1)		\
240 static type name (type1 arg1)			\
241 {						\
242 	return syscall(__NR_##name, arg1);	\
243 }
244 
245 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
246 static type name (type1 arg1,type2 arg2)		\
247 {							\
248 	return syscall(__NR_##name, arg1, arg2);	\
249 }
250 
251 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
252 static type name (type1 arg1,type2 arg2,type3 arg3)		\
253 {								\
254 	return syscall(__NR_##name, arg1, arg2, arg3);		\
255 }
256 
257 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
258 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
259 {										\
260 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
261 }
262 
263 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
264 		  type5,arg5)							\
265 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
266 {										\
267 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
268 }
269 
270 
271 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
272 		  type5,arg5,type6,arg6)					\
273 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
274                   type6 arg6)							\
275 {										\
276 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
277 }
278 
279 
280 #define __NR_sys_uname __NR_uname
281 #define __NR_sys_getcwd1 __NR_getcwd
282 #define __NR_sys_getdents __NR_getdents
283 #define __NR_sys_getdents64 __NR_getdents64
284 #define __NR_sys_getpriority __NR_getpriority
285 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
286 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
287 #define __NR_sys_syslog __NR_syslog
288 #if defined(__NR_futex)
289 # define __NR_sys_futex __NR_futex
290 #endif
291 #if defined(__NR_futex_time64)
292 # define __NR_sys_futex_time64 __NR_futex_time64
293 #endif
294 #define __NR_sys_statx __NR_statx
295 
296 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
297 #define __NR__llseek __NR_lseek
298 #endif
299 
300 /* Newer kernel ports have llseek() instead of _llseek() */
301 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
302 #define TARGET_NR__llseek TARGET_NR_llseek
303 #endif
304 
305 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
306 #ifndef TARGET_O_NONBLOCK_MASK
307 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
308 #endif
309 
310 #define __NR_sys_gettid __NR_gettid
311 _syscall0(int, sys_gettid)
312 
313 /* For the 64-bit guest on 32-bit host case we must emulate
314  * getdents using getdents64, because otherwise the host
315  * might hand us back more dirent records than we can fit
316  * into the guest buffer after structure format conversion.
317  * Otherwise we emulate getdents with getdents if the host has it.
318  */
319 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
320 #define EMULATE_GETDENTS_WITH_GETDENTS
321 #endif
322 
323 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
324 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
325 #endif
326 #if (defined(TARGET_NR_getdents) && \
327       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
328     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
329 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
330 #endif
331 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
332 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
333           loff_t *, res, uint, wh);
334 #endif
335 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
336 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
337           siginfo_t *, uinfo)
338 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
339 #ifdef __NR_exit_group
340 _syscall1(int,exit_group,int,error_code)
341 #endif
342 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
343 #define __NR_sys_close_range __NR_close_range
344 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
345 #ifndef CLOSE_RANGE_CLOEXEC
346 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
347 #endif
348 #endif
349 #if defined(__NR_futex)
350 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
351           const struct timespec *,timeout,int *,uaddr2,int,val3)
352 #endif
353 #if defined(__NR_futex_time64)
354 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
355           const struct timespec *,timeout,int *,uaddr2,int,val3)
356 #endif
357 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
358 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
359 #endif
360 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
361 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
362                              unsigned int, flags);
363 #endif
364 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
365 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
366 #endif
367 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
368 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
369           unsigned long *, user_mask_ptr);
370 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
371 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
372           unsigned long *, user_mask_ptr);
373 /* sched_attr is not defined in glibc */
374 struct sched_attr {
375     uint32_t size;
376     uint32_t sched_policy;
377     uint64_t sched_flags;
378     int32_t sched_nice;
379     uint32_t sched_priority;
380     uint64_t sched_runtime;
381     uint64_t sched_deadline;
382     uint64_t sched_period;
383     uint32_t sched_util_min;
384     uint32_t sched_util_max;
385 };
386 #define __NR_sys_sched_getattr __NR_sched_getattr
387 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
388           unsigned int, size, unsigned int, flags);
389 #define __NR_sys_sched_setattr __NR_sched_setattr
390 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
391           unsigned int, flags);
392 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
393 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
394 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
395 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
396           const struct sched_param *, param);
397 #define __NR_sys_sched_getparam __NR_sched_getparam
398 _syscall2(int, sys_sched_getparam, pid_t, pid,
399           struct sched_param *, param);
400 #define __NR_sys_sched_setparam __NR_sched_setparam
401 _syscall2(int, sys_sched_setparam, pid_t, pid,
402           const struct sched_param *, param);
403 #define __NR_sys_getcpu __NR_getcpu
404 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
405 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
406           void *, arg);
407 _syscall2(int, capget, struct __user_cap_header_struct *, header,
408           struct __user_cap_data_struct *, data);
409 _syscall2(int, capset, struct __user_cap_header_struct *, header,
410           struct __user_cap_data_struct *, data);
411 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
412 _syscall2(int, ioprio_get, int, which, int, who)
413 #endif
414 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
415 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
416 #endif
417 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
418 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
419 #endif
420 
421 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
422 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
423           unsigned long, idx1, unsigned long, idx2)
424 #endif
425 
426 /*
427  * It is assumed that struct statx is architecture independent.
428  */
429 #if defined(TARGET_NR_statx) && defined(__NR_statx)
430 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
431           unsigned int, mask, struct target_statx *, statxbuf)
432 #endif
433 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
434 _syscall2(int, membarrier, int, cmd, int, flags)
435 #endif
436 
437 static const bitmask_transtbl fcntl_flags_tbl[] = {
438   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
439   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
440   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
441   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
442   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
443   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
444   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
445   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
446   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
447   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
448   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
449   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
450   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
451 #if defined(O_DIRECT)
452   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
453 #endif
454 #if defined(O_NOATIME)
455   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
456 #endif
457 #if defined(O_CLOEXEC)
458   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
459 #endif
460 #if defined(O_PATH)
461   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
462 #endif
463 #if defined(O_TMPFILE)
464   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
465 #endif
466   /* Don't terminate the list prematurely on 64-bit host+guest.  */
467 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
468   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
469 #endif
470   { 0, 0, 0, 0 }
471 };
472 
473 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
474 
475 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
476 #if defined(__NR_utimensat)
477 #define __NR_sys_utimensat __NR_utimensat
478 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
479           const struct timespec *,tsp,int,flags)
480 #else
481 static int sys_utimensat(int dirfd, const char *pathname,
482                          const struct timespec times[2], int flags)
483 {
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_utimensat */
489 
490 #ifdef TARGET_NR_renameat2
491 #if defined(__NR_renameat2)
492 #define __NR_sys_renameat2 __NR_renameat2
493 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
494           const char *, new, unsigned int, flags)
495 #else
496 static int sys_renameat2(int oldfd, const char *old,
497                          int newfd, const char *new, int flags)
498 {
499     if (flags == 0) {
500         return renameat(oldfd, old, newfd, new);
501     }
502     errno = ENOSYS;
503     return -1;
504 }
505 #endif
506 #endif /* TARGET_NR_renameat2 */
507 
508 #ifdef CONFIG_INOTIFY
509 #include <sys/inotify.h>
510 #else
511 /* Userspace can usually survive runtime without inotify */
512 #undef TARGET_NR_inotify_init
513 #undef TARGET_NR_inotify_init1
514 #undef TARGET_NR_inotify_add_watch
515 #undef TARGET_NR_inotify_rm_watch
516 #endif /* CONFIG_INOTIFY  */
517 
518 #if defined(TARGET_NR_prlimit64)
519 #ifndef __NR_prlimit64
520 # define __NR_prlimit64 -1
521 #endif
522 #define __NR_sys_prlimit64 __NR_prlimit64
523 /* The glibc rlimit structure may not be that used by the underlying syscall */
524 struct host_rlimit64 {
525     uint64_t rlim_cur;
526     uint64_t rlim_max;
527 };
528 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
529           const struct host_rlimit64 *, new_limit,
530           struct host_rlimit64 *, old_limit)
531 #endif
532 
533 
534 #if defined(TARGET_NR_timer_create)
535 /* Maximum of 32 active POSIX timers allowed at any one time. */
536 #define GUEST_TIMER_MAX 32
537 static timer_t g_posix_timers[GUEST_TIMER_MAX];
538 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
539 
540 static inline int next_free_host_timer(void)
541 {
542     int k;
543     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
544         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
545             return k;
546         }
547     }
548     return -1;
549 }
550 
551 static inline void free_host_timer_slot(int id)
552 {
553     qatomic_store_release(g_posix_timer_allocated + id, 0);
554 }
555 #endif
556 
557 static inline int host_to_target_errno(int host_errno)
558 {
559     switch (host_errno) {
560 #define E(X)  case X: return TARGET_##X;
561 #include "errnos.c.inc"
562 #undef E
563     default:
564         return host_errno;
565     }
566 }
567 
568 static inline int target_to_host_errno(int target_errno)
569 {
570     switch (target_errno) {
571 #define E(X)  case TARGET_##X: return X;
572 #include "errnos.c.inc"
573 #undef E
574     default:
575         return target_errno;
576     }
577 }
578 
579 abi_long get_errno(abi_long ret)
580 {
581     if (ret == -1)
582         return -host_to_target_errno(errno);
583     else
584         return ret;
585 }
586 
587 const char *target_strerror(int err)
588 {
589     if (err == QEMU_ERESTARTSYS) {
590         return "To be restarted";
591     }
592     if (err == QEMU_ESIGRETURN) {
593         return "Successful exit from sigreturn";
594     }
595 
596     return strerror(target_to_host_errno(err));
597 }
598 
599 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
600 {
601     int i;
602     uint8_t b;
603     if (usize <= ksize) {
604         return 1;
605     }
606     for (i = ksize; i < usize; i++) {
607         if (get_user_u8(b, addr + i)) {
608             return -TARGET_EFAULT;
609         }
610         if (b != 0) {
611             return 0;
612         }
613     }
614     return 1;
615 }
616 
617 #define safe_syscall0(type, name) \
618 static type safe_##name(void) \
619 { \
620     return safe_syscall(__NR_##name); \
621 }
622 
623 #define safe_syscall1(type, name, type1, arg1) \
624 static type safe_##name(type1 arg1) \
625 { \
626     return safe_syscall(__NR_##name, arg1); \
627 }
628 
629 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
630 static type safe_##name(type1 arg1, type2 arg2) \
631 { \
632     return safe_syscall(__NR_##name, arg1, arg2); \
633 }
634 
635 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
636 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
637 { \
638     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
639 }
640 
641 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
642     type4, arg4) \
643 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
644 { \
645     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
646 }
647 
648 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
649     type4, arg4, type5, arg5) \
650 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
651     type5 arg5) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
654 }
655 
656 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
657     type4, arg4, type5, arg5, type6, arg6) \
658 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
659     type5 arg5, type6 arg6) \
660 { \
661     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
662 }
663 
664 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
665 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
666 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
667               int, flags, mode_t, mode)
668 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
669 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
670               struct rusage *, rusage)
671 #endif
672 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
673               int, options, struct rusage *, rusage)
674 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
675               char **, argv, char **, envp, int, flags)
676 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
677     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
678 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
679               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
680 #endif
681 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
682 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
683               struct timespec *, tsp, const sigset_t *, sigmask,
684               size_t, sigsetsize)
685 #endif
686 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
687               int, maxevents, int, timeout, const sigset_t *, sigmask,
688               size_t, sigsetsize)
689 #if defined(__NR_futex)
690 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
691               const struct timespec *,timeout,int *,uaddr2,int,val3)
692 #endif
693 #if defined(__NR_futex_time64)
694 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
695               const struct timespec *,timeout,int *,uaddr2,int,val3)
696 #endif
697 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
698 safe_syscall2(int, kill, pid_t, pid, int, sig)
699 safe_syscall2(int, tkill, int, tid, int, sig)
700 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
701 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
702 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
703 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
704               unsigned long, pos_l, unsigned long, pos_h)
705 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
706               unsigned long, pos_l, unsigned long, pos_h)
707 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
708               socklen_t, addrlen)
709 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
710               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
711 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
712               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
713 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
714 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
715 safe_syscall2(int, flock, int, fd, int, operation)
716 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
717 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
718               const struct timespec *, uts, size_t, sigsetsize)
719 #endif
720 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
721               int, flags)
722 #if defined(TARGET_NR_nanosleep)
723 safe_syscall2(int, nanosleep, const struct timespec *, req,
724               struct timespec *, rem)
725 #endif
726 #if defined(TARGET_NR_clock_nanosleep) || \
727     defined(TARGET_NR_clock_nanosleep_time64)
728 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
729               const struct timespec *, req, struct timespec *, rem)
730 #endif
731 #ifdef __NR_ipc
732 #ifdef __s390x__
733 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
734               void *, ptr)
735 #else
736 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
737               void *, ptr, long, fifth)
738 #endif
739 #endif
740 #ifdef __NR_msgsnd
741 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
742               int, flags)
743 #endif
744 #ifdef __NR_msgrcv
745 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
746               long, msgtype, int, flags)
747 #endif
748 #ifdef __NR_semtimedop
749 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
750               unsigned, nsops, const struct timespec *, timeout)
751 #endif
752 #if defined(TARGET_NR_mq_timedsend) || \
753     defined(TARGET_NR_mq_timedsend_time64)
754 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
755               size_t, len, unsigned, prio, const struct timespec *, timeout)
756 #endif
757 #if defined(TARGET_NR_mq_timedreceive) || \
758     defined(TARGET_NR_mq_timedreceive_time64)
759 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
760               size_t, len, unsigned *, prio, const struct timespec *, timeout)
761 #endif
762 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
763 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
764               int, outfd, loff_t *, poutoff, size_t, length,
765               unsigned int, flags)
766 #endif
767 
768 /* We do ioctl like this rather than via safe_syscall3 to preserve the
769  * "third argument might be integer or pointer or not present" behaviour of
770  * the libc function.
771  */
772 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
773 /* Similarly for fcntl. Note that callers must always:
774  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
775  *  use the flock64 struct rather than unsuffixed flock
776  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
777  */
778 #ifdef __NR_fcntl64
779 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
780 #else
781 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
782 #endif
783 
784 static inline int host_to_target_sock_type(int host_type)
785 {
786     int target_type;
787 
788     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
789     case SOCK_DGRAM:
790         target_type = TARGET_SOCK_DGRAM;
791         break;
792     case SOCK_STREAM:
793         target_type = TARGET_SOCK_STREAM;
794         break;
795     default:
796         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
797         break;
798     }
799 
800 #if defined(SOCK_CLOEXEC)
801     if (host_type & SOCK_CLOEXEC) {
802         target_type |= TARGET_SOCK_CLOEXEC;
803     }
804 #endif
805 
806 #if defined(SOCK_NONBLOCK)
807     if (host_type & SOCK_NONBLOCK) {
808         target_type |= TARGET_SOCK_NONBLOCK;
809     }
810 #endif
811 
812     return target_type;
813 }
814 
815 static abi_ulong target_brk;
816 static abi_ulong target_original_brk;
817 static abi_ulong brk_page;
818 
819 void target_set_brk(abi_ulong new_brk)
820 {
821     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
822     brk_page = HOST_PAGE_ALIGN(target_brk);
823 }
824 
825 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
826 #define DEBUGF_BRK(message, args...)
827 
828 /* do_brk() must return target values and target errnos. */
829 abi_long do_brk(abi_ulong new_brk)
830 {
831     abi_long mapped_addr;
832     abi_ulong new_alloc_size;
833 
834     /* brk pointers are always untagged */
835 
836     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
837 
838     if (!new_brk) {
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
840         return target_brk;
841     }
842     if (new_brk < target_original_brk) {
843         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
844                    target_brk);
845         return target_brk;
846     }
847 
848     /* If the new brk is less than the highest page reserved to the
849      * target heap allocation, set it and we're almost done...  */
850     if (new_brk <= brk_page) {
851         /* Heap contents are initialized to zero, as for anonymous
852          * mapped pages.  */
853         if (new_brk > target_brk) {
854             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
855         }
856 	target_brk = new_brk;
857         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
858 	return target_brk;
859     }
860 
861     /* We need to allocate more memory after the brk... Note that
862      * we don't use MAP_FIXED because that will map over the top of
863      * any existing mapping (like the one with the host libc or qemu
864      * itself); instead we treat "mapped but at wrong address" as
865      * a failure and unmap again.
866      */
867     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
868     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
869                                         PROT_READ|PROT_WRITE,
870                                         MAP_ANON|MAP_PRIVATE, 0, 0));
871 
872     if (mapped_addr == brk_page) {
873         /* Heap contents are initialized to zero, as for anonymous
874          * mapped pages.  Technically the new pages are already
875          * initialized to zero since they *are* anonymous mapped
876          * pages, however we have to take care with the contents that
877          * come from the remaining part of the previous page: it may
878          * contains garbage data due to a previous heap usage (grown
879          * then shrunken).  */
880         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
881 
882         target_brk = new_brk;
883         brk_page = HOST_PAGE_ALIGN(target_brk);
884         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
885             target_brk);
886         return target_brk;
887     } else if (mapped_addr != -1) {
888         /* Mapped but at wrong address, meaning there wasn't actually
889          * enough space for this brk.
890          */
891         target_munmap(mapped_addr, new_alloc_size);
892         mapped_addr = -1;
893         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
894     }
895     else {
896         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
897     }
898 
899 #if defined(TARGET_ALPHA)
900     /* We (partially) emulate OSF/1 on Alpha, which requires we
901        return a proper errno, not an unchanged brk value.  */
902     return -TARGET_ENOMEM;
903 #endif
904     /* For everything else, return the previous break. */
905     return target_brk;
906 }
907 
908 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
909     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
910 static inline abi_long copy_from_user_fdset(fd_set *fds,
911                                             abi_ulong target_fds_addr,
912                                             int n)
913 {
914     int i, nw, j, k;
915     abi_ulong b, *target_fds;
916 
917     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
918     if (!(target_fds = lock_user(VERIFY_READ,
919                                  target_fds_addr,
920                                  sizeof(abi_ulong) * nw,
921                                  1)))
922         return -TARGET_EFAULT;
923 
924     FD_ZERO(fds);
925     k = 0;
926     for (i = 0; i < nw; i++) {
927         /* grab the abi_ulong */
928         __get_user(b, &target_fds[i]);
929         for (j = 0; j < TARGET_ABI_BITS; j++) {
930             /* check the bit inside the abi_ulong */
931             if ((b >> j) & 1)
932                 FD_SET(k, fds);
933             k++;
934         }
935     }
936 
937     unlock_user(target_fds, target_fds_addr, 0);
938 
939     return 0;
940 }
941 
942 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
943                                                  abi_ulong target_fds_addr,
944                                                  int n)
945 {
946     if (target_fds_addr) {
947         if (copy_from_user_fdset(fds, target_fds_addr, n))
948             return -TARGET_EFAULT;
949         *fds_ptr = fds;
950     } else {
951         *fds_ptr = NULL;
952     }
953     return 0;
954 }
955 
956 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
957                                           const fd_set *fds,
958                                           int n)
959 {
960     int i, nw, j, k;
961     abi_long v;
962     abi_ulong *target_fds;
963 
964     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
965     if (!(target_fds = lock_user(VERIFY_WRITE,
966                                  target_fds_addr,
967                                  sizeof(abi_ulong) * nw,
968                                  0)))
969         return -TARGET_EFAULT;
970 
971     k = 0;
972     for (i = 0; i < nw; i++) {
973         v = 0;
974         for (j = 0; j < TARGET_ABI_BITS; j++) {
975             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
976             k++;
977         }
978         __put_user(v, &target_fds[i]);
979     }
980 
981     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
982 
983     return 0;
984 }
985 #endif
986 
987 #if defined(__alpha__)
988 #define HOST_HZ 1024
989 #else
990 #define HOST_HZ 100
991 #endif
992 
993 static inline abi_long host_to_target_clock_t(long ticks)
994 {
995 #if HOST_HZ == TARGET_HZ
996     return ticks;
997 #else
998     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
999 #endif
1000 }
1001 
1002 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1003                                              const struct rusage *rusage)
1004 {
1005     struct target_rusage *target_rusage;
1006 
1007     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1008         return -TARGET_EFAULT;
1009     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1010     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1011     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1012     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1013     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1014     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1015     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1016     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1017     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1018     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1019     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1020     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1021     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1022     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1023     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1024     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1025     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1026     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1027     unlock_user_struct(target_rusage, target_addr, 1);
1028 
1029     return 0;
1030 }
1031 
1032 #ifdef TARGET_NR_setrlimit
1033 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     rlim_t result;
1037 
1038     target_rlim_swap = tswapal(target_rlim);
1039     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1040         return RLIM_INFINITY;
1041 
1042     result = target_rlim_swap;
1043     if (target_rlim_swap != (rlim_t)result)
1044         return RLIM_INFINITY;
1045 
1046     return result;
1047 }
1048 #endif
1049 
1050 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1051 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1052 {
1053     abi_ulong target_rlim_swap;
1054     abi_ulong result;
1055 
1056     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1057         target_rlim_swap = TARGET_RLIM_INFINITY;
1058     else
1059         target_rlim_swap = rlim;
1060     result = tswapal(target_rlim_swap);
1061 
1062     return result;
1063 }
1064 #endif
1065 
1066 static inline int target_to_host_resource(int code)
1067 {
1068     switch (code) {
1069     case TARGET_RLIMIT_AS:
1070         return RLIMIT_AS;
1071     case TARGET_RLIMIT_CORE:
1072         return RLIMIT_CORE;
1073     case TARGET_RLIMIT_CPU:
1074         return RLIMIT_CPU;
1075     case TARGET_RLIMIT_DATA:
1076         return RLIMIT_DATA;
1077     case TARGET_RLIMIT_FSIZE:
1078         return RLIMIT_FSIZE;
1079     case TARGET_RLIMIT_LOCKS:
1080         return RLIMIT_LOCKS;
1081     case TARGET_RLIMIT_MEMLOCK:
1082         return RLIMIT_MEMLOCK;
1083     case TARGET_RLIMIT_MSGQUEUE:
1084         return RLIMIT_MSGQUEUE;
1085     case TARGET_RLIMIT_NICE:
1086         return RLIMIT_NICE;
1087     case TARGET_RLIMIT_NOFILE:
1088         return RLIMIT_NOFILE;
1089     case TARGET_RLIMIT_NPROC:
1090         return RLIMIT_NPROC;
1091     case TARGET_RLIMIT_RSS:
1092         return RLIMIT_RSS;
1093     case TARGET_RLIMIT_RTPRIO:
1094         return RLIMIT_RTPRIO;
1095 #ifdef RLIMIT_RTTIME
1096     case TARGET_RLIMIT_RTTIME:
1097         return RLIMIT_RTTIME;
1098 #endif
1099     case TARGET_RLIMIT_SIGPENDING:
1100         return RLIMIT_SIGPENDING;
1101     case TARGET_RLIMIT_STACK:
1102         return RLIMIT_STACK;
1103     default:
1104         return code;
1105     }
1106 }
1107 
1108 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1109                                               abi_ulong target_tv_addr)
1110 {
1111     struct target_timeval *target_tv;
1112 
1113     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1114         return -TARGET_EFAULT;
1115     }
1116 
1117     __get_user(tv->tv_sec, &target_tv->tv_sec);
1118     __get_user(tv->tv_usec, &target_tv->tv_usec);
1119 
1120     unlock_user_struct(target_tv, target_tv_addr, 0);
1121 
1122     return 0;
1123 }
1124 
1125 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1126                                             const struct timeval *tv)
1127 {
1128     struct target_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __put_user(tv->tv_sec, &target_tv->tv_sec);
1135     __put_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 1);
1138 
1139     return 0;
1140 }
1141 
1142 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1143 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1144                                                 abi_ulong target_tv_addr)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __get_user(tv->tv_sec, &target_tv->tv_sec);
1153     __get_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 0);
1156 
1157     return 0;
1158 }
1159 #endif
1160 
1161 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1162                                               const struct timeval *tv)
1163 {
1164     struct target__kernel_sock_timeval *target_tv;
1165 
1166     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1167         return -TARGET_EFAULT;
1168     }
1169 
1170     __put_user(tv->tv_sec, &target_tv->tv_sec);
1171     __put_user(tv->tv_usec, &target_tv->tv_usec);
1172 
1173     unlock_user_struct(target_tv, target_tv_addr, 1);
1174 
1175     return 0;
1176 }
1177 
1178 #if defined(TARGET_NR_futex) || \
1179     defined(TARGET_NR_rt_sigtimedwait) || \
1180     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1181     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1182     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1183     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1184     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1185     defined(TARGET_NR_timer_settime) || \
1186     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1187 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1188                                                abi_ulong target_addr)
1189 {
1190     struct target_timespec *target_ts;
1191 
1192     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1193         return -TARGET_EFAULT;
1194     }
1195     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1196     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1197     unlock_user_struct(target_ts, target_addr, 0);
1198     return 0;
1199 }
1200 #endif
1201 
1202 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1203     defined(TARGET_NR_timer_settime64) || \
1204     defined(TARGET_NR_mq_timedsend_time64) || \
1205     defined(TARGET_NR_mq_timedreceive_time64) || \
1206     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1207     defined(TARGET_NR_clock_nanosleep_time64) || \
1208     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1209     defined(TARGET_NR_utimensat) || \
1210     defined(TARGET_NR_utimensat_time64) || \
1211     defined(TARGET_NR_semtimedop_time64) || \
1212     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1213 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1214                                                  abi_ulong target_addr)
1215 {
1216     struct target__kernel_timespec *target_ts;
1217 
1218     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1219         return -TARGET_EFAULT;
1220     }
1221     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1222     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1223     /* in 32bit mode, this drops the padding */
1224     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1225     unlock_user_struct(target_ts, target_addr, 0);
1226     return 0;
1227 }
1228 #endif
1229 
1230 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1231                                                struct timespec *host_ts)
1232 {
1233     struct target_timespec *target_ts;
1234 
1235     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1236         return -TARGET_EFAULT;
1237     }
1238     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1239     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1240     unlock_user_struct(target_ts, target_addr, 1);
1241     return 0;
1242 }
1243 
1244 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1245                                                  struct timespec *host_ts)
1246 {
1247     struct target__kernel_timespec *target_ts;
1248 
1249     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1250         return -TARGET_EFAULT;
1251     }
1252     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1253     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1254     unlock_user_struct(target_ts, target_addr, 1);
1255     return 0;
1256 }
1257 
1258 #if defined(TARGET_NR_gettimeofday)
1259 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1260                                              struct timezone *tz)
1261 {
1262     struct target_timezone *target_tz;
1263 
1264     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1265         return -TARGET_EFAULT;
1266     }
1267 
1268     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1269     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1270 
1271     unlock_user_struct(target_tz, target_tz_addr, 1);
1272 
1273     return 0;
1274 }
1275 #endif
1276 
1277 #if defined(TARGET_NR_settimeofday)
1278 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1279                                                abi_ulong target_tz_addr)
1280 {
1281     struct target_timezone *target_tz;
1282 
1283     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1284         return -TARGET_EFAULT;
1285     }
1286 
1287     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1288     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1289 
1290     unlock_user_struct(target_tz, target_tz_addr, 0);
1291 
1292     return 0;
1293 }
1294 #endif
1295 
1296 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1297 #include <mqueue.h>
1298 
1299 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1300                                               abi_ulong target_mq_attr_addr)
1301 {
1302     struct target_mq_attr *target_mq_attr;
1303 
1304     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1305                           target_mq_attr_addr, 1))
1306         return -TARGET_EFAULT;
1307 
1308     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1309     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1310     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1311     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1312 
1313     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1314 
1315     return 0;
1316 }
1317 
1318 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1319                                             const struct mq_attr *attr)
1320 {
1321     struct target_mq_attr *target_mq_attr;
1322 
1323     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1324                           target_mq_attr_addr, 0))
1325         return -TARGET_EFAULT;
1326 
1327     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1328     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1329     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1330     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1331 
1332     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1333 
1334     return 0;
1335 }
1336 #endif
1337 
1338 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1339 /* do_select() must return target values and target errnos. */
1340 static abi_long do_select(int n,
1341                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1342                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1343 {
1344     fd_set rfds, wfds, efds;
1345     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1346     struct timeval tv;
1347     struct timespec ts, *ts_ptr;
1348     abi_long ret;
1349 
1350     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1351     if (ret) {
1352         return ret;
1353     }
1354     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1355     if (ret) {
1356         return ret;
1357     }
1358     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1359     if (ret) {
1360         return ret;
1361     }
1362 
1363     if (target_tv_addr) {
1364         if (copy_from_user_timeval(&tv, target_tv_addr))
1365             return -TARGET_EFAULT;
1366         ts.tv_sec = tv.tv_sec;
1367         ts.tv_nsec = tv.tv_usec * 1000;
1368         ts_ptr = &ts;
1369     } else {
1370         ts_ptr = NULL;
1371     }
1372 
1373     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1374                                   ts_ptr, NULL));
1375 
1376     if (!is_error(ret)) {
1377         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1378             return -TARGET_EFAULT;
1379         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1380             return -TARGET_EFAULT;
1381         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1382             return -TARGET_EFAULT;
1383 
1384         if (target_tv_addr) {
1385             tv.tv_sec = ts.tv_sec;
1386             tv.tv_usec = ts.tv_nsec / 1000;
1387             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1388                 return -TARGET_EFAULT;
1389             }
1390         }
1391     }
1392 
1393     return ret;
1394 }
1395 
1396 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1397 static abi_long do_old_select(abi_ulong arg1)
1398 {
1399     struct target_sel_arg_struct *sel;
1400     abi_ulong inp, outp, exp, tvp;
1401     long nsel;
1402 
1403     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1404         return -TARGET_EFAULT;
1405     }
1406 
1407     nsel = tswapal(sel->n);
1408     inp = tswapal(sel->inp);
1409     outp = tswapal(sel->outp);
1410     exp = tswapal(sel->exp);
1411     tvp = tswapal(sel->tvp);
1412 
1413     unlock_user_struct(sel, arg1, 0);
1414 
1415     return do_select(nsel, inp, outp, exp, tvp);
1416 }
1417 #endif
1418 #endif
1419 
1420 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1421 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1422                             abi_long arg4, abi_long arg5, abi_long arg6,
1423                             bool time64)
1424 {
1425     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1426     fd_set rfds, wfds, efds;
1427     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1428     struct timespec ts, *ts_ptr;
1429     abi_long ret;
1430 
1431     /*
1432      * The 6th arg is actually two args smashed together,
1433      * so we cannot use the C library.
1434      */
1435     struct {
1436         sigset_t *set;
1437         size_t size;
1438     } sig, *sig_ptr;
1439 
1440     abi_ulong arg_sigset, arg_sigsize, *arg7;
1441 
1442     n = arg1;
1443     rfd_addr = arg2;
1444     wfd_addr = arg3;
1445     efd_addr = arg4;
1446     ts_addr = arg5;
1447 
1448     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1449     if (ret) {
1450         return ret;
1451     }
1452     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1453     if (ret) {
1454         return ret;
1455     }
1456     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1457     if (ret) {
1458         return ret;
1459     }
1460 
1461     /*
1462      * This takes a timespec, and not a timeval, so we cannot
1463      * use the do_select() helper ...
1464      */
1465     if (ts_addr) {
1466         if (time64) {
1467             if (target_to_host_timespec64(&ts, ts_addr)) {
1468                 return -TARGET_EFAULT;
1469             }
1470         } else {
1471             if (target_to_host_timespec(&ts, ts_addr)) {
1472                 return -TARGET_EFAULT;
1473             }
1474         }
1475             ts_ptr = &ts;
1476     } else {
1477         ts_ptr = NULL;
1478     }
1479 
1480     /* Extract the two packed args for the sigset */
1481     sig_ptr = NULL;
1482     if (arg6) {
1483         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1484         if (!arg7) {
1485             return -TARGET_EFAULT;
1486         }
1487         arg_sigset = tswapal(arg7[0]);
1488         arg_sigsize = tswapal(arg7[1]);
1489         unlock_user(arg7, arg6, 0);
1490 
1491         if (arg_sigset) {
1492             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1493             if (ret != 0) {
1494                 return ret;
1495             }
1496             sig_ptr = &sig;
1497             sig.size = SIGSET_T_SIZE;
1498         }
1499     }
1500 
1501     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1502                                   ts_ptr, sig_ptr));
1503 
1504     if (sig_ptr) {
1505         finish_sigsuspend_mask(ret);
1506     }
1507 
1508     if (!is_error(ret)) {
1509         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1510             return -TARGET_EFAULT;
1511         }
1512         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1513             return -TARGET_EFAULT;
1514         }
1515         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1516             return -TARGET_EFAULT;
1517         }
1518         if (time64) {
1519             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1520                 return -TARGET_EFAULT;
1521             }
1522         } else {
1523             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1524                 return -TARGET_EFAULT;
1525             }
1526         }
1527     }
1528     return ret;
1529 }
1530 #endif
1531 
1532 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1533     defined(TARGET_NR_ppoll_time64)
1534 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1535                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1536 {
1537     struct target_pollfd *target_pfd;
1538     unsigned int nfds = arg2;
1539     struct pollfd *pfd;
1540     unsigned int i;
1541     abi_long ret;
1542 
1543     pfd = NULL;
1544     target_pfd = NULL;
1545     if (nfds) {
1546         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1547             return -TARGET_EINVAL;
1548         }
1549         target_pfd = lock_user(VERIFY_WRITE, arg1,
1550                                sizeof(struct target_pollfd) * nfds, 1);
1551         if (!target_pfd) {
1552             return -TARGET_EFAULT;
1553         }
1554 
1555         pfd = alloca(sizeof(struct pollfd) * nfds);
1556         for (i = 0; i < nfds; i++) {
1557             pfd[i].fd = tswap32(target_pfd[i].fd);
1558             pfd[i].events = tswap16(target_pfd[i].events);
1559         }
1560     }
1561     if (ppoll) {
1562         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1563         sigset_t *set = NULL;
1564 
1565         if (arg3) {
1566             if (time64) {
1567                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1568                     unlock_user(target_pfd, arg1, 0);
1569                     return -TARGET_EFAULT;
1570                 }
1571             } else {
1572                 if (target_to_host_timespec(timeout_ts, arg3)) {
1573                     unlock_user(target_pfd, arg1, 0);
1574                     return -TARGET_EFAULT;
1575                 }
1576             }
1577         } else {
1578             timeout_ts = NULL;
1579         }
1580 
1581         if (arg4) {
1582             ret = process_sigsuspend_mask(&set, arg4, arg5);
1583             if (ret != 0) {
1584                 unlock_user(target_pfd, arg1, 0);
1585                 return ret;
1586             }
1587         }
1588 
1589         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1590                                    set, SIGSET_T_SIZE));
1591 
1592         if (set) {
1593             finish_sigsuspend_mask(ret);
1594         }
1595         if (!is_error(ret) && arg3) {
1596             if (time64) {
1597                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1598                     return -TARGET_EFAULT;
1599                 }
1600             } else {
1601                 if (host_to_target_timespec(arg3, timeout_ts)) {
1602                     return -TARGET_EFAULT;
1603                 }
1604             }
1605         }
1606     } else {
1607           struct timespec ts, *pts;
1608 
1609           if (arg3 >= 0) {
1610               /* Convert ms to secs, ns */
1611               ts.tv_sec = arg3 / 1000;
1612               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1613               pts = &ts;
1614           } else {
1615               /* -ve poll() timeout means "infinite" */
1616               pts = NULL;
1617           }
1618           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1619     }
1620 
1621     if (!is_error(ret)) {
1622         for (i = 0; i < nfds; i++) {
1623             target_pfd[i].revents = tswap16(pfd[i].revents);
1624         }
1625     }
1626     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1627     return ret;
1628 }
1629 #endif
1630 
1631 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1632                         int flags, int is_pipe2)
1633 {
1634     int host_pipe[2];
1635     abi_long ret;
1636     ret = pipe2(host_pipe, flags);
1637 
1638     if (is_error(ret))
1639         return get_errno(ret);
1640 
1641     /* Several targets have special calling conventions for the original
1642        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1643     if (!is_pipe2) {
1644 #if defined(TARGET_ALPHA)
1645         cpu_env->ir[IR_A4] = host_pipe[1];
1646         return host_pipe[0];
1647 #elif defined(TARGET_MIPS)
1648         cpu_env->active_tc.gpr[3] = host_pipe[1];
1649         return host_pipe[0];
1650 #elif defined(TARGET_SH4)
1651         cpu_env->gregs[1] = host_pipe[1];
1652         return host_pipe[0];
1653 #elif defined(TARGET_SPARC)
1654         cpu_env->regwptr[1] = host_pipe[1];
1655         return host_pipe[0];
1656 #endif
1657     }
1658 
1659     if (put_user_s32(host_pipe[0], pipedes)
1660         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1661         return -TARGET_EFAULT;
1662     return get_errno(ret);
1663 }
1664 
1665 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1666                                               abi_ulong target_addr,
1667                                               socklen_t len)
1668 {
1669     struct target_ip_mreqn *target_smreqn;
1670 
1671     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1672     if (!target_smreqn)
1673         return -TARGET_EFAULT;
1674     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1675     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1676     if (len == sizeof(struct target_ip_mreqn))
1677         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1678     unlock_user(target_smreqn, target_addr, 0);
1679 
1680     return 0;
1681 }
1682 
1683 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1684                                                abi_ulong target_addr,
1685                                                socklen_t len)
1686 {
1687     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1688     sa_family_t sa_family;
1689     struct target_sockaddr *target_saddr;
1690 
1691     if (fd_trans_target_to_host_addr(fd)) {
1692         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1693     }
1694 
1695     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1696     if (!target_saddr)
1697         return -TARGET_EFAULT;
1698 
1699     sa_family = tswap16(target_saddr->sa_family);
1700 
1701     /* Oops. The caller might send a incomplete sun_path; sun_path
1702      * must be terminated by \0 (see the manual page), but
1703      * unfortunately it is quite common to specify sockaddr_un
1704      * length as "strlen(x->sun_path)" while it should be
1705      * "strlen(...) + 1". We'll fix that here if needed.
1706      * Linux kernel has a similar feature.
1707      */
1708 
1709     if (sa_family == AF_UNIX) {
1710         if (len < unix_maxlen && len > 0) {
1711             char *cp = (char*)target_saddr;
1712 
1713             if ( cp[len-1] && !cp[len] )
1714                 len++;
1715         }
1716         if (len > unix_maxlen)
1717             len = unix_maxlen;
1718     }
1719 
1720     memcpy(addr, target_saddr, len);
1721     addr->sa_family = sa_family;
1722     if (sa_family == AF_NETLINK) {
1723         struct sockaddr_nl *nladdr;
1724 
1725         nladdr = (struct sockaddr_nl *)addr;
1726         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1727         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1728     } else if (sa_family == AF_PACKET) {
1729 	struct target_sockaddr_ll *lladdr;
1730 
1731 	lladdr = (struct target_sockaddr_ll *)addr;
1732 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1733 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1734     }
1735     unlock_user(target_saddr, target_addr, 0);
1736 
1737     return 0;
1738 }
1739 
1740 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1741                                                struct sockaddr *addr,
1742                                                socklen_t len)
1743 {
1744     struct target_sockaddr *target_saddr;
1745 
1746     if (len == 0) {
1747         return 0;
1748     }
1749     assert(addr);
1750 
1751     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1752     if (!target_saddr)
1753         return -TARGET_EFAULT;
1754     memcpy(target_saddr, addr, len);
1755     if (len >= offsetof(struct target_sockaddr, sa_family) +
1756         sizeof(target_saddr->sa_family)) {
1757         target_saddr->sa_family = tswap16(addr->sa_family);
1758     }
1759     if (addr->sa_family == AF_NETLINK &&
1760         len >= sizeof(struct target_sockaddr_nl)) {
1761         struct target_sockaddr_nl *target_nl =
1762                (struct target_sockaddr_nl *)target_saddr;
1763         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1764         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1765     } else if (addr->sa_family == AF_PACKET) {
1766         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1767         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1768         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1769     } else if (addr->sa_family == AF_INET6 &&
1770                len >= sizeof(struct target_sockaddr_in6)) {
1771         struct target_sockaddr_in6 *target_in6 =
1772                (struct target_sockaddr_in6 *)target_saddr;
1773         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1774     }
1775     unlock_user(target_saddr, target_addr, len);
1776 
1777     return 0;
1778 }
1779 
1780 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1781                                            struct target_msghdr *target_msgh)
1782 {
1783     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1784     abi_long msg_controllen;
1785     abi_ulong target_cmsg_addr;
1786     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1787     socklen_t space = 0;
1788 
1789     msg_controllen = tswapal(target_msgh->msg_controllen);
1790     if (msg_controllen < sizeof (struct target_cmsghdr))
1791         goto the_end;
1792     target_cmsg_addr = tswapal(target_msgh->msg_control);
1793     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1794     target_cmsg_start = target_cmsg;
1795     if (!target_cmsg)
1796         return -TARGET_EFAULT;
1797 
1798     while (cmsg && target_cmsg) {
1799         void *data = CMSG_DATA(cmsg);
1800         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1801 
1802         int len = tswapal(target_cmsg->cmsg_len)
1803             - sizeof(struct target_cmsghdr);
1804 
1805         space += CMSG_SPACE(len);
1806         if (space > msgh->msg_controllen) {
1807             space -= CMSG_SPACE(len);
1808             /* This is a QEMU bug, since we allocated the payload
1809              * area ourselves (unlike overflow in host-to-target
1810              * conversion, which is just the guest giving us a buffer
1811              * that's too small). It can't happen for the payload types
1812              * we currently support; if it becomes an issue in future
1813              * we would need to improve our allocation strategy to
1814              * something more intelligent than "twice the size of the
1815              * target buffer we're reading from".
1816              */
1817             qemu_log_mask(LOG_UNIMP,
1818                           ("Unsupported ancillary data %d/%d: "
1819                            "unhandled msg size\n"),
1820                           tswap32(target_cmsg->cmsg_level),
1821                           tswap32(target_cmsg->cmsg_type));
1822             break;
1823         }
1824 
1825         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1826             cmsg->cmsg_level = SOL_SOCKET;
1827         } else {
1828             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1829         }
1830         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1831         cmsg->cmsg_len = CMSG_LEN(len);
1832 
1833         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1834             int *fd = (int *)data;
1835             int *target_fd = (int *)target_data;
1836             int i, numfds = len / sizeof(int);
1837 
1838             for (i = 0; i < numfds; i++) {
1839                 __get_user(fd[i], target_fd + i);
1840             }
1841         } else if (cmsg->cmsg_level == SOL_SOCKET
1842                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1843             struct ucred *cred = (struct ucred *)data;
1844             struct target_ucred *target_cred =
1845                 (struct target_ucred *)target_data;
1846 
1847             __get_user(cred->pid, &target_cred->pid);
1848             __get_user(cred->uid, &target_cred->uid);
1849             __get_user(cred->gid, &target_cred->gid);
1850         } else {
1851             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1852                           cmsg->cmsg_level, cmsg->cmsg_type);
1853             memcpy(data, target_data, len);
1854         }
1855 
1856         cmsg = CMSG_NXTHDR(msgh, cmsg);
1857         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1858                                          target_cmsg_start);
1859     }
1860     unlock_user(target_cmsg, target_cmsg_addr, 0);
1861  the_end:
1862     msgh->msg_controllen = space;
1863     return 0;
1864 }
1865 
1866 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1867                                            struct msghdr *msgh)
1868 {
1869     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1870     abi_long msg_controllen;
1871     abi_ulong target_cmsg_addr;
1872     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1873     socklen_t space = 0;
1874 
1875     msg_controllen = tswapal(target_msgh->msg_controllen);
1876     if (msg_controllen < sizeof (struct target_cmsghdr))
1877         goto the_end;
1878     target_cmsg_addr = tswapal(target_msgh->msg_control);
1879     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1880     target_cmsg_start = target_cmsg;
1881     if (!target_cmsg)
1882         return -TARGET_EFAULT;
1883 
1884     while (cmsg && target_cmsg) {
1885         void *data = CMSG_DATA(cmsg);
1886         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1887 
1888         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1889         int tgt_len, tgt_space;
1890 
1891         /* We never copy a half-header but may copy half-data;
1892          * this is Linux's behaviour in put_cmsg(). Note that
1893          * truncation here is a guest problem (which we report
1894          * to the guest via the CTRUNC bit), unlike truncation
1895          * in target_to_host_cmsg, which is a QEMU bug.
1896          */
1897         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1898             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1899             break;
1900         }
1901 
1902         if (cmsg->cmsg_level == SOL_SOCKET) {
1903             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1904         } else {
1905             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1906         }
1907         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1908 
1909         /* Payload types which need a different size of payload on
1910          * the target must adjust tgt_len here.
1911          */
1912         tgt_len = len;
1913         switch (cmsg->cmsg_level) {
1914         case SOL_SOCKET:
1915             switch (cmsg->cmsg_type) {
1916             case SO_TIMESTAMP:
1917                 tgt_len = sizeof(struct target_timeval);
1918                 break;
1919             default:
1920                 break;
1921             }
1922             break;
1923         default:
1924             break;
1925         }
1926 
1927         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1928             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1929             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1930         }
1931 
1932         /* We must now copy-and-convert len bytes of payload
1933          * into tgt_len bytes of destination space. Bear in mind
1934          * that in both source and destination we may be dealing
1935          * with a truncated value!
1936          */
1937         switch (cmsg->cmsg_level) {
1938         case SOL_SOCKET:
1939             switch (cmsg->cmsg_type) {
1940             case SCM_RIGHTS:
1941             {
1942                 int *fd = (int *)data;
1943                 int *target_fd = (int *)target_data;
1944                 int i, numfds = tgt_len / sizeof(int);
1945 
1946                 for (i = 0; i < numfds; i++) {
1947                     __put_user(fd[i], target_fd + i);
1948                 }
1949                 break;
1950             }
1951             case SO_TIMESTAMP:
1952             {
1953                 struct timeval *tv = (struct timeval *)data;
1954                 struct target_timeval *target_tv =
1955                     (struct target_timeval *)target_data;
1956 
1957                 if (len != sizeof(struct timeval) ||
1958                     tgt_len != sizeof(struct target_timeval)) {
1959                     goto unimplemented;
1960                 }
1961 
1962                 /* copy struct timeval to target */
1963                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1964                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1965                 break;
1966             }
1967             case SCM_CREDENTIALS:
1968             {
1969                 struct ucred *cred = (struct ucred *)data;
1970                 struct target_ucred *target_cred =
1971                     (struct target_ucred *)target_data;
1972 
1973                 __put_user(cred->pid, &target_cred->pid);
1974                 __put_user(cred->uid, &target_cred->uid);
1975                 __put_user(cred->gid, &target_cred->gid);
1976                 break;
1977             }
1978             default:
1979                 goto unimplemented;
1980             }
1981             break;
1982 
1983         case SOL_IP:
1984             switch (cmsg->cmsg_type) {
1985             case IP_TTL:
1986             {
1987                 uint32_t *v = (uint32_t *)data;
1988                 uint32_t *t_int = (uint32_t *)target_data;
1989 
1990                 if (len != sizeof(uint32_t) ||
1991                     tgt_len != sizeof(uint32_t)) {
1992                     goto unimplemented;
1993                 }
1994                 __put_user(*v, t_int);
1995                 break;
1996             }
1997             case IP_RECVERR:
1998             {
1999                 struct errhdr_t {
2000                    struct sock_extended_err ee;
2001                    struct sockaddr_in offender;
2002                 };
2003                 struct errhdr_t *errh = (struct errhdr_t *)data;
2004                 struct errhdr_t *target_errh =
2005                     (struct errhdr_t *)target_data;
2006 
2007                 if (len != sizeof(struct errhdr_t) ||
2008                     tgt_len != sizeof(struct errhdr_t)) {
2009                     goto unimplemented;
2010                 }
2011                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2012                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2013                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2014                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2015                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2016                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2017                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2018                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2019                     (void *) &errh->offender, sizeof(errh->offender));
2020                 break;
2021             }
2022             default:
2023                 goto unimplemented;
2024             }
2025             break;
2026 
2027         case SOL_IPV6:
2028             switch (cmsg->cmsg_type) {
2029             case IPV6_HOPLIMIT:
2030             {
2031                 uint32_t *v = (uint32_t *)data;
2032                 uint32_t *t_int = (uint32_t *)target_data;
2033 
2034                 if (len != sizeof(uint32_t) ||
2035                     tgt_len != sizeof(uint32_t)) {
2036                     goto unimplemented;
2037                 }
2038                 __put_user(*v, t_int);
2039                 break;
2040             }
2041             case IPV6_RECVERR:
2042             {
2043                 struct errhdr6_t {
2044                    struct sock_extended_err ee;
2045                    struct sockaddr_in6 offender;
2046                 };
2047                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2048                 struct errhdr6_t *target_errh =
2049                     (struct errhdr6_t *)target_data;
2050 
2051                 if (len != sizeof(struct errhdr6_t) ||
2052                     tgt_len != sizeof(struct errhdr6_t)) {
2053                     goto unimplemented;
2054                 }
2055                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2056                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2057                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2058                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2059                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2060                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2061                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2062                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2063                     (void *) &errh->offender, sizeof(errh->offender));
2064                 break;
2065             }
2066             default:
2067                 goto unimplemented;
2068             }
2069             break;
2070 
2071         default:
2072         unimplemented:
2073             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2074                           cmsg->cmsg_level, cmsg->cmsg_type);
2075             memcpy(target_data, data, MIN(len, tgt_len));
2076             if (tgt_len > len) {
2077                 memset(target_data + len, 0, tgt_len - len);
2078             }
2079         }
2080 
2081         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2082         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2083         if (msg_controllen < tgt_space) {
2084             tgt_space = msg_controllen;
2085         }
2086         msg_controllen -= tgt_space;
2087         space += tgt_space;
2088         cmsg = CMSG_NXTHDR(msgh, cmsg);
2089         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2090                                          target_cmsg_start);
2091     }
2092     unlock_user(target_cmsg, target_cmsg_addr, space);
2093  the_end:
2094     target_msgh->msg_controllen = tswapal(space);
2095     return 0;
2096 }
2097 
2098 /* do_setsockopt() Must return target values and target errnos. */
2099 static abi_long do_setsockopt(int sockfd, int level, int optname,
2100                               abi_ulong optval_addr, socklen_t optlen)
2101 {
2102     abi_long ret;
2103     int val;
2104     struct ip_mreqn *ip_mreq;
2105     struct ip_mreq_source *ip_mreq_source;
2106 
2107     switch(level) {
2108     case SOL_TCP:
2109     case SOL_UDP:
2110         /* TCP and UDP options all take an 'int' value.  */
2111         if (optlen < sizeof(uint32_t))
2112             return -TARGET_EINVAL;
2113 
2114         if (get_user_u32(val, optval_addr))
2115             return -TARGET_EFAULT;
2116         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2117         break;
2118     case SOL_IP:
2119         switch(optname) {
2120         case IP_TOS:
2121         case IP_TTL:
2122         case IP_HDRINCL:
2123         case IP_ROUTER_ALERT:
2124         case IP_RECVOPTS:
2125         case IP_RETOPTS:
2126         case IP_PKTINFO:
2127         case IP_MTU_DISCOVER:
2128         case IP_RECVERR:
2129         case IP_RECVTTL:
2130         case IP_RECVTOS:
2131 #ifdef IP_FREEBIND
2132         case IP_FREEBIND:
2133 #endif
2134         case IP_MULTICAST_TTL:
2135         case IP_MULTICAST_LOOP:
2136             val = 0;
2137             if (optlen >= sizeof(uint32_t)) {
2138                 if (get_user_u32(val, optval_addr))
2139                     return -TARGET_EFAULT;
2140             } else if (optlen >= 1) {
2141                 if (get_user_u8(val, optval_addr))
2142                     return -TARGET_EFAULT;
2143             }
2144             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2145             break;
2146         case IP_ADD_MEMBERSHIP:
2147         case IP_DROP_MEMBERSHIP:
2148             if (optlen < sizeof (struct target_ip_mreq) ||
2149                 optlen > sizeof (struct target_ip_mreqn))
2150                 return -TARGET_EINVAL;
2151 
2152             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2153             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2154             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2155             break;
2156 
2157         case IP_BLOCK_SOURCE:
2158         case IP_UNBLOCK_SOURCE:
2159         case IP_ADD_SOURCE_MEMBERSHIP:
2160         case IP_DROP_SOURCE_MEMBERSHIP:
2161             if (optlen != sizeof (struct target_ip_mreq_source))
2162                 return -TARGET_EINVAL;
2163 
2164             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2165             if (!ip_mreq_source) {
2166                 return -TARGET_EFAULT;
2167             }
2168             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2169             unlock_user (ip_mreq_source, optval_addr, 0);
2170             break;
2171 
2172         default:
2173             goto unimplemented;
2174         }
2175         break;
2176     case SOL_IPV6:
2177         switch (optname) {
2178         case IPV6_MTU_DISCOVER:
2179         case IPV6_MTU:
2180         case IPV6_V6ONLY:
2181         case IPV6_RECVPKTINFO:
2182         case IPV6_UNICAST_HOPS:
2183         case IPV6_MULTICAST_HOPS:
2184         case IPV6_MULTICAST_LOOP:
2185         case IPV6_RECVERR:
2186         case IPV6_RECVHOPLIMIT:
2187         case IPV6_2292HOPLIMIT:
2188         case IPV6_CHECKSUM:
2189         case IPV6_ADDRFORM:
2190         case IPV6_2292PKTINFO:
2191         case IPV6_RECVTCLASS:
2192         case IPV6_RECVRTHDR:
2193         case IPV6_2292RTHDR:
2194         case IPV6_RECVHOPOPTS:
2195         case IPV6_2292HOPOPTS:
2196         case IPV6_RECVDSTOPTS:
2197         case IPV6_2292DSTOPTS:
2198         case IPV6_TCLASS:
2199         case IPV6_ADDR_PREFERENCES:
2200 #ifdef IPV6_RECVPATHMTU
2201         case IPV6_RECVPATHMTU:
2202 #endif
2203 #ifdef IPV6_TRANSPARENT
2204         case IPV6_TRANSPARENT:
2205 #endif
2206 #ifdef IPV6_FREEBIND
2207         case IPV6_FREEBIND:
2208 #endif
2209 #ifdef IPV6_RECVORIGDSTADDR
2210         case IPV6_RECVORIGDSTADDR:
2211 #endif
2212             val = 0;
2213             if (optlen < sizeof(uint32_t)) {
2214                 return -TARGET_EINVAL;
2215             }
2216             if (get_user_u32(val, optval_addr)) {
2217                 return -TARGET_EFAULT;
2218             }
2219             ret = get_errno(setsockopt(sockfd, level, optname,
2220                                        &val, sizeof(val)));
2221             break;
2222         case IPV6_PKTINFO:
2223         {
2224             struct in6_pktinfo pki;
2225 
2226             if (optlen < sizeof(pki)) {
2227                 return -TARGET_EINVAL;
2228             }
2229 
2230             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2231                 return -TARGET_EFAULT;
2232             }
2233 
2234             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2235 
2236             ret = get_errno(setsockopt(sockfd, level, optname,
2237                                        &pki, sizeof(pki)));
2238             break;
2239         }
2240         case IPV6_ADD_MEMBERSHIP:
2241         case IPV6_DROP_MEMBERSHIP:
2242         {
2243             struct ipv6_mreq ipv6mreq;
2244 
2245             if (optlen < sizeof(ipv6mreq)) {
2246                 return -TARGET_EINVAL;
2247             }
2248 
2249             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2250                 return -TARGET_EFAULT;
2251             }
2252 
2253             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2254 
2255             ret = get_errno(setsockopt(sockfd, level, optname,
2256                                        &ipv6mreq, sizeof(ipv6mreq)));
2257             break;
2258         }
2259         default:
2260             goto unimplemented;
2261         }
2262         break;
2263     case SOL_ICMPV6:
2264         switch (optname) {
2265         case ICMPV6_FILTER:
2266         {
2267             struct icmp6_filter icmp6f;
2268 
2269             if (optlen > sizeof(icmp6f)) {
2270                 optlen = sizeof(icmp6f);
2271             }
2272 
2273             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2274                 return -TARGET_EFAULT;
2275             }
2276 
2277             for (val = 0; val < 8; val++) {
2278                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2279             }
2280 
2281             ret = get_errno(setsockopt(sockfd, level, optname,
2282                                        &icmp6f, optlen));
2283             break;
2284         }
2285         default:
2286             goto unimplemented;
2287         }
2288         break;
2289     case SOL_RAW:
2290         switch (optname) {
2291         case ICMP_FILTER:
2292         case IPV6_CHECKSUM:
2293             /* those take an u32 value */
2294             if (optlen < sizeof(uint32_t)) {
2295                 return -TARGET_EINVAL;
2296             }
2297 
2298             if (get_user_u32(val, optval_addr)) {
2299                 return -TARGET_EFAULT;
2300             }
2301             ret = get_errno(setsockopt(sockfd, level, optname,
2302                                        &val, sizeof(val)));
2303             break;
2304 
2305         default:
2306             goto unimplemented;
2307         }
2308         break;
2309 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2310     case SOL_ALG:
2311         switch (optname) {
2312         case ALG_SET_KEY:
2313         {
2314             char *alg_key = g_malloc(optlen);
2315 
2316             if (!alg_key) {
2317                 return -TARGET_ENOMEM;
2318             }
2319             if (copy_from_user(alg_key, optval_addr, optlen)) {
2320                 g_free(alg_key);
2321                 return -TARGET_EFAULT;
2322             }
2323             ret = get_errno(setsockopt(sockfd, level, optname,
2324                                        alg_key, optlen));
2325             g_free(alg_key);
2326             break;
2327         }
2328         case ALG_SET_AEAD_AUTHSIZE:
2329         {
2330             ret = get_errno(setsockopt(sockfd, level, optname,
2331                                        NULL, optlen));
2332             break;
2333         }
2334         default:
2335             goto unimplemented;
2336         }
2337         break;
2338 #endif
2339     case TARGET_SOL_SOCKET:
2340         switch (optname) {
2341         case TARGET_SO_RCVTIMEO:
2342         {
2343                 struct timeval tv;
2344 
2345                 optname = SO_RCVTIMEO;
2346 
2347 set_timeout:
2348                 if (optlen != sizeof(struct target_timeval)) {
2349                     return -TARGET_EINVAL;
2350                 }
2351 
2352                 if (copy_from_user_timeval(&tv, optval_addr)) {
2353                     return -TARGET_EFAULT;
2354                 }
2355 
2356                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2357                                 &tv, sizeof(tv)));
2358                 return ret;
2359         }
2360         case TARGET_SO_SNDTIMEO:
2361                 optname = SO_SNDTIMEO;
2362                 goto set_timeout;
2363         case TARGET_SO_ATTACH_FILTER:
2364         {
2365                 struct target_sock_fprog *tfprog;
2366                 struct target_sock_filter *tfilter;
2367                 struct sock_fprog fprog;
2368                 struct sock_filter *filter;
2369                 int i;
2370 
2371                 if (optlen != sizeof(*tfprog)) {
2372                     return -TARGET_EINVAL;
2373                 }
2374                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2375                     return -TARGET_EFAULT;
2376                 }
2377                 if (!lock_user_struct(VERIFY_READ, tfilter,
2378                                       tswapal(tfprog->filter), 0)) {
2379                     unlock_user_struct(tfprog, optval_addr, 1);
2380                     return -TARGET_EFAULT;
2381                 }
2382 
2383                 fprog.len = tswap16(tfprog->len);
2384                 filter = g_try_new(struct sock_filter, fprog.len);
2385                 if (filter == NULL) {
2386                     unlock_user_struct(tfilter, tfprog->filter, 1);
2387                     unlock_user_struct(tfprog, optval_addr, 1);
2388                     return -TARGET_ENOMEM;
2389                 }
2390                 for (i = 0; i < fprog.len; i++) {
2391                     filter[i].code = tswap16(tfilter[i].code);
2392                     filter[i].jt = tfilter[i].jt;
2393                     filter[i].jf = tfilter[i].jf;
2394                     filter[i].k = tswap32(tfilter[i].k);
2395                 }
2396                 fprog.filter = filter;
2397 
2398                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2399                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2400                 g_free(filter);
2401 
2402                 unlock_user_struct(tfilter, tfprog->filter, 1);
2403                 unlock_user_struct(tfprog, optval_addr, 1);
2404                 return ret;
2405         }
2406 	case TARGET_SO_BINDTODEVICE:
2407 	{
2408 		char *dev_ifname, *addr_ifname;
2409 
2410 		if (optlen > IFNAMSIZ - 1) {
2411 		    optlen = IFNAMSIZ - 1;
2412 		}
2413 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2414 		if (!dev_ifname) {
2415 		    return -TARGET_EFAULT;
2416 		}
2417 		optname = SO_BINDTODEVICE;
2418 		addr_ifname = alloca(IFNAMSIZ);
2419 		memcpy(addr_ifname, dev_ifname, optlen);
2420 		addr_ifname[optlen] = 0;
2421 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2422                                            addr_ifname, optlen));
2423 		unlock_user (dev_ifname, optval_addr, 0);
2424 		return ret;
2425 	}
2426         case TARGET_SO_LINGER:
2427         {
2428                 struct linger lg;
2429                 struct target_linger *tlg;
2430 
2431                 if (optlen != sizeof(struct target_linger)) {
2432                     return -TARGET_EINVAL;
2433                 }
2434                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2435                     return -TARGET_EFAULT;
2436                 }
2437                 __get_user(lg.l_onoff, &tlg->l_onoff);
2438                 __get_user(lg.l_linger, &tlg->l_linger);
2439                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2440                                 &lg, sizeof(lg)));
2441                 unlock_user_struct(tlg, optval_addr, 0);
2442                 return ret;
2443         }
2444             /* Options with 'int' argument.  */
2445         case TARGET_SO_DEBUG:
2446 		optname = SO_DEBUG;
2447 		break;
2448         case TARGET_SO_REUSEADDR:
2449 		optname = SO_REUSEADDR;
2450 		break;
2451 #ifdef SO_REUSEPORT
2452         case TARGET_SO_REUSEPORT:
2453                 optname = SO_REUSEPORT;
2454                 break;
2455 #endif
2456         case TARGET_SO_TYPE:
2457 		optname = SO_TYPE;
2458 		break;
2459         case TARGET_SO_ERROR:
2460 		optname = SO_ERROR;
2461 		break;
2462         case TARGET_SO_DONTROUTE:
2463 		optname = SO_DONTROUTE;
2464 		break;
2465         case TARGET_SO_BROADCAST:
2466 		optname = SO_BROADCAST;
2467 		break;
2468         case TARGET_SO_SNDBUF:
2469 		optname = SO_SNDBUF;
2470 		break;
2471         case TARGET_SO_SNDBUFFORCE:
2472                 optname = SO_SNDBUFFORCE;
2473                 break;
2474         case TARGET_SO_RCVBUF:
2475 		optname = SO_RCVBUF;
2476 		break;
2477         case TARGET_SO_RCVBUFFORCE:
2478                 optname = SO_RCVBUFFORCE;
2479                 break;
2480         case TARGET_SO_KEEPALIVE:
2481 		optname = SO_KEEPALIVE;
2482 		break;
2483         case TARGET_SO_OOBINLINE:
2484 		optname = SO_OOBINLINE;
2485 		break;
2486         case TARGET_SO_NO_CHECK:
2487 		optname = SO_NO_CHECK;
2488 		break;
2489         case TARGET_SO_PRIORITY:
2490 		optname = SO_PRIORITY;
2491 		break;
2492 #ifdef SO_BSDCOMPAT
2493         case TARGET_SO_BSDCOMPAT:
2494 		optname = SO_BSDCOMPAT;
2495 		break;
2496 #endif
2497         case TARGET_SO_PASSCRED:
2498 		optname = SO_PASSCRED;
2499 		break;
2500         case TARGET_SO_PASSSEC:
2501                 optname = SO_PASSSEC;
2502                 break;
2503         case TARGET_SO_TIMESTAMP:
2504 		optname = SO_TIMESTAMP;
2505 		break;
2506         case TARGET_SO_RCVLOWAT:
2507 		optname = SO_RCVLOWAT;
2508 		break;
2509         default:
2510             goto unimplemented;
2511         }
2512 	if (optlen < sizeof(uint32_t))
2513             return -TARGET_EINVAL;
2514 
2515 	if (get_user_u32(val, optval_addr))
2516             return -TARGET_EFAULT;
2517 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2518         break;
2519 #ifdef SOL_NETLINK
2520     case SOL_NETLINK:
2521         switch (optname) {
2522         case NETLINK_PKTINFO:
2523         case NETLINK_ADD_MEMBERSHIP:
2524         case NETLINK_DROP_MEMBERSHIP:
2525         case NETLINK_BROADCAST_ERROR:
2526         case NETLINK_NO_ENOBUFS:
2527 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2528         case NETLINK_LISTEN_ALL_NSID:
2529         case NETLINK_CAP_ACK:
2530 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2531 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2532         case NETLINK_EXT_ACK:
2533 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2534 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2535         case NETLINK_GET_STRICT_CHK:
2536 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2537             break;
2538         default:
2539             goto unimplemented;
2540         }
2541         val = 0;
2542         if (optlen < sizeof(uint32_t)) {
2543             return -TARGET_EINVAL;
2544         }
2545         if (get_user_u32(val, optval_addr)) {
2546             return -TARGET_EFAULT;
2547         }
2548         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2549                                    sizeof(val)));
2550         break;
2551 #endif /* SOL_NETLINK */
2552     default:
2553     unimplemented:
2554         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2555                       level, optname);
2556         ret = -TARGET_ENOPROTOOPT;
2557     }
2558     return ret;
2559 }
2560 
2561 /* do_getsockopt() Must return target values and target errnos. */
2562 static abi_long do_getsockopt(int sockfd, int level, int optname,
2563                               abi_ulong optval_addr, abi_ulong optlen)
2564 {
2565     abi_long ret;
2566     int len, val;
2567     socklen_t lv;
2568 
2569     switch(level) {
2570     case TARGET_SOL_SOCKET:
2571         level = SOL_SOCKET;
2572         switch (optname) {
2573         /* These don't just return a single integer */
2574         case TARGET_SO_PEERNAME:
2575             goto unimplemented;
2576         case TARGET_SO_RCVTIMEO: {
2577             struct timeval tv;
2578             socklen_t tvlen;
2579 
2580             optname = SO_RCVTIMEO;
2581 
2582 get_timeout:
2583             if (get_user_u32(len, optlen)) {
2584                 return -TARGET_EFAULT;
2585             }
2586             if (len < 0) {
2587                 return -TARGET_EINVAL;
2588             }
2589 
2590             tvlen = sizeof(tv);
2591             ret = get_errno(getsockopt(sockfd, level, optname,
2592                                        &tv, &tvlen));
2593             if (ret < 0) {
2594                 return ret;
2595             }
2596             if (len > sizeof(struct target_timeval)) {
2597                 len = sizeof(struct target_timeval);
2598             }
2599             if (copy_to_user_timeval(optval_addr, &tv)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             if (put_user_u32(len, optlen)) {
2603                 return -TARGET_EFAULT;
2604             }
2605             break;
2606         }
2607         case TARGET_SO_SNDTIMEO:
2608             optname = SO_SNDTIMEO;
2609             goto get_timeout;
2610         case TARGET_SO_PEERCRED: {
2611             struct ucred cr;
2612             socklen_t crlen;
2613             struct target_ucred *tcr;
2614 
2615             if (get_user_u32(len, optlen)) {
2616                 return -TARGET_EFAULT;
2617             }
2618             if (len < 0) {
2619                 return -TARGET_EINVAL;
2620             }
2621 
2622             crlen = sizeof(cr);
2623             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2624                                        &cr, &crlen));
2625             if (ret < 0) {
2626                 return ret;
2627             }
2628             if (len > crlen) {
2629                 len = crlen;
2630             }
2631             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2632                 return -TARGET_EFAULT;
2633             }
2634             __put_user(cr.pid, &tcr->pid);
2635             __put_user(cr.uid, &tcr->uid);
2636             __put_user(cr.gid, &tcr->gid);
2637             unlock_user_struct(tcr, optval_addr, 1);
2638             if (put_user_u32(len, optlen)) {
2639                 return -TARGET_EFAULT;
2640             }
2641             break;
2642         }
2643         case TARGET_SO_PEERSEC: {
2644             char *name;
2645 
2646             if (get_user_u32(len, optlen)) {
2647                 return -TARGET_EFAULT;
2648             }
2649             if (len < 0) {
2650                 return -TARGET_EINVAL;
2651             }
2652             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2653             if (!name) {
2654                 return -TARGET_EFAULT;
2655             }
2656             lv = len;
2657             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2658                                        name, &lv));
2659             if (put_user_u32(lv, optlen)) {
2660                 ret = -TARGET_EFAULT;
2661             }
2662             unlock_user(name, optval_addr, lv);
2663             break;
2664         }
2665         case TARGET_SO_LINGER:
2666         {
2667             struct linger lg;
2668             socklen_t lglen;
2669             struct target_linger *tlg;
2670 
2671             if (get_user_u32(len, optlen)) {
2672                 return -TARGET_EFAULT;
2673             }
2674             if (len < 0) {
2675                 return -TARGET_EINVAL;
2676             }
2677 
2678             lglen = sizeof(lg);
2679             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2680                                        &lg, &lglen));
2681             if (ret < 0) {
2682                 return ret;
2683             }
2684             if (len > lglen) {
2685                 len = lglen;
2686             }
2687             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2688                 return -TARGET_EFAULT;
2689             }
2690             __put_user(lg.l_onoff, &tlg->l_onoff);
2691             __put_user(lg.l_linger, &tlg->l_linger);
2692             unlock_user_struct(tlg, optval_addr, 1);
2693             if (put_user_u32(len, optlen)) {
2694                 return -TARGET_EFAULT;
2695             }
2696             break;
2697         }
2698         /* Options with 'int' argument.  */
2699         case TARGET_SO_DEBUG:
2700             optname = SO_DEBUG;
2701             goto int_case;
2702         case TARGET_SO_REUSEADDR:
2703             optname = SO_REUSEADDR;
2704             goto int_case;
2705 #ifdef SO_REUSEPORT
2706         case TARGET_SO_REUSEPORT:
2707             optname = SO_REUSEPORT;
2708             goto int_case;
2709 #endif
2710         case TARGET_SO_TYPE:
2711             optname = SO_TYPE;
2712             goto int_case;
2713         case TARGET_SO_ERROR:
2714             optname = SO_ERROR;
2715             goto int_case;
2716         case TARGET_SO_DONTROUTE:
2717             optname = SO_DONTROUTE;
2718             goto int_case;
2719         case TARGET_SO_BROADCAST:
2720             optname = SO_BROADCAST;
2721             goto int_case;
2722         case TARGET_SO_SNDBUF:
2723             optname = SO_SNDBUF;
2724             goto int_case;
2725         case TARGET_SO_RCVBUF:
2726             optname = SO_RCVBUF;
2727             goto int_case;
2728         case TARGET_SO_KEEPALIVE:
2729             optname = SO_KEEPALIVE;
2730             goto int_case;
2731         case TARGET_SO_OOBINLINE:
2732             optname = SO_OOBINLINE;
2733             goto int_case;
2734         case TARGET_SO_NO_CHECK:
2735             optname = SO_NO_CHECK;
2736             goto int_case;
2737         case TARGET_SO_PRIORITY:
2738             optname = SO_PRIORITY;
2739             goto int_case;
2740 #ifdef SO_BSDCOMPAT
2741         case TARGET_SO_BSDCOMPAT:
2742             optname = SO_BSDCOMPAT;
2743             goto int_case;
2744 #endif
2745         case TARGET_SO_PASSCRED:
2746             optname = SO_PASSCRED;
2747             goto int_case;
2748         case TARGET_SO_TIMESTAMP:
2749             optname = SO_TIMESTAMP;
2750             goto int_case;
2751         case TARGET_SO_RCVLOWAT:
2752             optname = SO_RCVLOWAT;
2753             goto int_case;
2754         case TARGET_SO_ACCEPTCONN:
2755             optname = SO_ACCEPTCONN;
2756             goto int_case;
2757         case TARGET_SO_PROTOCOL:
2758             optname = SO_PROTOCOL;
2759             goto int_case;
2760         case TARGET_SO_DOMAIN:
2761             optname = SO_DOMAIN;
2762             goto int_case;
2763         default:
2764             goto int_case;
2765         }
2766         break;
2767     case SOL_TCP:
2768     case SOL_UDP:
2769         /* TCP and UDP options all take an 'int' value.  */
2770     int_case:
2771         if (get_user_u32(len, optlen))
2772             return -TARGET_EFAULT;
2773         if (len < 0)
2774             return -TARGET_EINVAL;
2775         lv = sizeof(lv);
2776         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2777         if (ret < 0)
2778             return ret;
2779         if (optname == SO_TYPE) {
2780             val = host_to_target_sock_type(val);
2781         }
2782         if (len > lv)
2783             len = lv;
2784         if (len == 4) {
2785             if (put_user_u32(val, optval_addr))
2786                 return -TARGET_EFAULT;
2787         } else {
2788             if (put_user_u8(val, optval_addr))
2789                 return -TARGET_EFAULT;
2790         }
2791         if (put_user_u32(len, optlen))
2792             return -TARGET_EFAULT;
2793         break;
2794     case SOL_IP:
2795         switch(optname) {
2796         case IP_TOS:
2797         case IP_TTL:
2798         case IP_HDRINCL:
2799         case IP_ROUTER_ALERT:
2800         case IP_RECVOPTS:
2801         case IP_RETOPTS:
2802         case IP_PKTINFO:
2803         case IP_MTU_DISCOVER:
2804         case IP_RECVERR:
2805         case IP_RECVTOS:
2806 #ifdef IP_FREEBIND
2807         case IP_FREEBIND:
2808 #endif
2809         case IP_MULTICAST_TTL:
2810         case IP_MULTICAST_LOOP:
2811             if (get_user_u32(len, optlen))
2812                 return -TARGET_EFAULT;
2813             if (len < 0)
2814                 return -TARGET_EINVAL;
2815             lv = sizeof(lv);
2816             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2817             if (ret < 0)
2818                 return ret;
2819             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2820                 len = 1;
2821                 if (put_user_u32(len, optlen)
2822                     || put_user_u8(val, optval_addr))
2823                     return -TARGET_EFAULT;
2824             } else {
2825                 if (len > sizeof(int))
2826                     len = sizeof(int);
2827                 if (put_user_u32(len, optlen)
2828                     || put_user_u32(val, optval_addr))
2829                     return -TARGET_EFAULT;
2830             }
2831             break;
2832         default:
2833             ret = -TARGET_ENOPROTOOPT;
2834             break;
2835         }
2836         break;
2837     case SOL_IPV6:
2838         switch (optname) {
2839         case IPV6_MTU_DISCOVER:
2840         case IPV6_MTU:
2841         case IPV6_V6ONLY:
2842         case IPV6_RECVPKTINFO:
2843         case IPV6_UNICAST_HOPS:
2844         case IPV6_MULTICAST_HOPS:
2845         case IPV6_MULTICAST_LOOP:
2846         case IPV6_RECVERR:
2847         case IPV6_RECVHOPLIMIT:
2848         case IPV6_2292HOPLIMIT:
2849         case IPV6_CHECKSUM:
2850         case IPV6_ADDRFORM:
2851         case IPV6_2292PKTINFO:
2852         case IPV6_RECVTCLASS:
2853         case IPV6_RECVRTHDR:
2854         case IPV6_2292RTHDR:
2855         case IPV6_RECVHOPOPTS:
2856         case IPV6_2292HOPOPTS:
2857         case IPV6_RECVDSTOPTS:
2858         case IPV6_2292DSTOPTS:
2859         case IPV6_TCLASS:
2860         case IPV6_ADDR_PREFERENCES:
2861 #ifdef IPV6_RECVPATHMTU
2862         case IPV6_RECVPATHMTU:
2863 #endif
2864 #ifdef IPV6_TRANSPARENT
2865         case IPV6_TRANSPARENT:
2866 #endif
2867 #ifdef IPV6_FREEBIND
2868         case IPV6_FREEBIND:
2869 #endif
2870 #ifdef IPV6_RECVORIGDSTADDR
2871         case IPV6_RECVORIGDSTADDR:
2872 #endif
2873             if (get_user_u32(len, optlen))
2874                 return -TARGET_EFAULT;
2875             if (len < 0)
2876                 return -TARGET_EINVAL;
2877             lv = sizeof(lv);
2878             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2879             if (ret < 0)
2880                 return ret;
2881             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2882                 len = 1;
2883                 if (put_user_u32(len, optlen)
2884                     || put_user_u8(val, optval_addr))
2885                     return -TARGET_EFAULT;
2886             } else {
2887                 if (len > sizeof(int))
2888                     len = sizeof(int);
2889                 if (put_user_u32(len, optlen)
2890                     || put_user_u32(val, optval_addr))
2891                     return -TARGET_EFAULT;
2892             }
2893             break;
2894         default:
2895             ret = -TARGET_ENOPROTOOPT;
2896             break;
2897         }
2898         break;
2899 #ifdef SOL_NETLINK
2900     case SOL_NETLINK:
2901         switch (optname) {
2902         case NETLINK_PKTINFO:
2903         case NETLINK_BROADCAST_ERROR:
2904         case NETLINK_NO_ENOBUFS:
2905 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2906         case NETLINK_LISTEN_ALL_NSID:
2907         case NETLINK_CAP_ACK:
2908 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2909 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2910         case NETLINK_EXT_ACK:
2911 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2912 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2913         case NETLINK_GET_STRICT_CHK:
2914 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2915             if (get_user_u32(len, optlen)) {
2916                 return -TARGET_EFAULT;
2917             }
2918             if (len != sizeof(val)) {
2919                 return -TARGET_EINVAL;
2920             }
2921             lv = len;
2922             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2923             if (ret < 0) {
2924                 return ret;
2925             }
2926             if (put_user_u32(lv, optlen)
2927                 || put_user_u32(val, optval_addr)) {
2928                 return -TARGET_EFAULT;
2929             }
2930             break;
2931 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2932         case NETLINK_LIST_MEMBERSHIPS:
2933         {
2934             uint32_t *results;
2935             int i;
2936             if (get_user_u32(len, optlen)) {
2937                 return -TARGET_EFAULT;
2938             }
2939             if (len < 0) {
2940                 return -TARGET_EINVAL;
2941             }
2942             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2943             if (!results && len > 0) {
2944                 return -TARGET_EFAULT;
2945             }
2946             lv = len;
2947             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2948             if (ret < 0) {
2949                 unlock_user(results, optval_addr, 0);
2950                 return ret;
2951             }
2952             /* swap host endianess to target endianess. */
2953             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2954                 results[i] = tswap32(results[i]);
2955             }
2956             if (put_user_u32(lv, optlen)) {
2957                 return -TARGET_EFAULT;
2958             }
2959             unlock_user(results, optval_addr, 0);
2960             break;
2961         }
2962 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2963         default:
2964             goto unimplemented;
2965         }
2966         break;
2967 #endif /* SOL_NETLINK */
2968     default:
2969     unimplemented:
2970         qemu_log_mask(LOG_UNIMP,
2971                       "getsockopt level=%d optname=%d not yet supported\n",
2972                       level, optname);
2973         ret = -TARGET_EOPNOTSUPP;
2974         break;
2975     }
2976     return ret;
2977 }
2978 
2979 /* Convert target low/high pair representing file offset into the host
2980  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2981  * as the kernel doesn't handle them either.
2982  */
2983 static void target_to_host_low_high(abi_ulong tlow,
2984                                     abi_ulong thigh,
2985                                     unsigned long *hlow,
2986                                     unsigned long *hhigh)
2987 {
2988     uint64_t off = tlow |
2989         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2990         TARGET_LONG_BITS / 2;
2991 
2992     *hlow = off;
2993     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2994 }
2995 
2996 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2997                                 abi_ulong count, int copy)
2998 {
2999     struct target_iovec *target_vec;
3000     struct iovec *vec;
3001     abi_ulong total_len, max_len;
3002     int i;
3003     int err = 0;
3004     bool bad_address = false;
3005 
3006     if (count == 0) {
3007         errno = 0;
3008         return NULL;
3009     }
3010     if (count > IOV_MAX) {
3011         errno = EINVAL;
3012         return NULL;
3013     }
3014 
3015     vec = g_try_new0(struct iovec, count);
3016     if (vec == NULL) {
3017         errno = ENOMEM;
3018         return NULL;
3019     }
3020 
3021     target_vec = lock_user(VERIFY_READ, target_addr,
3022                            count * sizeof(struct target_iovec), 1);
3023     if (target_vec == NULL) {
3024         err = EFAULT;
3025         goto fail2;
3026     }
3027 
3028     /* ??? If host page size > target page size, this will result in a
3029        value larger than what we can actually support.  */
3030     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3031     total_len = 0;
3032 
3033     for (i = 0; i < count; i++) {
3034         abi_ulong base = tswapal(target_vec[i].iov_base);
3035         abi_long len = tswapal(target_vec[i].iov_len);
3036 
3037         if (len < 0) {
3038             err = EINVAL;
3039             goto fail;
3040         } else if (len == 0) {
3041             /* Zero length pointer is ignored.  */
3042             vec[i].iov_base = 0;
3043         } else {
3044             vec[i].iov_base = lock_user(type, base, len, copy);
3045             /* If the first buffer pointer is bad, this is a fault.  But
3046              * subsequent bad buffers will result in a partial write; this
3047              * is realized by filling the vector with null pointers and
3048              * zero lengths. */
3049             if (!vec[i].iov_base) {
3050                 if (i == 0) {
3051                     err = EFAULT;
3052                     goto fail;
3053                 } else {
3054                     bad_address = true;
3055                 }
3056             }
3057             if (bad_address) {
3058                 len = 0;
3059             }
3060             if (len > max_len - total_len) {
3061                 len = max_len - total_len;
3062             }
3063         }
3064         vec[i].iov_len = len;
3065         total_len += len;
3066     }
3067 
3068     unlock_user(target_vec, target_addr, 0);
3069     return vec;
3070 
3071  fail:
3072     while (--i >= 0) {
3073         if (tswapal(target_vec[i].iov_len) > 0) {
3074             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3075         }
3076     }
3077     unlock_user(target_vec, target_addr, 0);
3078  fail2:
3079     g_free(vec);
3080     errno = err;
3081     return NULL;
3082 }
3083 
3084 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3085                          abi_ulong count, int copy)
3086 {
3087     struct target_iovec *target_vec;
3088     int i;
3089 
3090     target_vec = lock_user(VERIFY_READ, target_addr,
3091                            count * sizeof(struct target_iovec), 1);
3092     if (target_vec) {
3093         for (i = 0; i < count; i++) {
3094             abi_ulong base = tswapal(target_vec[i].iov_base);
3095             abi_long len = tswapal(target_vec[i].iov_len);
3096             if (len < 0) {
3097                 break;
3098             }
3099             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3100         }
3101         unlock_user(target_vec, target_addr, 0);
3102     }
3103 
3104     g_free(vec);
3105 }
3106 
3107 static inline int target_to_host_sock_type(int *type)
3108 {
3109     int host_type = 0;
3110     int target_type = *type;
3111 
3112     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3113     case TARGET_SOCK_DGRAM:
3114         host_type = SOCK_DGRAM;
3115         break;
3116     case TARGET_SOCK_STREAM:
3117         host_type = SOCK_STREAM;
3118         break;
3119     default:
3120         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3121         break;
3122     }
3123     if (target_type & TARGET_SOCK_CLOEXEC) {
3124 #if defined(SOCK_CLOEXEC)
3125         host_type |= SOCK_CLOEXEC;
3126 #else
3127         return -TARGET_EINVAL;
3128 #endif
3129     }
3130     if (target_type & TARGET_SOCK_NONBLOCK) {
3131 #if defined(SOCK_NONBLOCK)
3132         host_type |= SOCK_NONBLOCK;
3133 #elif !defined(O_NONBLOCK)
3134         return -TARGET_EINVAL;
3135 #endif
3136     }
3137     *type = host_type;
3138     return 0;
3139 }
3140 
3141 /* Try to emulate socket type flags after socket creation.  */
3142 static int sock_flags_fixup(int fd, int target_type)
3143 {
3144 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3145     if (target_type & TARGET_SOCK_NONBLOCK) {
3146         int flags = fcntl(fd, F_GETFL);
3147         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3148             close(fd);
3149             return -TARGET_EINVAL;
3150         }
3151     }
3152 #endif
3153     return fd;
3154 }
3155 
3156 /* do_socket() Must return target values and target errnos. */
3157 static abi_long do_socket(int domain, int type, int protocol)
3158 {
3159     int target_type = type;
3160     int ret;
3161 
3162     ret = target_to_host_sock_type(&type);
3163     if (ret) {
3164         return ret;
3165     }
3166 
3167     if (domain == PF_NETLINK && !(
3168 #ifdef CONFIG_RTNETLINK
3169          protocol == NETLINK_ROUTE ||
3170 #endif
3171          protocol == NETLINK_KOBJECT_UEVENT ||
3172          protocol == NETLINK_AUDIT)) {
3173         return -TARGET_EPROTONOSUPPORT;
3174     }
3175 
3176     if (domain == AF_PACKET ||
3177         (domain == AF_INET && type == SOCK_PACKET)) {
3178         protocol = tswap16(protocol);
3179     }
3180 
3181     ret = get_errno(socket(domain, type, protocol));
3182     if (ret >= 0) {
3183         ret = sock_flags_fixup(ret, target_type);
3184         if (type == SOCK_PACKET) {
3185             /* Manage an obsolete case :
3186              * if socket type is SOCK_PACKET, bind by name
3187              */
3188             fd_trans_register(ret, &target_packet_trans);
3189         } else if (domain == PF_NETLINK) {
3190             switch (protocol) {
3191 #ifdef CONFIG_RTNETLINK
3192             case NETLINK_ROUTE:
3193                 fd_trans_register(ret, &target_netlink_route_trans);
3194                 break;
3195 #endif
3196             case NETLINK_KOBJECT_UEVENT:
3197                 /* nothing to do: messages are strings */
3198                 break;
3199             case NETLINK_AUDIT:
3200                 fd_trans_register(ret, &target_netlink_audit_trans);
3201                 break;
3202             default:
3203                 g_assert_not_reached();
3204             }
3205         }
3206     }
3207     return ret;
3208 }
3209 
3210 /* do_bind() Must return target values and target errnos. */
3211 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3212                         socklen_t addrlen)
3213 {
3214     void *addr;
3215     abi_long ret;
3216 
3217     if ((int)addrlen < 0) {
3218         return -TARGET_EINVAL;
3219     }
3220 
3221     addr = alloca(addrlen+1);
3222 
3223     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3224     if (ret)
3225         return ret;
3226 
3227     return get_errno(bind(sockfd, addr, addrlen));
3228 }
3229 
3230 /* do_connect() Must return target values and target errnos. */
3231 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3232                            socklen_t addrlen)
3233 {
3234     void *addr;
3235     abi_long ret;
3236 
3237     if ((int)addrlen < 0) {
3238         return -TARGET_EINVAL;
3239     }
3240 
3241     addr = alloca(addrlen+1);
3242 
3243     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3244     if (ret)
3245         return ret;
3246 
3247     return get_errno(safe_connect(sockfd, addr, addrlen));
3248 }
3249 
3250 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3251 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3252                                       int flags, int send)
3253 {
3254     abi_long ret, len;
3255     struct msghdr msg;
3256     abi_ulong count;
3257     struct iovec *vec;
3258     abi_ulong target_vec;
3259 
3260     if (msgp->msg_name) {
3261         msg.msg_namelen = tswap32(msgp->msg_namelen);
3262         msg.msg_name = alloca(msg.msg_namelen+1);
3263         ret = target_to_host_sockaddr(fd, msg.msg_name,
3264                                       tswapal(msgp->msg_name),
3265                                       msg.msg_namelen);
3266         if (ret == -TARGET_EFAULT) {
3267             /* For connected sockets msg_name and msg_namelen must
3268              * be ignored, so returning EFAULT immediately is wrong.
3269              * Instead, pass a bad msg_name to the host kernel, and
3270              * let it decide whether to return EFAULT or not.
3271              */
3272             msg.msg_name = (void *)-1;
3273         } else if (ret) {
3274             goto out2;
3275         }
3276     } else {
3277         msg.msg_name = NULL;
3278         msg.msg_namelen = 0;
3279     }
3280     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3281     msg.msg_control = alloca(msg.msg_controllen);
3282     memset(msg.msg_control, 0, msg.msg_controllen);
3283 
3284     msg.msg_flags = tswap32(msgp->msg_flags);
3285 
3286     count = tswapal(msgp->msg_iovlen);
3287     target_vec = tswapal(msgp->msg_iov);
3288 
3289     if (count > IOV_MAX) {
3290         /* sendrcvmsg returns a different errno for this condition than
3291          * readv/writev, so we must catch it here before lock_iovec() does.
3292          */
3293         ret = -TARGET_EMSGSIZE;
3294         goto out2;
3295     }
3296 
3297     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3298                      target_vec, count, send);
3299     if (vec == NULL) {
3300         ret = -host_to_target_errno(errno);
3301         goto out2;
3302     }
3303     msg.msg_iovlen = count;
3304     msg.msg_iov = vec;
3305 
3306     if (send) {
3307         if (fd_trans_target_to_host_data(fd)) {
3308             void *host_msg;
3309 
3310             host_msg = g_malloc(msg.msg_iov->iov_len);
3311             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3312             ret = fd_trans_target_to_host_data(fd)(host_msg,
3313                                                    msg.msg_iov->iov_len);
3314             if (ret >= 0) {
3315                 msg.msg_iov->iov_base = host_msg;
3316                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3317             }
3318             g_free(host_msg);
3319         } else {
3320             ret = target_to_host_cmsg(&msg, msgp);
3321             if (ret == 0) {
3322                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3323             }
3324         }
3325     } else {
3326         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3327         if (!is_error(ret)) {
3328             len = ret;
3329             if (fd_trans_host_to_target_data(fd)) {
3330                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3331                                                MIN(msg.msg_iov->iov_len, len));
3332             }
3333             if (!is_error(ret)) {
3334                 ret = host_to_target_cmsg(msgp, &msg);
3335             }
3336             if (!is_error(ret)) {
3337                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3338                 msgp->msg_flags = tswap32(msg.msg_flags);
3339                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3340                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3341                                     msg.msg_name, msg.msg_namelen);
3342                     if (ret) {
3343                         goto out;
3344                     }
3345                 }
3346 
3347                 ret = len;
3348             }
3349         }
3350     }
3351 
3352 out:
3353     unlock_iovec(vec, target_vec, count, !send);
3354 out2:
3355     return ret;
3356 }
3357 
3358 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3359                                int flags, int send)
3360 {
3361     abi_long ret;
3362     struct target_msghdr *msgp;
3363 
3364     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3365                           msgp,
3366                           target_msg,
3367                           send ? 1 : 0)) {
3368         return -TARGET_EFAULT;
3369     }
3370     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3371     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3372     return ret;
3373 }
3374 
3375 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3376  * so it might not have this *mmsg-specific flag either.
3377  */
3378 #ifndef MSG_WAITFORONE
3379 #define MSG_WAITFORONE 0x10000
3380 #endif
3381 
3382 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3383                                 unsigned int vlen, unsigned int flags,
3384                                 int send)
3385 {
3386     struct target_mmsghdr *mmsgp;
3387     abi_long ret = 0;
3388     int i;
3389 
3390     if (vlen > UIO_MAXIOV) {
3391         vlen = UIO_MAXIOV;
3392     }
3393 
3394     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3395     if (!mmsgp) {
3396         return -TARGET_EFAULT;
3397     }
3398 
3399     for (i = 0; i < vlen; i++) {
3400         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3401         if (is_error(ret)) {
3402             break;
3403         }
3404         mmsgp[i].msg_len = tswap32(ret);
3405         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3406         if (flags & MSG_WAITFORONE) {
3407             flags |= MSG_DONTWAIT;
3408         }
3409     }
3410 
3411     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3412 
3413     /* Return number of datagrams sent if we sent any at all;
3414      * otherwise return the error.
3415      */
3416     if (i) {
3417         return i;
3418     }
3419     return ret;
3420 }
3421 
3422 /* do_accept4() Must return target values and target errnos. */
3423 static abi_long do_accept4(int fd, abi_ulong target_addr,
3424                            abi_ulong target_addrlen_addr, int flags)
3425 {
3426     socklen_t addrlen, ret_addrlen;
3427     void *addr;
3428     abi_long ret;
3429     int host_flags;
3430 
3431     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3432 
3433     if (target_addr == 0) {
3434         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3435     }
3436 
3437     /* linux returns EFAULT if addrlen pointer is invalid */
3438     if (get_user_u32(addrlen, target_addrlen_addr))
3439         return -TARGET_EFAULT;
3440 
3441     if ((int)addrlen < 0) {
3442         return -TARGET_EINVAL;
3443     }
3444 
3445     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3446         return -TARGET_EFAULT;
3447     }
3448 
3449     addr = alloca(addrlen);
3450 
3451     ret_addrlen = addrlen;
3452     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3453     if (!is_error(ret)) {
3454         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3455         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3456             ret = -TARGET_EFAULT;
3457         }
3458     }
3459     return ret;
3460 }
3461 
3462 /* do_getpeername() Must return target values and target errnos. */
3463 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3464                                abi_ulong target_addrlen_addr)
3465 {
3466     socklen_t addrlen, ret_addrlen;
3467     void *addr;
3468     abi_long ret;
3469 
3470     if (get_user_u32(addrlen, target_addrlen_addr))
3471         return -TARGET_EFAULT;
3472 
3473     if ((int)addrlen < 0) {
3474         return -TARGET_EINVAL;
3475     }
3476 
3477     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3478         return -TARGET_EFAULT;
3479     }
3480 
3481     addr = alloca(addrlen);
3482 
3483     ret_addrlen = addrlen;
3484     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3485     if (!is_error(ret)) {
3486         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3487         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3488             ret = -TARGET_EFAULT;
3489         }
3490     }
3491     return ret;
3492 }
3493 
3494 /* do_getsockname() Must return target values and target errnos. */
3495 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3496                                abi_ulong target_addrlen_addr)
3497 {
3498     socklen_t addrlen, ret_addrlen;
3499     void *addr;
3500     abi_long ret;
3501 
3502     if (get_user_u32(addrlen, target_addrlen_addr))
3503         return -TARGET_EFAULT;
3504 
3505     if ((int)addrlen < 0) {
3506         return -TARGET_EINVAL;
3507     }
3508 
3509     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3510         return -TARGET_EFAULT;
3511     }
3512 
3513     addr = alloca(addrlen);
3514 
3515     ret_addrlen = addrlen;
3516     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3517     if (!is_error(ret)) {
3518         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3519         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3520             ret = -TARGET_EFAULT;
3521         }
3522     }
3523     return ret;
3524 }
3525 
3526 /* do_socketpair() Must return target values and target errnos. */
3527 static abi_long do_socketpair(int domain, int type, int protocol,
3528                               abi_ulong target_tab_addr)
3529 {
3530     int tab[2];
3531     abi_long ret;
3532 
3533     target_to_host_sock_type(&type);
3534 
3535     ret = get_errno(socketpair(domain, type, protocol, tab));
3536     if (!is_error(ret)) {
3537         if (put_user_s32(tab[0], target_tab_addr)
3538             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3539             ret = -TARGET_EFAULT;
3540     }
3541     return ret;
3542 }
3543 
3544 /* do_sendto() Must return target values and target errnos. */
3545 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3546                           abi_ulong target_addr, socklen_t addrlen)
3547 {
3548     void *addr;
3549     void *host_msg;
3550     void *copy_msg = NULL;
3551     abi_long ret;
3552 
3553     if ((int)addrlen < 0) {
3554         return -TARGET_EINVAL;
3555     }
3556 
3557     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3558     if (!host_msg)
3559         return -TARGET_EFAULT;
3560     if (fd_trans_target_to_host_data(fd)) {
3561         copy_msg = host_msg;
3562         host_msg = g_malloc(len);
3563         memcpy(host_msg, copy_msg, len);
3564         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3565         if (ret < 0) {
3566             goto fail;
3567         }
3568     }
3569     if (target_addr) {
3570         addr = alloca(addrlen+1);
3571         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3572         if (ret) {
3573             goto fail;
3574         }
3575         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3576     } else {
3577         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3578     }
3579 fail:
3580     if (copy_msg) {
3581         g_free(host_msg);
3582         host_msg = copy_msg;
3583     }
3584     unlock_user(host_msg, msg, 0);
3585     return ret;
3586 }
3587 
3588 /* do_recvfrom() Must return target values and target errnos. */
3589 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3590                             abi_ulong target_addr,
3591                             abi_ulong target_addrlen)
3592 {
3593     socklen_t addrlen, ret_addrlen;
3594     void *addr;
3595     void *host_msg;
3596     abi_long ret;
3597 
3598     if (!msg) {
3599         host_msg = NULL;
3600     } else {
3601         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3602         if (!host_msg) {
3603             return -TARGET_EFAULT;
3604         }
3605     }
3606     if (target_addr) {
3607         if (get_user_u32(addrlen, target_addrlen)) {
3608             ret = -TARGET_EFAULT;
3609             goto fail;
3610         }
3611         if ((int)addrlen < 0) {
3612             ret = -TARGET_EINVAL;
3613             goto fail;
3614         }
3615         addr = alloca(addrlen);
3616         ret_addrlen = addrlen;
3617         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3618                                       addr, &ret_addrlen));
3619     } else {
3620         addr = NULL; /* To keep compiler quiet.  */
3621         addrlen = 0; /* To keep compiler quiet.  */
3622         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3623     }
3624     if (!is_error(ret)) {
3625         if (fd_trans_host_to_target_data(fd)) {
3626             abi_long trans;
3627             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3628             if (is_error(trans)) {
3629                 ret = trans;
3630                 goto fail;
3631             }
3632         }
3633         if (target_addr) {
3634             host_to_target_sockaddr(target_addr, addr,
3635                                     MIN(addrlen, ret_addrlen));
3636             if (put_user_u32(ret_addrlen, target_addrlen)) {
3637                 ret = -TARGET_EFAULT;
3638                 goto fail;
3639             }
3640         }
3641         unlock_user(host_msg, msg, len);
3642     } else {
3643 fail:
3644         unlock_user(host_msg, msg, 0);
3645     }
3646     return ret;
3647 }
3648 
3649 #ifdef TARGET_NR_socketcall
3650 /* do_socketcall() must return target values and target errnos. */
3651 static abi_long do_socketcall(int num, abi_ulong vptr)
3652 {
3653     static const unsigned nargs[] = { /* number of arguments per operation */
3654         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3655         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3656         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3657         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3658         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3659         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3660         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3661         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3662         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3663         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3664         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3665         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3666         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3667         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3668         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3669         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3670         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3671         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3672         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3673         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3674     };
3675     abi_long a[6]; /* max 6 args */
3676     unsigned i;
3677 
3678     /* check the range of the first argument num */
3679     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3680     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3681         return -TARGET_EINVAL;
3682     }
3683     /* ensure we have space for args */
3684     if (nargs[num] > ARRAY_SIZE(a)) {
3685         return -TARGET_EINVAL;
3686     }
3687     /* collect the arguments in a[] according to nargs[] */
3688     for (i = 0; i < nargs[num]; ++i) {
3689         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3690             return -TARGET_EFAULT;
3691         }
3692     }
3693     /* now when we have the args, invoke the appropriate underlying function */
3694     switch (num) {
3695     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3696         return do_socket(a[0], a[1], a[2]);
3697     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3698         return do_bind(a[0], a[1], a[2]);
3699     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3700         return do_connect(a[0], a[1], a[2]);
3701     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3702         return get_errno(listen(a[0], a[1]));
3703     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3704         return do_accept4(a[0], a[1], a[2], 0);
3705     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3706         return do_getsockname(a[0], a[1], a[2]);
3707     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3708         return do_getpeername(a[0], a[1], a[2]);
3709     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3710         return do_socketpair(a[0], a[1], a[2], a[3]);
3711     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3712         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3713     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3714         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3715     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3716         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3717     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3718         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3719     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3720         return get_errno(shutdown(a[0], a[1]));
3721     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3722         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3723     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3724         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3725     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3726         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3727     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3728         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3729     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3730         return do_accept4(a[0], a[1], a[2], a[3]);
3731     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3732         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3733     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3734         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3735     default:
3736         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3737         return -TARGET_EINVAL;
3738     }
3739 }
3740 #endif
3741 
3742 #define N_SHM_REGIONS	32
3743 
3744 static struct shm_region {
3745     abi_ulong start;
3746     abi_ulong size;
3747     bool in_use;
3748 } shm_regions[N_SHM_REGIONS];
3749 
3750 #ifndef TARGET_SEMID64_DS
3751 /* asm-generic version of this struct */
3752 struct target_semid64_ds
3753 {
3754   struct target_ipc_perm sem_perm;
3755   abi_ulong sem_otime;
3756 #if TARGET_ABI_BITS == 32
3757   abi_ulong __unused1;
3758 #endif
3759   abi_ulong sem_ctime;
3760 #if TARGET_ABI_BITS == 32
3761   abi_ulong __unused2;
3762 #endif
3763   abi_ulong sem_nsems;
3764   abi_ulong __unused3;
3765   abi_ulong __unused4;
3766 };
3767 #endif
3768 
3769 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3770                                                abi_ulong target_addr)
3771 {
3772     struct target_ipc_perm *target_ip;
3773     struct target_semid64_ds *target_sd;
3774 
3775     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3776         return -TARGET_EFAULT;
3777     target_ip = &(target_sd->sem_perm);
3778     host_ip->__key = tswap32(target_ip->__key);
3779     host_ip->uid = tswap32(target_ip->uid);
3780     host_ip->gid = tswap32(target_ip->gid);
3781     host_ip->cuid = tswap32(target_ip->cuid);
3782     host_ip->cgid = tswap32(target_ip->cgid);
3783 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3784     host_ip->mode = tswap32(target_ip->mode);
3785 #else
3786     host_ip->mode = tswap16(target_ip->mode);
3787 #endif
3788 #if defined(TARGET_PPC)
3789     host_ip->__seq = tswap32(target_ip->__seq);
3790 #else
3791     host_ip->__seq = tswap16(target_ip->__seq);
3792 #endif
3793     unlock_user_struct(target_sd, target_addr, 0);
3794     return 0;
3795 }
3796 
3797 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3798                                                struct ipc_perm *host_ip)
3799 {
3800     struct target_ipc_perm *target_ip;
3801     struct target_semid64_ds *target_sd;
3802 
3803     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804         return -TARGET_EFAULT;
3805     target_ip = &(target_sd->sem_perm);
3806     target_ip->__key = tswap32(host_ip->__key);
3807     target_ip->uid = tswap32(host_ip->uid);
3808     target_ip->gid = tswap32(host_ip->gid);
3809     target_ip->cuid = tswap32(host_ip->cuid);
3810     target_ip->cgid = tswap32(host_ip->cgid);
3811 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3812     target_ip->mode = tswap32(host_ip->mode);
3813 #else
3814     target_ip->mode = tswap16(host_ip->mode);
3815 #endif
3816 #if defined(TARGET_PPC)
3817     target_ip->__seq = tswap32(host_ip->__seq);
3818 #else
3819     target_ip->__seq = tswap16(host_ip->__seq);
3820 #endif
3821     unlock_user_struct(target_sd, target_addr, 1);
3822     return 0;
3823 }
3824 
3825 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3826                                                abi_ulong target_addr)
3827 {
3828     struct target_semid64_ds *target_sd;
3829 
3830     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3831         return -TARGET_EFAULT;
3832     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3833         return -TARGET_EFAULT;
3834     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3835     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3836     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3837     unlock_user_struct(target_sd, target_addr, 0);
3838     return 0;
3839 }
3840 
3841 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3842                                                struct semid_ds *host_sd)
3843 {
3844     struct target_semid64_ds *target_sd;
3845 
3846     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3847         return -TARGET_EFAULT;
3848     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3849         return -TARGET_EFAULT;
3850     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3851     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3852     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3853     unlock_user_struct(target_sd, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 struct target_seminfo {
3858     int semmap;
3859     int semmni;
3860     int semmns;
3861     int semmnu;
3862     int semmsl;
3863     int semopm;
3864     int semume;
3865     int semusz;
3866     int semvmx;
3867     int semaem;
3868 };
3869 
3870 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3871                                               struct seminfo *host_seminfo)
3872 {
3873     struct target_seminfo *target_seminfo;
3874     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3875         return -TARGET_EFAULT;
3876     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3877     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3878     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3879     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3880     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3881     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3882     __put_user(host_seminfo->semume, &target_seminfo->semume);
3883     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3884     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3885     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3886     unlock_user_struct(target_seminfo, target_addr, 1);
3887     return 0;
3888 }
3889 
3890 union semun {
3891 	int val;
3892 	struct semid_ds *buf;
3893 	unsigned short *array;
3894 	struct seminfo *__buf;
3895 };
3896 
3897 union target_semun {
3898 	int val;
3899 	abi_ulong buf;
3900 	abi_ulong array;
3901 	abi_ulong __buf;
3902 };
3903 
3904 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3905                                                abi_ulong target_addr)
3906 {
3907     int nsems;
3908     unsigned short *array;
3909     union semun semun;
3910     struct semid_ds semid_ds;
3911     int i, ret;
3912 
3913     semun.buf = &semid_ds;
3914 
3915     ret = semctl(semid, 0, IPC_STAT, semun);
3916     if (ret == -1)
3917         return get_errno(ret);
3918 
3919     nsems = semid_ds.sem_nsems;
3920 
3921     *host_array = g_try_new(unsigned short, nsems);
3922     if (!*host_array) {
3923         return -TARGET_ENOMEM;
3924     }
3925     array = lock_user(VERIFY_READ, target_addr,
3926                       nsems*sizeof(unsigned short), 1);
3927     if (!array) {
3928         g_free(*host_array);
3929         return -TARGET_EFAULT;
3930     }
3931 
3932     for(i=0; i<nsems; i++) {
3933         __get_user((*host_array)[i], &array[i]);
3934     }
3935     unlock_user(array, target_addr, 0);
3936 
3937     return 0;
3938 }
3939 
3940 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3941                                                unsigned short **host_array)
3942 {
3943     int nsems;
3944     unsigned short *array;
3945     union semun semun;
3946     struct semid_ds semid_ds;
3947     int i, ret;
3948 
3949     semun.buf = &semid_ds;
3950 
3951     ret = semctl(semid, 0, IPC_STAT, semun);
3952     if (ret == -1)
3953         return get_errno(ret);
3954 
3955     nsems = semid_ds.sem_nsems;
3956 
3957     array = lock_user(VERIFY_WRITE, target_addr,
3958                       nsems*sizeof(unsigned short), 0);
3959     if (!array)
3960         return -TARGET_EFAULT;
3961 
3962     for(i=0; i<nsems; i++) {
3963         __put_user((*host_array)[i], &array[i]);
3964     }
3965     g_free(*host_array);
3966     unlock_user(array, target_addr, 1);
3967 
3968     return 0;
3969 }
3970 
3971 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3972                                  abi_ulong target_arg)
3973 {
3974     union target_semun target_su = { .buf = target_arg };
3975     union semun arg;
3976     struct semid_ds dsarg;
3977     unsigned short *array = NULL;
3978     struct seminfo seminfo;
3979     abi_long ret = -TARGET_EINVAL;
3980     abi_long err;
3981     cmd &= 0xff;
3982 
3983     switch( cmd ) {
3984 	case GETVAL:
3985 	case SETVAL:
3986             /* In 64 bit cross-endian situations, we will erroneously pick up
3987              * the wrong half of the union for the "val" element.  To rectify
3988              * this, the entire 8-byte structure is byteswapped, followed by
3989 	     * a swap of the 4 byte val field. In other cases, the data is
3990 	     * already in proper host byte order. */
3991 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3992 		target_su.buf = tswapal(target_su.buf);
3993 		arg.val = tswap32(target_su.val);
3994 	    } else {
3995 		arg.val = target_su.val;
3996 	    }
3997             ret = get_errno(semctl(semid, semnum, cmd, arg));
3998             break;
3999 	case GETALL:
4000 	case SETALL:
4001             err = target_to_host_semarray(semid, &array, target_su.array);
4002             if (err)
4003                 return err;
4004             arg.array = array;
4005             ret = get_errno(semctl(semid, semnum, cmd, arg));
4006             err = host_to_target_semarray(semid, target_su.array, &array);
4007             if (err)
4008                 return err;
4009             break;
4010 	case IPC_STAT:
4011 	case IPC_SET:
4012 	case SEM_STAT:
4013             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4014             if (err)
4015                 return err;
4016             arg.buf = &dsarg;
4017             ret = get_errno(semctl(semid, semnum, cmd, arg));
4018             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4019             if (err)
4020                 return err;
4021             break;
4022 	case IPC_INFO:
4023 	case SEM_INFO:
4024             arg.__buf = &seminfo;
4025             ret = get_errno(semctl(semid, semnum, cmd, arg));
4026             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4027             if (err)
4028                 return err;
4029             break;
4030 	case IPC_RMID:
4031 	case GETPID:
4032 	case GETNCNT:
4033 	case GETZCNT:
4034             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4035             break;
4036     }
4037 
4038     return ret;
4039 }
4040 
4041 struct target_sembuf {
4042     unsigned short sem_num;
4043     short sem_op;
4044     short sem_flg;
4045 };
4046 
4047 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4048                                              abi_ulong target_addr,
4049                                              unsigned nsops)
4050 {
4051     struct target_sembuf *target_sembuf;
4052     int i;
4053 
4054     target_sembuf = lock_user(VERIFY_READ, target_addr,
4055                               nsops*sizeof(struct target_sembuf), 1);
4056     if (!target_sembuf)
4057         return -TARGET_EFAULT;
4058 
4059     for(i=0; i<nsops; i++) {
4060         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4061         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4062         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4063     }
4064 
4065     unlock_user(target_sembuf, target_addr, 0);
4066 
4067     return 0;
4068 }
4069 
4070 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4071     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4072 
4073 /*
4074  * This macro is required to handle the s390 variants, which passes the
4075  * arguments in a different order than default.
4076  */
4077 #ifdef __s390x__
4078 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4079   (__nsops), (__timeout), (__sops)
4080 #else
4081 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4082   (__nsops), 0, (__sops), (__timeout)
4083 #endif
4084 
4085 static inline abi_long do_semtimedop(int semid,
4086                                      abi_long ptr,
4087                                      unsigned nsops,
4088                                      abi_long timeout, bool time64)
4089 {
4090     struct sembuf *sops;
4091     struct timespec ts, *pts = NULL;
4092     abi_long ret;
4093 
4094     if (timeout) {
4095         pts = &ts;
4096         if (time64) {
4097             if (target_to_host_timespec64(pts, timeout)) {
4098                 return -TARGET_EFAULT;
4099             }
4100         } else {
4101             if (target_to_host_timespec(pts, timeout)) {
4102                 return -TARGET_EFAULT;
4103             }
4104         }
4105     }
4106 
4107     if (nsops > TARGET_SEMOPM) {
4108         return -TARGET_E2BIG;
4109     }
4110 
4111     sops = g_new(struct sembuf, nsops);
4112 
4113     if (target_to_host_sembuf(sops, ptr, nsops)) {
4114         g_free(sops);
4115         return -TARGET_EFAULT;
4116     }
4117 
4118     ret = -TARGET_ENOSYS;
4119 #ifdef __NR_semtimedop
4120     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4121 #endif
4122 #ifdef __NR_ipc
4123     if (ret == -TARGET_ENOSYS) {
4124         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4125                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4126     }
4127 #endif
4128     g_free(sops);
4129     return ret;
4130 }
4131 #endif
4132 
4133 struct target_msqid_ds
4134 {
4135     struct target_ipc_perm msg_perm;
4136     abi_ulong msg_stime;
4137 #if TARGET_ABI_BITS == 32
4138     abi_ulong __unused1;
4139 #endif
4140     abi_ulong msg_rtime;
4141 #if TARGET_ABI_BITS == 32
4142     abi_ulong __unused2;
4143 #endif
4144     abi_ulong msg_ctime;
4145 #if TARGET_ABI_BITS == 32
4146     abi_ulong __unused3;
4147 #endif
4148     abi_ulong __msg_cbytes;
4149     abi_ulong msg_qnum;
4150     abi_ulong msg_qbytes;
4151     abi_ulong msg_lspid;
4152     abi_ulong msg_lrpid;
4153     abi_ulong __unused4;
4154     abi_ulong __unused5;
4155 };
4156 
4157 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4158                                                abi_ulong target_addr)
4159 {
4160     struct target_msqid_ds *target_md;
4161 
4162     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4163         return -TARGET_EFAULT;
4164     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4165         return -TARGET_EFAULT;
4166     host_md->msg_stime = tswapal(target_md->msg_stime);
4167     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4168     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4169     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4170     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4171     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4172     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4173     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4174     unlock_user_struct(target_md, target_addr, 0);
4175     return 0;
4176 }
4177 
4178 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4179                                                struct msqid_ds *host_md)
4180 {
4181     struct target_msqid_ds *target_md;
4182 
4183     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4184         return -TARGET_EFAULT;
4185     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4186         return -TARGET_EFAULT;
4187     target_md->msg_stime = tswapal(host_md->msg_stime);
4188     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4189     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4190     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4191     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4192     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4193     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4194     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4195     unlock_user_struct(target_md, target_addr, 1);
4196     return 0;
4197 }
4198 
4199 struct target_msginfo {
4200     int msgpool;
4201     int msgmap;
4202     int msgmax;
4203     int msgmnb;
4204     int msgmni;
4205     int msgssz;
4206     int msgtql;
4207     unsigned short int msgseg;
4208 };
4209 
4210 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4211                                               struct msginfo *host_msginfo)
4212 {
4213     struct target_msginfo *target_msginfo;
4214     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4215         return -TARGET_EFAULT;
4216     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4217     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4218     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4219     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4220     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4221     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4222     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4223     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4224     unlock_user_struct(target_msginfo, target_addr, 1);
4225     return 0;
4226 }
4227 
4228 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4229 {
4230     struct msqid_ds dsarg;
4231     struct msginfo msginfo;
4232     abi_long ret = -TARGET_EINVAL;
4233 
4234     cmd &= 0xff;
4235 
4236     switch (cmd) {
4237     case IPC_STAT:
4238     case IPC_SET:
4239     case MSG_STAT:
4240         if (target_to_host_msqid_ds(&dsarg,ptr))
4241             return -TARGET_EFAULT;
4242         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4243         if (host_to_target_msqid_ds(ptr,&dsarg))
4244             return -TARGET_EFAULT;
4245         break;
4246     case IPC_RMID:
4247         ret = get_errno(msgctl(msgid, cmd, NULL));
4248         break;
4249     case IPC_INFO:
4250     case MSG_INFO:
4251         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4252         if (host_to_target_msginfo(ptr, &msginfo))
4253             return -TARGET_EFAULT;
4254         break;
4255     }
4256 
4257     return ret;
4258 }
4259 
4260 struct target_msgbuf {
4261     abi_long mtype;
4262     char	mtext[1];
4263 };
4264 
4265 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4266                                  ssize_t msgsz, int msgflg)
4267 {
4268     struct target_msgbuf *target_mb;
4269     struct msgbuf *host_mb;
4270     abi_long ret = 0;
4271 
4272     if (msgsz < 0) {
4273         return -TARGET_EINVAL;
4274     }
4275 
4276     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4277         return -TARGET_EFAULT;
4278     host_mb = g_try_malloc(msgsz + sizeof(long));
4279     if (!host_mb) {
4280         unlock_user_struct(target_mb, msgp, 0);
4281         return -TARGET_ENOMEM;
4282     }
4283     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4284     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4285     ret = -TARGET_ENOSYS;
4286 #ifdef __NR_msgsnd
4287     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4288 #endif
4289 #ifdef __NR_ipc
4290     if (ret == -TARGET_ENOSYS) {
4291 #ifdef __s390x__
4292         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4293                                  host_mb));
4294 #else
4295         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4296                                  host_mb, 0));
4297 #endif
4298     }
4299 #endif
4300     g_free(host_mb);
4301     unlock_user_struct(target_mb, msgp, 0);
4302 
4303     return ret;
4304 }
4305 
4306 #ifdef __NR_ipc
4307 #if defined(__sparc__)
4308 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4309 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4310 #elif defined(__s390x__)
4311 /* The s390 sys_ipc variant has only five parameters.  */
4312 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4313     ((long int[]){(long int)__msgp, __msgtyp})
4314 #else
4315 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4316     ((long int[]){(long int)__msgp, __msgtyp}), 0
4317 #endif
4318 #endif
4319 
4320 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4321                                  ssize_t msgsz, abi_long msgtyp,
4322                                  int msgflg)
4323 {
4324     struct target_msgbuf *target_mb;
4325     char *target_mtext;
4326     struct msgbuf *host_mb;
4327     abi_long ret = 0;
4328 
4329     if (msgsz < 0) {
4330         return -TARGET_EINVAL;
4331     }
4332 
4333     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4334         return -TARGET_EFAULT;
4335 
4336     host_mb = g_try_malloc(msgsz + sizeof(long));
4337     if (!host_mb) {
4338         ret = -TARGET_ENOMEM;
4339         goto end;
4340     }
4341     ret = -TARGET_ENOSYS;
4342 #ifdef __NR_msgrcv
4343     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4344 #endif
4345 #ifdef __NR_ipc
4346     if (ret == -TARGET_ENOSYS) {
4347         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4348                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4349     }
4350 #endif
4351 
4352     if (ret > 0) {
4353         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4354         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4355         if (!target_mtext) {
4356             ret = -TARGET_EFAULT;
4357             goto end;
4358         }
4359         memcpy(target_mb->mtext, host_mb->mtext, ret);
4360         unlock_user(target_mtext, target_mtext_addr, ret);
4361     }
4362 
4363     target_mb->mtype = tswapal(host_mb->mtype);
4364 
4365 end:
4366     if (target_mb)
4367         unlock_user_struct(target_mb, msgp, 1);
4368     g_free(host_mb);
4369     return ret;
4370 }
4371 
4372 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4373                                                abi_ulong target_addr)
4374 {
4375     struct target_shmid_ds *target_sd;
4376 
4377     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4378         return -TARGET_EFAULT;
4379     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4380         return -TARGET_EFAULT;
4381     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4382     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4383     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4384     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4385     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4386     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4387     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4388     unlock_user_struct(target_sd, target_addr, 0);
4389     return 0;
4390 }
4391 
4392 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4393                                                struct shmid_ds *host_sd)
4394 {
4395     struct target_shmid_ds *target_sd;
4396 
4397     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4398         return -TARGET_EFAULT;
4399     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4400         return -TARGET_EFAULT;
4401     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4402     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4403     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4404     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4405     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4406     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4407     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4408     unlock_user_struct(target_sd, target_addr, 1);
4409     return 0;
4410 }
4411 
4412 struct  target_shminfo {
4413     abi_ulong shmmax;
4414     abi_ulong shmmin;
4415     abi_ulong shmmni;
4416     abi_ulong shmseg;
4417     abi_ulong shmall;
4418 };
4419 
4420 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4421                                               struct shminfo *host_shminfo)
4422 {
4423     struct target_shminfo *target_shminfo;
4424     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4425         return -TARGET_EFAULT;
4426     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4427     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4428     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4429     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4430     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4431     unlock_user_struct(target_shminfo, target_addr, 1);
4432     return 0;
4433 }
4434 
4435 struct target_shm_info {
4436     int used_ids;
4437     abi_ulong shm_tot;
4438     abi_ulong shm_rss;
4439     abi_ulong shm_swp;
4440     abi_ulong swap_attempts;
4441     abi_ulong swap_successes;
4442 };
4443 
4444 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4445                                                struct shm_info *host_shm_info)
4446 {
4447     struct target_shm_info *target_shm_info;
4448     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4451     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4452     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4453     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4454     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4455     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4456     unlock_user_struct(target_shm_info, target_addr, 1);
4457     return 0;
4458 }
4459 
4460 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4461 {
4462     struct shmid_ds dsarg;
4463     struct shminfo shminfo;
4464     struct shm_info shm_info;
4465     abi_long ret = -TARGET_EINVAL;
4466 
4467     cmd &= 0xff;
4468 
4469     switch(cmd) {
4470     case IPC_STAT:
4471     case IPC_SET:
4472     case SHM_STAT:
4473         if (target_to_host_shmid_ds(&dsarg, buf))
4474             return -TARGET_EFAULT;
4475         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4476         if (host_to_target_shmid_ds(buf, &dsarg))
4477             return -TARGET_EFAULT;
4478         break;
4479     case IPC_INFO:
4480         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4481         if (host_to_target_shminfo(buf, &shminfo))
4482             return -TARGET_EFAULT;
4483         break;
4484     case SHM_INFO:
4485         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4486         if (host_to_target_shm_info(buf, &shm_info))
4487             return -TARGET_EFAULT;
4488         break;
4489     case IPC_RMID:
4490     case SHM_LOCK:
4491     case SHM_UNLOCK:
4492         ret = get_errno(shmctl(shmid, cmd, NULL));
4493         break;
4494     }
4495 
4496     return ret;
4497 }
4498 
4499 #ifndef TARGET_FORCE_SHMLBA
4500 /* For most architectures, SHMLBA is the same as the page size;
4501  * some architectures have larger values, in which case they should
4502  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4503  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4504  * and defining its own value for SHMLBA.
4505  *
4506  * The kernel also permits SHMLBA to be set by the architecture to a
4507  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4508  * this means that addresses are rounded to the large size if
4509  * SHM_RND is set but addresses not aligned to that size are not rejected
4510  * as long as they are at least page-aligned. Since the only architecture
4511  * which uses this is ia64 this code doesn't provide for that oddity.
4512  */
4513 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4514 {
4515     return TARGET_PAGE_SIZE;
4516 }
4517 #endif
4518 
4519 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4520                                  int shmid, abi_ulong shmaddr, int shmflg)
4521 {
4522     CPUState *cpu = env_cpu(cpu_env);
4523     abi_long raddr;
4524     void *host_raddr;
4525     struct shmid_ds shm_info;
4526     int i,ret;
4527     abi_ulong shmlba;
4528 
4529     /* shmat pointers are always untagged */
4530 
4531     /* find out the length of the shared memory segment */
4532     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4533     if (is_error(ret)) {
4534         /* can't get length, bail out */
4535         return ret;
4536     }
4537 
4538     shmlba = target_shmlba(cpu_env);
4539 
4540     if (shmaddr & (shmlba - 1)) {
4541         if (shmflg & SHM_RND) {
4542             shmaddr &= ~(shmlba - 1);
4543         } else {
4544             return -TARGET_EINVAL;
4545         }
4546     }
4547     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4548         return -TARGET_EINVAL;
4549     }
4550 
4551     mmap_lock();
4552 
4553     /*
4554      * We're mapping shared memory, so ensure we generate code for parallel
4555      * execution and flush old translations.  This will work up to the level
4556      * supported by the host -- anything that requires EXCP_ATOMIC will not
4557      * be atomic with respect to an external process.
4558      */
4559     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4560         cpu->tcg_cflags |= CF_PARALLEL;
4561         tb_flush(cpu);
4562     }
4563 
4564     if (shmaddr)
4565         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4566     else {
4567         abi_ulong mmap_start;
4568 
4569         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4570         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4571 
4572         if (mmap_start == -1) {
4573             errno = ENOMEM;
4574             host_raddr = (void *)-1;
4575         } else
4576             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4577                                shmflg | SHM_REMAP);
4578     }
4579 
4580     if (host_raddr == (void *)-1) {
4581         mmap_unlock();
4582         return get_errno((long)host_raddr);
4583     }
4584     raddr=h2g((unsigned long)host_raddr);
4585 
4586     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4587                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4588                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4589 
4590     for (i = 0; i < N_SHM_REGIONS; i++) {
4591         if (!shm_regions[i].in_use) {
4592             shm_regions[i].in_use = true;
4593             shm_regions[i].start = raddr;
4594             shm_regions[i].size = shm_info.shm_segsz;
4595             break;
4596         }
4597     }
4598 
4599     mmap_unlock();
4600     return raddr;
4601 
4602 }
4603 
4604 static inline abi_long do_shmdt(abi_ulong shmaddr)
4605 {
4606     int i;
4607     abi_long rv;
4608 
4609     /* shmdt pointers are always untagged */
4610 
4611     mmap_lock();
4612 
4613     for (i = 0; i < N_SHM_REGIONS; ++i) {
4614         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4615             shm_regions[i].in_use = false;
4616             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4617             break;
4618         }
4619     }
4620     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4621 
4622     mmap_unlock();
4623 
4624     return rv;
4625 }
4626 
4627 #ifdef TARGET_NR_ipc
4628 /* ??? This only works with linear mappings.  */
4629 /* do_ipc() must return target values and target errnos. */
4630 static abi_long do_ipc(CPUArchState *cpu_env,
4631                        unsigned int call, abi_long first,
4632                        abi_long second, abi_long third,
4633                        abi_long ptr, abi_long fifth)
4634 {
4635     int version;
4636     abi_long ret = 0;
4637 
4638     version = call >> 16;
4639     call &= 0xffff;
4640 
4641     switch (call) {
4642     case IPCOP_semop:
4643         ret = do_semtimedop(first, ptr, second, 0, false);
4644         break;
4645     case IPCOP_semtimedop:
4646     /*
4647      * The s390 sys_ipc variant has only five parameters instead of six
4648      * (as for default variant) and the only difference is the handling of
4649      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4650      * to a struct timespec where the generic variant uses fifth parameter.
4651      */
4652 #if defined(TARGET_S390X)
4653         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4654 #else
4655         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4656 #endif
4657         break;
4658 
4659     case IPCOP_semget:
4660         ret = get_errno(semget(first, second, third));
4661         break;
4662 
4663     case IPCOP_semctl: {
4664         /* The semun argument to semctl is passed by value, so dereference the
4665          * ptr argument. */
4666         abi_ulong atptr;
4667         get_user_ual(atptr, ptr);
4668         ret = do_semctl(first, second, third, atptr);
4669         break;
4670     }
4671 
4672     case IPCOP_msgget:
4673         ret = get_errno(msgget(first, second));
4674         break;
4675 
4676     case IPCOP_msgsnd:
4677         ret = do_msgsnd(first, ptr, second, third);
4678         break;
4679 
4680     case IPCOP_msgctl:
4681         ret = do_msgctl(first, second, ptr);
4682         break;
4683 
4684     case IPCOP_msgrcv:
4685         switch (version) {
4686         case 0:
4687             {
4688                 struct target_ipc_kludge {
4689                     abi_long msgp;
4690                     abi_long msgtyp;
4691                 } *tmp;
4692 
4693                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4694                     ret = -TARGET_EFAULT;
4695                     break;
4696                 }
4697 
4698                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4699 
4700                 unlock_user_struct(tmp, ptr, 0);
4701                 break;
4702             }
4703         default:
4704             ret = do_msgrcv(first, ptr, second, fifth, third);
4705         }
4706         break;
4707 
4708     case IPCOP_shmat:
4709         switch (version) {
4710         default:
4711         {
4712             abi_ulong raddr;
4713             raddr = do_shmat(cpu_env, first, ptr, second);
4714             if (is_error(raddr))
4715                 return get_errno(raddr);
4716             if (put_user_ual(raddr, third))
4717                 return -TARGET_EFAULT;
4718             break;
4719         }
4720         case 1:
4721             ret = -TARGET_EINVAL;
4722             break;
4723         }
4724 	break;
4725     case IPCOP_shmdt:
4726         ret = do_shmdt(ptr);
4727 	break;
4728 
4729     case IPCOP_shmget:
4730 	/* IPC_* flag values are the same on all linux platforms */
4731 	ret = get_errno(shmget(first, second, third));
4732 	break;
4733 
4734 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4735     case IPCOP_shmctl:
4736         ret = do_shmctl(first, second, ptr);
4737         break;
4738     default:
4739         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4740                       call, version);
4741 	ret = -TARGET_ENOSYS;
4742 	break;
4743     }
4744     return ret;
4745 }
4746 #endif
4747 
4748 /* kernel structure types definitions */
4749 
4750 #define STRUCT(name, ...) STRUCT_ ## name,
4751 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4752 enum {
4753 #include "syscall_types.h"
4754 STRUCT_MAX
4755 };
4756 #undef STRUCT
4757 #undef STRUCT_SPECIAL
4758 
4759 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4760 #define STRUCT_SPECIAL(name)
4761 #include "syscall_types.h"
4762 #undef STRUCT
4763 #undef STRUCT_SPECIAL
4764 
4765 #define MAX_STRUCT_SIZE 4096
4766 
4767 #ifdef CONFIG_FIEMAP
4768 /* So fiemap access checks don't overflow on 32 bit systems.
4769  * This is very slightly smaller than the limit imposed by
4770  * the underlying kernel.
4771  */
4772 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4773                             / sizeof(struct fiemap_extent))
4774 
4775 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4776                                        int fd, int cmd, abi_long arg)
4777 {
4778     /* The parameter for this ioctl is a struct fiemap followed
4779      * by an array of struct fiemap_extent whose size is set
4780      * in fiemap->fm_extent_count. The array is filled in by the
4781      * ioctl.
4782      */
4783     int target_size_in, target_size_out;
4784     struct fiemap *fm;
4785     const argtype *arg_type = ie->arg_type;
4786     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4787     void *argptr, *p;
4788     abi_long ret;
4789     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4790     uint32_t outbufsz;
4791     int free_fm = 0;
4792 
4793     assert(arg_type[0] == TYPE_PTR);
4794     assert(ie->access == IOC_RW);
4795     arg_type++;
4796     target_size_in = thunk_type_size(arg_type, 0);
4797     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4798     if (!argptr) {
4799         return -TARGET_EFAULT;
4800     }
4801     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4802     unlock_user(argptr, arg, 0);
4803     fm = (struct fiemap *)buf_temp;
4804     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4805         return -TARGET_EINVAL;
4806     }
4807 
4808     outbufsz = sizeof (*fm) +
4809         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4810 
4811     if (outbufsz > MAX_STRUCT_SIZE) {
4812         /* We can't fit all the extents into the fixed size buffer.
4813          * Allocate one that is large enough and use it instead.
4814          */
4815         fm = g_try_malloc(outbufsz);
4816         if (!fm) {
4817             return -TARGET_ENOMEM;
4818         }
4819         memcpy(fm, buf_temp, sizeof(struct fiemap));
4820         free_fm = 1;
4821     }
4822     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4823     if (!is_error(ret)) {
4824         target_size_out = target_size_in;
4825         /* An extent_count of 0 means we were only counting the extents
4826          * so there are no structs to copy
4827          */
4828         if (fm->fm_extent_count != 0) {
4829             target_size_out += fm->fm_mapped_extents * extent_size;
4830         }
4831         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4832         if (!argptr) {
4833             ret = -TARGET_EFAULT;
4834         } else {
4835             /* Convert the struct fiemap */
4836             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4837             if (fm->fm_extent_count != 0) {
4838                 p = argptr + target_size_in;
4839                 /* ...and then all the struct fiemap_extents */
4840                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4841                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4842                                   THUNK_TARGET);
4843                     p += extent_size;
4844                 }
4845             }
4846             unlock_user(argptr, arg, target_size_out);
4847         }
4848     }
4849     if (free_fm) {
4850         g_free(fm);
4851     }
4852     return ret;
4853 }
4854 #endif
4855 
4856 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4857                                 int fd, int cmd, abi_long arg)
4858 {
4859     const argtype *arg_type = ie->arg_type;
4860     int target_size;
4861     void *argptr;
4862     int ret;
4863     struct ifconf *host_ifconf;
4864     uint32_t outbufsz;
4865     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4866     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4867     int target_ifreq_size;
4868     int nb_ifreq;
4869     int free_buf = 0;
4870     int i;
4871     int target_ifc_len;
4872     abi_long target_ifc_buf;
4873     int host_ifc_len;
4874     char *host_ifc_buf;
4875 
4876     assert(arg_type[0] == TYPE_PTR);
4877     assert(ie->access == IOC_RW);
4878 
4879     arg_type++;
4880     target_size = thunk_type_size(arg_type, 0);
4881 
4882     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4883     if (!argptr)
4884         return -TARGET_EFAULT;
4885     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4886     unlock_user(argptr, arg, 0);
4887 
4888     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4889     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4890     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4891 
4892     if (target_ifc_buf != 0) {
4893         target_ifc_len = host_ifconf->ifc_len;
4894         nb_ifreq = target_ifc_len / target_ifreq_size;
4895         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4896 
4897         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4898         if (outbufsz > MAX_STRUCT_SIZE) {
4899             /*
4900              * We can't fit all the extents into the fixed size buffer.
4901              * Allocate one that is large enough and use it instead.
4902              */
4903             host_ifconf = g_try_malloc(outbufsz);
4904             if (!host_ifconf) {
4905                 return -TARGET_ENOMEM;
4906             }
4907             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4908             free_buf = 1;
4909         }
4910         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4911 
4912         host_ifconf->ifc_len = host_ifc_len;
4913     } else {
4914       host_ifc_buf = NULL;
4915     }
4916     host_ifconf->ifc_buf = host_ifc_buf;
4917 
4918     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4919     if (!is_error(ret)) {
4920 	/* convert host ifc_len to target ifc_len */
4921 
4922         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4923         target_ifc_len = nb_ifreq * target_ifreq_size;
4924         host_ifconf->ifc_len = target_ifc_len;
4925 
4926 	/* restore target ifc_buf */
4927 
4928         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4929 
4930 	/* copy struct ifconf to target user */
4931 
4932         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4933         if (!argptr)
4934             return -TARGET_EFAULT;
4935         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4936         unlock_user(argptr, arg, target_size);
4937 
4938         if (target_ifc_buf != 0) {
4939             /* copy ifreq[] to target user */
4940             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4941             for (i = 0; i < nb_ifreq ; i++) {
4942                 thunk_convert(argptr + i * target_ifreq_size,
4943                               host_ifc_buf + i * sizeof(struct ifreq),
4944                               ifreq_arg_type, THUNK_TARGET);
4945             }
4946             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4947         }
4948     }
4949 
4950     if (free_buf) {
4951         g_free(host_ifconf);
4952     }
4953 
4954     return ret;
4955 }
4956 
4957 #if defined(CONFIG_USBFS)
4958 #if HOST_LONG_BITS > 64
4959 #error USBDEVFS thunks do not support >64 bit hosts yet.
4960 #endif
4961 struct live_urb {
4962     uint64_t target_urb_adr;
4963     uint64_t target_buf_adr;
4964     char *target_buf_ptr;
4965     struct usbdevfs_urb host_urb;
4966 };
4967 
4968 static GHashTable *usbdevfs_urb_hashtable(void)
4969 {
4970     static GHashTable *urb_hashtable;
4971 
4972     if (!urb_hashtable) {
4973         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4974     }
4975     return urb_hashtable;
4976 }
4977 
4978 static void urb_hashtable_insert(struct live_urb *urb)
4979 {
4980     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4981     g_hash_table_insert(urb_hashtable, urb, urb);
4982 }
4983 
4984 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4985 {
4986     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4987     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4988 }
4989 
4990 static void urb_hashtable_remove(struct live_urb *urb)
4991 {
4992     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4993     g_hash_table_remove(urb_hashtable, urb);
4994 }
4995 
4996 static abi_long
4997 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4998                           int fd, int cmd, abi_long arg)
4999 {
5000     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5001     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5002     struct live_urb *lurb;
5003     void *argptr;
5004     uint64_t hurb;
5005     int target_size;
5006     uintptr_t target_urb_adr;
5007     abi_long ret;
5008 
5009     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5010 
5011     memset(buf_temp, 0, sizeof(uint64_t));
5012     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5013     if (is_error(ret)) {
5014         return ret;
5015     }
5016 
5017     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5018     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5019     if (!lurb->target_urb_adr) {
5020         return -TARGET_EFAULT;
5021     }
5022     urb_hashtable_remove(lurb);
5023     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5024         lurb->host_urb.buffer_length);
5025     lurb->target_buf_ptr = NULL;
5026 
5027     /* restore the guest buffer pointer */
5028     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5029 
5030     /* update the guest urb struct */
5031     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5032     if (!argptr) {
5033         g_free(lurb);
5034         return -TARGET_EFAULT;
5035     }
5036     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5037     unlock_user(argptr, lurb->target_urb_adr, target_size);
5038 
5039     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5040     /* write back the urb handle */
5041     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5042     if (!argptr) {
5043         g_free(lurb);
5044         return -TARGET_EFAULT;
5045     }
5046 
5047     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5048     target_urb_adr = lurb->target_urb_adr;
5049     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5050     unlock_user(argptr, arg, target_size);
5051 
5052     g_free(lurb);
5053     return ret;
5054 }
5055 
5056 static abi_long
5057 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5058                              uint8_t *buf_temp __attribute__((unused)),
5059                              int fd, int cmd, abi_long arg)
5060 {
5061     struct live_urb *lurb;
5062 
5063     /* map target address back to host URB with metadata. */
5064     lurb = urb_hashtable_lookup(arg);
5065     if (!lurb) {
5066         return -TARGET_EFAULT;
5067     }
5068     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5069 }
5070 
5071 static abi_long
5072 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5073                             int fd, int cmd, abi_long arg)
5074 {
5075     const argtype *arg_type = ie->arg_type;
5076     int target_size;
5077     abi_long ret;
5078     void *argptr;
5079     int rw_dir;
5080     struct live_urb *lurb;
5081 
5082     /*
5083      * each submitted URB needs to map to a unique ID for the
5084      * kernel, and that unique ID needs to be a pointer to
5085      * host memory.  hence, we need to malloc for each URB.
5086      * isochronous transfers have a variable length struct.
5087      */
5088     arg_type++;
5089     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5090 
5091     /* construct host copy of urb and metadata */
5092     lurb = g_try_new0(struct live_urb, 1);
5093     if (!lurb) {
5094         return -TARGET_ENOMEM;
5095     }
5096 
5097     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5098     if (!argptr) {
5099         g_free(lurb);
5100         return -TARGET_EFAULT;
5101     }
5102     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5103     unlock_user(argptr, arg, 0);
5104 
5105     lurb->target_urb_adr = arg;
5106     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5107 
5108     /* buffer space used depends on endpoint type so lock the entire buffer */
5109     /* control type urbs should check the buffer contents for true direction */
5110     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5111     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5112         lurb->host_urb.buffer_length, 1);
5113     if (lurb->target_buf_ptr == NULL) {
5114         g_free(lurb);
5115         return -TARGET_EFAULT;
5116     }
5117 
5118     /* update buffer pointer in host copy */
5119     lurb->host_urb.buffer = lurb->target_buf_ptr;
5120 
5121     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5122     if (is_error(ret)) {
5123         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5124         g_free(lurb);
5125     } else {
5126         urb_hashtable_insert(lurb);
5127     }
5128 
5129     return ret;
5130 }
5131 #endif /* CONFIG_USBFS */
5132 
5133 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5134                             int cmd, abi_long arg)
5135 {
5136     void *argptr;
5137     struct dm_ioctl *host_dm;
5138     abi_long guest_data;
5139     uint32_t guest_data_size;
5140     int target_size;
5141     const argtype *arg_type = ie->arg_type;
5142     abi_long ret;
5143     void *big_buf = NULL;
5144     char *host_data;
5145 
5146     arg_type++;
5147     target_size = thunk_type_size(arg_type, 0);
5148     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5149     if (!argptr) {
5150         ret = -TARGET_EFAULT;
5151         goto out;
5152     }
5153     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5154     unlock_user(argptr, arg, 0);
5155 
5156     /* buf_temp is too small, so fetch things into a bigger buffer */
5157     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5158     memcpy(big_buf, buf_temp, target_size);
5159     buf_temp = big_buf;
5160     host_dm = big_buf;
5161 
5162     guest_data = arg + host_dm->data_start;
5163     if ((guest_data - arg) < 0) {
5164         ret = -TARGET_EINVAL;
5165         goto out;
5166     }
5167     guest_data_size = host_dm->data_size - host_dm->data_start;
5168     host_data = (char*)host_dm + host_dm->data_start;
5169 
5170     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5171     if (!argptr) {
5172         ret = -TARGET_EFAULT;
5173         goto out;
5174     }
5175 
5176     switch (ie->host_cmd) {
5177     case DM_REMOVE_ALL:
5178     case DM_LIST_DEVICES:
5179     case DM_DEV_CREATE:
5180     case DM_DEV_REMOVE:
5181     case DM_DEV_SUSPEND:
5182     case DM_DEV_STATUS:
5183     case DM_DEV_WAIT:
5184     case DM_TABLE_STATUS:
5185     case DM_TABLE_CLEAR:
5186     case DM_TABLE_DEPS:
5187     case DM_LIST_VERSIONS:
5188         /* no input data */
5189         break;
5190     case DM_DEV_RENAME:
5191     case DM_DEV_SET_GEOMETRY:
5192         /* data contains only strings */
5193         memcpy(host_data, argptr, guest_data_size);
5194         break;
5195     case DM_TARGET_MSG:
5196         memcpy(host_data, argptr, guest_data_size);
5197         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5198         break;
5199     case DM_TABLE_LOAD:
5200     {
5201         void *gspec = argptr;
5202         void *cur_data = host_data;
5203         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5204         int spec_size = thunk_type_size(arg_type, 0);
5205         int i;
5206 
5207         for (i = 0; i < host_dm->target_count; i++) {
5208             struct dm_target_spec *spec = cur_data;
5209             uint32_t next;
5210             int slen;
5211 
5212             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5213             slen = strlen((char*)gspec + spec_size) + 1;
5214             next = spec->next;
5215             spec->next = sizeof(*spec) + slen;
5216             strcpy((char*)&spec[1], gspec + spec_size);
5217             gspec += next;
5218             cur_data += spec->next;
5219         }
5220         break;
5221     }
5222     default:
5223         ret = -TARGET_EINVAL;
5224         unlock_user(argptr, guest_data, 0);
5225         goto out;
5226     }
5227     unlock_user(argptr, guest_data, 0);
5228 
5229     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5230     if (!is_error(ret)) {
5231         guest_data = arg + host_dm->data_start;
5232         guest_data_size = host_dm->data_size - host_dm->data_start;
5233         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5234         switch (ie->host_cmd) {
5235         case DM_REMOVE_ALL:
5236         case DM_DEV_CREATE:
5237         case DM_DEV_REMOVE:
5238         case DM_DEV_RENAME:
5239         case DM_DEV_SUSPEND:
5240         case DM_DEV_STATUS:
5241         case DM_TABLE_LOAD:
5242         case DM_TABLE_CLEAR:
5243         case DM_TARGET_MSG:
5244         case DM_DEV_SET_GEOMETRY:
5245             /* no return data */
5246             break;
5247         case DM_LIST_DEVICES:
5248         {
5249             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5250             uint32_t remaining_data = guest_data_size;
5251             void *cur_data = argptr;
5252             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5253             int nl_size = 12; /* can't use thunk_size due to alignment */
5254 
5255             while (1) {
5256                 uint32_t next = nl->next;
5257                 if (next) {
5258                     nl->next = nl_size + (strlen(nl->name) + 1);
5259                 }
5260                 if (remaining_data < nl->next) {
5261                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5262                     break;
5263                 }
5264                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5265                 strcpy(cur_data + nl_size, nl->name);
5266                 cur_data += nl->next;
5267                 remaining_data -= nl->next;
5268                 if (!next) {
5269                     break;
5270                 }
5271                 nl = (void*)nl + next;
5272             }
5273             break;
5274         }
5275         case DM_DEV_WAIT:
5276         case DM_TABLE_STATUS:
5277         {
5278             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5279             void *cur_data = argptr;
5280             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5281             int spec_size = thunk_type_size(arg_type, 0);
5282             int i;
5283 
5284             for (i = 0; i < host_dm->target_count; i++) {
5285                 uint32_t next = spec->next;
5286                 int slen = strlen((char*)&spec[1]) + 1;
5287                 spec->next = (cur_data - argptr) + spec_size + slen;
5288                 if (guest_data_size < spec->next) {
5289                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5290                     break;
5291                 }
5292                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5293                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5294                 cur_data = argptr + spec->next;
5295                 spec = (void*)host_dm + host_dm->data_start + next;
5296             }
5297             break;
5298         }
5299         case DM_TABLE_DEPS:
5300         {
5301             void *hdata = (void*)host_dm + host_dm->data_start;
5302             int count = *(uint32_t*)hdata;
5303             uint64_t *hdev = hdata + 8;
5304             uint64_t *gdev = argptr + 8;
5305             int i;
5306 
5307             *(uint32_t*)argptr = tswap32(count);
5308             for (i = 0; i < count; i++) {
5309                 *gdev = tswap64(*hdev);
5310                 gdev++;
5311                 hdev++;
5312             }
5313             break;
5314         }
5315         case DM_LIST_VERSIONS:
5316         {
5317             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5318             uint32_t remaining_data = guest_data_size;
5319             void *cur_data = argptr;
5320             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5321             int vers_size = thunk_type_size(arg_type, 0);
5322 
5323             while (1) {
5324                 uint32_t next = vers->next;
5325                 if (next) {
5326                     vers->next = vers_size + (strlen(vers->name) + 1);
5327                 }
5328                 if (remaining_data < vers->next) {
5329                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5330                     break;
5331                 }
5332                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5333                 strcpy(cur_data + vers_size, vers->name);
5334                 cur_data += vers->next;
5335                 remaining_data -= vers->next;
5336                 if (!next) {
5337                     break;
5338                 }
5339                 vers = (void*)vers + next;
5340             }
5341             break;
5342         }
5343         default:
5344             unlock_user(argptr, guest_data, 0);
5345             ret = -TARGET_EINVAL;
5346             goto out;
5347         }
5348         unlock_user(argptr, guest_data, guest_data_size);
5349 
5350         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5351         if (!argptr) {
5352             ret = -TARGET_EFAULT;
5353             goto out;
5354         }
5355         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5356         unlock_user(argptr, arg, target_size);
5357     }
5358 out:
5359     g_free(big_buf);
5360     return ret;
5361 }
5362 
5363 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5364                                int cmd, abi_long arg)
5365 {
5366     void *argptr;
5367     int target_size;
5368     const argtype *arg_type = ie->arg_type;
5369     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5370     abi_long ret;
5371 
5372     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5373     struct blkpg_partition host_part;
5374 
5375     /* Read and convert blkpg */
5376     arg_type++;
5377     target_size = thunk_type_size(arg_type, 0);
5378     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5379     if (!argptr) {
5380         ret = -TARGET_EFAULT;
5381         goto out;
5382     }
5383     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5384     unlock_user(argptr, arg, 0);
5385 
5386     switch (host_blkpg->op) {
5387     case BLKPG_ADD_PARTITION:
5388     case BLKPG_DEL_PARTITION:
5389         /* payload is struct blkpg_partition */
5390         break;
5391     default:
5392         /* Unknown opcode */
5393         ret = -TARGET_EINVAL;
5394         goto out;
5395     }
5396 
5397     /* Read and convert blkpg->data */
5398     arg = (abi_long)(uintptr_t)host_blkpg->data;
5399     target_size = thunk_type_size(part_arg_type, 0);
5400     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5401     if (!argptr) {
5402         ret = -TARGET_EFAULT;
5403         goto out;
5404     }
5405     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5406     unlock_user(argptr, arg, 0);
5407 
5408     /* Swizzle the data pointer to our local copy and call! */
5409     host_blkpg->data = &host_part;
5410     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5411 
5412 out:
5413     return ret;
5414 }
5415 
5416 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5417                                 int fd, int cmd, abi_long arg)
5418 {
5419     const argtype *arg_type = ie->arg_type;
5420     const StructEntry *se;
5421     const argtype *field_types;
5422     const int *dst_offsets, *src_offsets;
5423     int target_size;
5424     void *argptr;
5425     abi_ulong *target_rt_dev_ptr = NULL;
5426     unsigned long *host_rt_dev_ptr = NULL;
5427     abi_long ret;
5428     int i;
5429 
5430     assert(ie->access == IOC_W);
5431     assert(*arg_type == TYPE_PTR);
5432     arg_type++;
5433     assert(*arg_type == TYPE_STRUCT);
5434     target_size = thunk_type_size(arg_type, 0);
5435     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5436     if (!argptr) {
5437         return -TARGET_EFAULT;
5438     }
5439     arg_type++;
5440     assert(*arg_type == (int)STRUCT_rtentry);
5441     se = struct_entries + *arg_type++;
5442     assert(se->convert[0] == NULL);
5443     /* convert struct here to be able to catch rt_dev string */
5444     field_types = se->field_types;
5445     dst_offsets = se->field_offsets[THUNK_HOST];
5446     src_offsets = se->field_offsets[THUNK_TARGET];
5447     for (i = 0; i < se->nb_fields; i++) {
5448         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5449             assert(*field_types == TYPE_PTRVOID);
5450             target_rt_dev_ptr = argptr + src_offsets[i];
5451             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5452             if (*target_rt_dev_ptr != 0) {
5453                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5454                                                   tswapal(*target_rt_dev_ptr));
5455                 if (!*host_rt_dev_ptr) {
5456                     unlock_user(argptr, arg, 0);
5457                     return -TARGET_EFAULT;
5458                 }
5459             } else {
5460                 *host_rt_dev_ptr = 0;
5461             }
5462             field_types++;
5463             continue;
5464         }
5465         field_types = thunk_convert(buf_temp + dst_offsets[i],
5466                                     argptr + src_offsets[i],
5467                                     field_types, THUNK_HOST);
5468     }
5469     unlock_user(argptr, arg, 0);
5470 
5471     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5472 
5473     assert(host_rt_dev_ptr != NULL);
5474     assert(target_rt_dev_ptr != NULL);
5475     if (*host_rt_dev_ptr != 0) {
5476         unlock_user((void *)*host_rt_dev_ptr,
5477                     *target_rt_dev_ptr, 0);
5478     }
5479     return ret;
5480 }
5481 
5482 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5483                                      int fd, int cmd, abi_long arg)
5484 {
5485     int sig = target_to_host_signal(arg);
5486     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5487 }
5488 
5489 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5490                                     int fd, int cmd, abi_long arg)
5491 {
5492     struct timeval tv;
5493     abi_long ret;
5494 
5495     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5496     if (is_error(ret)) {
5497         return ret;
5498     }
5499 
5500     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5501         if (copy_to_user_timeval(arg, &tv)) {
5502             return -TARGET_EFAULT;
5503         }
5504     } else {
5505         if (copy_to_user_timeval64(arg, &tv)) {
5506             return -TARGET_EFAULT;
5507         }
5508     }
5509 
5510     return ret;
5511 }
5512 
5513 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5514                                       int fd, int cmd, abi_long arg)
5515 {
5516     struct timespec ts;
5517     abi_long ret;
5518 
5519     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5520     if (is_error(ret)) {
5521         return ret;
5522     }
5523 
5524     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5525         if (host_to_target_timespec(arg, &ts)) {
5526             return -TARGET_EFAULT;
5527         }
5528     } else{
5529         if (host_to_target_timespec64(arg, &ts)) {
5530             return -TARGET_EFAULT;
5531         }
5532     }
5533 
5534     return ret;
5535 }
5536 
5537 #ifdef TIOCGPTPEER
5538 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5539                                      int fd, int cmd, abi_long arg)
5540 {
5541     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5542     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5543 }
5544 #endif
5545 
5546 #ifdef HAVE_DRM_H
5547 
5548 static void unlock_drm_version(struct drm_version *host_ver,
5549                                struct target_drm_version *target_ver,
5550                                bool copy)
5551 {
5552     unlock_user(host_ver->name, target_ver->name,
5553                                 copy ? host_ver->name_len : 0);
5554     unlock_user(host_ver->date, target_ver->date,
5555                                 copy ? host_ver->date_len : 0);
5556     unlock_user(host_ver->desc, target_ver->desc,
5557                                 copy ? host_ver->desc_len : 0);
5558 }
5559 
5560 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5561                                           struct target_drm_version *target_ver)
5562 {
5563     memset(host_ver, 0, sizeof(*host_ver));
5564 
5565     __get_user(host_ver->name_len, &target_ver->name_len);
5566     if (host_ver->name_len) {
5567         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5568                                    target_ver->name_len, 0);
5569         if (!host_ver->name) {
5570             return -EFAULT;
5571         }
5572     }
5573 
5574     __get_user(host_ver->date_len, &target_ver->date_len);
5575     if (host_ver->date_len) {
5576         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5577                                    target_ver->date_len, 0);
5578         if (!host_ver->date) {
5579             goto err;
5580         }
5581     }
5582 
5583     __get_user(host_ver->desc_len, &target_ver->desc_len);
5584     if (host_ver->desc_len) {
5585         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5586                                    target_ver->desc_len, 0);
5587         if (!host_ver->desc) {
5588             goto err;
5589         }
5590     }
5591 
5592     return 0;
5593 err:
5594     unlock_drm_version(host_ver, target_ver, false);
5595     return -EFAULT;
5596 }
5597 
5598 static inline void host_to_target_drmversion(
5599                                           struct target_drm_version *target_ver,
5600                                           struct drm_version *host_ver)
5601 {
5602     __put_user(host_ver->version_major, &target_ver->version_major);
5603     __put_user(host_ver->version_minor, &target_ver->version_minor);
5604     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5605     __put_user(host_ver->name_len, &target_ver->name_len);
5606     __put_user(host_ver->date_len, &target_ver->date_len);
5607     __put_user(host_ver->desc_len, &target_ver->desc_len);
5608     unlock_drm_version(host_ver, target_ver, true);
5609 }
5610 
5611 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5612                              int fd, int cmd, abi_long arg)
5613 {
5614     struct drm_version *ver;
5615     struct target_drm_version *target_ver;
5616     abi_long ret;
5617 
5618     switch (ie->host_cmd) {
5619     case DRM_IOCTL_VERSION:
5620         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5621             return -TARGET_EFAULT;
5622         }
5623         ver = (struct drm_version *)buf_temp;
5624         ret = target_to_host_drmversion(ver, target_ver);
5625         if (!is_error(ret)) {
5626             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5627             if (is_error(ret)) {
5628                 unlock_drm_version(ver, target_ver, false);
5629             } else {
5630                 host_to_target_drmversion(target_ver, ver);
5631             }
5632         }
5633         unlock_user_struct(target_ver, arg, 0);
5634         return ret;
5635     }
5636     return -TARGET_ENOSYS;
5637 }
5638 
5639 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5640                                            struct drm_i915_getparam *gparam,
5641                                            int fd, abi_long arg)
5642 {
5643     abi_long ret;
5644     int value;
5645     struct target_drm_i915_getparam *target_gparam;
5646 
5647     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5648         return -TARGET_EFAULT;
5649     }
5650 
5651     __get_user(gparam->param, &target_gparam->param);
5652     gparam->value = &value;
5653     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5654     put_user_s32(value, target_gparam->value);
5655 
5656     unlock_user_struct(target_gparam, arg, 0);
5657     return ret;
5658 }
5659 
5660 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5661                                   int fd, int cmd, abi_long arg)
5662 {
5663     switch (ie->host_cmd) {
5664     case DRM_IOCTL_I915_GETPARAM:
5665         return do_ioctl_drm_i915_getparam(ie,
5666                                           (struct drm_i915_getparam *)buf_temp,
5667                                           fd, arg);
5668     default:
5669         return -TARGET_ENOSYS;
5670     }
5671 }
5672 
5673 #endif
5674 
5675 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5676                                         int fd, int cmd, abi_long arg)
5677 {
5678     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5679     struct tun_filter *target_filter;
5680     char *target_addr;
5681 
5682     assert(ie->access == IOC_W);
5683 
5684     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5685     if (!target_filter) {
5686         return -TARGET_EFAULT;
5687     }
5688     filter->flags = tswap16(target_filter->flags);
5689     filter->count = tswap16(target_filter->count);
5690     unlock_user(target_filter, arg, 0);
5691 
5692     if (filter->count) {
5693         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5694             MAX_STRUCT_SIZE) {
5695             return -TARGET_EFAULT;
5696         }
5697 
5698         target_addr = lock_user(VERIFY_READ,
5699                                 arg + offsetof(struct tun_filter, addr),
5700                                 filter->count * ETH_ALEN, 1);
5701         if (!target_addr) {
5702             return -TARGET_EFAULT;
5703         }
5704         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5705         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5706     }
5707 
5708     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5709 }
5710 
5711 IOCTLEntry ioctl_entries[] = {
5712 #define IOCTL(cmd, access, ...) \
5713     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5714 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5715     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5716 #define IOCTL_IGNORE(cmd) \
5717     { TARGET_ ## cmd, 0, #cmd },
5718 #include "ioctls.h"
5719     { 0, 0, },
5720 };
5721 
5722 /* ??? Implement proper locking for ioctls.  */
5723 /* do_ioctl() Must return target values and target errnos. */
5724 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5725 {
5726     const IOCTLEntry *ie;
5727     const argtype *arg_type;
5728     abi_long ret;
5729     uint8_t buf_temp[MAX_STRUCT_SIZE];
5730     int target_size;
5731     void *argptr;
5732 
5733     ie = ioctl_entries;
5734     for(;;) {
5735         if (ie->target_cmd == 0) {
5736             qemu_log_mask(
5737                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5738             return -TARGET_ENOSYS;
5739         }
5740         if (ie->target_cmd == cmd)
5741             break;
5742         ie++;
5743     }
5744     arg_type = ie->arg_type;
5745     if (ie->do_ioctl) {
5746         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5747     } else if (!ie->host_cmd) {
5748         /* Some architectures define BSD ioctls in their headers
5749            that are not implemented in Linux.  */
5750         return -TARGET_ENOSYS;
5751     }
5752 
5753     switch(arg_type[0]) {
5754     case TYPE_NULL:
5755         /* no argument */
5756         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5757         break;
5758     case TYPE_PTRVOID:
5759     case TYPE_INT:
5760     case TYPE_LONG:
5761     case TYPE_ULONG:
5762         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5763         break;
5764     case TYPE_PTR:
5765         arg_type++;
5766         target_size = thunk_type_size(arg_type, 0);
5767         switch(ie->access) {
5768         case IOC_R:
5769             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5770             if (!is_error(ret)) {
5771                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5772                 if (!argptr)
5773                     return -TARGET_EFAULT;
5774                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5775                 unlock_user(argptr, arg, target_size);
5776             }
5777             break;
5778         case IOC_W:
5779             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5780             if (!argptr)
5781                 return -TARGET_EFAULT;
5782             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5783             unlock_user(argptr, arg, 0);
5784             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5785             break;
5786         default:
5787         case IOC_RW:
5788             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5789             if (!argptr)
5790                 return -TARGET_EFAULT;
5791             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5792             unlock_user(argptr, arg, 0);
5793             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5794             if (!is_error(ret)) {
5795                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5796                 if (!argptr)
5797                     return -TARGET_EFAULT;
5798                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5799                 unlock_user(argptr, arg, target_size);
5800             }
5801             break;
5802         }
5803         break;
5804     default:
5805         qemu_log_mask(LOG_UNIMP,
5806                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5807                       (long)cmd, arg_type[0]);
5808         ret = -TARGET_ENOSYS;
5809         break;
5810     }
5811     return ret;
5812 }
5813 
5814 static const bitmask_transtbl iflag_tbl[] = {
5815         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5816         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5817         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5818         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5819         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5820         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5821         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5822         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5823         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5824         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5825         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5826         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5827         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5828         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5829         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5830         { 0, 0, 0, 0 }
5831 };
5832 
5833 static const bitmask_transtbl oflag_tbl[] = {
5834 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5835 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5836 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5837 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5838 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5839 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5840 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5841 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5842 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5843 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5844 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5845 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5846 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5847 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5848 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5849 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5850 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5851 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5852 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5853 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5854 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5855 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5856 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5857 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5858 	{ 0, 0, 0, 0 }
5859 };
5860 
5861 static const bitmask_transtbl cflag_tbl[] = {
5862 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5863 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5864 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5865 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5866 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5867 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5868 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5869 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5870 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5871 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5872 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5873 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5874 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5875 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5876 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5877 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5878 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5879 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5880 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5881 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5882 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5883 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5884 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5885 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5886 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5887 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5888 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5889 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5890 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5891 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5892 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5893 	{ 0, 0, 0, 0 }
5894 };
5895 
5896 static const bitmask_transtbl lflag_tbl[] = {
5897   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5898   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5899   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5900   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5901   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5902   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5903   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5904   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5905   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5906   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5907   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5908   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5909   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5910   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5911   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5912   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5913   { 0, 0, 0, 0 }
5914 };
5915 
5916 static void target_to_host_termios (void *dst, const void *src)
5917 {
5918     struct host_termios *host = dst;
5919     const struct target_termios *target = src;
5920 
5921     host->c_iflag =
5922         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5923     host->c_oflag =
5924         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5925     host->c_cflag =
5926         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5927     host->c_lflag =
5928         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5929     host->c_line = target->c_line;
5930 
5931     memset(host->c_cc, 0, sizeof(host->c_cc));
5932     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5933     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5934     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5935     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5936     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5937     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5938     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5939     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5940     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5941     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5942     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5943     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5944     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5945     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5946     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5947     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5948     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5949 }
5950 
5951 static void host_to_target_termios (void *dst, const void *src)
5952 {
5953     struct target_termios *target = dst;
5954     const struct host_termios *host = src;
5955 
5956     target->c_iflag =
5957         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5958     target->c_oflag =
5959         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5960     target->c_cflag =
5961         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5962     target->c_lflag =
5963         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5964     target->c_line = host->c_line;
5965 
5966     memset(target->c_cc, 0, sizeof(target->c_cc));
5967     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5968     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5969     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5970     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5971     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5972     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5973     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5974     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5975     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5976     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5977     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5978     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5979     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5980     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5981     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5982     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5983     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5984 }
5985 
5986 static const StructEntry struct_termios_def = {
5987     .convert = { host_to_target_termios, target_to_host_termios },
5988     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5989     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5990     .print = print_termios,
5991 };
5992 
5993 static const bitmask_transtbl mmap_flags_tbl[] = {
5994     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5995     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5996     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5997     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5998       MAP_ANONYMOUS, MAP_ANONYMOUS },
5999     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6000       MAP_GROWSDOWN, MAP_GROWSDOWN },
6001     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6002       MAP_DENYWRITE, MAP_DENYWRITE },
6003     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6004       MAP_EXECUTABLE, MAP_EXECUTABLE },
6005     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6006     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6007       MAP_NORESERVE, MAP_NORESERVE },
6008     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6009     /* MAP_STACK had been ignored by the kernel for quite some time.
6010        Recognize it for the target insofar as we do not want to pass
6011        it through to the host.  */
6012     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6013     { 0, 0, 0, 0 }
6014 };
6015 
6016 /*
6017  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6018  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6019  */
6020 #if defined(TARGET_I386)
6021 
6022 /* NOTE: there is really one LDT for all the threads */
6023 static uint8_t *ldt_table;
6024 
6025 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6026 {
6027     int size;
6028     void *p;
6029 
6030     if (!ldt_table)
6031         return 0;
6032     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6033     if (size > bytecount)
6034         size = bytecount;
6035     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6036     if (!p)
6037         return -TARGET_EFAULT;
6038     /* ??? Should this by byteswapped?  */
6039     memcpy(p, ldt_table, size);
6040     unlock_user(p, ptr, size);
6041     return size;
6042 }
6043 
6044 /* XXX: add locking support */
6045 static abi_long write_ldt(CPUX86State *env,
6046                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6047 {
6048     struct target_modify_ldt_ldt_s ldt_info;
6049     struct target_modify_ldt_ldt_s *target_ldt_info;
6050     int seg_32bit, contents, read_exec_only, limit_in_pages;
6051     int seg_not_present, useable, lm;
6052     uint32_t *lp, entry_1, entry_2;
6053 
6054     if (bytecount != sizeof(ldt_info))
6055         return -TARGET_EINVAL;
6056     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6057         return -TARGET_EFAULT;
6058     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6059     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6060     ldt_info.limit = tswap32(target_ldt_info->limit);
6061     ldt_info.flags = tswap32(target_ldt_info->flags);
6062     unlock_user_struct(target_ldt_info, ptr, 0);
6063 
6064     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6065         return -TARGET_EINVAL;
6066     seg_32bit = ldt_info.flags & 1;
6067     contents = (ldt_info.flags >> 1) & 3;
6068     read_exec_only = (ldt_info.flags >> 3) & 1;
6069     limit_in_pages = (ldt_info.flags >> 4) & 1;
6070     seg_not_present = (ldt_info.flags >> 5) & 1;
6071     useable = (ldt_info.flags >> 6) & 1;
6072 #ifdef TARGET_ABI32
6073     lm = 0;
6074 #else
6075     lm = (ldt_info.flags >> 7) & 1;
6076 #endif
6077     if (contents == 3) {
6078         if (oldmode)
6079             return -TARGET_EINVAL;
6080         if (seg_not_present == 0)
6081             return -TARGET_EINVAL;
6082     }
6083     /* allocate the LDT */
6084     if (!ldt_table) {
6085         env->ldt.base = target_mmap(0,
6086                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6087                                     PROT_READ|PROT_WRITE,
6088                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6089         if (env->ldt.base == -1)
6090             return -TARGET_ENOMEM;
6091         memset(g2h_untagged(env->ldt.base), 0,
6092                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6093         env->ldt.limit = 0xffff;
6094         ldt_table = g2h_untagged(env->ldt.base);
6095     }
6096 
6097     /* NOTE: same code as Linux kernel */
6098     /* Allow LDTs to be cleared by the user. */
6099     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6100         if (oldmode ||
6101             (contents == 0		&&
6102              read_exec_only == 1	&&
6103              seg_32bit == 0		&&
6104              limit_in_pages == 0	&&
6105              seg_not_present == 1	&&
6106              useable == 0 )) {
6107             entry_1 = 0;
6108             entry_2 = 0;
6109             goto install;
6110         }
6111     }
6112 
6113     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6114         (ldt_info.limit & 0x0ffff);
6115     entry_2 = (ldt_info.base_addr & 0xff000000) |
6116         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6117         (ldt_info.limit & 0xf0000) |
6118         ((read_exec_only ^ 1) << 9) |
6119         (contents << 10) |
6120         ((seg_not_present ^ 1) << 15) |
6121         (seg_32bit << 22) |
6122         (limit_in_pages << 23) |
6123         (lm << 21) |
6124         0x7000;
6125     if (!oldmode)
6126         entry_2 |= (useable << 20);
6127 
6128     /* Install the new entry ...  */
6129 install:
6130     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6131     lp[0] = tswap32(entry_1);
6132     lp[1] = tswap32(entry_2);
6133     return 0;
6134 }
6135 
6136 /* specific and weird i386 syscalls */
6137 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6138                               unsigned long bytecount)
6139 {
6140     abi_long ret;
6141 
6142     switch (func) {
6143     case 0:
6144         ret = read_ldt(ptr, bytecount);
6145         break;
6146     case 1:
6147         ret = write_ldt(env, ptr, bytecount, 1);
6148         break;
6149     case 0x11:
6150         ret = write_ldt(env, ptr, bytecount, 0);
6151         break;
6152     default:
6153         ret = -TARGET_ENOSYS;
6154         break;
6155     }
6156     return ret;
6157 }
6158 
6159 #if defined(TARGET_ABI32)
6160 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6161 {
6162     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6163     struct target_modify_ldt_ldt_s ldt_info;
6164     struct target_modify_ldt_ldt_s *target_ldt_info;
6165     int seg_32bit, contents, read_exec_only, limit_in_pages;
6166     int seg_not_present, useable, lm;
6167     uint32_t *lp, entry_1, entry_2;
6168     int i;
6169 
6170     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6171     if (!target_ldt_info)
6172         return -TARGET_EFAULT;
6173     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6174     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6175     ldt_info.limit = tswap32(target_ldt_info->limit);
6176     ldt_info.flags = tswap32(target_ldt_info->flags);
6177     if (ldt_info.entry_number == -1) {
6178         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6179             if (gdt_table[i] == 0) {
6180                 ldt_info.entry_number = i;
6181                 target_ldt_info->entry_number = tswap32(i);
6182                 break;
6183             }
6184         }
6185     }
6186     unlock_user_struct(target_ldt_info, ptr, 1);
6187 
6188     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6189         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6190            return -TARGET_EINVAL;
6191     seg_32bit = ldt_info.flags & 1;
6192     contents = (ldt_info.flags >> 1) & 3;
6193     read_exec_only = (ldt_info.flags >> 3) & 1;
6194     limit_in_pages = (ldt_info.flags >> 4) & 1;
6195     seg_not_present = (ldt_info.flags >> 5) & 1;
6196     useable = (ldt_info.flags >> 6) & 1;
6197 #ifdef TARGET_ABI32
6198     lm = 0;
6199 #else
6200     lm = (ldt_info.flags >> 7) & 1;
6201 #endif
6202 
6203     if (contents == 3) {
6204         if (seg_not_present == 0)
6205             return -TARGET_EINVAL;
6206     }
6207 
6208     /* NOTE: same code as Linux kernel */
6209     /* Allow LDTs to be cleared by the user. */
6210     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6211         if ((contents == 0             &&
6212              read_exec_only == 1       &&
6213              seg_32bit == 0            &&
6214              limit_in_pages == 0       &&
6215              seg_not_present == 1      &&
6216              useable == 0 )) {
6217             entry_1 = 0;
6218             entry_2 = 0;
6219             goto install;
6220         }
6221     }
6222 
6223     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6224         (ldt_info.limit & 0x0ffff);
6225     entry_2 = (ldt_info.base_addr & 0xff000000) |
6226         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6227         (ldt_info.limit & 0xf0000) |
6228         ((read_exec_only ^ 1) << 9) |
6229         (contents << 10) |
6230         ((seg_not_present ^ 1) << 15) |
6231         (seg_32bit << 22) |
6232         (limit_in_pages << 23) |
6233         (useable << 20) |
6234         (lm << 21) |
6235         0x7000;
6236 
6237     /* Install the new entry ...  */
6238 install:
6239     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6240     lp[0] = tswap32(entry_1);
6241     lp[1] = tswap32(entry_2);
6242     return 0;
6243 }
6244 
6245 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6246 {
6247     struct target_modify_ldt_ldt_s *target_ldt_info;
6248     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6249     uint32_t base_addr, limit, flags;
6250     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6251     int seg_not_present, useable, lm;
6252     uint32_t *lp, entry_1, entry_2;
6253 
6254     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6255     if (!target_ldt_info)
6256         return -TARGET_EFAULT;
6257     idx = tswap32(target_ldt_info->entry_number);
6258     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6259         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6260         unlock_user_struct(target_ldt_info, ptr, 1);
6261         return -TARGET_EINVAL;
6262     }
6263     lp = (uint32_t *)(gdt_table + idx);
6264     entry_1 = tswap32(lp[0]);
6265     entry_2 = tswap32(lp[1]);
6266 
6267     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6268     contents = (entry_2 >> 10) & 3;
6269     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6270     seg_32bit = (entry_2 >> 22) & 1;
6271     limit_in_pages = (entry_2 >> 23) & 1;
6272     useable = (entry_2 >> 20) & 1;
6273 #ifdef TARGET_ABI32
6274     lm = 0;
6275 #else
6276     lm = (entry_2 >> 21) & 1;
6277 #endif
6278     flags = (seg_32bit << 0) | (contents << 1) |
6279         (read_exec_only << 3) | (limit_in_pages << 4) |
6280         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6281     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6282     base_addr = (entry_1 >> 16) |
6283         (entry_2 & 0xff000000) |
6284         ((entry_2 & 0xff) << 16);
6285     target_ldt_info->base_addr = tswapal(base_addr);
6286     target_ldt_info->limit = tswap32(limit);
6287     target_ldt_info->flags = tswap32(flags);
6288     unlock_user_struct(target_ldt_info, ptr, 1);
6289     return 0;
6290 }
6291 
6292 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6293 {
6294     return -TARGET_ENOSYS;
6295 }
6296 #else
6297 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6298 {
6299     abi_long ret = 0;
6300     abi_ulong val;
6301     int idx;
6302 
6303     switch(code) {
6304     case TARGET_ARCH_SET_GS:
6305     case TARGET_ARCH_SET_FS:
6306         if (code == TARGET_ARCH_SET_GS)
6307             idx = R_GS;
6308         else
6309             idx = R_FS;
6310         cpu_x86_load_seg(env, idx, 0);
6311         env->segs[idx].base = addr;
6312         break;
6313     case TARGET_ARCH_GET_GS:
6314     case TARGET_ARCH_GET_FS:
6315         if (code == TARGET_ARCH_GET_GS)
6316             idx = R_GS;
6317         else
6318             idx = R_FS;
6319         val = env->segs[idx].base;
6320         if (put_user(val, addr, abi_ulong))
6321             ret = -TARGET_EFAULT;
6322         break;
6323     default:
6324         ret = -TARGET_EINVAL;
6325         break;
6326     }
6327     return ret;
6328 }
6329 #endif /* defined(TARGET_ABI32 */
6330 #endif /* defined(TARGET_I386) */
6331 
6332 /*
6333  * These constants are generic.  Supply any that are missing from the host.
6334  */
6335 #ifndef PR_SET_NAME
6336 # define PR_SET_NAME    15
6337 # define PR_GET_NAME    16
6338 #endif
6339 #ifndef PR_SET_FP_MODE
6340 # define PR_SET_FP_MODE 45
6341 # define PR_GET_FP_MODE 46
6342 # define PR_FP_MODE_FR   (1 << 0)
6343 # define PR_FP_MODE_FRE  (1 << 1)
6344 #endif
6345 #ifndef PR_SVE_SET_VL
6346 # define PR_SVE_SET_VL  50
6347 # define PR_SVE_GET_VL  51
6348 # define PR_SVE_VL_LEN_MASK  0xffff
6349 # define PR_SVE_VL_INHERIT   (1 << 17)
6350 #endif
6351 #ifndef PR_PAC_RESET_KEYS
6352 # define PR_PAC_RESET_KEYS  54
6353 # define PR_PAC_APIAKEY   (1 << 0)
6354 # define PR_PAC_APIBKEY   (1 << 1)
6355 # define PR_PAC_APDAKEY   (1 << 2)
6356 # define PR_PAC_APDBKEY   (1 << 3)
6357 # define PR_PAC_APGAKEY   (1 << 4)
6358 #endif
6359 #ifndef PR_SET_TAGGED_ADDR_CTRL
6360 # define PR_SET_TAGGED_ADDR_CTRL 55
6361 # define PR_GET_TAGGED_ADDR_CTRL 56
6362 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6363 #endif
6364 #ifndef PR_MTE_TCF_SHIFT
6365 # define PR_MTE_TCF_SHIFT       1
6366 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6367 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6368 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6369 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6370 # define PR_MTE_TAG_SHIFT       3
6371 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6372 #endif
6373 #ifndef PR_SET_IO_FLUSHER
6374 # define PR_SET_IO_FLUSHER 57
6375 # define PR_GET_IO_FLUSHER 58
6376 #endif
6377 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6378 # define PR_SET_SYSCALL_USER_DISPATCH 59
6379 #endif
6380 #ifndef PR_SME_SET_VL
6381 # define PR_SME_SET_VL  63
6382 # define PR_SME_GET_VL  64
6383 # define PR_SME_VL_LEN_MASK  0xffff
6384 # define PR_SME_VL_INHERIT   (1 << 17)
6385 #endif
6386 
6387 #include "target_prctl.h"
6388 
6389 static abi_long do_prctl_inval0(CPUArchState *env)
6390 {
6391     return -TARGET_EINVAL;
6392 }
6393 
6394 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6395 {
6396     return -TARGET_EINVAL;
6397 }
6398 
6399 #ifndef do_prctl_get_fp_mode
6400 #define do_prctl_get_fp_mode do_prctl_inval0
6401 #endif
6402 #ifndef do_prctl_set_fp_mode
6403 #define do_prctl_set_fp_mode do_prctl_inval1
6404 #endif
6405 #ifndef do_prctl_sve_get_vl
6406 #define do_prctl_sve_get_vl do_prctl_inval0
6407 #endif
6408 #ifndef do_prctl_sve_set_vl
6409 #define do_prctl_sve_set_vl do_prctl_inval1
6410 #endif
6411 #ifndef do_prctl_reset_keys
6412 #define do_prctl_reset_keys do_prctl_inval1
6413 #endif
6414 #ifndef do_prctl_set_tagged_addr_ctrl
6415 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6416 #endif
6417 #ifndef do_prctl_get_tagged_addr_ctrl
6418 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6419 #endif
6420 #ifndef do_prctl_get_unalign
6421 #define do_prctl_get_unalign do_prctl_inval1
6422 #endif
6423 #ifndef do_prctl_set_unalign
6424 #define do_prctl_set_unalign do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_sme_get_vl
6427 #define do_prctl_sme_get_vl do_prctl_inval0
6428 #endif
6429 #ifndef do_prctl_sme_set_vl
6430 #define do_prctl_sme_set_vl do_prctl_inval1
6431 #endif
6432 
6433 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6434                          abi_long arg3, abi_long arg4, abi_long arg5)
6435 {
6436     abi_long ret;
6437 
6438     switch (option) {
6439     case PR_GET_PDEATHSIG:
6440         {
6441             int deathsig;
6442             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6443                                   arg3, arg4, arg5));
6444             if (!is_error(ret) &&
6445                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6446                 return -TARGET_EFAULT;
6447             }
6448             return ret;
6449         }
6450     case PR_SET_PDEATHSIG:
6451         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6452                                arg3, arg4, arg5));
6453     case PR_GET_NAME:
6454         {
6455             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6456             if (!name) {
6457                 return -TARGET_EFAULT;
6458             }
6459             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6460                                   arg3, arg4, arg5));
6461             unlock_user(name, arg2, 16);
6462             return ret;
6463         }
6464     case PR_SET_NAME:
6465         {
6466             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6467             if (!name) {
6468                 return -TARGET_EFAULT;
6469             }
6470             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6471                                   arg3, arg4, arg5));
6472             unlock_user(name, arg2, 0);
6473             return ret;
6474         }
6475     case PR_GET_FP_MODE:
6476         return do_prctl_get_fp_mode(env);
6477     case PR_SET_FP_MODE:
6478         return do_prctl_set_fp_mode(env, arg2);
6479     case PR_SVE_GET_VL:
6480         return do_prctl_sve_get_vl(env);
6481     case PR_SVE_SET_VL:
6482         return do_prctl_sve_set_vl(env, arg2);
6483     case PR_SME_GET_VL:
6484         return do_prctl_sme_get_vl(env);
6485     case PR_SME_SET_VL:
6486         return do_prctl_sme_set_vl(env, arg2);
6487     case PR_PAC_RESET_KEYS:
6488         if (arg3 || arg4 || arg5) {
6489             return -TARGET_EINVAL;
6490         }
6491         return do_prctl_reset_keys(env, arg2);
6492     case PR_SET_TAGGED_ADDR_CTRL:
6493         if (arg3 || arg4 || arg5) {
6494             return -TARGET_EINVAL;
6495         }
6496         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6497     case PR_GET_TAGGED_ADDR_CTRL:
6498         if (arg2 || arg3 || arg4 || arg5) {
6499             return -TARGET_EINVAL;
6500         }
6501         return do_prctl_get_tagged_addr_ctrl(env);
6502 
6503     case PR_GET_UNALIGN:
6504         return do_prctl_get_unalign(env, arg2);
6505     case PR_SET_UNALIGN:
6506         return do_prctl_set_unalign(env, arg2);
6507 
6508     case PR_CAP_AMBIENT:
6509     case PR_CAPBSET_READ:
6510     case PR_CAPBSET_DROP:
6511     case PR_GET_DUMPABLE:
6512     case PR_SET_DUMPABLE:
6513     case PR_GET_KEEPCAPS:
6514     case PR_SET_KEEPCAPS:
6515     case PR_GET_SECUREBITS:
6516     case PR_SET_SECUREBITS:
6517     case PR_GET_TIMING:
6518     case PR_SET_TIMING:
6519     case PR_GET_TIMERSLACK:
6520     case PR_SET_TIMERSLACK:
6521     case PR_MCE_KILL:
6522     case PR_MCE_KILL_GET:
6523     case PR_GET_NO_NEW_PRIVS:
6524     case PR_SET_NO_NEW_PRIVS:
6525     case PR_GET_IO_FLUSHER:
6526     case PR_SET_IO_FLUSHER:
6527         /* Some prctl options have no pointer arguments and we can pass on. */
6528         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6529 
6530     case PR_GET_CHILD_SUBREAPER:
6531     case PR_SET_CHILD_SUBREAPER:
6532     case PR_GET_SPECULATION_CTRL:
6533     case PR_SET_SPECULATION_CTRL:
6534     case PR_GET_TID_ADDRESS:
6535         /* TODO */
6536         return -TARGET_EINVAL;
6537 
6538     case PR_GET_FPEXC:
6539     case PR_SET_FPEXC:
6540         /* Was used for SPE on PowerPC. */
6541         return -TARGET_EINVAL;
6542 
6543     case PR_GET_ENDIAN:
6544     case PR_SET_ENDIAN:
6545     case PR_GET_FPEMU:
6546     case PR_SET_FPEMU:
6547     case PR_SET_MM:
6548     case PR_GET_SECCOMP:
6549     case PR_SET_SECCOMP:
6550     case PR_SET_SYSCALL_USER_DISPATCH:
6551     case PR_GET_THP_DISABLE:
6552     case PR_SET_THP_DISABLE:
6553     case PR_GET_TSC:
6554     case PR_SET_TSC:
6555         /* Disable to prevent the target disabling stuff we need. */
6556         return -TARGET_EINVAL;
6557 
6558     default:
6559         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6560                       option);
6561         return -TARGET_EINVAL;
6562     }
6563 }
6564 
6565 #define NEW_STACK_SIZE 0x40000
6566 
6567 
6568 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6569 typedef struct {
6570     CPUArchState *env;
6571     pthread_mutex_t mutex;
6572     pthread_cond_t cond;
6573     pthread_t thread;
6574     uint32_t tid;
6575     abi_ulong child_tidptr;
6576     abi_ulong parent_tidptr;
6577     sigset_t sigmask;
6578 } new_thread_info;
6579 
6580 static void *clone_func(void *arg)
6581 {
6582     new_thread_info *info = arg;
6583     CPUArchState *env;
6584     CPUState *cpu;
6585     TaskState *ts;
6586 
6587     rcu_register_thread();
6588     tcg_register_thread();
6589     env = info->env;
6590     cpu = env_cpu(env);
6591     thread_cpu = cpu;
6592     ts = (TaskState *)cpu->opaque;
6593     info->tid = sys_gettid();
6594     task_settid(ts);
6595     if (info->child_tidptr)
6596         put_user_u32(info->tid, info->child_tidptr);
6597     if (info->parent_tidptr)
6598         put_user_u32(info->tid, info->parent_tidptr);
6599     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6600     /* Enable signals.  */
6601     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6602     /* Signal to the parent that we're ready.  */
6603     pthread_mutex_lock(&info->mutex);
6604     pthread_cond_broadcast(&info->cond);
6605     pthread_mutex_unlock(&info->mutex);
6606     /* Wait until the parent has finished initializing the tls state.  */
6607     pthread_mutex_lock(&clone_lock);
6608     pthread_mutex_unlock(&clone_lock);
6609     cpu_loop(env);
6610     /* never exits */
6611     return NULL;
6612 }
6613 
6614 /* do_fork() Must return host values and target errnos (unlike most
6615    do_*() functions). */
6616 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6617                    abi_ulong parent_tidptr, target_ulong newtls,
6618                    abi_ulong child_tidptr)
6619 {
6620     CPUState *cpu = env_cpu(env);
6621     int ret;
6622     TaskState *ts;
6623     CPUState *new_cpu;
6624     CPUArchState *new_env;
6625     sigset_t sigmask;
6626 
6627     flags &= ~CLONE_IGNORED_FLAGS;
6628 
6629     /* Emulate vfork() with fork() */
6630     if (flags & CLONE_VFORK)
6631         flags &= ~(CLONE_VFORK | CLONE_VM);
6632 
6633     if (flags & CLONE_VM) {
6634         TaskState *parent_ts = (TaskState *)cpu->opaque;
6635         new_thread_info info;
6636         pthread_attr_t attr;
6637 
6638         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6639             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6640             return -TARGET_EINVAL;
6641         }
6642 
6643         ts = g_new0(TaskState, 1);
6644         init_task_state(ts);
6645 
6646         /* Grab a mutex so that thread setup appears atomic.  */
6647         pthread_mutex_lock(&clone_lock);
6648 
6649         /*
6650          * If this is our first additional thread, we need to ensure we
6651          * generate code for parallel execution and flush old translations.
6652          * Do this now so that the copy gets CF_PARALLEL too.
6653          */
6654         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6655             cpu->tcg_cflags |= CF_PARALLEL;
6656             tb_flush(cpu);
6657         }
6658 
6659         /* we create a new CPU instance. */
6660         new_env = cpu_copy(env);
6661         /* Init regs that differ from the parent.  */
6662         cpu_clone_regs_child(new_env, newsp, flags);
6663         cpu_clone_regs_parent(env, flags);
6664         new_cpu = env_cpu(new_env);
6665         new_cpu->opaque = ts;
6666         ts->bprm = parent_ts->bprm;
6667         ts->info = parent_ts->info;
6668         ts->signal_mask = parent_ts->signal_mask;
6669 
6670         if (flags & CLONE_CHILD_CLEARTID) {
6671             ts->child_tidptr = child_tidptr;
6672         }
6673 
6674         if (flags & CLONE_SETTLS) {
6675             cpu_set_tls (new_env, newtls);
6676         }
6677 
6678         memset(&info, 0, sizeof(info));
6679         pthread_mutex_init(&info.mutex, NULL);
6680         pthread_mutex_lock(&info.mutex);
6681         pthread_cond_init(&info.cond, NULL);
6682         info.env = new_env;
6683         if (flags & CLONE_CHILD_SETTID) {
6684             info.child_tidptr = child_tidptr;
6685         }
6686         if (flags & CLONE_PARENT_SETTID) {
6687             info.parent_tidptr = parent_tidptr;
6688         }
6689 
6690         ret = pthread_attr_init(&attr);
6691         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6692         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6693         /* It is not safe to deliver signals until the child has finished
6694            initializing, so temporarily block all signals.  */
6695         sigfillset(&sigmask);
6696         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6697         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6698 
6699         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6700         /* TODO: Free new CPU state if thread creation failed.  */
6701 
6702         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6703         pthread_attr_destroy(&attr);
6704         if (ret == 0) {
6705             /* Wait for the child to initialize.  */
6706             pthread_cond_wait(&info.cond, &info.mutex);
6707             ret = info.tid;
6708         } else {
6709             ret = -1;
6710         }
6711         pthread_mutex_unlock(&info.mutex);
6712         pthread_cond_destroy(&info.cond);
6713         pthread_mutex_destroy(&info.mutex);
6714         pthread_mutex_unlock(&clone_lock);
6715     } else {
6716         /* if no CLONE_VM, we consider it is a fork */
6717         if (flags & CLONE_INVALID_FORK_FLAGS) {
6718             return -TARGET_EINVAL;
6719         }
6720 
6721         /* We can't support custom termination signals */
6722         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6723             return -TARGET_EINVAL;
6724         }
6725 
6726         if (block_signals()) {
6727             return -QEMU_ERESTARTSYS;
6728         }
6729 
6730         fork_start();
6731         ret = fork();
6732         if (ret == 0) {
6733             /* Child Process.  */
6734             cpu_clone_regs_child(env, newsp, flags);
6735             fork_end(1);
6736             /* There is a race condition here.  The parent process could
6737                theoretically read the TID in the child process before the child
6738                tid is set.  This would require using either ptrace
6739                (not implemented) or having *_tidptr to point at a shared memory
6740                mapping.  We can't repeat the spinlock hack used above because
6741                the child process gets its own copy of the lock.  */
6742             if (flags & CLONE_CHILD_SETTID)
6743                 put_user_u32(sys_gettid(), child_tidptr);
6744             if (flags & CLONE_PARENT_SETTID)
6745                 put_user_u32(sys_gettid(), parent_tidptr);
6746             ts = (TaskState *)cpu->opaque;
6747             if (flags & CLONE_SETTLS)
6748                 cpu_set_tls (env, newtls);
6749             if (flags & CLONE_CHILD_CLEARTID)
6750                 ts->child_tidptr = child_tidptr;
6751         } else {
6752             cpu_clone_regs_parent(env, flags);
6753             fork_end(0);
6754         }
6755     }
6756     return ret;
6757 }
6758 
6759 /* warning : doesn't handle linux specific flags... */
6760 static int target_to_host_fcntl_cmd(int cmd)
6761 {
6762     int ret;
6763 
6764     switch(cmd) {
6765     case TARGET_F_DUPFD:
6766     case TARGET_F_GETFD:
6767     case TARGET_F_SETFD:
6768     case TARGET_F_GETFL:
6769     case TARGET_F_SETFL:
6770     case TARGET_F_OFD_GETLK:
6771     case TARGET_F_OFD_SETLK:
6772     case TARGET_F_OFD_SETLKW:
6773         ret = cmd;
6774         break;
6775     case TARGET_F_GETLK:
6776         ret = F_GETLK64;
6777         break;
6778     case TARGET_F_SETLK:
6779         ret = F_SETLK64;
6780         break;
6781     case TARGET_F_SETLKW:
6782         ret = F_SETLKW64;
6783         break;
6784     case TARGET_F_GETOWN:
6785         ret = F_GETOWN;
6786         break;
6787     case TARGET_F_SETOWN:
6788         ret = F_SETOWN;
6789         break;
6790     case TARGET_F_GETSIG:
6791         ret = F_GETSIG;
6792         break;
6793     case TARGET_F_SETSIG:
6794         ret = F_SETSIG;
6795         break;
6796 #if TARGET_ABI_BITS == 32
6797     case TARGET_F_GETLK64:
6798         ret = F_GETLK64;
6799         break;
6800     case TARGET_F_SETLK64:
6801         ret = F_SETLK64;
6802         break;
6803     case TARGET_F_SETLKW64:
6804         ret = F_SETLKW64;
6805         break;
6806 #endif
6807     case TARGET_F_SETLEASE:
6808         ret = F_SETLEASE;
6809         break;
6810     case TARGET_F_GETLEASE:
6811         ret = F_GETLEASE;
6812         break;
6813 #ifdef F_DUPFD_CLOEXEC
6814     case TARGET_F_DUPFD_CLOEXEC:
6815         ret = F_DUPFD_CLOEXEC;
6816         break;
6817 #endif
6818     case TARGET_F_NOTIFY:
6819         ret = F_NOTIFY;
6820         break;
6821 #ifdef F_GETOWN_EX
6822     case TARGET_F_GETOWN_EX:
6823         ret = F_GETOWN_EX;
6824         break;
6825 #endif
6826 #ifdef F_SETOWN_EX
6827     case TARGET_F_SETOWN_EX:
6828         ret = F_SETOWN_EX;
6829         break;
6830 #endif
6831 #ifdef F_SETPIPE_SZ
6832     case TARGET_F_SETPIPE_SZ:
6833         ret = F_SETPIPE_SZ;
6834         break;
6835     case TARGET_F_GETPIPE_SZ:
6836         ret = F_GETPIPE_SZ;
6837         break;
6838 #endif
6839 #ifdef F_ADD_SEALS
6840     case TARGET_F_ADD_SEALS:
6841         ret = F_ADD_SEALS;
6842         break;
6843     case TARGET_F_GET_SEALS:
6844         ret = F_GET_SEALS;
6845         break;
6846 #endif
6847     default:
6848         ret = -TARGET_EINVAL;
6849         break;
6850     }
6851 
6852 #if defined(__powerpc64__)
6853     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6854      * is not supported by kernel. The glibc fcntl call actually adjusts
6855      * them to 5, 6 and 7 before making the syscall(). Since we make the
6856      * syscall directly, adjust to what is supported by the kernel.
6857      */
6858     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6859         ret -= F_GETLK64 - 5;
6860     }
6861 #endif
6862 
6863     return ret;
6864 }
6865 
6866 #define FLOCK_TRANSTBL \
6867     switch (type) { \
6868     TRANSTBL_CONVERT(F_RDLCK); \
6869     TRANSTBL_CONVERT(F_WRLCK); \
6870     TRANSTBL_CONVERT(F_UNLCK); \
6871     }
6872 
6873 static int target_to_host_flock(int type)
6874 {
6875 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6876     FLOCK_TRANSTBL
6877 #undef  TRANSTBL_CONVERT
6878     return -TARGET_EINVAL;
6879 }
6880 
6881 static int host_to_target_flock(int type)
6882 {
6883 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6884     FLOCK_TRANSTBL
6885 #undef  TRANSTBL_CONVERT
6886     /* if we don't know how to convert the value coming
6887      * from the host we copy to the target field as-is
6888      */
6889     return type;
6890 }
6891 
6892 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6893                                             abi_ulong target_flock_addr)
6894 {
6895     struct target_flock *target_fl;
6896     int l_type;
6897 
6898     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6899         return -TARGET_EFAULT;
6900     }
6901 
6902     __get_user(l_type, &target_fl->l_type);
6903     l_type = target_to_host_flock(l_type);
6904     if (l_type < 0) {
6905         return l_type;
6906     }
6907     fl->l_type = l_type;
6908     __get_user(fl->l_whence, &target_fl->l_whence);
6909     __get_user(fl->l_start, &target_fl->l_start);
6910     __get_user(fl->l_len, &target_fl->l_len);
6911     __get_user(fl->l_pid, &target_fl->l_pid);
6912     unlock_user_struct(target_fl, target_flock_addr, 0);
6913     return 0;
6914 }
6915 
6916 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6917                                           const struct flock64 *fl)
6918 {
6919     struct target_flock *target_fl;
6920     short l_type;
6921 
6922     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6923         return -TARGET_EFAULT;
6924     }
6925 
6926     l_type = host_to_target_flock(fl->l_type);
6927     __put_user(l_type, &target_fl->l_type);
6928     __put_user(fl->l_whence, &target_fl->l_whence);
6929     __put_user(fl->l_start, &target_fl->l_start);
6930     __put_user(fl->l_len, &target_fl->l_len);
6931     __put_user(fl->l_pid, &target_fl->l_pid);
6932     unlock_user_struct(target_fl, target_flock_addr, 1);
6933     return 0;
6934 }
6935 
6936 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6937 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6938 
6939 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6940 struct target_oabi_flock64 {
6941     abi_short l_type;
6942     abi_short l_whence;
6943     abi_llong l_start;
6944     abi_llong l_len;
6945     abi_int   l_pid;
6946 } QEMU_PACKED;
6947 
6948 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6949                                                    abi_ulong target_flock_addr)
6950 {
6951     struct target_oabi_flock64 *target_fl;
6952     int l_type;
6953 
6954     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6955         return -TARGET_EFAULT;
6956     }
6957 
6958     __get_user(l_type, &target_fl->l_type);
6959     l_type = target_to_host_flock(l_type);
6960     if (l_type < 0) {
6961         return l_type;
6962     }
6963     fl->l_type = l_type;
6964     __get_user(fl->l_whence, &target_fl->l_whence);
6965     __get_user(fl->l_start, &target_fl->l_start);
6966     __get_user(fl->l_len, &target_fl->l_len);
6967     __get_user(fl->l_pid, &target_fl->l_pid);
6968     unlock_user_struct(target_fl, target_flock_addr, 0);
6969     return 0;
6970 }
6971 
6972 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6973                                                  const struct flock64 *fl)
6974 {
6975     struct target_oabi_flock64 *target_fl;
6976     short l_type;
6977 
6978     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6979         return -TARGET_EFAULT;
6980     }
6981 
6982     l_type = host_to_target_flock(fl->l_type);
6983     __put_user(l_type, &target_fl->l_type);
6984     __put_user(fl->l_whence, &target_fl->l_whence);
6985     __put_user(fl->l_start, &target_fl->l_start);
6986     __put_user(fl->l_len, &target_fl->l_len);
6987     __put_user(fl->l_pid, &target_fl->l_pid);
6988     unlock_user_struct(target_fl, target_flock_addr, 1);
6989     return 0;
6990 }
6991 #endif
6992 
6993 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6994                                               abi_ulong target_flock_addr)
6995 {
6996     struct target_flock64 *target_fl;
6997     int l_type;
6998 
6999     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7000         return -TARGET_EFAULT;
7001     }
7002 
7003     __get_user(l_type, &target_fl->l_type);
7004     l_type = target_to_host_flock(l_type);
7005     if (l_type < 0) {
7006         return l_type;
7007     }
7008     fl->l_type = l_type;
7009     __get_user(fl->l_whence, &target_fl->l_whence);
7010     __get_user(fl->l_start, &target_fl->l_start);
7011     __get_user(fl->l_len, &target_fl->l_len);
7012     __get_user(fl->l_pid, &target_fl->l_pid);
7013     unlock_user_struct(target_fl, target_flock_addr, 0);
7014     return 0;
7015 }
7016 
7017 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7018                                             const struct flock64 *fl)
7019 {
7020     struct target_flock64 *target_fl;
7021     short l_type;
7022 
7023     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7024         return -TARGET_EFAULT;
7025     }
7026 
7027     l_type = host_to_target_flock(fl->l_type);
7028     __put_user(l_type, &target_fl->l_type);
7029     __put_user(fl->l_whence, &target_fl->l_whence);
7030     __put_user(fl->l_start, &target_fl->l_start);
7031     __put_user(fl->l_len, &target_fl->l_len);
7032     __put_user(fl->l_pid, &target_fl->l_pid);
7033     unlock_user_struct(target_fl, target_flock_addr, 1);
7034     return 0;
7035 }
7036 
7037 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7038 {
7039     struct flock64 fl64;
7040 #ifdef F_GETOWN_EX
7041     struct f_owner_ex fox;
7042     struct target_f_owner_ex *target_fox;
7043 #endif
7044     abi_long ret;
7045     int host_cmd = target_to_host_fcntl_cmd(cmd);
7046 
7047     if (host_cmd == -TARGET_EINVAL)
7048 	    return host_cmd;
7049 
7050     switch(cmd) {
7051     case TARGET_F_GETLK:
7052         ret = copy_from_user_flock(&fl64, arg);
7053         if (ret) {
7054             return ret;
7055         }
7056         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7057         if (ret == 0) {
7058             ret = copy_to_user_flock(arg, &fl64);
7059         }
7060         break;
7061 
7062     case TARGET_F_SETLK:
7063     case TARGET_F_SETLKW:
7064         ret = copy_from_user_flock(&fl64, arg);
7065         if (ret) {
7066             return ret;
7067         }
7068         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7069         break;
7070 
7071     case TARGET_F_GETLK64:
7072     case TARGET_F_OFD_GETLK:
7073         ret = copy_from_user_flock64(&fl64, arg);
7074         if (ret) {
7075             return ret;
7076         }
7077         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7078         if (ret == 0) {
7079             ret = copy_to_user_flock64(arg, &fl64);
7080         }
7081         break;
7082     case TARGET_F_SETLK64:
7083     case TARGET_F_SETLKW64:
7084     case TARGET_F_OFD_SETLK:
7085     case TARGET_F_OFD_SETLKW:
7086         ret = copy_from_user_flock64(&fl64, arg);
7087         if (ret) {
7088             return ret;
7089         }
7090         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7091         break;
7092 
7093     case TARGET_F_GETFL:
7094         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7095         if (ret >= 0) {
7096             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7097         }
7098         break;
7099 
7100     case TARGET_F_SETFL:
7101         ret = get_errno(safe_fcntl(fd, host_cmd,
7102                                    target_to_host_bitmask(arg,
7103                                                           fcntl_flags_tbl)));
7104         break;
7105 
7106 #ifdef F_GETOWN_EX
7107     case TARGET_F_GETOWN_EX:
7108         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7109         if (ret >= 0) {
7110             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7111                 return -TARGET_EFAULT;
7112             target_fox->type = tswap32(fox.type);
7113             target_fox->pid = tswap32(fox.pid);
7114             unlock_user_struct(target_fox, arg, 1);
7115         }
7116         break;
7117 #endif
7118 
7119 #ifdef F_SETOWN_EX
7120     case TARGET_F_SETOWN_EX:
7121         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7122             return -TARGET_EFAULT;
7123         fox.type = tswap32(target_fox->type);
7124         fox.pid = tswap32(target_fox->pid);
7125         unlock_user_struct(target_fox, arg, 0);
7126         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7127         break;
7128 #endif
7129 
7130     case TARGET_F_SETSIG:
7131         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7132         break;
7133 
7134     case TARGET_F_GETSIG:
7135         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7136         break;
7137 
7138     case TARGET_F_SETOWN:
7139     case TARGET_F_GETOWN:
7140     case TARGET_F_SETLEASE:
7141     case TARGET_F_GETLEASE:
7142     case TARGET_F_SETPIPE_SZ:
7143     case TARGET_F_GETPIPE_SZ:
7144     case TARGET_F_ADD_SEALS:
7145     case TARGET_F_GET_SEALS:
7146         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7147         break;
7148 
7149     default:
7150         ret = get_errno(safe_fcntl(fd, cmd, arg));
7151         break;
7152     }
7153     return ret;
7154 }
7155 
7156 #ifdef USE_UID16
7157 
7158 static inline int high2lowuid(int uid)
7159 {
7160     if (uid > 65535)
7161         return 65534;
7162     else
7163         return uid;
7164 }
7165 
7166 static inline int high2lowgid(int gid)
7167 {
7168     if (gid > 65535)
7169         return 65534;
7170     else
7171         return gid;
7172 }
7173 
7174 static inline int low2highuid(int uid)
7175 {
7176     if ((int16_t)uid == -1)
7177         return -1;
7178     else
7179         return uid;
7180 }
7181 
7182 static inline int low2highgid(int gid)
7183 {
7184     if ((int16_t)gid == -1)
7185         return -1;
7186     else
7187         return gid;
7188 }
7189 static inline int tswapid(int id)
7190 {
7191     return tswap16(id);
7192 }
7193 
7194 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7195 
7196 #else /* !USE_UID16 */
7197 static inline int high2lowuid(int uid)
7198 {
7199     return uid;
7200 }
7201 static inline int high2lowgid(int gid)
7202 {
7203     return gid;
7204 }
7205 static inline int low2highuid(int uid)
7206 {
7207     return uid;
7208 }
7209 static inline int low2highgid(int gid)
7210 {
7211     return gid;
7212 }
7213 static inline int tswapid(int id)
7214 {
7215     return tswap32(id);
7216 }
7217 
7218 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7219 
7220 #endif /* USE_UID16 */
7221 
7222 /* We must do direct syscalls for setting UID/GID, because we want to
7223  * implement the Linux system call semantics of "change only for this thread",
7224  * not the libc/POSIX semantics of "change for all threads in process".
7225  * (See http://ewontfix.com/17/ for more details.)
7226  * We use the 32-bit version of the syscalls if present; if it is not
7227  * then either the host architecture supports 32-bit UIDs natively with
7228  * the standard syscall, or the 16-bit UID is the best we can do.
7229  */
7230 #ifdef __NR_setuid32
7231 #define __NR_sys_setuid __NR_setuid32
7232 #else
7233 #define __NR_sys_setuid __NR_setuid
7234 #endif
7235 #ifdef __NR_setgid32
7236 #define __NR_sys_setgid __NR_setgid32
7237 #else
7238 #define __NR_sys_setgid __NR_setgid
7239 #endif
7240 #ifdef __NR_setresuid32
7241 #define __NR_sys_setresuid __NR_setresuid32
7242 #else
7243 #define __NR_sys_setresuid __NR_setresuid
7244 #endif
7245 #ifdef __NR_setresgid32
7246 #define __NR_sys_setresgid __NR_setresgid32
7247 #else
7248 #define __NR_sys_setresgid __NR_setresgid
7249 #endif
7250 
7251 _syscall1(int, sys_setuid, uid_t, uid)
7252 _syscall1(int, sys_setgid, gid_t, gid)
7253 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7254 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7255 
7256 void syscall_init(void)
7257 {
7258     IOCTLEntry *ie;
7259     const argtype *arg_type;
7260     int size;
7261 
7262     thunk_init(STRUCT_MAX);
7263 
7264 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7265 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7266 #include "syscall_types.h"
7267 #undef STRUCT
7268 #undef STRUCT_SPECIAL
7269 
7270     /* we patch the ioctl size if necessary. We rely on the fact that
7271        no ioctl has all the bits at '1' in the size field */
7272     ie = ioctl_entries;
7273     while (ie->target_cmd != 0) {
7274         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7275             TARGET_IOC_SIZEMASK) {
7276             arg_type = ie->arg_type;
7277             if (arg_type[0] != TYPE_PTR) {
7278                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7279                         ie->target_cmd);
7280                 exit(1);
7281             }
7282             arg_type++;
7283             size = thunk_type_size(arg_type, 0);
7284             ie->target_cmd = (ie->target_cmd &
7285                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7286                 (size << TARGET_IOC_SIZESHIFT);
7287         }
7288 
7289         /* automatic consistency check if same arch */
7290 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7291     (defined(__x86_64__) && defined(TARGET_X86_64))
7292         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7293             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7294                     ie->name, ie->target_cmd, ie->host_cmd);
7295         }
7296 #endif
7297         ie++;
7298     }
7299 }
7300 
7301 #ifdef TARGET_NR_truncate64
7302 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7303                                          abi_long arg2,
7304                                          abi_long arg3,
7305                                          abi_long arg4)
7306 {
7307     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7308         arg2 = arg3;
7309         arg3 = arg4;
7310     }
7311     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7312 }
7313 #endif
7314 
7315 #ifdef TARGET_NR_ftruncate64
7316 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7317                                           abi_long arg2,
7318                                           abi_long arg3,
7319                                           abi_long arg4)
7320 {
7321     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7322         arg2 = arg3;
7323         arg3 = arg4;
7324     }
7325     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7326 }
7327 #endif
7328 
7329 #if defined(TARGET_NR_timer_settime) || \
7330     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7331 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7332                                                  abi_ulong target_addr)
7333 {
7334     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7335                                 offsetof(struct target_itimerspec,
7336                                          it_interval)) ||
7337         target_to_host_timespec(&host_its->it_value, target_addr +
7338                                 offsetof(struct target_itimerspec,
7339                                          it_value))) {
7340         return -TARGET_EFAULT;
7341     }
7342 
7343     return 0;
7344 }
7345 #endif
7346 
7347 #if defined(TARGET_NR_timer_settime64) || \
7348     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7349 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7350                                                    abi_ulong target_addr)
7351 {
7352     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7353                                   offsetof(struct target__kernel_itimerspec,
7354                                            it_interval)) ||
7355         target_to_host_timespec64(&host_its->it_value, target_addr +
7356                                   offsetof(struct target__kernel_itimerspec,
7357                                            it_value))) {
7358         return -TARGET_EFAULT;
7359     }
7360 
7361     return 0;
7362 }
7363 #endif
7364 
7365 #if ((defined(TARGET_NR_timerfd_gettime) || \
7366       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7367       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7368 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7369                                                  struct itimerspec *host_its)
7370 {
7371     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7372                                                        it_interval),
7373                                 &host_its->it_interval) ||
7374         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7375                                                        it_value),
7376                                 &host_its->it_value)) {
7377         return -TARGET_EFAULT;
7378     }
7379     return 0;
7380 }
7381 #endif
7382 
7383 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7384       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7385       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7386 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7387                                                    struct itimerspec *host_its)
7388 {
7389     if (host_to_target_timespec64(target_addr +
7390                                   offsetof(struct target__kernel_itimerspec,
7391                                            it_interval),
7392                                   &host_its->it_interval) ||
7393         host_to_target_timespec64(target_addr +
7394                                   offsetof(struct target__kernel_itimerspec,
7395                                            it_value),
7396                                   &host_its->it_value)) {
7397         return -TARGET_EFAULT;
7398     }
7399     return 0;
7400 }
7401 #endif
7402 
7403 #if defined(TARGET_NR_adjtimex) || \
7404     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7405 static inline abi_long target_to_host_timex(struct timex *host_tx,
7406                                             abi_long target_addr)
7407 {
7408     struct target_timex *target_tx;
7409 
7410     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7411         return -TARGET_EFAULT;
7412     }
7413 
7414     __get_user(host_tx->modes, &target_tx->modes);
7415     __get_user(host_tx->offset, &target_tx->offset);
7416     __get_user(host_tx->freq, &target_tx->freq);
7417     __get_user(host_tx->maxerror, &target_tx->maxerror);
7418     __get_user(host_tx->esterror, &target_tx->esterror);
7419     __get_user(host_tx->status, &target_tx->status);
7420     __get_user(host_tx->constant, &target_tx->constant);
7421     __get_user(host_tx->precision, &target_tx->precision);
7422     __get_user(host_tx->tolerance, &target_tx->tolerance);
7423     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7424     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7425     __get_user(host_tx->tick, &target_tx->tick);
7426     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7427     __get_user(host_tx->jitter, &target_tx->jitter);
7428     __get_user(host_tx->shift, &target_tx->shift);
7429     __get_user(host_tx->stabil, &target_tx->stabil);
7430     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7431     __get_user(host_tx->calcnt, &target_tx->calcnt);
7432     __get_user(host_tx->errcnt, &target_tx->errcnt);
7433     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7434     __get_user(host_tx->tai, &target_tx->tai);
7435 
7436     unlock_user_struct(target_tx, target_addr, 0);
7437     return 0;
7438 }
7439 
7440 static inline abi_long host_to_target_timex(abi_long target_addr,
7441                                             struct timex *host_tx)
7442 {
7443     struct target_timex *target_tx;
7444 
7445     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7446         return -TARGET_EFAULT;
7447     }
7448 
7449     __put_user(host_tx->modes, &target_tx->modes);
7450     __put_user(host_tx->offset, &target_tx->offset);
7451     __put_user(host_tx->freq, &target_tx->freq);
7452     __put_user(host_tx->maxerror, &target_tx->maxerror);
7453     __put_user(host_tx->esterror, &target_tx->esterror);
7454     __put_user(host_tx->status, &target_tx->status);
7455     __put_user(host_tx->constant, &target_tx->constant);
7456     __put_user(host_tx->precision, &target_tx->precision);
7457     __put_user(host_tx->tolerance, &target_tx->tolerance);
7458     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7459     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7460     __put_user(host_tx->tick, &target_tx->tick);
7461     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7462     __put_user(host_tx->jitter, &target_tx->jitter);
7463     __put_user(host_tx->shift, &target_tx->shift);
7464     __put_user(host_tx->stabil, &target_tx->stabil);
7465     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7466     __put_user(host_tx->calcnt, &target_tx->calcnt);
7467     __put_user(host_tx->errcnt, &target_tx->errcnt);
7468     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7469     __put_user(host_tx->tai, &target_tx->tai);
7470 
7471     unlock_user_struct(target_tx, target_addr, 1);
7472     return 0;
7473 }
7474 #endif
7475 
7476 
7477 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7478 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7479                                               abi_long target_addr)
7480 {
7481     struct target__kernel_timex *target_tx;
7482 
7483     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7484                                  offsetof(struct target__kernel_timex,
7485                                           time))) {
7486         return -TARGET_EFAULT;
7487     }
7488 
7489     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7490         return -TARGET_EFAULT;
7491     }
7492 
7493     __get_user(host_tx->modes, &target_tx->modes);
7494     __get_user(host_tx->offset, &target_tx->offset);
7495     __get_user(host_tx->freq, &target_tx->freq);
7496     __get_user(host_tx->maxerror, &target_tx->maxerror);
7497     __get_user(host_tx->esterror, &target_tx->esterror);
7498     __get_user(host_tx->status, &target_tx->status);
7499     __get_user(host_tx->constant, &target_tx->constant);
7500     __get_user(host_tx->precision, &target_tx->precision);
7501     __get_user(host_tx->tolerance, &target_tx->tolerance);
7502     __get_user(host_tx->tick, &target_tx->tick);
7503     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7504     __get_user(host_tx->jitter, &target_tx->jitter);
7505     __get_user(host_tx->shift, &target_tx->shift);
7506     __get_user(host_tx->stabil, &target_tx->stabil);
7507     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7508     __get_user(host_tx->calcnt, &target_tx->calcnt);
7509     __get_user(host_tx->errcnt, &target_tx->errcnt);
7510     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7511     __get_user(host_tx->tai, &target_tx->tai);
7512 
7513     unlock_user_struct(target_tx, target_addr, 0);
7514     return 0;
7515 }
7516 
7517 static inline abi_long host_to_target_timex64(abi_long target_addr,
7518                                               struct timex *host_tx)
7519 {
7520     struct target__kernel_timex *target_tx;
7521 
7522    if (copy_to_user_timeval64(target_addr +
7523                               offsetof(struct target__kernel_timex, time),
7524                               &host_tx->time)) {
7525         return -TARGET_EFAULT;
7526     }
7527 
7528     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7529         return -TARGET_EFAULT;
7530     }
7531 
7532     __put_user(host_tx->modes, &target_tx->modes);
7533     __put_user(host_tx->offset, &target_tx->offset);
7534     __put_user(host_tx->freq, &target_tx->freq);
7535     __put_user(host_tx->maxerror, &target_tx->maxerror);
7536     __put_user(host_tx->esterror, &target_tx->esterror);
7537     __put_user(host_tx->status, &target_tx->status);
7538     __put_user(host_tx->constant, &target_tx->constant);
7539     __put_user(host_tx->precision, &target_tx->precision);
7540     __put_user(host_tx->tolerance, &target_tx->tolerance);
7541     __put_user(host_tx->tick, &target_tx->tick);
7542     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7543     __put_user(host_tx->jitter, &target_tx->jitter);
7544     __put_user(host_tx->shift, &target_tx->shift);
7545     __put_user(host_tx->stabil, &target_tx->stabil);
7546     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7547     __put_user(host_tx->calcnt, &target_tx->calcnt);
7548     __put_user(host_tx->errcnt, &target_tx->errcnt);
7549     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7550     __put_user(host_tx->tai, &target_tx->tai);
7551 
7552     unlock_user_struct(target_tx, target_addr, 1);
7553     return 0;
7554 }
7555 #endif
7556 
7557 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7558 #define sigev_notify_thread_id _sigev_un._tid
7559 #endif
7560 
7561 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7562                                                abi_ulong target_addr)
7563 {
7564     struct target_sigevent *target_sevp;
7565 
7566     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7567         return -TARGET_EFAULT;
7568     }
7569 
7570     /* This union is awkward on 64 bit systems because it has a 32 bit
7571      * integer and a pointer in it; we follow the conversion approach
7572      * used for handling sigval types in signal.c so the guest should get
7573      * the correct value back even if we did a 64 bit byteswap and it's
7574      * using the 32 bit integer.
7575      */
7576     host_sevp->sigev_value.sival_ptr =
7577         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7578     host_sevp->sigev_signo =
7579         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7580     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7581     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7582 
7583     unlock_user_struct(target_sevp, target_addr, 1);
7584     return 0;
7585 }
7586 
7587 #if defined(TARGET_NR_mlockall)
7588 static inline int target_to_host_mlockall_arg(int arg)
7589 {
7590     int result = 0;
7591 
7592     if (arg & TARGET_MCL_CURRENT) {
7593         result |= MCL_CURRENT;
7594     }
7595     if (arg & TARGET_MCL_FUTURE) {
7596         result |= MCL_FUTURE;
7597     }
7598 #ifdef MCL_ONFAULT
7599     if (arg & TARGET_MCL_ONFAULT) {
7600         result |= MCL_ONFAULT;
7601     }
7602 #endif
7603 
7604     return result;
7605 }
7606 #endif
7607 
7608 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7609      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7610      defined(TARGET_NR_newfstatat))
7611 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7612                                              abi_ulong target_addr,
7613                                              struct stat *host_st)
7614 {
7615 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7616     if (cpu_env->eabi) {
7617         struct target_eabi_stat64 *target_st;
7618 
7619         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7620             return -TARGET_EFAULT;
7621         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7622         __put_user(host_st->st_dev, &target_st->st_dev);
7623         __put_user(host_st->st_ino, &target_st->st_ino);
7624 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7625         __put_user(host_st->st_ino, &target_st->__st_ino);
7626 #endif
7627         __put_user(host_st->st_mode, &target_st->st_mode);
7628         __put_user(host_st->st_nlink, &target_st->st_nlink);
7629         __put_user(host_st->st_uid, &target_st->st_uid);
7630         __put_user(host_st->st_gid, &target_st->st_gid);
7631         __put_user(host_st->st_rdev, &target_st->st_rdev);
7632         __put_user(host_st->st_size, &target_st->st_size);
7633         __put_user(host_st->st_blksize, &target_st->st_blksize);
7634         __put_user(host_st->st_blocks, &target_st->st_blocks);
7635         __put_user(host_st->st_atime, &target_st->target_st_atime);
7636         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7637         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7638 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7639         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7640         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7641         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7642 #endif
7643         unlock_user_struct(target_st, target_addr, 1);
7644     } else
7645 #endif
7646     {
7647 #if defined(TARGET_HAS_STRUCT_STAT64)
7648         struct target_stat64 *target_st;
7649 #else
7650         struct target_stat *target_st;
7651 #endif
7652 
7653         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7654             return -TARGET_EFAULT;
7655         memset(target_st, 0, sizeof(*target_st));
7656         __put_user(host_st->st_dev, &target_st->st_dev);
7657         __put_user(host_st->st_ino, &target_st->st_ino);
7658 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7659         __put_user(host_st->st_ino, &target_st->__st_ino);
7660 #endif
7661         __put_user(host_st->st_mode, &target_st->st_mode);
7662         __put_user(host_st->st_nlink, &target_st->st_nlink);
7663         __put_user(host_st->st_uid, &target_st->st_uid);
7664         __put_user(host_st->st_gid, &target_st->st_gid);
7665         __put_user(host_st->st_rdev, &target_st->st_rdev);
7666         /* XXX: better use of kernel struct */
7667         __put_user(host_st->st_size, &target_st->st_size);
7668         __put_user(host_st->st_blksize, &target_st->st_blksize);
7669         __put_user(host_st->st_blocks, &target_st->st_blocks);
7670         __put_user(host_st->st_atime, &target_st->target_st_atime);
7671         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7672         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7673 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7674         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7675         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7676         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7677 #endif
7678         unlock_user_struct(target_st, target_addr, 1);
7679     }
7680 
7681     return 0;
7682 }
7683 #endif
7684 
7685 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7686 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7687                                             abi_ulong target_addr)
7688 {
7689     struct target_statx *target_stx;
7690 
7691     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7692         return -TARGET_EFAULT;
7693     }
7694     memset(target_stx, 0, sizeof(*target_stx));
7695 
7696     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7697     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7698     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7699     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7700     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7701     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7702     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7703     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7704     __put_user(host_stx->stx_size, &target_stx->stx_size);
7705     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7706     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7707     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7708     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7709     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7710     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7711     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7712     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7713     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7714     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7715     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7716     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7717     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7718     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7719 
7720     unlock_user_struct(target_stx, target_addr, 1);
7721 
7722     return 0;
7723 }
7724 #endif
7725 
7726 static int do_sys_futex(int *uaddr, int op, int val,
7727                          const struct timespec *timeout, int *uaddr2,
7728                          int val3)
7729 {
7730 #if HOST_LONG_BITS == 64
7731 #if defined(__NR_futex)
7732     /* always a 64-bit time_t, it doesn't define _time64 version  */
7733     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7734 
7735 #endif
7736 #else /* HOST_LONG_BITS == 64 */
7737 #if defined(__NR_futex_time64)
7738     if (sizeof(timeout->tv_sec) == 8) {
7739         /* _time64 function on 32bit arch */
7740         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7741     }
7742 #endif
7743 #if defined(__NR_futex)
7744     /* old function on 32bit arch */
7745     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7746 #endif
7747 #endif /* HOST_LONG_BITS == 64 */
7748     g_assert_not_reached();
7749 }
7750 
7751 static int do_safe_futex(int *uaddr, int op, int val,
7752                          const struct timespec *timeout, int *uaddr2,
7753                          int val3)
7754 {
7755 #if HOST_LONG_BITS == 64
7756 #if defined(__NR_futex)
7757     /* always a 64-bit time_t, it doesn't define _time64 version  */
7758     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7759 #endif
7760 #else /* HOST_LONG_BITS == 64 */
7761 #if defined(__NR_futex_time64)
7762     if (sizeof(timeout->tv_sec) == 8) {
7763         /* _time64 function on 32bit arch */
7764         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7765                                            val3));
7766     }
7767 #endif
7768 #if defined(__NR_futex)
7769     /* old function on 32bit arch */
7770     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7771 #endif
7772 #endif /* HOST_LONG_BITS == 64 */
7773     return -TARGET_ENOSYS;
7774 }
7775 
7776 /* ??? Using host futex calls even when target atomic operations
7777    are not really atomic probably breaks things.  However implementing
7778    futexes locally would make futexes shared between multiple processes
7779    tricky.  However they're probably useless because guest atomic
7780    operations won't work either.  */
7781 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7782 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7783                     int op, int val, target_ulong timeout,
7784                     target_ulong uaddr2, int val3)
7785 {
7786     struct timespec ts, *pts = NULL;
7787     void *haddr2 = NULL;
7788     int base_op;
7789 
7790     /* We assume FUTEX_* constants are the same on both host and target. */
7791 #ifdef FUTEX_CMD_MASK
7792     base_op = op & FUTEX_CMD_MASK;
7793 #else
7794     base_op = op;
7795 #endif
7796     switch (base_op) {
7797     case FUTEX_WAIT:
7798     case FUTEX_WAIT_BITSET:
7799         val = tswap32(val);
7800         break;
7801     case FUTEX_WAIT_REQUEUE_PI:
7802         val = tswap32(val);
7803         haddr2 = g2h(cpu, uaddr2);
7804         break;
7805     case FUTEX_LOCK_PI:
7806     case FUTEX_LOCK_PI2:
7807         break;
7808     case FUTEX_WAKE:
7809     case FUTEX_WAKE_BITSET:
7810     case FUTEX_TRYLOCK_PI:
7811     case FUTEX_UNLOCK_PI:
7812         timeout = 0;
7813         break;
7814     case FUTEX_FD:
7815         val = target_to_host_signal(val);
7816         timeout = 0;
7817         break;
7818     case FUTEX_CMP_REQUEUE:
7819     case FUTEX_CMP_REQUEUE_PI:
7820         val3 = tswap32(val3);
7821         /* fall through */
7822     case FUTEX_REQUEUE:
7823     case FUTEX_WAKE_OP:
7824         /*
7825          * For these, the 4th argument is not TIMEOUT, but VAL2.
7826          * But the prototype of do_safe_futex takes a pointer, so
7827          * insert casts to satisfy the compiler.  We do not need
7828          * to tswap VAL2 since it's not compared to guest memory.
7829           */
7830         pts = (struct timespec *)(uintptr_t)timeout;
7831         timeout = 0;
7832         haddr2 = g2h(cpu, uaddr2);
7833         break;
7834     default:
7835         return -TARGET_ENOSYS;
7836     }
7837     if (timeout) {
7838         pts = &ts;
7839         if (time64
7840             ? target_to_host_timespec64(pts, timeout)
7841             : target_to_host_timespec(pts, timeout)) {
7842             return -TARGET_EFAULT;
7843         }
7844     }
7845     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7846 }
7847 #endif
7848 
7849 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7850 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7851                                      abi_long handle, abi_long mount_id,
7852                                      abi_long flags)
7853 {
7854     struct file_handle *target_fh;
7855     struct file_handle *fh;
7856     int mid = 0;
7857     abi_long ret;
7858     char *name;
7859     unsigned int size, total_size;
7860 
7861     if (get_user_s32(size, handle)) {
7862         return -TARGET_EFAULT;
7863     }
7864 
7865     name = lock_user_string(pathname);
7866     if (!name) {
7867         return -TARGET_EFAULT;
7868     }
7869 
7870     total_size = sizeof(struct file_handle) + size;
7871     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7872     if (!target_fh) {
7873         unlock_user(name, pathname, 0);
7874         return -TARGET_EFAULT;
7875     }
7876 
7877     fh = g_malloc0(total_size);
7878     fh->handle_bytes = size;
7879 
7880     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7881     unlock_user(name, pathname, 0);
7882 
7883     /* man name_to_handle_at(2):
7884      * Other than the use of the handle_bytes field, the caller should treat
7885      * the file_handle structure as an opaque data type
7886      */
7887 
7888     memcpy(target_fh, fh, total_size);
7889     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7890     target_fh->handle_type = tswap32(fh->handle_type);
7891     g_free(fh);
7892     unlock_user(target_fh, handle, total_size);
7893 
7894     if (put_user_s32(mid, mount_id)) {
7895         return -TARGET_EFAULT;
7896     }
7897 
7898     return ret;
7899 
7900 }
7901 #endif
7902 
7903 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7904 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7905                                      abi_long flags)
7906 {
7907     struct file_handle *target_fh;
7908     struct file_handle *fh;
7909     unsigned int size, total_size;
7910     abi_long ret;
7911 
7912     if (get_user_s32(size, handle)) {
7913         return -TARGET_EFAULT;
7914     }
7915 
7916     total_size = sizeof(struct file_handle) + size;
7917     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7918     if (!target_fh) {
7919         return -TARGET_EFAULT;
7920     }
7921 
7922     fh = g_memdup(target_fh, total_size);
7923     fh->handle_bytes = size;
7924     fh->handle_type = tswap32(target_fh->handle_type);
7925 
7926     ret = get_errno(open_by_handle_at(mount_fd, fh,
7927                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7928 
7929     g_free(fh);
7930 
7931     unlock_user(target_fh, handle, total_size);
7932 
7933     return ret;
7934 }
7935 #endif
7936 
7937 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7938 
7939 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7940 {
7941     int host_flags;
7942     target_sigset_t *target_mask;
7943     sigset_t host_mask;
7944     abi_long ret;
7945 
7946     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7947         return -TARGET_EINVAL;
7948     }
7949     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7950         return -TARGET_EFAULT;
7951     }
7952 
7953     target_to_host_sigset(&host_mask, target_mask);
7954 
7955     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7956 
7957     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7958     if (ret >= 0) {
7959         fd_trans_register(ret, &target_signalfd_trans);
7960     }
7961 
7962     unlock_user_struct(target_mask, mask, 0);
7963 
7964     return ret;
7965 }
7966 #endif
7967 
7968 /* Map host to target signal numbers for the wait family of syscalls.
7969    Assume all other status bits are the same.  */
7970 int host_to_target_waitstatus(int status)
7971 {
7972     if (WIFSIGNALED(status)) {
7973         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7974     }
7975     if (WIFSTOPPED(status)) {
7976         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7977                | (status & 0xff);
7978     }
7979     return status;
7980 }
7981 
7982 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7983 {
7984     CPUState *cpu = env_cpu(cpu_env);
7985     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7986     int i;
7987 
7988     for (i = 0; i < bprm->argc; i++) {
7989         size_t len = strlen(bprm->argv[i]) + 1;
7990 
7991         if (write(fd, bprm->argv[i], len) != len) {
7992             return -1;
7993         }
7994     }
7995 
7996     return 0;
7997 }
7998 
7999 static int open_self_maps(CPUArchState *cpu_env, int fd)
8000 {
8001     CPUState *cpu = env_cpu(cpu_env);
8002     TaskState *ts = cpu->opaque;
8003     GSList *map_info = read_self_maps();
8004     GSList *s;
8005     int count;
8006 
8007     for (s = map_info; s; s = g_slist_next(s)) {
8008         MapInfo *e = (MapInfo *) s->data;
8009 
8010         if (h2g_valid(e->start)) {
8011             unsigned long min = e->start;
8012             unsigned long max = e->end;
8013             int flags = page_get_flags(h2g(min));
8014             const char *path;
8015 
8016             max = h2g_valid(max - 1) ?
8017                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8018 
8019             if (page_check_range(h2g(min), max - min, flags) == -1) {
8020                 continue;
8021             }
8022 
8023 #ifdef TARGET_HPPA
8024             if (h2g(max) == ts->info->stack_limit) {
8025 #else
8026             if (h2g(min) == ts->info->stack_limit) {
8027 #endif
8028                 path = "[stack]";
8029             } else {
8030                 path = e->path;
8031             }
8032 
8033             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8034                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8035                             h2g(min), h2g(max - 1) + 1,
8036                             (flags & PAGE_READ) ? 'r' : '-',
8037                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8038                             (flags & PAGE_EXEC) ? 'x' : '-',
8039                             e->is_priv ? 'p' : 's',
8040                             (uint64_t) e->offset, e->dev, e->inode);
8041             if (path) {
8042                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8043             } else {
8044                 dprintf(fd, "\n");
8045             }
8046         }
8047     }
8048 
8049     free_self_maps(map_info);
8050 
8051 #ifdef TARGET_VSYSCALL_PAGE
8052     /*
8053      * We only support execution from the vsyscall page.
8054      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8055      */
8056     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8057                     " --xp 00000000 00:00 0",
8058                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8059     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8060 #endif
8061 
8062     return 0;
8063 }
8064 
8065 static int open_self_stat(CPUArchState *cpu_env, int fd)
8066 {
8067     CPUState *cpu = env_cpu(cpu_env);
8068     TaskState *ts = cpu->opaque;
8069     g_autoptr(GString) buf = g_string_new(NULL);
8070     int i;
8071 
8072     for (i = 0; i < 44; i++) {
8073         if (i == 0) {
8074             /* pid */
8075             g_string_printf(buf, FMT_pid " ", getpid());
8076         } else if (i == 1) {
8077             /* app name */
8078             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8079             bin = bin ? bin + 1 : ts->bprm->argv[0];
8080             g_string_printf(buf, "(%.15s) ", bin);
8081         } else if (i == 3) {
8082             /* ppid */
8083             g_string_printf(buf, FMT_pid " ", getppid());
8084         } else if (i == 21) {
8085             /* starttime */
8086             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8087         } else if (i == 27) {
8088             /* stack bottom */
8089             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8090         } else {
8091             /* for the rest, there is MasterCard */
8092             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8093         }
8094 
8095         if (write(fd, buf->str, buf->len) != buf->len) {
8096             return -1;
8097         }
8098     }
8099 
8100     return 0;
8101 }
8102 
8103 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8104 {
8105     CPUState *cpu = env_cpu(cpu_env);
8106     TaskState *ts = cpu->opaque;
8107     abi_ulong auxv = ts->info->saved_auxv;
8108     abi_ulong len = ts->info->auxv_len;
8109     char *ptr;
8110 
8111     /*
8112      * Auxiliary vector is stored in target process stack.
8113      * read in whole auxv vector and copy it to file
8114      */
8115     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8116     if (ptr != NULL) {
8117         while (len > 0) {
8118             ssize_t r;
8119             r = write(fd, ptr, len);
8120             if (r <= 0) {
8121                 break;
8122             }
8123             len -= r;
8124             ptr += r;
8125         }
8126         lseek(fd, 0, SEEK_SET);
8127         unlock_user(ptr, auxv, len);
8128     }
8129 
8130     return 0;
8131 }
8132 
8133 static int is_proc_myself(const char *filename, const char *entry)
8134 {
8135     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8136         filename += strlen("/proc/");
8137         if (!strncmp(filename, "self/", strlen("self/"))) {
8138             filename += strlen("self/");
8139         } else if (*filename >= '1' && *filename <= '9') {
8140             char myself[80];
8141             snprintf(myself, sizeof(myself), "%d/", getpid());
8142             if (!strncmp(filename, myself, strlen(myself))) {
8143                 filename += strlen(myself);
8144             } else {
8145                 return 0;
8146             }
8147         } else {
8148             return 0;
8149         }
8150         if (!strcmp(filename, entry)) {
8151             return 1;
8152         }
8153     }
8154     return 0;
8155 }
8156 
8157 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8158                       const char *fmt, int code)
8159 {
8160     if (logfile) {
8161         CPUState *cs = env_cpu(env);
8162 
8163         fprintf(logfile, fmt, code);
8164         fprintf(logfile, "Failing executable: %s\n", exec_path);
8165         cpu_dump_state(cs, logfile, 0);
8166         open_self_maps(env, fileno(logfile));
8167     }
8168 }
8169 
8170 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8171 {
8172     /* dump to console */
8173     excp_dump_file(stderr, env, fmt, code);
8174 
8175     /* dump to log file */
8176     if (qemu_log_separate()) {
8177         FILE *logfile = qemu_log_trylock();
8178 
8179         excp_dump_file(logfile, env, fmt, code);
8180         qemu_log_unlock(logfile);
8181     }
8182 }
8183 
8184 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8185     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8186 static int is_proc(const char *filename, const char *entry)
8187 {
8188     return strcmp(filename, entry) == 0;
8189 }
8190 #endif
8191 
8192 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8193 static int open_net_route(CPUArchState *cpu_env, int fd)
8194 {
8195     FILE *fp;
8196     char *line = NULL;
8197     size_t len = 0;
8198     ssize_t read;
8199 
8200     fp = fopen("/proc/net/route", "r");
8201     if (fp == NULL) {
8202         return -1;
8203     }
8204 
8205     /* read header */
8206 
8207     read = getline(&line, &len, fp);
8208     dprintf(fd, "%s", line);
8209 
8210     /* read routes */
8211 
8212     while ((read = getline(&line, &len, fp)) != -1) {
8213         char iface[16];
8214         uint32_t dest, gw, mask;
8215         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8216         int fields;
8217 
8218         fields = sscanf(line,
8219                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8220                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8221                         &mask, &mtu, &window, &irtt);
8222         if (fields != 11) {
8223             continue;
8224         }
8225         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8226                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8227                 metric, tswap32(mask), mtu, window, irtt);
8228     }
8229 
8230     free(line);
8231     fclose(fp);
8232 
8233     return 0;
8234 }
8235 #endif
8236 
8237 #if defined(TARGET_SPARC)
8238 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8239 {
8240     dprintf(fd, "type\t\t: sun4u\n");
8241     return 0;
8242 }
8243 #endif
8244 
8245 #if defined(TARGET_HPPA)
8246 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8247 {
8248     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8249     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8250     dprintf(fd, "capabilities\t: os32\n");
8251     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8252     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8253     return 0;
8254 }
8255 #endif
8256 
8257 #if defined(TARGET_M68K)
8258 static int open_hardware(CPUArchState *cpu_env, int fd)
8259 {
8260     dprintf(fd, "Model:\t\tqemu-m68k\n");
8261     return 0;
8262 }
8263 #endif
8264 
8265 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8266 {
8267     struct fake_open {
8268         const char *filename;
8269         int (*fill)(CPUArchState *cpu_env, int fd);
8270         int (*cmp)(const char *s1, const char *s2);
8271     };
8272     const struct fake_open *fake_open;
8273     static const struct fake_open fakes[] = {
8274         { "maps", open_self_maps, is_proc_myself },
8275         { "stat", open_self_stat, is_proc_myself },
8276         { "auxv", open_self_auxv, is_proc_myself },
8277         { "cmdline", open_self_cmdline, is_proc_myself },
8278 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8279         { "/proc/net/route", open_net_route, is_proc },
8280 #endif
8281 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8282         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8283 #endif
8284 #if defined(TARGET_M68K)
8285         { "/proc/hardware", open_hardware, is_proc },
8286 #endif
8287         { NULL, NULL, NULL }
8288     };
8289 
8290     if (is_proc_myself(pathname, "exe")) {
8291         return safe_openat(dirfd, exec_path, flags, mode);
8292     }
8293 
8294     for (fake_open = fakes; fake_open->filename; fake_open++) {
8295         if (fake_open->cmp(pathname, fake_open->filename)) {
8296             break;
8297         }
8298     }
8299 
8300     if (fake_open->filename) {
8301         const char *tmpdir;
8302         char filename[PATH_MAX];
8303         int fd, r;
8304 
8305         fd = memfd_create("qemu-open", 0);
8306         if (fd < 0) {
8307             if (errno != ENOSYS) {
8308                 return fd;
8309             }
8310             /* create temporary file to map stat to */
8311             tmpdir = getenv("TMPDIR");
8312             if (!tmpdir)
8313                 tmpdir = "/tmp";
8314             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8315             fd = mkstemp(filename);
8316             if (fd < 0) {
8317                 return fd;
8318             }
8319             unlink(filename);
8320         }
8321 
8322         if ((r = fake_open->fill(cpu_env, fd))) {
8323             int e = errno;
8324             close(fd);
8325             errno = e;
8326             return r;
8327         }
8328         lseek(fd, 0, SEEK_SET);
8329 
8330         return fd;
8331     }
8332 
8333     return safe_openat(dirfd, path(pathname), flags, mode);
8334 }
8335 
8336 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8337                        abi_long pathname, abi_long guest_argp,
8338                        abi_long guest_envp, int flags)
8339 {
8340     int ret;
8341     char **argp, **envp;
8342     int argc, envc;
8343     abi_ulong gp;
8344     abi_ulong addr;
8345     char **q;
8346     void *p;
8347 
8348     argc = 0;
8349 
8350     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8351         if (get_user_ual(addr, gp)) {
8352             return -TARGET_EFAULT;
8353         }
8354         if (!addr) {
8355             break;
8356         }
8357         argc++;
8358     }
8359     envc = 0;
8360     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8361         if (get_user_ual(addr, gp)) {
8362             return -TARGET_EFAULT;
8363         }
8364         if (!addr) {
8365             break;
8366         }
8367         envc++;
8368     }
8369 
8370     argp = g_new0(char *, argc + 1);
8371     envp = g_new0(char *, envc + 1);
8372 
8373     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8374         if (get_user_ual(addr, gp)) {
8375             goto execve_efault;
8376         }
8377         if (!addr) {
8378             break;
8379         }
8380         *q = lock_user_string(addr);
8381         if (!*q) {
8382             goto execve_efault;
8383         }
8384     }
8385     *q = NULL;
8386 
8387     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8388         if (get_user_ual(addr, gp)) {
8389             goto execve_efault;
8390         }
8391         if (!addr) {
8392             break;
8393         }
8394         *q = lock_user_string(addr);
8395         if (!*q) {
8396             goto execve_efault;
8397         }
8398     }
8399     *q = NULL;
8400 
8401     /*
8402      * Although execve() is not an interruptible syscall it is
8403      * a special case where we must use the safe_syscall wrapper:
8404      * if we allow a signal to happen before we make the host
8405      * syscall then we will 'lose' it, because at the point of
8406      * execve the process leaves QEMU's control. So we use the
8407      * safe syscall wrapper to ensure that we either take the
8408      * signal as a guest signal, or else it does not happen
8409      * before the execve completes and makes it the other
8410      * program's problem.
8411      */
8412     p = lock_user_string(pathname);
8413     if (!p) {
8414         goto execve_efault;
8415     }
8416 
8417     if (is_proc_myself(p, "exe")) {
8418         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8419     } else {
8420         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8421     }
8422 
8423     unlock_user(p, pathname, 0);
8424 
8425     goto execve_end;
8426 
8427 execve_efault:
8428     ret = -TARGET_EFAULT;
8429 
8430 execve_end:
8431     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8432         if (get_user_ual(addr, gp) || !addr) {
8433             break;
8434         }
8435         unlock_user(*q, addr, 0);
8436     }
8437     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8438         if (get_user_ual(addr, gp) || !addr) {
8439             break;
8440         }
8441         unlock_user(*q, addr, 0);
8442     }
8443 
8444     g_free(argp);
8445     g_free(envp);
8446     return ret;
8447 }
8448 
8449 #define TIMER_MAGIC 0x0caf0000
8450 #define TIMER_MAGIC_MASK 0xffff0000
8451 
8452 /* Convert QEMU provided timer ID back to internal 16bit index format */
8453 static target_timer_t get_timer_id(abi_long arg)
8454 {
8455     target_timer_t timerid = arg;
8456 
8457     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8458         return -TARGET_EINVAL;
8459     }
8460 
8461     timerid &= 0xffff;
8462 
8463     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8464         return -TARGET_EINVAL;
8465     }
8466 
8467     return timerid;
8468 }
8469 
8470 static int target_to_host_cpu_mask(unsigned long *host_mask,
8471                                    size_t host_size,
8472                                    abi_ulong target_addr,
8473                                    size_t target_size)
8474 {
8475     unsigned target_bits = sizeof(abi_ulong) * 8;
8476     unsigned host_bits = sizeof(*host_mask) * 8;
8477     abi_ulong *target_mask;
8478     unsigned i, j;
8479 
8480     assert(host_size >= target_size);
8481 
8482     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8483     if (!target_mask) {
8484         return -TARGET_EFAULT;
8485     }
8486     memset(host_mask, 0, host_size);
8487 
8488     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8489         unsigned bit = i * target_bits;
8490         abi_ulong val;
8491 
8492         __get_user(val, &target_mask[i]);
8493         for (j = 0; j < target_bits; j++, bit++) {
8494             if (val & (1UL << j)) {
8495                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8496             }
8497         }
8498     }
8499 
8500     unlock_user(target_mask, target_addr, 0);
8501     return 0;
8502 }
8503 
8504 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8505                                    size_t host_size,
8506                                    abi_ulong target_addr,
8507                                    size_t target_size)
8508 {
8509     unsigned target_bits = sizeof(abi_ulong) * 8;
8510     unsigned host_bits = sizeof(*host_mask) * 8;
8511     abi_ulong *target_mask;
8512     unsigned i, j;
8513 
8514     assert(host_size >= target_size);
8515 
8516     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8517     if (!target_mask) {
8518         return -TARGET_EFAULT;
8519     }
8520 
8521     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8522         unsigned bit = i * target_bits;
8523         abi_ulong val = 0;
8524 
8525         for (j = 0; j < target_bits; j++, bit++) {
8526             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8527                 val |= 1UL << j;
8528             }
8529         }
8530         __put_user(val, &target_mask[i]);
8531     }
8532 
8533     unlock_user(target_mask, target_addr, target_size);
8534     return 0;
8535 }
8536 
8537 #ifdef TARGET_NR_getdents
8538 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8539 {
8540     g_autofree void *hdirp = NULL;
8541     void *tdirp;
8542     int hlen, hoff, toff;
8543     int hreclen, treclen;
8544     off64_t prev_diroff = 0;
8545 
8546     hdirp = g_try_malloc(count);
8547     if (!hdirp) {
8548         return -TARGET_ENOMEM;
8549     }
8550 
8551 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8552     hlen = sys_getdents(dirfd, hdirp, count);
8553 #else
8554     hlen = sys_getdents64(dirfd, hdirp, count);
8555 #endif
8556 
8557     hlen = get_errno(hlen);
8558     if (is_error(hlen)) {
8559         return hlen;
8560     }
8561 
8562     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8563     if (!tdirp) {
8564         return -TARGET_EFAULT;
8565     }
8566 
8567     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8568 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8569         struct linux_dirent *hde = hdirp + hoff;
8570 #else
8571         struct linux_dirent64 *hde = hdirp + hoff;
8572 #endif
8573         struct target_dirent *tde = tdirp + toff;
8574         int namelen;
8575         uint8_t type;
8576 
8577         namelen = strlen(hde->d_name);
8578         hreclen = hde->d_reclen;
8579         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8580         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8581 
8582         if (toff + treclen > count) {
8583             /*
8584              * If the host struct is smaller than the target struct, or
8585              * requires less alignment and thus packs into less space,
8586              * then the host can return more entries than we can pass
8587              * on to the guest.
8588              */
8589             if (toff == 0) {
8590                 toff = -TARGET_EINVAL; /* result buffer is too small */
8591                 break;
8592             }
8593             /*
8594              * Return what we have, resetting the file pointer to the
8595              * location of the first record not returned.
8596              */
8597             lseek64(dirfd, prev_diroff, SEEK_SET);
8598             break;
8599         }
8600 
8601         prev_diroff = hde->d_off;
8602         tde->d_ino = tswapal(hde->d_ino);
8603         tde->d_off = tswapal(hde->d_off);
8604         tde->d_reclen = tswap16(treclen);
8605         memcpy(tde->d_name, hde->d_name, namelen + 1);
8606 
8607         /*
8608          * The getdents type is in what was formerly a padding byte at the
8609          * end of the structure.
8610          */
8611 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8612         type = *((uint8_t *)hde + hreclen - 1);
8613 #else
8614         type = hde->d_type;
8615 #endif
8616         *((uint8_t *)tde + treclen - 1) = type;
8617     }
8618 
8619     unlock_user(tdirp, arg2, toff);
8620     return toff;
8621 }
8622 #endif /* TARGET_NR_getdents */
8623 
8624 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8625 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8626 {
8627     g_autofree void *hdirp = NULL;
8628     void *tdirp;
8629     int hlen, hoff, toff;
8630     int hreclen, treclen;
8631     off64_t prev_diroff = 0;
8632 
8633     hdirp = g_try_malloc(count);
8634     if (!hdirp) {
8635         return -TARGET_ENOMEM;
8636     }
8637 
8638     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8639     if (is_error(hlen)) {
8640         return hlen;
8641     }
8642 
8643     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8644     if (!tdirp) {
8645         return -TARGET_EFAULT;
8646     }
8647 
8648     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8649         struct linux_dirent64 *hde = hdirp + hoff;
8650         struct target_dirent64 *tde = tdirp + toff;
8651         int namelen;
8652 
8653         namelen = strlen(hde->d_name) + 1;
8654         hreclen = hde->d_reclen;
8655         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8656         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8657 
8658         if (toff + treclen > count) {
8659             /*
8660              * If the host struct is smaller than the target struct, or
8661              * requires less alignment and thus packs into less space,
8662              * then the host can return more entries than we can pass
8663              * on to the guest.
8664              */
8665             if (toff == 0) {
8666                 toff = -TARGET_EINVAL; /* result buffer is too small */
8667                 break;
8668             }
8669             /*
8670              * Return what we have, resetting the file pointer to the
8671              * location of the first record not returned.
8672              */
8673             lseek64(dirfd, prev_diroff, SEEK_SET);
8674             break;
8675         }
8676 
8677         prev_diroff = hde->d_off;
8678         tde->d_ino = tswap64(hde->d_ino);
8679         tde->d_off = tswap64(hde->d_off);
8680         tde->d_reclen = tswap16(treclen);
8681         tde->d_type = hde->d_type;
8682         memcpy(tde->d_name, hde->d_name, namelen);
8683     }
8684 
8685     unlock_user(tdirp, arg2, toff);
8686     return toff;
8687 }
8688 #endif /* TARGET_NR_getdents64 */
8689 
8690 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8691 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8692 #endif
8693 
8694 /* This is an internal helper for do_syscall so that it is easier
8695  * to have a single return point, so that actions, such as logging
8696  * of syscall results, can be performed.
8697  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8698  */
8699 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8700                             abi_long arg2, abi_long arg3, abi_long arg4,
8701                             abi_long arg5, abi_long arg6, abi_long arg7,
8702                             abi_long arg8)
8703 {
8704     CPUState *cpu = env_cpu(cpu_env);
8705     abi_long ret;
8706 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8707     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8708     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8709     || defined(TARGET_NR_statx)
8710     struct stat st;
8711 #endif
8712 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8713     || defined(TARGET_NR_fstatfs)
8714     struct statfs stfs;
8715 #endif
8716     void *p;
8717 
8718     switch(num) {
8719     case TARGET_NR_exit:
8720         /* In old applications this may be used to implement _exit(2).
8721            However in threaded applications it is used for thread termination,
8722            and _exit_group is used for application termination.
8723            Do thread termination if we have more then one thread.  */
8724 
8725         if (block_signals()) {
8726             return -QEMU_ERESTARTSYS;
8727         }
8728 
8729         pthread_mutex_lock(&clone_lock);
8730 
8731         if (CPU_NEXT(first_cpu)) {
8732             TaskState *ts = cpu->opaque;
8733 
8734             if (ts->child_tidptr) {
8735                 put_user_u32(0, ts->child_tidptr);
8736                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8737                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8738             }
8739 
8740             object_unparent(OBJECT(cpu));
8741             object_unref(OBJECT(cpu));
8742             /*
8743              * At this point the CPU should be unrealized and removed
8744              * from cpu lists. We can clean-up the rest of the thread
8745              * data without the lock held.
8746              */
8747 
8748             pthread_mutex_unlock(&clone_lock);
8749 
8750             thread_cpu = NULL;
8751             g_free(ts);
8752             rcu_unregister_thread();
8753             pthread_exit(NULL);
8754         }
8755 
8756         pthread_mutex_unlock(&clone_lock);
8757         preexit_cleanup(cpu_env, arg1);
8758         _exit(arg1);
8759         return 0; /* avoid warning */
8760     case TARGET_NR_read:
8761         if (arg2 == 0 && arg3 == 0) {
8762             return get_errno(safe_read(arg1, 0, 0));
8763         } else {
8764             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8765                 return -TARGET_EFAULT;
8766             ret = get_errno(safe_read(arg1, p, arg3));
8767             if (ret >= 0 &&
8768                 fd_trans_host_to_target_data(arg1)) {
8769                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8770             }
8771             unlock_user(p, arg2, ret);
8772         }
8773         return ret;
8774     case TARGET_NR_write:
8775         if (arg2 == 0 && arg3 == 0) {
8776             return get_errno(safe_write(arg1, 0, 0));
8777         }
8778         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8779             return -TARGET_EFAULT;
8780         if (fd_trans_target_to_host_data(arg1)) {
8781             void *copy = g_malloc(arg3);
8782             memcpy(copy, p, arg3);
8783             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8784             if (ret >= 0) {
8785                 ret = get_errno(safe_write(arg1, copy, ret));
8786             }
8787             g_free(copy);
8788         } else {
8789             ret = get_errno(safe_write(arg1, p, arg3));
8790         }
8791         unlock_user(p, arg2, 0);
8792         return ret;
8793 
8794 #ifdef TARGET_NR_open
8795     case TARGET_NR_open:
8796         if (!(p = lock_user_string(arg1)))
8797             return -TARGET_EFAULT;
8798         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8799                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8800                                   arg3));
8801         fd_trans_unregister(ret);
8802         unlock_user(p, arg1, 0);
8803         return ret;
8804 #endif
8805     case TARGET_NR_openat:
8806         if (!(p = lock_user_string(arg2)))
8807             return -TARGET_EFAULT;
8808         ret = get_errno(do_openat(cpu_env, arg1, p,
8809                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8810                                   arg4));
8811         fd_trans_unregister(ret);
8812         unlock_user(p, arg2, 0);
8813         return ret;
8814 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8815     case TARGET_NR_name_to_handle_at:
8816         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8817         return ret;
8818 #endif
8819 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8820     case TARGET_NR_open_by_handle_at:
8821         ret = do_open_by_handle_at(arg1, arg2, arg3);
8822         fd_trans_unregister(ret);
8823         return ret;
8824 #endif
8825 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8826     case TARGET_NR_pidfd_open:
8827         return get_errno(pidfd_open(arg1, arg2));
8828 #endif
8829 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8830     case TARGET_NR_pidfd_send_signal:
8831         {
8832             siginfo_t uinfo, *puinfo;
8833 
8834             if (arg3) {
8835                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8836                 if (!p) {
8837                     return -TARGET_EFAULT;
8838                  }
8839                  target_to_host_siginfo(&uinfo, p);
8840                  unlock_user(p, arg3, 0);
8841                  puinfo = &uinfo;
8842             } else {
8843                  puinfo = NULL;
8844             }
8845             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8846                                               puinfo, arg4));
8847         }
8848         return ret;
8849 #endif
8850 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8851     case TARGET_NR_pidfd_getfd:
8852         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8853 #endif
8854     case TARGET_NR_close:
8855         fd_trans_unregister(arg1);
8856         return get_errno(close(arg1));
8857 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8858     case TARGET_NR_close_range:
8859         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8860         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8861             abi_long fd, maxfd;
8862             maxfd = MIN(arg2, target_fd_max);
8863             for (fd = arg1; fd < maxfd; fd++) {
8864                 fd_trans_unregister(fd);
8865             }
8866         }
8867         return ret;
8868 #endif
8869 
8870     case TARGET_NR_brk:
8871         return do_brk(arg1);
8872 #ifdef TARGET_NR_fork
8873     case TARGET_NR_fork:
8874         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8875 #endif
8876 #ifdef TARGET_NR_waitpid
8877     case TARGET_NR_waitpid:
8878         {
8879             int status;
8880             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8881             if (!is_error(ret) && arg2 && ret
8882                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8883                 return -TARGET_EFAULT;
8884         }
8885         return ret;
8886 #endif
8887 #ifdef TARGET_NR_waitid
8888     case TARGET_NR_waitid:
8889         {
8890             siginfo_t info;
8891             info.si_pid = 0;
8892             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8893             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8894                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8895                     return -TARGET_EFAULT;
8896                 host_to_target_siginfo(p, &info);
8897                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8898             }
8899         }
8900         return ret;
8901 #endif
8902 #ifdef TARGET_NR_creat /* not on alpha */
8903     case TARGET_NR_creat:
8904         if (!(p = lock_user_string(arg1)))
8905             return -TARGET_EFAULT;
8906         ret = get_errno(creat(p, arg2));
8907         fd_trans_unregister(ret);
8908         unlock_user(p, arg1, 0);
8909         return ret;
8910 #endif
8911 #ifdef TARGET_NR_link
8912     case TARGET_NR_link:
8913         {
8914             void * p2;
8915             p = lock_user_string(arg1);
8916             p2 = lock_user_string(arg2);
8917             if (!p || !p2)
8918                 ret = -TARGET_EFAULT;
8919             else
8920                 ret = get_errno(link(p, p2));
8921             unlock_user(p2, arg2, 0);
8922             unlock_user(p, arg1, 0);
8923         }
8924         return ret;
8925 #endif
8926 #if defined(TARGET_NR_linkat)
8927     case TARGET_NR_linkat:
8928         {
8929             void * p2 = NULL;
8930             if (!arg2 || !arg4)
8931                 return -TARGET_EFAULT;
8932             p  = lock_user_string(arg2);
8933             p2 = lock_user_string(arg4);
8934             if (!p || !p2)
8935                 ret = -TARGET_EFAULT;
8936             else
8937                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8938             unlock_user(p, arg2, 0);
8939             unlock_user(p2, arg4, 0);
8940         }
8941         return ret;
8942 #endif
8943 #ifdef TARGET_NR_unlink
8944     case TARGET_NR_unlink:
8945         if (!(p = lock_user_string(arg1)))
8946             return -TARGET_EFAULT;
8947         ret = get_errno(unlink(p));
8948         unlock_user(p, arg1, 0);
8949         return ret;
8950 #endif
8951 #if defined(TARGET_NR_unlinkat)
8952     case TARGET_NR_unlinkat:
8953         if (!(p = lock_user_string(arg2)))
8954             return -TARGET_EFAULT;
8955         ret = get_errno(unlinkat(arg1, p, arg3));
8956         unlock_user(p, arg2, 0);
8957         return ret;
8958 #endif
8959     case TARGET_NR_execveat:
8960         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8961     case TARGET_NR_execve:
8962         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8963     case TARGET_NR_chdir:
8964         if (!(p = lock_user_string(arg1)))
8965             return -TARGET_EFAULT;
8966         ret = get_errno(chdir(p));
8967         unlock_user(p, arg1, 0);
8968         return ret;
8969 #ifdef TARGET_NR_time
8970     case TARGET_NR_time:
8971         {
8972             time_t host_time;
8973             ret = get_errno(time(&host_time));
8974             if (!is_error(ret)
8975                 && arg1
8976                 && put_user_sal(host_time, arg1))
8977                 return -TARGET_EFAULT;
8978         }
8979         return ret;
8980 #endif
8981 #ifdef TARGET_NR_mknod
8982     case TARGET_NR_mknod:
8983         if (!(p = lock_user_string(arg1)))
8984             return -TARGET_EFAULT;
8985         ret = get_errno(mknod(p, arg2, arg3));
8986         unlock_user(p, arg1, 0);
8987         return ret;
8988 #endif
8989 #if defined(TARGET_NR_mknodat)
8990     case TARGET_NR_mknodat:
8991         if (!(p = lock_user_string(arg2)))
8992             return -TARGET_EFAULT;
8993         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8994         unlock_user(p, arg2, 0);
8995         return ret;
8996 #endif
8997 #ifdef TARGET_NR_chmod
8998     case TARGET_NR_chmod:
8999         if (!(p = lock_user_string(arg1)))
9000             return -TARGET_EFAULT;
9001         ret = get_errno(chmod(p, arg2));
9002         unlock_user(p, arg1, 0);
9003         return ret;
9004 #endif
9005 #ifdef TARGET_NR_lseek
9006     case TARGET_NR_lseek:
9007         return get_errno(lseek(arg1, arg2, arg3));
9008 #endif
9009 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9010     /* Alpha specific */
9011     case TARGET_NR_getxpid:
9012         cpu_env->ir[IR_A4] = getppid();
9013         return get_errno(getpid());
9014 #endif
9015 #ifdef TARGET_NR_getpid
9016     case TARGET_NR_getpid:
9017         return get_errno(getpid());
9018 #endif
9019     case TARGET_NR_mount:
9020         {
9021             /* need to look at the data field */
9022             void *p2, *p3;
9023 
9024             if (arg1) {
9025                 p = lock_user_string(arg1);
9026                 if (!p) {
9027                     return -TARGET_EFAULT;
9028                 }
9029             } else {
9030                 p = NULL;
9031             }
9032 
9033             p2 = lock_user_string(arg2);
9034             if (!p2) {
9035                 if (arg1) {
9036                     unlock_user(p, arg1, 0);
9037                 }
9038                 return -TARGET_EFAULT;
9039             }
9040 
9041             if (arg3) {
9042                 p3 = lock_user_string(arg3);
9043                 if (!p3) {
9044                     if (arg1) {
9045                         unlock_user(p, arg1, 0);
9046                     }
9047                     unlock_user(p2, arg2, 0);
9048                     return -TARGET_EFAULT;
9049                 }
9050             } else {
9051                 p3 = NULL;
9052             }
9053 
9054             /* FIXME - arg5 should be locked, but it isn't clear how to
9055              * do that since it's not guaranteed to be a NULL-terminated
9056              * string.
9057              */
9058             if (!arg5) {
9059                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9060             } else {
9061                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9062             }
9063             ret = get_errno(ret);
9064 
9065             if (arg1) {
9066                 unlock_user(p, arg1, 0);
9067             }
9068             unlock_user(p2, arg2, 0);
9069             if (arg3) {
9070                 unlock_user(p3, arg3, 0);
9071             }
9072         }
9073         return ret;
9074 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9075 #if defined(TARGET_NR_umount)
9076     case TARGET_NR_umount:
9077 #endif
9078 #if defined(TARGET_NR_oldumount)
9079     case TARGET_NR_oldumount:
9080 #endif
9081         if (!(p = lock_user_string(arg1)))
9082             return -TARGET_EFAULT;
9083         ret = get_errno(umount(p));
9084         unlock_user(p, arg1, 0);
9085         return ret;
9086 #endif
9087 #ifdef TARGET_NR_stime /* not on alpha */
9088     case TARGET_NR_stime:
9089         {
9090             struct timespec ts;
9091             ts.tv_nsec = 0;
9092             if (get_user_sal(ts.tv_sec, arg1)) {
9093                 return -TARGET_EFAULT;
9094             }
9095             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9096         }
9097 #endif
9098 #ifdef TARGET_NR_alarm /* not on alpha */
9099     case TARGET_NR_alarm:
9100         return alarm(arg1);
9101 #endif
9102 #ifdef TARGET_NR_pause /* not on alpha */
9103     case TARGET_NR_pause:
9104         if (!block_signals()) {
9105             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9106         }
9107         return -TARGET_EINTR;
9108 #endif
9109 #ifdef TARGET_NR_utime
9110     case TARGET_NR_utime:
9111         {
9112             struct utimbuf tbuf, *host_tbuf;
9113             struct target_utimbuf *target_tbuf;
9114             if (arg2) {
9115                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9116                     return -TARGET_EFAULT;
9117                 tbuf.actime = tswapal(target_tbuf->actime);
9118                 tbuf.modtime = tswapal(target_tbuf->modtime);
9119                 unlock_user_struct(target_tbuf, arg2, 0);
9120                 host_tbuf = &tbuf;
9121             } else {
9122                 host_tbuf = NULL;
9123             }
9124             if (!(p = lock_user_string(arg1)))
9125                 return -TARGET_EFAULT;
9126             ret = get_errno(utime(p, host_tbuf));
9127             unlock_user(p, arg1, 0);
9128         }
9129         return ret;
9130 #endif
9131 #ifdef TARGET_NR_utimes
9132     case TARGET_NR_utimes:
9133         {
9134             struct timeval *tvp, tv[2];
9135             if (arg2) {
9136                 if (copy_from_user_timeval(&tv[0], arg2)
9137                     || copy_from_user_timeval(&tv[1],
9138                                               arg2 + sizeof(struct target_timeval)))
9139                     return -TARGET_EFAULT;
9140                 tvp = tv;
9141             } else {
9142                 tvp = NULL;
9143             }
9144             if (!(p = lock_user_string(arg1)))
9145                 return -TARGET_EFAULT;
9146             ret = get_errno(utimes(p, tvp));
9147             unlock_user(p, arg1, 0);
9148         }
9149         return ret;
9150 #endif
9151 #if defined(TARGET_NR_futimesat)
9152     case TARGET_NR_futimesat:
9153         {
9154             struct timeval *tvp, tv[2];
9155             if (arg3) {
9156                 if (copy_from_user_timeval(&tv[0], arg3)
9157                     || copy_from_user_timeval(&tv[1],
9158                                               arg3 + sizeof(struct target_timeval)))
9159                     return -TARGET_EFAULT;
9160                 tvp = tv;
9161             } else {
9162                 tvp = NULL;
9163             }
9164             if (!(p = lock_user_string(arg2))) {
9165                 return -TARGET_EFAULT;
9166             }
9167             ret = get_errno(futimesat(arg1, path(p), tvp));
9168             unlock_user(p, arg2, 0);
9169         }
9170         return ret;
9171 #endif
9172 #ifdef TARGET_NR_access
9173     case TARGET_NR_access:
9174         if (!(p = lock_user_string(arg1))) {
9175             return -TARGET_EFAULT;
9176         }
9177         ret = get_errno(access(path(p), arg2));
9178         unlock_user(p, arg1, 0);
9179         return ret;
9180 #endif
9181 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9182     case TARGET_NR_faccessat:
9183         if (!(p = lock_user_string(arg2))) {
9184             return -TARGET_EFAULT;
9185         }
9186         ret = get_errno(faccessat(arg1, p, arg3, 0));
9187         unlock_user(p, arg2, 0);
9188         return ret;
9189 #endif
9190 #if defined(TARGET_NR_faccessat2)
9191     case TARGET_NR_faccessat2:
9192         if (!(p = lock_user_string(arg2))) {
9193             return -TARGET_EFAULT;
9194         }
9195         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9196         unlock_user(p, arg2, 0);
9197         return ret;
9198 #endif
9199 #ifdef TARGET_NR_nice /* not on alpha */
9200     case TARGET_NR_nice:
9201         return get_errno(nice(arg1));
9202 #endif
9203     case TARGET_NR_sync:
9204         sync();
9205         return 0;
9206 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9207     case TARGET_NR_syncfs:
9208         return get_errno(syncfs(arg1));
9209 #endif
9210     case TARGET_NR_kill:
9211         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9212 #ifdef TARGET_NR_rename
9213     case TARGET_NR_rename:
9214         {
9215             void *p2;
9216             p = lock_user_string(arg1);
9217             p2 = lock_user_string(arg2);
9218             if (!p || !p2)
9219                 ret = -TARGET_EFAULT;
9220             else
9221                 ret = get_errno(rename(p, p2));
9222             unlock_user(p2, arg2, 0);
9223             unlock_user(p, arg1, 0);
9224         }
9225         return ret;
9226 #endif
9227 #if defined(TARGET_NR_renameat)
9228     case TARGET_NR_renameat:
9229         {
9230             void *p2;
9231             p  = lock_user_string(arg2);
9232             p2 = lock_user_string(arg4);
9233             if (!p || !p2)
9234                 ret = -TARGET_EFAULT;
9235             else
9236                 ret = get_errno(renameat(arg1, p, arg3, p2));
9237             unlock_user(p2, arg4, 0);
9238             unlock_user(p, arg2, 0);
9239         }
9240         return ret;
9241 #endif
9242 #if defined(TARGET_NR_renameat2)
9243     case TARGET_NR_renameat2:
9244         {
9245             void *p2;
9246             p  = lock_user_string(arg2);
9247             p2 = lock_user_string(arg4);
9248             if (!p || !p2) {
9249                 ret = -TARGET_EFAULT;
9250             } else {
9251                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9252             }
9253             unlock_user(p2, arg4, 0);
9254             unlock_user(p, arg2, 0);
9255         }
9256         return ret;
9257 #endif
9258 #ifdef TARGET_NR_mkdir
9259     case TARGET_NR_mkdir:
9260         if (!(p = lock_user_string(arg1)))
9261             return -TARGET_EFAULT;
9262         ret = get_errno(mkdir(p, arg2));
9263         unlock_user(p, arg1, 0);
9264         return ret;
9265 #endif
9266 #if defined(TARGET_NR_mkdirat)
9267     case TARGET_NR_mkdirat:
9268         if (!(p = lock_user_string(arg2)))
9269             return -TARGET_EFAULT;
9270         ret = get_errno(mkdirat(arg1, p, arg3));
9271         unlock_user(p, arg2, 0);
9272         return ret;
9273 #endif
9274 #ifdef TARGET_NR_rmdir
9275     case TARGET_NR_rmdir:
9276         if (!(p = lock_user_string(arg1)))
9277             return -TARGET_EFAULT;
9278         ret = get_errno(rmdir(p));
9279         unlock_user(p, arg1, 0);
9280         return ret;
9281 #endif
9282     case TARGET_NR_dup:
9283         ret = get_errno(dup(arg1));
9284         if (ret >= 0) {
9285             fd_trans_dup(arg1, ret);
9286         }
9287         return ret;
9288 #ifdef TARGET_NR_pipe
9289     case TARGET_NR_pipe:
9290         return do_pipe(cpu_env, arg1, 0, 0);
9291 #endif
9292 #ifdef TARGET_NR_pipe2
9293     case TARGET_NR_pipe2:
9294         return do_pipe(cpu_env, arg1,
9295                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9296 #endif
9297     case TARGET_NR_times:
9298         {
9299             struct target_tms *tmsp;
9300             struct tms tms;
9301             ret = get_errno(times(&tms));
9302             if (arg1) {
9303                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9304                 if (!tmsp)
9305                     return -TARGET_EFAULT;
9306                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9307                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9308                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9309                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9310             }
9311             if (!is_error(ret))
9312                 ret = host_to_target_clock_t(ret);
9313         }
9314         return ret;
9315     case TARGET_NR_acct:
9316         if (arg1 == 0) {
9317             ret = get_errno(acct(NULL));
9318         } else {
9319             if (!(p = lock_user_string(arg1))) {
9320                 return -TARGET_EFAULT;
9321             }
9322             ret = get_errno(acct(path(p)));
9323             unlock_user(p, arg1, 0);
9324         }
9325         return ret;
9326 #ifdef TARGET_NR_umount2
9327     case TARGET_NR_umount2:
9328         if (!(p = lock_user_string(arg1)))
9329             return -TARGET_EFAULT;
9330         ret = get_errno(umount2(p, arg2));
9331         unlock_user(p, arg1, 0);
9332         return ret;
9333 #endif
9334     case TARGET_NR_ioctl:
9335         return do_ioctl(arg1, arg2, arg3);
9336 #ifdef TARGET_NR_fcntl
9337     case TARGET_NR_fcntl:
9338         return do_fcntl(arg1, arg2, arg3);
9339 #endif
9340     case TARGET_NR_setpgid:
9341         return get_errno(setpgid(arg1, arg2));
9342     case TARGET_NR_umask:
9343         return get_errno(umask(arg1));
9344     case TARGET_NR_chroot:
9345         if (!(p = lock_user_string(arg1)))
9346             return -TARGET_EFAULT;
9347         ret = get_errno(chroot(p));
9348         unlock_user(p, arg1, 0);
9349         return ret;
9350 #ifdef TARGET_NR_dup2
9351     case TARGET_NR_dup2:
9352         ret = get_errno(dup2(arg1, arg2));
9353         if (ret >= 0) {
9354             fd_trans_dup(arg1, arg2);
9355         }
9356         return ret;
9357 #endif
9358 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9359     case TARGET_NR_dup3:
9360     {
9361         int host_flags;
9362 
9363         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9364             return -EINVAL;
9365         }
9366         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9367         ret = get_errno(dup3(arg1, arg2, host_flags));
9368         if (ret >= 0) {
9369             fd_trans_dup(arg1, arg2);
9370         }
9371         return ret;
9372     }
9373 #endif
9374 #ifdef TARGET_NR_getppid /* not on alpha */
9375     case TARGET_NR_getppid:
9376         return get_errno(getppid());
9377 #endif
9378 #ifdef TARGET_NR_getpgrp
9379     case TARGET_NR_getpgrp:
9380         return get_errno(getpgrp());
9381 #endif
9382     case TARGET_NR_setsid:
9383         return get_errno(setsid());
9384 #ifdef TARGET_NR_sigaction
9385     case TARGET_NR_sigaction:
9386         {
9387 #if defined(TARGET_MIPS)
9388 	    struct target_sigaction act, oact, *pact, *old_act;
9389 
9390 	    if (arg2) {
9391                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9392                     return -TARGET_EFAULT;
9393 		act._sa_handler = old_act->_sa_handler;
9394 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9395 		act.sa_flags = old_act->sa_flags;
9396 		unlock_user_struct(old_act, arg2, 0);
9397 		pact = &act;
9398 	    } else {
9399 		pact = NULL;
9400 	    }
9401 
9402         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9403 
9404 	    if (!is_error(ret) && arg3) {
9405                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9406                     return -TARGET_EFAULT;
9407 		old_act->_sa_handler = oact._sa_handler;
9408 		old_act->sa_flags = oact.sa_flags;
9409 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9410 		old_act->sa_mask.sig[1] = 0;
9411 		old_act->sa_mask.sig[2] = 0;
9412 		old_act->sa_mask.sig[3] = 0;
9413 		unlock_user_struct(old_act, arg3, 1);
9414 	    }
9415 #else
9416             struct target_old_sigaction *old_act;
9417             struct target_sigaction act, oact, *pact;
9418             if (arg2) {
9419                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9420                     return -TARGET_EFAULT;
9421                 act._sa_handler = old_act->_sa_handler;
9422                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9423                 act.sa_flags = old_act->sa_flags;
9424 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9425                 act.sa_restorer = old_act->sa_restorer;
9426 #endif
9427                 unlock_user_struct(old_act, arg2, 0);
9428                 pact = &act;
9429             } else {
9430                 pact = NULL;
9431             }
9432             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9433             if (!is_error(ret) && arg3) {
9434                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9435                     return -TARGET_EFAULT;
9436                 old_act->_sa_handler = oact._sa_handler;
9437                 old_act->sa_mask = oact.sa_mask.sig[0];
9438                 old_act->sa_flags = oact.sa_flags;
9439 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9440                 old_act->sa_restorer = oact.sa_restorer;
9441 #endif
9442                 unlock_user_struct(old_act, arg3, 1);
9443             }
9444 #endif
9445         }
9446         return ret;
9447 #endif
9448     case TARGET_NR_rt_sigaction:
9449         {
9450             /*
9451              * For Alpha and SPARC this is a 5 argument syscall, with
9452              * a 'restorer' parameter which must be copied into the
9453              * sa_restorer field of the sigaction struct.
9454              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9455              * and arg5 is the sigsetsize.
9456              */
9457 #if defined(TARGET_ALPHA)
9458             target_ulong sigsetsize = arg4;
9459             target_ulong restorer = arg5;
9460 #elif defined(TARGET_SPARC)
9461             target_ulong restorer = arg4;
9462             target_ulong sigsetsize = arg5;
9463 #else
9464             target_ulong sigsetsize = arg4;
9465             target_ulong restorer = 0;
9466 #endif
9467             struct target_sigaction *act = NULL;
9468             struct target_sigaction *oact = NULL;
9469 
9470             if (sigsetsize != sizeof(target_sigset_t)) {
9471                 return -TARGET_EINVAL;
9472             }
9473             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9474                 return -TARGET_EFAULT;
9475             }
9476             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9477                 ret = -TARGET_EFAULT;
9478             } else {
9479                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9480                 if (oact) {
9481                     unlock_user_struct(oact, arg3, 1);
9482                 }
9483             }
9484             if (act) {
9485                 unlock_user_struct(act, arg2, 0);
9486             }
9487         }
9488         return ret;
9489 #ifdef TARGET_NR_sgetmask /* not on alpha */
9490     case TARGET_NR_sgetmask:
9491         {
9492             sigset_t cur_set;
9493             abi_ulong target_set;
9494             ret = do_sigprocmask(0, NULL, &cur_set);
9495             if (!ret) {
9496                 host_to_target_old_sigset(&target_set, &cur_set);
9497                 ret = target_set;
9498             }
9499         }
9500         return ret;
9501 #endif
9502 #ifdef TARGET_NR_ssetmask /* not on alpha */
9503     case TARGET_NR_ssetmask:
9504         {
9505             sigset_t set, oset;
9506             abi_ulong target_set = arg1;
9507             target_to_host_old_sigset(&set, &target_set);
9508             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9509             if (!ret) {
9510                 host_to_target_old_sigset(&target_set, &oset);
9511                 ret = target_set;
9512             }
9513         }
9514         return ret;
9515 #endif
9516 #ifdef TARGET_NR_sigprocmask
9517     case TARGET_NR_sigprocmask:
9518         {
9519 #if defined(TARGET_ALPHA)
9520             sigset_t set, oldset;
9521             abi_ulong mask;
9522             int how;
9523 
9524             switch (arg1) {
9525             case TARGET_SIG_BLOCK:
9526                 how = SIG_BLOCK;
9527                 break;
9528             case TARGET_SIG_UNBLOCK:
9529                 how = SIG_UNBLOCK;
9530                 break;
9531             case TARGET_SIG_SETMASK:
9532                 how = SIG_SETMASK;
9533                 break;
9534             default:
9535                 return -TARGET_EINVAL;
9536             }
9537             mask = arg2;
9538             target_to_host_old_sigset(&set, &mask);
9539 
9540             ret = do_sigprocmask(how, &set, &oldset);
9541             if (!is_error(ret)) {
9542                 host_to_target_old_sigset(&mask, &oldset);
9543                 ret = mask;
9544                 cpu_env->ir[IR_V0] = 0; /* force no error */
9545             }
9546 #else
9547             sigset_t set, oldset, *set_ptr;
9548             int how;
9549 
9550             if (arg2) {
9551                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9552                 if (!p) {
9553                     return -TARGET_EFAULT;
9554                 }
9555                 target_to_host_old_sigset(&set, p);
9556                 unlock_user(p, arg2, 0);
9557                 set_ptr = &set;
9558                 switch (arg1) {
9559                 case TARGET_SIG_BLOCK:
9560                     how = SIG_BLOCK;
9561                     break;
9562                 case TARGET_SIG_UNBLOCK:
9563                     how = SIG_UNBLOCK;
9564                     break;
9565                 case TARGET_SIG_SETMASK:
9566                     how = SIG_SETMASK;
9567                     break;
9568                 default:
9569                     return -TARGET_EINVAL;
9570                 }
9571             } else {
9572                 how = 0;
9573                 set_ptr = NULL;
9574             }
9575             ret = do_sigprocmask(how, set_ptr, &oldset);
9576             if (!is_error(ret) && arg3) {
9577                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9578                     return -TARGET_EFAULT;
9579                 host_to_target_old_sigset(p, &oldset);
9580                 unlock_user(p, arg3, sizeof(target_sigset_t));
9581             }
9582 #endif
9583         }
9584         return ret;
9585 #endif
9586     case TARGET_NR_rt_sigprocmask:
9587         {
9588             int how = arg1;
9589             sigset_t set, oldset, *set_ptr;
9590 
9591             if (arg4 != sizeof(target_sigset_t)) {
9592                 return -TARGET_EINVAL;
9593             }
9594 
9595             if (arg2) {
9596                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9597                 if (!p) {
9598                     return -TARGET_EFAULT;
9599                 }
9600                 target_to_host_sigset(&set, p);
9601                 unlock_user(p, arg2, 0);
9602                 set_ptr = &set;
9603                 switch(how) {
9604                 case TARGET_SIG_BLOCK:
9605                     how = SIG_BLOCK;
9606                     break;
9607                 case TARGET_SIG_UNBLOCK:
9608                     how = SIG_UNBLOCK;
9609                     break;
9610                 case TARGET_SIG_SETMASK:
9611                     how = SIG_SETMASK;
9612                     break;
9613                 default:
9614                     return -TARGET_EINVAL;
9615                 }
9616             } else {
9617                 how = 0;
9618                 set_ptr = NULL;
9619             }
9620             ret = do_sigprocmask(how, set_ptr, &oldset);
9621             if (!is_error(ret) && arg3) {
9622                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9623                     return -TARGET_EFAULT;
9624                 host_to_target_sigset(p, &oldset);
9625                 unlock_user(p, arg3, sizeof(target_sigset_t));
9626             }
9627         }
9628         return ret;
9629 #ifdef TARGET_NR_sigpending
9630     case TARGET_NR_sigpending:
9631         {
9632             sigset_t set;
9633             ret = get_errno(sigpending(&set));
9634             if (!is_error(ret)) {
9635                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9636                     return -TARGET_EFAULT;
9637                 host_to_target_old_sigset(p, &set);
9638                 unlock_user(p, arg1, sizeof(target_sigset_t));
9639             }
9640         }
9641         return ret;
9642 #endif
9643     case TARGET_NR_rt_sigpending:
9644         {
9645             sigset_t set;
9646 
9647             /* Yes, this check is >, not != like most. We follow the kernel's
9648              * logic and it does it like this because it implements
9649              * NR_sigpending through the same code path, and in that case
9650              * the old_sigset_t is smaller in size.
9651              */
9652             if (arg2 > sizeof(target_sigset_t)) {
9653                 return -TARGET_EINVAL;
9654             }
9655 
9656             ret = get_errno(sigpending(&set));
9657             if (!is_error(ret)) {
9658                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9659                     return -TARGET_EFAULT;
9660                 host_to_target_sigset(p, &set);
9661                 unlock_user(p, arg1, sizeof(target_sigset_t));
9662             }
9663         }
9664         return ret;
9665 #ifdef TARGET_NR_sigsuspend
9666     case TARGET_NR_sigsuspend:
9667         {
9668             sigset_t *set;
9669 
9670 #if defined(TARGET_ALPHA)
9671             TaskState *ts = cpu->opaque;
9672             /* target_to_host_old_sigset will bswap back */
9673             abi_ulong mask = tswapal(arg1);
9674             set = &ts->sigsuspend_mask;
9675             target_to_host_old_sigset(set, &mask);
9676 #else
9677             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9678             if (ret != 0) {
9679                 return ret;
9680             }
9681 #endif
9682             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9683             finish_sigsuspend_mask(ret);
9684         }
9685         return ret;
9686 #endif
9687     case TARGET_NR_rt_sigsuspend:
9688         {
9689             sigset_t *set;
9690 
9691             ret = process_sigsuspend_mask(&set, arg1, arg2);
9692             if (ret != 0) {
9693                 return ret;
9694             }
9695             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9696             finish_sigsuspend_mask(ret);
9697         }
9698         return ret;
9699 #ifdef TARGET_NR_rt_sigtimedwait
9700     case TARGET_NR_rt_sigtimedwait:
9701         {
9702             sigset_t set;
9703             struct timespec uts, *puts;
9704             siginfo_t uinfo;
9705 
9706             if (arg4 != sizeof(target_sigset_t)) {
9707                 return -TARGET_EINVAL;
9708             }
9709 
9710             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9711                 return -TARGET_EFAULT;
9712             target_to_host_sigset(&set, p);
9713             unlock_user(p, arg1, 0);
9714             if (arg3) {
9715                 puts = &uts;
9716                 if (target_to_host_timespec(puts, arg3)) {
9717                     return -TARGET_EFAULT;
9718                 }
9719             } else {
9720                 puts = NULL;
9721             }
9722             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9723                                                  SIGSET_T_SIZE));
9724             if (!is_error(ret)) {
9725                 if (arg2) {
9726                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9727                                   0);
9728                     if (!p) {
9729                         return -TARGET_EFAULT;
9730                     }
9731                     host_to_target_siginfo(p, &uinfo);
9732                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9733                 }
9734                 ret = host_to_target_signal(ret);
9735             }
9736         }
9737         return ret;
9738 #endif
9739 #ifdef TARGET_NR_rt_sigtimedwait_time64
9740     case TARGET_NR_rt_sigtimedwait_time64:
9741         {
9742             sigset_t set;
9743             struct timespec uts, *puts;
9744             siginfo_t uinfo;
9745 
9746             if (arg4 != sizeof(target_sigset_t)) {
9747                 return -TARGET_EINVAL;
9748             }
9749 
9750             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9751             if (!p) {
9752                 return -TARGET_EFAULT;
9753             }
9754             target_to_host_sigset(&set, p);
9755             unlock_user(p, arg1, 0);
9756             if (arg3) {
9757                 puts = &uts;
9758                 if (target_to_host_timespec64(puts, arg3)) {
9759                     return -TARGET_EFAULT;
9760                 }
9761             } else {
9762                 puts = NULL;
9763             }
9764             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9765                                                  SIGSET_T_SIZE));
9766             if (!is_error(ret)) {
9767                 if (arg2) {
9768                     p = lock_user(VERIFY_WRITE, arg2,
9769                                   sizeof(target_siginfo_t), 0);
9770                     if (!p) {
9771                         return -TARGET_EFAULT;
9772                     }
9773                     host_to_target_siginfo(p, &uinfo);
9774                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9775                 }
9776                 ret = host_to_target_signal(ret);
9777             }
9778         }
9779         return ret;
9780 #endif
9781     case TARGET_NR_rt_sigqueueinfo:
9782         {
9783             siginfo_t uinfo;
9784 
9785             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9786             if (!p) {
9787                 return -TARGET_EFAULT;
9788             }
9789             target_to_host_siginfo(&uinfo, p);
9790             unlock_user(p, arg3, 0);
9791             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9792         }
9793         return ret;
9794     case TARGET_NR_rt_tgsigqueueinfo:
9795         {
9796             siginfo_t uinfo;
9797 
9798             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9799             if (!p) {
9800                 return -TARGET_EFAULT;
9801             }
9802             target_to_host_siginfo(&uinfo, p);
9803             unlock_user(p, arg4, 0);
9804             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9805         }
9806         return ret;
9807 #ifdef TARGET_NR_sigreturn
9808     case TARGET_NR_sigreturn:
9809         if (block_signals()) {
9810             return -QEMU_ERESTARTSYS;
9811         }
9812         return do_sigreturn(cpu_env);
9813 #endif
9814     case TARGET_NR_rt_sigreturn:
9815         if (block_signals()) {
9816             return -QEMU_ERESTARTSYS;
9817         }
9818         return do_rt_sigreturn(cpu_env);
9819     case TARGET_NR_sethostname:
9820         if (!(p = lock_user_string(arg1)))
9821             return -TARGET_EFAULT;
9822         ret = get_errno(sethostname(p, arg2));
9823         unlock_user(p, arg1, 0);
9824         return ret;
9825 #ifdef TARGET_NR_setrlimit
9826     case TARGET_NR_setrlimit:
9827         {
9828             int resource = target_to_host_resource(arg1);
9829             struct target_rlimit *target_rlim;
9830             struct rlimit rlim;
9831             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9832                 return -TARGET_EFAULT;
9833             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9834             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9835             unlock_user_struct(target_rlim, arg2, 0);
9836             /*
9837              * If we just passed through resource limit settings for memory then
9838              * they would also apply to QEMU's own allocations, and QEMU will
9839              * crash or hang or die if its allocations fail. Ideally we would
9840              * track the guest allocations in QEMU and apply the limits ourselves.
9841              * For now, just tell the guest the call succeeded but don't actually
9842              * limit anything.
9843              */
9844             if (resource != RLIMIT_AS &&
9845                 resource != RLIMIT_DATA &&
9846                 resource != RLIMIT_STACK) {
9847                 return get_errno(setrlimit(resource, &rlim));
9848             } else {
9849                 return 0;
9850             }
9851         }
9852 #endif
9853 #ifdef TARGET_NR_getrlimit
9854     case TARGET_NR_getrlimit:
9855         {
9856             int resource = target_to_host_resource(arg1);
9857             struct target_rlimit *target_rlim;
9858             struct rlimit rlim;
9859 
9860             ret = get_errno(getrlimit(resource, &rlim));
9861             if (!is_error(ret)) {
9862                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9863                     return -TARGET_EFAULT;
9864                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9865                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9866                 unlock_user_struct(target_rlim, arg2, 1);
9867             }
9868         }
9869         return ret;
9870 #endif
9871     case TARGET_NR_getrusage:
9872         {
9873             struct rusage rusage;
9874             ret = get_errno(getrusage(arg1, &rusage));
9875             if (!is_error(ret)) {
9876                 ret = host_to_target_rusage(arg2, &rusage);
9877             }
9878         }
9879         return ret;
9880 #if defined(TARGET_NR_gettimeofday)
9881     case TARGET_NR_gettimeofday:
9882         {
9883             struct timeval tv;
9884             struct timezone tz;
9885 
9886             ret = get_errno(gettimeofday(&tv, &tz));
9887             if (!is_error(ret)) {
9888                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9889                     return -TARGET_EFAULT;
9890                 }
9891                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9892                     return -TARGET_EFAULT;
9893                 }
9894             }
9895         }
9896         return ret;
9897 #endif
9898 #if defined(TARGET_NR_settimeofday)
9899     case TARGET_NR_settimeofday:
9900         {
9901             struct timeval tv, *ptv = NULL;
9902             struct timezone tz, *ptz = NULL;
9903 
9904             if (arg1) {
9905                 if (copy_from_user_timeval(&tv, arg1)) {
9906                     return -TARGET_EFAULT;
9907                 }
9908                 ptv = &tv;
9909             }
9910 
9911             if (arg2) {
9912                 if (copy_from_user_timezone(&tz, arg2)) {
9913                     return -TARGET_EFAULT;
9914                 }
9915                 ptz = &tz;
9916             }
9917 
9918             return get_errno(settimeofday(ptv, ptz));
9919         }
9920 #endif
9921 #if defined(TARGET_NR_select)
9922     case TARGET_NR_select:
9923 #if defined(TARGET_WANT_NI_OLD_SELECT)
9924         /* some architectures used to have old_select here
9925          * but now ENOSYS it.
9926          */
9927         ret = -TARGET_ENOSYS;
9928 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9929         ret = do_old_select(arg1);
9930 #else
9931         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9932 #endif
9933         return ret;
9934 #endif
9935 #ifdef TARGET_NR_pselect6
9936     case TARGET_NR_pselect6:
9937         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9938 #endif
9939 #ifdef TARGET_NR_pselect6_time64
9940     case TARGET_NR_pselect6_time64:
9941         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9942 #endif
9943 #ifdef TARGET_NR_symlink
9944     case TARGET_NR_symlink:
9945         {
9946             void *p2;
9947             p = lock_user_string(arg1);
9948             p2 = lock_user_string(arg2);
9949             if (!p || !p2)
9950                 ret = -TARGET_EFAULT;
9951             else
9952                 ret = get_errno(symlink(p, p2));
9953             unlock_user(p2, arg2, 0);
9954             unlock_user(p, arg1, 0);
9955         }
9956         return ret;
9957 #endif
9958 #if defined(TARGET_NR_symlinkat)
9959     case TARGET_NR_symlinkat:
9960         {
9961             void *p2;
9962             p  = lock_user_string(arg1);
9963             p2 = lock_user_string(arg3);
9964             if (!p || !p2)
9965                 ret = -TARGET_EFAULT;
9966             else
9967                 ret = get_errno(symlinkat(p, arg2, p2));
9968             unlock_user(p2, arg3, 0);
9969             unlock_user(p, arg1, 0);
9970         }
9971         return ret;
9972 #endif
9973 #ifdef TARGET_NR_readlink
9974     case TARGET_NR_readlink:
9975         {
9976             void *p2;
9977             p = lock_user_string(arg1);
9978             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9979             if (!p || !p2) {
9980                 ret = -TARGET_EFAULT;
9981             } else if (!arg3) {
9982                 /* Short circuit this for the magic exe check. */
9983                 ret = -TARGET_EINVAL;
9984             } else if (is_proc_myself((const char *)p, "exe")) {
9985                 char real[PATH_MAX], *temp;
9986                 temp = realpath(exec_path, real);
9987                 /* Return value is # of bytes that we wrote to the buffer. */
9988                 if (temp == NULL) {
9989                     ret = get_errno(-1);
9990                 } else {
9991                     /* Don't worry about sign mismatch as earlier mapping
9992                      * logic would have thrown a bad address error. */
9993                     ret = MIN(strlen(real), arg3);
9994                     /* We cannot NUL terminate the string. */
9995                     memcpy(p2, real, ret);
9996                 }
9997             } else {
9998                 ret = get_errno(readlink(path(p), p2, arg3));
9999             }
10000             unlock_user(p2, arg2, ret);
10001             unlock_user(p, arg1, 0);
10002         }
10003         return ret;
10004 #endif
10005 #if defined(TARGET_NR_readlinkat)
10006     case TARGET_NR_readlinkat:
10007         {
10008             void *p2;
10009             p  = lock_user_string(arg2);
10010             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10011             if (!p || !p2) {
10012                 ret = -TARGET_EFAULT;
10013             } else if (!arg4) {
10014                 /* Short circuit this for the magic exe check. */
10015                 ret = -TARGET_EINVAL;
10016             } else if (is_proc_myself((const char *)p, "exe")) {
10017                 char real[PATH_MAX], *temp;
10018                 temp = realpath(exec_path, real);
10019                 /* Return value is # of bytes that we wrote to the buffer. */
10020                 if (temp == NULL) {
10021                     ret = get_errno(-1);
10022                 } else {
10023                     /* Don't worry about sign mismatch as earlier mapping
10024                      * logic would have thrown a bad address error. */
10025                     ret = MIN(strlen(real), arg4);
10026                     /* We cannot NUL terminate the string. */
10027                     memcpy(p2, real, ret);
10028                 }
10029             } else {
10030                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10031             }
10032             unlock_user(p2, arg3, ret);
10033             unlock_user(p, arg2, 0);
10034         }
10035         return ret;
10036 #endif
10037 #ifdef TARGET_NR_swapon
10038     case TARGET_NR_swapon:
10039         if (!(p = lock_user_string(arg1)))
10040             return -TARGET_EFAULT;
10041         ret = get_errno(swapon(p, arg2));
10042         unlock_user(p, arg1, 0);
10043         return ret;
10044 #endif
10045     case TARGET_NR_reboot:
10046         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10047            /* arg4 must be ignored in all other cases */
10048            p = lock_user_string(arg4);
10049            if (!p) {
10050                return -TARGET_EFAULT;
10051            }
10052            ret = get_errno(reboot(arg1, arg2, arg3, p));
10053            unlock_user(p, arg4, 0);
10054         } else {
10055            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10056         }
10057         return ret;
10058 #ifdef TARGET_NR_mmap
10059     case TARGET_NR_mmap:
10060 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10061     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10062     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10063     || defined(TARGET_S390X)
10064         {
10065             abi_ulong *v;
10066             abi_ulong v1, v2, v3, v4, v5, v6;
10067             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10068                 return -TARGET_EFAULT;
10069             v1 = tswapal(v[0]);
10070             v2 = tswapal(v[1]);
10071             v3 = tswapal(v[2]);
10072             v4 = tswapal(v[3]);
10073             v5 = tswapal(v[4]);
10074             v6 = tswapal(v[5]);
10075             unlock_user(v, arg1, 0);
10076             ret = get_errno(target_mmap(v1, v2, v3,
10077                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10078                                         v5, v6));
10079         }
10080 #else
10081         /* mmap pointers are always untagged */
10082         ret = get_errno(target_mmap(arg1, arg2, arg3,
10083                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10084                                     arg5,
10085                                     arg6));
10086 #endif
10087         return ret;
10088 #endif
10089 #ifdef TARGET_NR_mmap2
10090     case TARGET_NR_mmap2:
10091 #ifndef MMAP_SHIFT
10092 #define MMAP_SHIFT 12
10093 #endif
10094         ret = target_mmap(arg1, arg2, arg3,
10095                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10096                           arg5, arg6 << MMAP_SHIFT);
10097         return get_errno(ret);
10098 #endif
10099     case TARGET_NR_munmap:
10100         arg1 = cpu_untagged_addr(cpu, arg1);
10101         return get_errno(target_munmap(arg1, arg2));
10102     case TARGET_NR_mprotect:
10103         arg1 = cpu_untagged_addr(cpu, arg1);
10104         {
10105             TaskState *ts = cpu->opaque;
10106             /* Special hack to detect libc making the stack executable.  */
10107             if ((arg3 & PROT_GROWSDOWN)
10108                 && arg1 >= ts->info->stack_limit
10109                 && arg1 <= ts->info->start_stack) {
10110                 arg3 &= ~PROT_GROWSDOWN;
10111                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10112                 arg1 = ts->info->stack_limit;
10113             }
10114         }
10115         return get_errno(target_mprotect(arg1, arg2, arg3));
10116 #ifdef TARGET_NR_mremap
10117     case TARGET_NR_mremap:
10118         arg1 = cpu_untagged_addr(cpu, arg1);
10119         /* mremap new_addr (arg5) is always untagged */
10120         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10121 #endif
10122         /* ??? msync/mlock/munlock are broken for softmmu.  */
10123 #ifdef TARGET_NR_msync
10124     case TARGET_NR_msync:
10125         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10126 #endif
10127 #ifdef TARGET_NR_mlock
10128     case TARGET_NR_mlock:
10129         return get_errno(mlock(g2h(cpu, arg1), arg2));
10130 #endif
10131 #ifdef TARGET_NR_munlock
10132     case TARGET_NR_munlock:
10133         return get_errno(munlock(g2h(cpu, arg1), arg2));
10134 #endif
10135 #ifdef TARGET_NR_mlockall
10136     case TARGET_NR_mlockall:
10137         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10138 #endif
10139 #ifdef TARGET_NR_munlockall
10140     case TARGET_NR_munlockall:
10141         return get_errno(munlockall());
10142 #endif
10143 #ifdef TARGET_NR_truncate
10144     case TARGET_NR_truncate:
10145         if (!(p = lock_user_string(arg1)))
10146             return -TARGET_EFAULT;
10147         ret = get_errno(truncate(p, arg2));
10148         unlock_user(p, arg1, 0);
10149         return ret;
10150 #endif
10151 #ifdef TARGET_NR_ftruncate
10152     case TARGET_NR_ftruncate:
10153         return get_errno(ftruncate(arg1, arg2));
10154 #endif
10155     case TARGET_NR_fchmod:
10156         return get_errno(fchmod(arg1, arg2));
10157 #if defined(TARGET_NR_fchmodat)
10158     case TARGET_NR_fchmodat:
10159         if (!(p = lock_user_string(arg2)))
10160             return -TARGET_EFAULT;
10161         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10162         unlock_user(p, arg2, 0);
10163         return ret;
10164 #endif
10165     case TARGET_NR_getpriority:
10166         /* Note that negative values are valid for getpriority, so we must
10167            differentiate based on errno settings.  */
10168         errno = 0;
10169         ret = getpriority(arg1, arg2);
10170         if (ret == -1 && errno != 0) {
10171             return -host_to_target_errno(errno);
10172         }
10173 #ifdef TARGET_ALPHA
10174         /* Return value is the unbiased priority.  Signal no error.  */
10175         cpu_env->ir[IR_V0] = 0;
10176 #else
10177         /* Return value is a biased priority to avoid negative numbers.  */
10178         ret = 20 - ret;
10179 #endif
10180         return ret;
10181     case TARGET_NR_setpriority:
10182         return get_errno(setpriority(arg1, arg2, arg3));
10183 #ifdef TARGET_NR_statfs
10184     case TARGET_NR_statfs:
10185         if (!(p = lock_user_string(arg1))) {
10186             return -TARGET_EFAULT;
10187         }
10188         ret = get_errno(statfs(path(p), &stfs));
10189         unlock_user(p, arg1, 0);
10190     convert_statfs:
10191         if (!is_error(ret)) {
10192             struct target_statfs *target_stfs;
10193 
10194             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10195                 return -TARGET_EFAULT;
10196             __put_user(stfs.f_type, &target_stfs->f_type);
10197             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10198             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10199             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10200             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10201             __put_user(stfs.f_files, &target_stfs->f_files);
10202             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10203             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10204             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10205             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10206             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10207 #ifdef _STATFS_F_FLAGS
10208             __put_user(stfs.f_flags, &target_stfs->f_flags);
10209 #else
10210             __put_user(0, &target_stfs->f_flags);
10211 #endif
10212             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10213             unlock_user_struct(target_stfs, arg2, 1);
10214         }
10215         return ret;
10216 #endif
10217 #ifdef TARGET_NR_fstatfs
10218     case TARGET_NR_fstatfs:
10219         ret = get_errno(fstatfs(arg1, &stfs));
10220         goto convert_statfs;
10221 #endif
10222 #ifdef TARGET_NR_statfs64
10223     case TARGET_NR_statfs64:
10224         if (!(p = lock_user_string(arg1))) {
10225             return -TARGET_EFAULT;
10226         }
10227         ret = get_errno(statfs(path(p), &stfs));
10228         unlock_user(p, arg1, 0);
10229     convert_statfs64:
10230         if (!is_error(ret)) {
10231             struct target_statfs64 *target_stfs;
10232 
10233             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10234                 return -TARGET_EFAULT;
10235             __put_user(stfs.f_type, &target_stfs->f_type);
10236             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10237             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10238             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10239             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10240             __put_user(stfs.f_files, &target_stfs->f_files);
10241             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10242             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10243             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10244             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10245             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10246 #ifdef _STATFS_F_FLAGS
10247             __put_user(stfs.f_flags, &target_stfs->f_flags);
10248 #else
10249             __put_user(0, &target_stfs->f_flags);
10250 #endif
10251             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10252             unlock_user_struct(target_stfs, arg3, 1);
10253         }
10254         return ret;
10255     case TARGET_NR_fstatfs64:
10256         ret = get_errno(fstatfs(arg1, &stfs));
10257         goto convert_statfs64;
10258 #endif
10259 #ifdef TARGET_NR_socketcall
10260     case TARGET_NR_socketcall:
10261         return do_socketcall(arg1, arg2);
10262 #endif
10263 #ifdef TARGET_NR_accept
10264     case TARGET_NR_accept:
10265         return do_accept4(arg1, arg2, arg3, 0);
10266 #endif
10267 #ifdef TARGET_NR_accept4
10268     case TARGET_NR_accept4:
10269         return do_accept4(arg1, arg2, arg3, arg4);
10270 #endif
10271 #ifdef TARGET_NR_bind
10272     case TARGET_NR_bind:
10273         return do_bind(arg1, arg2, arg3);
10274 #endif
10275 #ifdef TARGET_NR_connect
10276     case TARGET_NR_connect:
10277         return do_connect(arg1, arg2, arg3);
10278 #endif
10279 #ifdef TARGET_NR_getpeername
10280     case TARGET_NR_getpeername:
10281         return do_getpeername(arg1, arg2, arg3);
10282 #endif
10283 #ifdef TARGET_NR_getsockname
10284     case TARGET_NR_getsockname:
10285         return do_getsockname(arg1, arg2, arg3);
10286 #endif
10287 #ifdef TARGET_NR_getsockopt
10288     case TARGET_NR_getsockopt:
10289         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10290 #endif
10291 #ifdef TARGET_NR_listen
10292     case TARGET_NR_listen:
10293         return get_errno(listen(arg1, arg2));
10294 #endif
10295 #ifdef TARGET_NR_recv
10296     case TARGET_NR_recv:
10297         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10298 #endif
10299 #ifdef TARGET_NR_recvfrom
10300     case TARGET_NR_recvfrom:
10301         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10302 #endif
10303 #ifdef TARGET_NR_recvmsg
10304     case TARGET_NR_recvmsg:
10305         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10306 #endif
10307 #ifdef TARGET_NR_send
10308     case TARGET_NR_send:
10309         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10310 #endif
10311 #ifdef TARGET_NR_sendmsg
10312     case TARGET_NR_sendmsg:
10313         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10314 #endif
10315 #ifdef TARGET_NR_sendmmsg
10316     case TARGET_NR_sendmmsg:
10317         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10318 #endif
10319 #ifdef TARGET_NR_recvmmsg
10320     case TARGET_NR_recvmmsg:
10321         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10322 #endif
10323 #ifdef TARGET_NR_sendto
10324     case TARGET_NR_sendto:
10325         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10326 #endif
10327 #ifdef TARGET_NR_shutdown
10328     case TARGET_NR_shutdown:
10329         return get_errno(shutdown(arg1, arg2));
10330 #endif
10331 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10332     case TARGET_NR_getrandom:
10333         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10334         if (!p) {
10335             return -TARGET_EFAULT;
10336         }
10337         ret = get_errno(getrandom(p, arg2, arg3));
10338         unlock_user(p, arg1, ret);
10339         return ret;
10340 #endif
10341 #ifdef TARGET_NR_socket
10342     case TARGET_NR_socket:
10343         return do_socket(arg1, arg2, arg3);
10344 #endif
10345 #ifdef TARGET_NR_socketpair
10346     case TARGET_NR_socketpair:
10347         return do_socketpair(arg1, arg2, arg3, arg4);
10348 #endif
10349 #ifdef TARGET_NR_setsockopt
10350     case TARGET_NR_setsockopt:
10351         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10352 #endif
10353 #if defined(TARGET_NR_syslog)
10354     case TARGET_NR_syslog:
10355         {
10356             int len = arg2;
10357 
10358             switch (arg1) {
10359             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10360             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10361             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10362             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10363             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10364             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10365             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10366             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10367                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10368             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10369             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10370             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10371                 {
10372                     if (len < 0) {
10373                         return -TARGET_EINVAL;
10374                     }
10375                     if (len == 0) {
10376                         return 0;
10377                     }
10378                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10379                     if (!p) {
10380                         return -TARGET_EFAULT;
10381                     }
10382                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10383                     unlock_user(p, arg2, arg3);
10384                 }
10385                 return ret;
10386             default:
10387                 return -TARGET_EINVAL;
10388             }
10389         }
10390         break;
10391 #endif
10392     case TARGET_NR_setitimer:
10393         {
10394             struct itimerval value, ovalue, *pvalue;
10395 
10396             if (arg2) {
10397                 pvalue = &value;
10398                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10399                     || copy_from_user_timeval(&pvalue->it_value,
10400                                               arg2 + sizeof(struct target_timeval)))
10401                     return -TARGET_EFAULT;
10402             } else {
10403                 pvalue = NULL;
10404             }
10405             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10406             if (!is_error(ret) && arg3) {
10407                 if (copy_to_user_timeval(arg3,
10408                                          &ovalue.it_interval)
10409                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10410                                             &ovalue.it_value))
10411                     return -TARGET_EFAULT;
10412             }
10413         }
10414         return ret;
10415     case TARGET_NR_getitimer:
10416         {
10417             struct itimerval value;
10418 
10419             ret = get_errno(getitimer(arg1, &value));
10420             if (!is_error(ret) && arg2) {
10421                 if (copy_to_user_timeval(arg2,
10422                                          &value.it_interval)
10423                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10424                                             &value.it_value))
10425                     return -TARGET_EFAULT;
10426             }
10427         }
10428         return ret;
10429 #ifdef TARGET_NR_stat
10430     case TARGET_NR_stat:
10431         if (!(p = lock_user_string(arg1))) {
10432             return -TARGET_EFAULT;
10433         }
10434         ret = get_errno(stat(path(p), &st));
10435         unlock_user(p, arg1, 0);
10436         goto do_stat;
10437 #endif
10438 #ifdef TARGET_NR_lstat
10439     case TARGET_NR_lstat:
10440         if (!(p = lock_user_string(arg1))) {
10441             return -TARGET_EFAULT;
10442         }
10443         ret = get_errno(lstat(path(p), &st));
10444         unlock_user(p, arg1, 0);
10445         goto do_stat;
10446 #endif
10447 #ifdef TARGET_NR_fstat
10448     case TARGET_NR_fstat:
10449         {
10450             ret = get_errno(fstat(arg1, &st));
10451 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10452         do_stat:
10453 #endif
10454             if (!is_error(ret)) {
10455                 struct target_stat *target_st;
10456 
10457                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10458                     return -TARGET_EFAULT;
10459                 memset(target_st, 0, sizeof(*target_st));
10460                 __put_user(st.st_dev, &target_st->st_dev);
10461                 __put_user(st.st_ino, &target_st->st_ino);
10462                 __put_user(st.st_mode, &target_st->st_mode);
10463                 __put_user(st.st_uid, &target_st->st_uid);
10464                 __put_user(st.st_gid, &target_st->st_gid);
10465                 __put_user(st.st_nlink, &target_st->st_nlink);
10466                 __put_user(st.st_rdev, &target_st->st_rdev);
10467                 __put_user(st.st_size, &target_st->st_size);
10468                 __put_user(st.st_blksize, &target_st->st_blksize);
10469                 __put_user(st.st_blocks, &target_st->st_blocks);
10470                 __put_user(st.st_atime, &target_st->target_st_atime);
10471                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10472                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10473 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10474                 __put_user(st.st_atim.tv_nsec,
10475                            &target_st->target_st_atime_nsec);
10476                 __put_user(st.st_mtim.tv_nsec,
10477                            &target_st->target_st_mtime_nsec);
10478                 __put_user(st.st_ctim.tv_nsec,
10479                            &target_st->target_st_ctime_nsec);
10480 #endif
10481                 unlock_user_struct(target_st, arg2, 1);
10482             }
10483         }
10484         return ret;
10485 #endif
10486     case TARGET_NR_vhangup:
10487         return get_errno(vhangup());
10488 #ifdef TARGET_NR_syscall
10489     case TARGET_NR_syscall:
10490         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10491                           arg6, arg7, arg8, 0);
10492 #endif
10493 #if defined(TARGET_NR_wait4)
10494     case TARGET_NR_wait4:
10495         {
10496             int status;
10497             abi_long status_ptr = arg2;
10498             struct rusage rusage, *rusage_ptr;
10499             abi_ulong target_rusage = arg4;
10500             abi_long rusage_err;
10501             if (target_rusage)
10502                 rusage_ptr = &rusage;
10503             else
10504                 rusage_ptr = NULL;
10505             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10506             if (!is_error(ret)) {
10507                 if (status_ptr && ret) {
10508                     status = host_to_target_waitstatus(status);
10509                     if (put_user_s32(status, status_ptr))
10510                         return -TARGET_EFAULT;
10511                 }
10512                 if (target_rusage) {
10513                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10514                     if (rusage_err) {
10515                         ret = rusage_err;
10516                     }
10517                 }
10518             }
10519         }
10520         return ret;
10521 #endif
10522 #ifdef TARGET_NR_swapoff
10523     case TARGET_NR_swapoff:
10524         if (!(p = lock_user_string(arg1)))
10525             return -TARGET_EFAULT;
10526         ret = get_errno(swapoff(p));
10527         unlock_user(p, arg1, 0);
10528         return ret;
10529 #endif
10530     case TARGET_NR_sysinfo:
10531         {
10532             struct target_sysinfo *target_value;
10533             struct sysinfo value;
10534             ret = get_errno(sysinfo(&value));
10535             if (!is_error(ret) && arg1)
10536             {
10537                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10538                     return -TARGET_EFAULT;
10539                 __put_user(value.uptime, &target_value->uptime);
10540                 __put_user(value.loads[0], &target_value->loads[0]);
10541                 __put_user(value.loads[1], &target_value->loads[1]);
10542                 __put_user(value.loads[2], &target_value->loads[2]);
10543                 __put_user(value.totalram, &target_value->totalram);
10544                 __put_user(value.freeram, &target_value->freeram);
10545                 __put_user(value.sharedram, &target_value->sharedram);
10546                 __put_user(value.bufferram, &target_value->bufferram);
10547                 __put_user(value.totalswap, &target_value->totalswap);
10548                 __put_user(value.freeswap, &target_value->freeswap);
10549                 __put_user(value.procs, &target_value->procs);
10550                 __put_user(value.totalhigh, &target_value->totalhigh);
10551                 __put_user(value.freehigh, &target_value->freehigh);
10552                 __put_user(value.mem_unit, &target_value->mem_unit);
10553                 unlock_user_struct(target_value, arg1, 1);
10554             }
10555         }
10556         return ret;
10557 #ifdef TARGET_NR_ipc
10558     case TARGET_NR_ipc:
10559         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10560 #endif
10561 #ifdef TARGET_NR_semget
10562     case TARGET_NR_semget:
10563         return get_errno(semget(arg1, arg2, arg3));
10564 #endif
10565 #ifdef TARGET_NR_semop
10566     case TARGET_NR_semop:
10567         return do_semtimedop(arg1, arg2, arg3, 0, false);
10568 #endif
10569 #ifdef TARGET_NR_semtimedop
10570     case TARGET_NR_semtimedop:
10571         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10572 #endif
10573 #ifdef TARGET_NR_semtimedop_time64
10574     case TARGET_NR_semtimedop_time64:
10575         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10576 #endif
10577 #ifdef TARGET_NR_semctl
10578     case TARGET_NR_semctl:
10579         return do_semctl(arg1, arg2, arg3, arg4);
10580 #endif
10581 #ifdef TARGET_NR_msgctl
10582     case TARGET_NR_msgctl:
10583         return do_msgctl(arg1, arg2, arg3);
10584 #endif
10585 #ifdef TARGET_NR_msgget
10586     case TARGET_NR_msgget:
10587         return get_errno(msgget(arg1, arg2));
10588 #endif
10589 #ifdef TARGET_NR_msgrcv
10590     case TARGET_NR_msgrcv:
10591         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10592 #endif
10593 #ifdef TARGET_NR_msgsnd
10594     case TARGET_NR_msgsnd:
10595         return do_msgsnd(arg1, arg2, arg3, arg4);
10596 #endif
10597 #ifdef TARGET_NR_shmget
10598     case TARGET_NR_shmget:
10599         return get_errno(shmget(arg1, arg2, arg3));
10600 #endif
10601 #ifdef TARGET_NR_shmctl
10602     case TARGET_NR_shmctl:
10603         return do_shmctl(arg1, arg2, arg3);
10604 #endif
10605 #ifdef TARGET_NR_shmat
10606     case TARGET_NR_shmat:
10607         return do_shmat(cpu_env, arg1, arg2, arg3);
10608 #endif
10609 #ifdef TARGET_NR_shmdt
10610     case TARGET_NR_shmdt:
10611         return do_shmdt(arg1);
10612 #endif
10613     case TARGET_NR_fsync:
10614         return get_errno(fsync(arg1));
10615     case TARGET_NR_clone:
10616         /* Linux manages to have three different orderings for its
10617          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10618          * match the kernel's CONFIG_CLONE_* settings.
10619          * Microblaze is further special in that it uses a sixth
10620          * implicit argument to clone for the TLS pointer.
10621          */
10622 #if defined(TARGET_MICROBLAZE)
10623         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10624 #elif defined(TARGET_CLONE_BACKWARDS)
10625         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10626 #elif defined(TARGET_CLONE_BACKWARDS2)
10627         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10628 #else
10629         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10630 #endif
10631         return ret;
10632 #ifdef __NR_exit_group
10633         /* new thread calls */
10634     case TARGET_NR_exit_group:
10635         preexit_cleanup(cpu_env, arg1);
10636         return get_errno(exit_group(arg1));
10637 #endif
10638     case TARGET_NR_setdomainname:
10639         if (!(p = lock_user_string(arg1)))
10640             return -TARGET_EFAULT;
10641         ret = get_errno(setdomainname(p, arg2));
10642         unlock_user(p, arg1, 0);
10643         return ret;
10644     case TARGET_NR_uname:
10645         /* no need to transcode because we use the linux syscall */
10646         {
10647             struct new_utsname * buf;
10648 
10649             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10650                 return -TARGET_EFAULT;
10651             ret = get_errno(sys_uname(buf));
10652             if (!is_error(ret)) {
10653                 /* Overwrite the native machine name with whatever is being
10654                    emulated. */
10655                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10656                           sizeof(buf->machine));
10657                 /* Allow the user to override the reported release.  */
10658                 if (qemu_uname_release && *qemu_uname_release) {
10659                     g_strlcpy(buf->release, qemu_uname_release,
10660                               sizeof(buf->release));
10661                 }
10662             }
10663             unlock_user_struct(buf, arg1, 1);
10664         }
10665         return ret;
10666 #ifdef TARGET_I386
10667     case TARGET_NR_modify_ldt:
10668         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10669 #if !defined(TARGET_X86_64)
10670     case TARGET_NR_vm86:
10671         return do_vm86(cpu_env, arg1, arg2);
10672 #endif
10673 #endif
10674 #if defined(TARGET_NR_adjtimex)
10675     case TARGET_NR_adjtimex:
10676         {
10677             struct timex host_buf;
10678 
10679             if (target_to_host_timex(&host_buf, arg1) != 0) {
10680                 return -TARGET_EFAULT;
10681             }
10682             ret = get_errno(adjtimex(&host_buf));
10683             if (!is_error(ret)) {
10684                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10685                     return -TARGET_EFAULT;
10686                 }
10687             }
10688         }
10689         return ret;
10690 #endif
10691 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10692     case TARGET_NR_clock_adjtime:
10693         {
10694             struct timex htx, *phtx = &htx;
10695 
10696             if (target_to_host_timex(phtx, arg2) != 0) {
10697                 return -TARGET_EFAULT;
10698             }
10699             ret = get_errno(clock_adjtime(arg1, phtx));
10700             if (!is_error(ret) && phtx) {
10701                 if (host_to_target_timex(arg2, phtx) != 0) {
10702                     return -TARGET_EFAULT;
10703                 }
10704             }
10705         }
10706         return ret;
10707 #endif
10708 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10709     case TARGET_NR_clock_adjtime64:
10710         {
10711             struct timex htx;
10712 
10713             if (target_to_host_timex64(&htx, arg2) != 0) {
10714                 return -TARGET_EFAULT;
10715             }
10716             ret = get_errno(clock_adjtime(arg1, &htx));
10717             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10718                     return -TARGET_EFAULT;
10719             }
10720         }
10721         return ret;
10722 #endif
10723     case TARGET_NR_getpgid:
10724         return get_errno(getpgid(arg1));
10725     case TARGET_NR_fchdir:
10726         return get_errno(fchdir(arg1));
10727     case TARGET_NR_personality:
10728         return get_errno(personality(arg1));
10729 #ifdef TARGET_NR__llseek /* Not on alpha */
10730     case TARGET_NR__llseek:
10731         {
10732             int64_t res;
10733 #if !defined(__NR_llseek)
10734             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10735             if (res == -1) {
10736                 ret = get_errno(res);
10737             } else {
10738                 ret = 0;
10739             }
10740 #else
10741             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10742 #endif
10743             if ((ret == 0) && put_user_s64(res, arg4)) {
10744                 return -TARGET_EFAULT;
10745             }
10746         }
10747         return ret;
10748 #endif
10749 #ifdef TARGET_NR_getdents
10750     case TARGET_NR_getdents:
10751         return do_getdents(arg1, arg2, arg3);
10752 #endif /* TARGET_NR_getdents */
10753 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10754     case TARGET_NR_getdents64:
10755         return do_getdents64(arg1, arg2, arg3);
10756 #endif /* TARGET_NR_getdents64 */
10757 #if defined(TARGET_NR__newselect)
10758     case TARGET_NR__newselect:
10759         return do_select(arg1, arg2, arg3, arg4, arg5);
10760 #endif
10761 #ifdef TARGET_NR_poll
10762     case TARGET_NR_poll:
10763         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10764 #endif
10765 #ifdef TARGET_NR_ppoll
10766     case TARGET_NR_ppoll:
10767         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10768 #endif
10769 #ifdef TARGET_NR_ppoll_time64
10770     case TARGET_NR_ppoll_time64:
10771         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10772 #endif
10773     case TARGET_NR_flock:
10774         /* NOTE: the flock constant seems to be the same for every
10775            Linux platform */
10776         return get_errno(safe_flock(arg1, arg2));
10777     case TARGET_NR_readv:
10778         {
10779             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10780             if (vec != NULL) {
10781                 ret = get_errno(safe_readv(arg1, vec, arg3));
10782                 unlock_iovec(vec, arg2, arg3, 1);
10783             } else {
10784                 ret = -host_to_target_errno(errno);
10785             }
10786         }
10787         return ret;
10788     case TARGET_NR_writev:
10789         {
10790             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10791             if (vec != NULL) {
10792                 ret = get_errno(safe_writev(arg1, vec, arg3));
10793                 unlock_iovec(vec, arg2, arg3, 0);
10794             } else {
10795                 ret = -host_to_target_errno(errno);
10796             }
10797         }
10798         return ret;
10799 #if defined(TARGET_NR_preadv)
10800     case TARGET_NR_preadv:
10801         {
10802             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10803             if (vec != NULL) {
10804                 unsigned long low, high;
10805 
10806                 target_to_host_low_high(arg4, arg5, &low, &high);
10807                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10808                 unlock_iovec(vec, arg2, arg3, 1);
10809             } else {
10810                 ret = -host_to_target_errno(errno);
10811            }
10812         }
10813         return ret;
10814 #endif
10815 #if defined(TARGET_NR_pwritev)
10816     case TARGET_NR_pwritev:
10817         {
10818             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10819             if (vec != NULL) {
10820                 unsigned long low, high;
10821 
10822                 target_to_host_low_high(arg4, arg5, &low, &high);
10823                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10824                 unlock_iovec(vec, arg2, arg3, 0);
10825             } else {
10826                 ret = -host_to_target_errno(errno);
10827            }
10828         }
10829         return ret;
10830 #endif
10831     case TARGET_NR_getsid:
10832         return get_errno(getsid(arg1));
10833 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10834     case TARGET_NR_fdatasync:
10835         return get_errno(fdatasync(arg1));
10836 #endif
10837     case TARGET_NR_sched_getaffinity:
10838         {
10839             unsigned int mask_size;
10840             unsigned long *mask;
10841 
10842             /*
10843              * sched_getaffinity needs multiples of ulong, so need to take
10844              * care of mismatches between target ulong and host ulong sizes.
10845              */
10846             if (arg2 & (sizeof(abi_ulong) - 1)) {
10847                 return -TARGET_EINVAL;
10848             }
10849             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10850 
10851             mask = alloca(mask_size);
10852             memset(mask, 0, mask_size);
10853             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10854 
10855             if (!is_error(ret)) {
10856                 if (ret > arg2) {
10857                     /* More data returned than the caller's buffer will fit.
10858                      * This only happens if sizeof(abi_long) < sizeof(long)
10859                      * and the caller passed us a buffer holding an odd number
10860                      * of abi_longs. If the host kernel is actually using the
10861                      * extra 4 bytes then fail EINVAL; otherwise we can just
10862                      * ignore them and only copy the interesting part.
10863                      */
10864                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10865                     if (numcpus > arg2 * 8) {
10866                         return -TARGET_EINVAL;
10867                     }
10868                     ret = arg2;
10869                 }
10870 
10871                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10872                     return -TARGET_EFAULT;
10873                 }
10874             }
10875         }
10876         return ret;
10877     case TARGET_NR_sched_setaffinity:
10878         {
10879             unsigned int mask_size;
10880             unsigned long *mask;
10881 
10882             /*
10883              * sched_setaffinity needs multiples of ulong, so need to take
10884              * care of mismatches between target ulong and host ulong sizes.
10885              */
10886             if (arg2 & (sizeof(abi_ulong) - 1)) {
10887                 return -TARGET_EINVAL;
10888             }
10889             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10890             mask = alloca(mask_size);
10891 
10892             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10893             if (ret) {
10894                 return ret;
10895             }
10896 
10897             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10898         }
10899     case TARGET_NR_getcpu:
10900         {
10901             unsigned cpu, node;
10902             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10903                                        arg2 ? &node : NULL,
10904                                        NULL));
10905             if (is_error(ret)) {
10906                 return ret;
10907             }
10908             if (arg1 && put_user_u32(cpu, arg1)) {
10909                 return -TARGET_EFAULT;
10910             }
10911             if (arg2 && put_user_u32(node, arg2)) {
10912                 return -TARGET_EFAULT;
10913             }
10914         }
10915         return ret;
10916     case TARGET_NR_sched_setparam:
10917         {
10918             struct target_sched_param *target_schp;
10919             struct sched_param schp;
10920 
10921             if (arg2 == 0) {
10922                 return -TARGET_EINVAL;
10923             }
10924             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10925                 return -TARGET_EFAULT;
10926             }
10927             schp.sched_priority = tswap32(target_schp->sched_priority);
10928             unlock_user_struct(target_schp, arg2, 0);
10929             return get_errno(sys_sched_setparam(arg1, &schp));
10930         }
10931     case TARGET_NR_sched_getparam:
10932         {
10933             struct target_sched_param *target_schp;
10934             struct sched_param schp;
10935 
10936             if (arg2 == 0) {
10937                 return -TARGET_EINVAL;
10938             }
10939             ret = get_errno(sys_sched_getparam(arg1, &schp));
10940             if (!is_error(ret)) {
10941                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10942                     return -TARGET_EFAULT;
10943                 }
10944                 target_schp->sched_priority = tswap32(schp.sched_priority);
10945                 unlock_user_struct(target_schp, arg2, 1);
10946             }
10947         }
10948         return ret;
10949     case TARGET_NR_sched_setscheduler:
10950         {
10951             struct target_sched_param *target_schp;
10952             struct sched_param schp;
10953             if (arg3 == 0) {
10954                 return -TARGET_EINVAL;
10955             }
10956             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10957                 return -TARGET_EFAULT;
10958             }
10959             schp.sched_priority = tswap32(target_schp->sched_priority);
10960             unlock_user_struct(target_schp, arg3, 0);
10961             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10962         }
10963     case TARGET_NR_sched_getscheduler:
10964         return get_errno(sys_sched_getscheduler(arg1));
10965     case TARGET_NR_sched_getattr:
10966         {
10967             struct target_sched_attr *target_scha;
10968             struct sched_attr scha;
10969             if (arg2 == 0) {
10970                 return -TARGET_EINVAL;
10971             }
10972             if (arg3 > sizeof(scha)) {
10973                 arg3 = sizeof(scha);
10974             }
10975             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10976             if (!is_error(ret)) {
10977                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10978                 if (!target_scha) {
10979                     return -TARGET_EFAULT;
10980                 }
10981                 target_scha->size = tswap32(scha.size);
10982                 target_scha->sched_policy = tswap32(scha.sched_policy);
10983                 target_scha->sched_flags = tswap64(scha.sched_flags);
10984                 target_scha->sched_nice = tswap32(scha.sched_nice);
10985                 target_scha->sched_priority = tswap32(scha.sched_priority);
10986                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10987                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10988                 target_scha->sched_period = tswap64(scha.sched_period);
10989                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10990                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10991                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10992                 }
10993                 unlock_user(target_scha, arg2, arg3);
10994             }
10995             return ret;
10996         }
10997     case TARGET_NR_sched_setattr:
10998         {
10999             struct target_sched_attr *target_scha;
11000             struct sched_attr scha;
11001             uint32_t size;
11002             int zeroed;
11003             if (arg2 == 0) {
11004                 return -TARGET_EINVAL;
11005             }
11006             if (get_user_u32(size, arg2)) {
11007                 return -TARGET_EFAULT;
11008             }
11009             if (!size) {
11010                 size = offsetof(struct target_sched_attr, sched_util_min);
11011             }
11012             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11013                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11014                     return -TARGET_EFAULT;
11015                 }
11016                 return -TARGET_E2BIG;
11017             }
11018 
11019             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11020             if (zeroed < 0) {
11021                 return zeroed;
11022             } else if (zeroed == 0) {
11023                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11024                     return -TARGET_EFAULT;
11025                 }
11026                 return -TARGET_E2BIG;
11027             }
11028             if (size > sizeof(struct target_sched_attr)) {
11029                 size = sizeof(struct target_sched_attr);
11030             }
11031 
11032             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11033             if (!target_scha) {
11034                 return -TARGET_EFAULT;
11035             }
11036             scha.size = size;
11037             scha.sched_policy = tswap32(target_scha->sched_policy);
11038             scha.sched_flags = tswap64(target_scha->sched_flags);
11039             scha.sched_nice = tswap32(target_scha->sched_nice);
11040             scha.sched_priority = tswap32(target_scha->sched_priority);
11041             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11042             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11043             scha.sched_period = tswap64(target_scha->sched_period);
11044             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11045                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11046                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11047             }
11048             unlock_user(target_scha, arg2, 0);
11049             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11050         }
11051     case TARGET_NR_sched_yield:
11052         return get_errno(sched_yield());
11053     case TARGET_NR_sched_get_priority_max:
11054         return get_errno(sched_get_priority_max(arg1));
11055     case TARGET_NR_sched_get_priority_min:
11056         return get_errno(sched_get_priority_min(arg1));
11057 #ifdef TARGET_NR_sched_rr_get_interval
11058     case TARGET_NR_sched_rr_get_interval:
11059         {
11060             struct timespec ts;
11061             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11062             if (!is_error(ret)) {
11063                 ret = host_to_target_timespec(arg2, &ts);
11064             }
11065         }
11066         return ret;
11067 #endif
11068 #ifdef TARGET_NR_sched_rr_get_interval_time64
11069     case TARGET_NR_sched_rr_get_interval_time64:
11070         {
11071             struct timespec ts;
11072             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11073             if (!is_error(ret)) {
11074                 ret = host_to_target_timespec64(arg2, &ts);
11075             }
11076         }
11077         return ret;
11078 #endif
11079 #if defined(TARGET_NR_nanosleep)
11080     case TARGET_NR_nanosleep:
11081         {
11082             struct timespec req, rem;
11083             target_to_host_timespec(&req, arg1);
11084             ret = get_errno(safe_nanosleep(&req, &rem));
11085             if (is_error(ret) && arg2) {
11086                 host_to_target_timespec(arg2, &rem);
11087             }
11088         }
11089         return ret;
11090 #endif
11091     case TARGET_NR_prctl:
11092         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11093         break;
11094 #ifdef TARGET_NR_arch_prctl
11095     case TARGET_NR_arch_prctl:
11096         return do_arch_prctl(cpu_env, arg1, arg2);
11097 #endif
11098 #ifdef TARGET_NR_pread64
11099     case TARGET_NR_pread64:
11100         if (regpairs_aligned(cpu_env, num)) {
11101             arg4 = arg5;
11102             arg5 = arg6;
11103         }
11104         if (arg2 == 0 && arg3 == 0) {
11105             /* Special-case NULL buffer and zero length, which should succeed */
11106             p = 0;
11107         } else {
11108             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11109             if (!p) {
11110                 return -TARGET_EFAULT;
11111             }
11112         }
11113         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11114         unlock_user(p, arg2, ret);
11115         return ret;
11116     case TARGET_NR_pwrite64:
11117         if (regpairs_aligned(cpu_env, num)) {
11118             arg4 = arg5;
11119             arg5 = arg6;
11120         }
11121         if (arg2 == 0 && arg3 == 0) {
11122             /* Special-case NULL buffer and zero length, which should succeed */
11123             p = 0;
11124         } else {
11125             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11126             if (!p) {
11127                 return -TARGET_EFAULT;
11128             }
11129         }
11130         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11131         unlock_user(p, arg2, 0);
11132         return ret;
11133 #endif
11134     case TARGET_NR_getcwd:
11135         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11136             return -TARGET_EFAULT;
11137         ret = get_errno(sys_getcwd1(p, arg2));
11138         unlock_user(p, arg1, ret);
11139         return ret;
11140     case TARGET_NR_capget:
11141     case TARGET_NR_capset:
11142     {
11143         struct target_user_cap_header *target_header;
11144         struct target_user_cap_data *target_data = NULL;
11145         struct __user_cap_header_struct header;
11146         struct __user_cap_data_struct data[2];
11147         struct __user_cap_data_struct *dataptr = NULL;
11148         int i, target_datalen;
11149         int data_items = 1;
11150 
11151         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11152             return -TARGET_EFAULT;
11153         }
11154         header.version = tswap32(target_header->version);
11155         header.pid = tswap32(target_header->pid);
11156 
11157         if (header.version != _LINUX_CAPABILITY_VERSION) {
11158             /* Version 2 and up takes pointer to two user_data structs */
11159             data_items = 2;
11160         }
11161 
11162         target_datalen = sizeof(*target_data) * data_items;
11163 
11164         if (arg2) {
11165             if (num == TARGET_NR_capget) {
11166                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11167             } else {
11168                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11169             }
11170             if (!target_data) {
11171                 unlock_user_struct(target_header, arg1, 0);
11172                 return -TARGET_EFAULT;
11173             }
11174 
11175             if (num == TARGET_NR_capset) {
11176                 for (i = 0; i < data_items; i++) {
11177                     data[i].effective = tswap32(target_data[i].effective);
11178                     data[i].permitted = tswap32(target_data[i].permitted);
11179                     data[i].inheritable = tswap32(target_data[i].inheritable);
11180                 }
11181             }
11182 
11183             dataptr = data;
11184         }
11185 
11186         if (num == TARGET_NR_capget) {
11187             ret = get_errno(capget(&header, dataptr));
11188         } else {
11189             ret = get_errno(capset(&header, dataptr));
11190         }
11191 
11192         /* The kernel always updates version for both capget and capset */
11193         target_header->version = tswap32(header.version);
11194         unlock_user_struct(target_header, arg1, 1);
11195 
11196         if (arg2) {
11197             if (num == TARGET_NR_capget) {
11198                 for (i = 0; i < data_items; i++) {
11199                     target_data[i].effective = tswap32(data[i].effective);
11200                     target_data[i].permitted = tswap32(data[i].permitted);
11201                     target_data[i].inheritable = tswap32(data[i].inheritable);
11202                 }
11203                 unlock_user(target_data, arg2, target_datalen);
11204             } else {
11205                 unlock_user(target_data, arg2, 0);
11206             }
11207         }
11208         return ret;
11209     }
11210     case TARGET_NR_sigaltstack:
11211         return do_sigaltstack(arg1, arg2, cpu_env);
11212 
11213 #ifdef CONFIG_SENDFILE
11214 #ifdef TARGET_NR_sendfile
11215     case TARGET_NR_sendfile:
11216     {
11217         off_t *offp = NULL;
11218         off_t off;
11219         if (arg3) {
11220             ret = get_user_sal(off, arg3);
11221             if (is_error(ret)) {
11222                 return ret;
11223             }
11224             offp = &off;
11225         }
11226         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11227         if (!is_error(ret) && arg3) {
11228             abi_long ret2 = put_user_sal(off, arg3);
11229             if (is_error(ret2)) {
11230                 ret = ret2;
11231             }
11232         }
11233         return ret;
11234     }
11235 #endif
11236 #ifdef TARGET_NR_sendfile64
11237     case TARGET_NR_sendfile64:
11238     {
11239         off_t *offp = NULL;
11240         off_t off;
11241         if (arg3) {
11242             ret = get_user_s64(off, arg3);
11243             if (is_error(ret)) {
11244                 return ret;
11245             }
11246             offp = &off;
11247         }
11248         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11249         if (!is_error(ret) && arg3) {
11250             abi_long ret2 = put_user_s64(off, arg3);
11251             if (is_error(ret2)) {
11252                 ret = ret2;
11253             }
11254         }
11255         return ret;
11256     }
11257 #endif
11258 #endif
11259 #ifdef TARGET_NR_vfork
11260     case TARGET_NR_vfork:
11261         return get_errno(do_fork(cpu_env,
11262                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11263                          0, 0, 0, 0));
11264 #endif
11265 #ifdef TARGET_NR_ugetrlimit
11266     case TARGET_NR_ugetrlimit:
11267     {
11268 	struct rlimit rlim;
11269 	int resource = target_to_host_resource(arg1);
11270 	ret = get_errno(getrlimit(resource, &rlim));
11271 	if (!is_error(ret)) {
11272 	    struct target_rlimit *target_rlim;
11273             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11274                 return -TARGET_EFAULT;
11275 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11276 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11277             unlock_user_struct(target_rlim, arg2, 1);
11278 	}
11279         return ret;
11280     }
11281 #endif
11282 #ifdef TARGET_NR_truncate64
11283     case TARGET_NR_truncate64:
11284         if (!(p = lock_user_string(arg1)))
11285             return -TARGET_EFAULT;
11286 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11287         unlock_user(p, arg1, 0);
11288         return ret;
11289 #endif
11290 #ifdef TARGET_NR_ftruncate64
11291     case TARGET_NR_ftruncate64:
11292         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11293 #endif
11294 #ifdef TARGET_NR_stat64
11295     case TARGET_NR_stat64:
11296         if (!(p = lock_user_string(arg1))) {
11297             return -TARGET_EFAULT;
11298         }
11299         ret = get_errno(stat(path(p), &st));
11300         unlock_user(p, arg1, 0);
11301         if (!is_error(ret))
11302             ret = host_to_target_stat64(cpu_env, arg2, &st);
11303         return ret;
11304 #endif
11305 #ifdef TARGET_NR_lstat64
11306     case TARGET_NR_lstat64:
11307         if (!(p = lock_user_string(arg1))) {
11308             return -TARGET_EFAULT;
11309         }
11310         ret = get_errno(lstat(path(p), &st));
11311         unlock_user(p, arg1, 0);
11312         if (!is_error(ret))
11313             ret = host_to_target_stat64(cpu_env, arg2, &st);
11314         return ret;
11315 #endif
11316 #ifdef TARGET_NR_fstat64
11317     case TARGET_NR_fstat64:
11318         ret = get_errno(fstat(arg1, &st));
11319         if (!is_error(ret))
11320             ret = host_to_target_stat64(cpu_env, arg2, &st);
11321         return ret;
11322 #endif
11323 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11324 #ifdef TARGET_NR_fstatat64
11325     case TARGET_NR_fstatat64:
11326 #endif
11327 #ifdef TARGET_NR_newfstatat
11328     case TARGET_NR_newfstatat:
11329 #endif
11330         if (!(p = lock_user_string(arg2))) {
11331             return -TARGET_EFAULT;
11332         }
11333         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11334         unlock_user(p, arg2, 0);
11335         if (!is_error(ret))
11336             ret = host_to_target_stat64(cpu_env, arg3, &st);
11337         return ret;
11338 #endif
11339 #if defined(TARGET_NR_statx)
11340     case TARGET_NR_statx:
11341         {
11342             struct target_statx *target_stx;
11343             int dirfd = arg1;
11344             int flags = arg3;
11345 
11346             p = lock_user_string(arg2);
11347             if (p == NULL) {
11348                 return -TARGET_EFAULT;
11349             }
11350 #if defined(__NR_statx)
11351             {
11352                 /*
11353                  * It is assumed that struct statx is architecture independent.
11354                  */
11355                 struct target_statx host_stx;
11356                 int mask = arg4;
11357 
11358                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11359                 if (!is_error(ret)) {
11360                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11361                         unlock_user(p, arg2, 0);
11362                         return -TARGET_EFAULT;
11363                     }
11364                 }
11365 
11366                 if (ret != -TARGET_ENOSYS) {
11367                     unlock_user(p, arg2, 0);
11368                     return ret;
11369                 }
11370             }
11371 #endif
11372             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11373             unlock_user(p, arg2, 0);
11374 
11375             if (!is_error(ret)) {
11376                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11377                     return -TARGET_EFAULT;
11378                 }
11379                 memset(target_stx, 0, sizeof(*target_stx));
11380                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11381                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11382                 __put_user(st.st_ino, &target_stx->stx_ino);
11383                 __put_user(st.st_mode, &target_stx->stx_mode);
11384                 __put_user(st.st_uid, &target_stx->stx_uid);
11385                 __put_user(st.st_gid, &target_stx->stx_gid);
11386                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11387                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11388                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11389                 __put_user(st.st_size, &target_stx->stx_size);
11390                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11391                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11392                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11393                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11394                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11395                 unlock_user_struct(target_stx, arg5, 1);
11396             }
11397         }
11398         return ret;
11399 #endif
11400 #ifdef TARGET_NR_lchown
11401     case TARGET_NR_lchown:
11402         if (!(p = lock_user_string(arg1)))
11403             return -TARGET_EFAULT;
11404         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11405         unlock_user(p, arg1, 0);
11406         return ret;
11407 #endif
11408 #ifdef TARGET_NR_getuid
11409     case TARGET_NR_getuid:
11410         return get_errno(high2lowuid(getuid()));
11411 #endif
11412 #ifdef TARGET_NR_getgid
11413     case TARGET_NR_getgid:
11414         return get_errno(high2lowgid(getgid()));
11415 #endif
11416 #ifdef TARGET_NR_geteuid
11417     case TARGET_NR_geteuid:
11418         return get_errno(high2lowuid(geteuid()));
11419 #endif
11420 #ifdef TARGET_NR_getegid
11421     case TARGET_NR_getegid:
11422         return get_errno(high2lowgid(getegid()));
11423 #endif
11424     case TARGET_NR_setreuid:
11425         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11426     case TARGET_NR_setregid:
11427         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11428     case TARGET_NR_getgroups:
11429         {
11430             int gidsetsize = arg1;
11431             target_id *target_grouplist;
11432             gid_t *grouplist;
11433             int i;
11434 
11435             grouplist = alloca(gidsetsize * sizeof(gid_t));
11436             ret = get_errno(getgroups(gidsetsize, grouplist));
11437             if (gidsetsize == 0)
11438                 return ret;
11439             if (!is_error(ret)) {
11440                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11441                 if (!target_grouplist)
11442                     return -TARGET_EFAULT;
11443                 for(i = 0;i < ret; i++)
11444                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11445                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11446             }
11447         }
11448         return ret;
11449     case TARGET_NR_setgroups:
11450         {
11451             int gidsetsize = arg1;
11452             target_id *target_grouplist;
11453             gid_t *grouplist = NULL;
11454             int i;
11455             if (gidsetsize) {
11456                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11457                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11458                 if (!target_grouplist) {
11459                     return -TARGET_EFAULT;
11460                 }
11461                 for (i = 0; i < gidsetsize; i++) {
11462                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11463                 }
11464                 unlock_user(target_grouplist, arg2, 0);
11465             }
11466             return get_errno(setgroups(gidsetsize, grouplist));
11467         }
11468     case TARGET_NR_fchown:
11469         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11470 #if defined(TARGET_NR_fchownat)
11471     case TARGET_NR_fchownat:
11472         if (!(p = lock_user_string(arg2)))
11473             return -TARGET_EFAULT;
11474         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11475                                  low2highgid(arg4), arg5));
11476         unlock_user(p, arg2, 0);
11477         return ret;
11478 #endif
11479 #ifdef TARGET_NR_setresuid
11480     case TARGET_NR_setresuid:
11481         return get_errno(sys_setresuid(low2highuid(arg1),
11482                                        low2highuid(arg2),
11483                                        low2highuid(arg3)));
11484 #endif
11485 #ifdef TARGET_NR_getresuid
11486     case TARGET_NR_getresuid:
11487         {
11488             uid_t ruid, euid, suid;
11489             ret = get_errno(getresuid(&ruid, &euid, &suid));
11490             if (!is_error(ret)) {
11491                 if (put_user_id(high2lowuid(ruid), arg1)
11492                     || put_user_id(high2lowuid(euid), arg2)
11493                     || put_user_id(high2lowuid(suid), arg3))
11494                     return -TARGET_EFAULT;
11495             }
11496         }
11497         return ret;
11498 #endif
11499 #ifdef TARGET_NR_getresgid
11500     case TARGET_NR_setresgid:
11501         return get_errno(sys_setresgid(low2highgid(arg1),
11502                                        low2highgid(arg2),
11503                                        low2highgid(arg3)));
11504 #endif
11505 #ifdef TARGET_NR_getresgid
11506     case TARGET_NR_getresgid:
11507         {
11508             gid_t rgid, egid, sgid;
11509             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11510             if (!is_error(ret)) {
11511                 if (put_user_id(high2lowgid(rgid), arg1)
11512                     || put_user_id(high2lowgid(egid), arg2)
11513                     || put_user_id(high2lowgid(sgid), arg3))
11514                     return -TARGET_EFAULT;
11515             }
11516         }
11517         return ret;
11518 #endif
11519 #ifdef TARGET_NR_chown
11520     case TARGET_NR_chown:
11521         if (!(p = lock_user_string(arg1)))
11522             return -TARGET_EFAULT;
11523         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11524         unlock_user(p, arg1, 0);
11525         return ret;
11526 #endif
11527     case TARGET_NR_setuid:
11528         return get_errno(sys_setuid(low2highuid(arg1)));
11529     case TARGET_NR_setgid:
11530         return get_errno(sys_setgid(low2highgid(arg1)));
11531     case TARGET_NR_setfsuid:
11532         return get_errno(setfsuid(arg1));
11533     case TARGET_NR_setfsgid:
11534         return get_errno(setfsgid(arg1));
11535 
11536 #ifdef TARGET_NR_lchown32
11537     case TARGET_NR_lchown32:
11538         if (!(p = lock_user_string(arg1)))
11539             return -TARGET_EFAULT;
11540         ret = get_errno(lchown(p, arg2, arg3));
11541         unlock_user(p, arg1, 0);
11542         return ret;
11543 #endif
11544 #ifdef TARGET_NR_getuid32
11545     case TARGET_NR_getuid32:
11546         return get_errno(getuid());
11547 #endif
11548 
11549 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11550    /* Alpha specific */
11551     case TARGET_NR_getxuid:
11552          {
11553             uid_t euid;
11554             euid=geteuid();
11555             cpu_env->ir[IR_A4]=euid;
11556          }
11557         return get_errno(getuid());
11558 #endif
11559 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11560    /* Alpha specific */
11561     case TARGET_NR_getxgid:
11562          {
11563             uid_t egid;
11564             egid=getegid();
11565             cpu_env->ir[IR_A4]=egid;
11566          }
11567         return get_errno(getgid());
11568 #endif
11569 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11570     /* Alpha specific */
11571     case TARGET_NR_osf_getsysinfo:
11572         ret = -TARGET_EOPNOTSUPP;
11573         switch (arg1) {
11574           case TARGET_GSI_IEEE_FP_CONTROL:
11575             {
11576                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11577                 uint64_t swcr = cpu_env->swcr;
11578 
11579                 swcr &= ~SWCR_STATUS_MASK;
11580                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11581 
11582                 if (put_user_u64 (swcr, arg2))
11583                         return -TARGET_EFAULT;
11584                 ret = 0;
11585             }
11586             break;
11587 
11588           /* case GSI_IEEE_STATE_AT_SIGNAL:
11589              -- Not implemented in linux kernel.
11590              case GSI_UACPROC:
11591              -- Retrieves current unaligned access state; not much used.
11592              case GSI_PROC_TYPE:
11593              -- Retrieves implver information; surely not used.
11594              case GSI_GET_HWRPB:
11595              -- Grabs a copy of the HWRPB; surely not used.
11596           */
11597         }
11598         return ret;
11599 #endif
11600 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11601     /* Alpha specific */
11602     case TARGET_NR_osf_setsysinfo:
11603         ret = -TARGET_EOPNOTSUPP;
11604         switch (arg1) {
11605           case TARGET_SSI_IEEE_FP_CONTROL:
11606             {
11607                 uint64_t swcr, fpcr;
11608 
11609                 if (get_user_u64 (swcr, arg2)) {
11610                     return -TARGET_EFAULT;
11611                 }
11612 
11613                 /*
11614                  * The kernel calls swcr_update_status to update the
11615                  * status bits from the fpcr at every point that it
11616                  * could be queried.  Therefore, we store the status
11617                  * bits only in FPCR.
11618                  */
11619                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11620 
11621                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11622                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11623                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11624                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11625                 ret = 0;
11626             }
11627             break;
11628 
11629           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11630             {
11631                 uint64_t exc, fpcr, fex;
11632 
11633                 if (get_user_u64(exc, arg2)) {
11634                     return -TARGET_EFAULT;
11635                 }
11636                 exc &= SWCR_STATUS_MASK;
11637                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11638 
11639                 /* Old exceptions are not signaled.  */
11640                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11641                 fex = exc & ~fex;
11642                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11643                 fex &= (cpu_env)->swcr;
11644 
11645                 /* Update the hardware fpcr.  */
11646                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11647                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11648 
11649                 if (fex) {
11650                     int si_code = TARGET_FPE_FLTUNK;
11651                     target_siginfo_t info;
11652 
11653                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11654                         si_code = TARGET_FPE_FLTUND;
11655                     }
11656                     if (fex & SWCR_TRAP_ENABLE_INE) {
11657                         si_code = TARGET_FPE_FLTRES;
11658                     }
11659                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11660                         si_code = TARGET_FPE_FLTUND;
11661                     }
11662                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11663                         si_code = TARGET_FPE_FLTOVF;
11664                     }
11665                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11666                         si_code = TARGET_FPE_FLTDIV;
11667                     }
11668                     if (fex & SWCR_TRAP_ENABLE_INV) {
11669                         si_code = TARGET_FPE_FLTINV;
11670                     }
11671 
11672                     info.si_signo = SIGFPE;
11673                     info.si_errno = 0;
11674                     info.si_code = si_code;
11675                     info._sifields._sigfault._addr = (cpu_env)->pc;
11676                     queue_signal(cpu_env, info.si_signo,
11677                                  QEMU_SI_FAULT, &info);
11678                 }
11679                 ret = 0;
11680             }
11681             break;
11682 
11683           /* case SSI_NVPAIRS:
11684              -- Used with SSIN_UACPROC to enable unaligned accesses.
11685              case SSI_IEEE_STATE_AT_SIGNAL:
11686              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11687              -- Not implemented in linux kernel
11688           */
11689         }
11690         return ret;
11691 #endif
11692 #ifdef TARGET_NR_osf_sigprocmask
11693     /* Alpha specific.  */
11694     case TARGET_NR_osf_sigprocmask:
11695         {
11696             abi_ulong mask;
11697             int how;
11698             sigset_t set, oldset;
11699 
11700             switch(arg1) {
11701             case TARGET_SIG_BLOCK:
11702                 how = SIG_BLOCK;
11703                 break;
11704             case TARGET_SIG_UNBLOCK:
11705                 how = SIG_UNBLOCK;
11706                 break;
11707             case TARGET_SIG_SETMASK:
11708                 how = SIG_SETMASK;
11709                 break;
11710             default:
11711                 return -TARGET_EINVAL;
11712             }
11713             mask = arg2;
11714             target_to_host_old_sigset(&set, &mask);
11715             ret = do_sigprocmask(how, &set, &oldset);
11716             if (!ret) {
11717                 host_to_target_old_sigset(&mask, &oldset);
11718                 ret = mask;
11719             }
11720         }
11721         return ret;
11722 #endif
11723 
11724 #ifdef TARGET_NR_getgid32
11725     case TARGET_NR_getgid32:
11726         return get_errno(getgid());
11727 #endif
11728 #ifdef TARGET_NR_geteuid32
11729     case TARGET_NR_geteuid32:
11730         return get_errno(geteuid());
11731 #endif
11732 #ifdef TARGET_NR_getegid32
11733     case TARGET_NR_getegid32:
11734         return get_errno(getegid());
11735 #endif
11736 #ifdef TARGET_NR_setreuid32
11737     case TARGET_NR_setreuid32:
11738         return get_errno(setreuid(arg1, arg2));
11739 #endif
11740 #ifdef TARGET_NR_setregid32
11741     case TARGET_NR_setregid32:
11742         return get_errno(setregid(arg1, arg2));
11743 #endif
11744 #ifdef TARGET_NR_getgroups32
11745     case TARGET_NR_getgroups32:
11746         {
11747             int gidsetsize = arg1;
11748             uint32_t *target_grouplist;
11749             gid_t *grouplist;
11750             int i;
11751 
11752             grouplist = alloca(gidsetsize * sizeof(gid_t));
11753             ret = get_errno(getgroups(gidsetsize, grouplist));
11754             if (gidsetsize == 0)
11755                 return ret;
11756             if (!is_error(ret)) {
11757                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11758                 if (!target_grouplist) {
11759                     return -TARGET_EFAULT;
11760                 }
11761                 for(i = 0;i < ret; i++)
11762                     target_grouplist[i] = tswap32(grouplist[i]);
11763                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11764             }
11765         }
11766         return ret;
11767 #endif
11768 #ifdef TARGET_NR_setgroups32
11769     case TARGET_NR_setgroups32:
11770         {
11771             int gidsetsize = arg1;
11772             uint32_t *target_grouplist;
11773             gid_t *grouplist;
11774             int i;
11775 
11776             grouplist = alloca(gidsetsize * sizeof(gid_t));
11777             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11778             if (!target_grouplist) {
11779                 return -TARGET_EFAULT;
11780             }
11781             for(i = 0;i < gidsetsize; i++)
11782                 grouplist[i] = tswap32(target_grouplist[i]);
11783             unlock_user(target_grouplist, arg2, 0);
11784             return get_errno(setgroups(gidsetsize, grouplist));
11785         }
11786 #endif
11787 #ifdef TARGET_NR_fchown32
11788     case TARGET_NR_fchown32:
11789         return get_errno(fchown(arg1, arg2, arg3));
11790 #endif
11791 #ifdef TARGET_NR_setresuid32
11792     case TARGET_NR_setresuid32:
11793         return get_errno(sys_setresuid(arg1, arg2, arg3));
11794 #endif
11795 #ifdef TARGET_NR_getresuid32
11796     case TARGET_NR_getresuid32:
11797         {
11798             uid_t ruid, euid, suid;
11799             ret = get_errno(getresuid(&ruid, &euid, &suid));
11800             if (!is_error(ret)) {
11801                 if (put_user_u32(ruid, arg1)
11802                     || put_user_u32(euid, arg2)
11803                     || put_user_u32(suid, arg3))
11804                     return -TARGET_EFAULT;
11805             }
11806         }
11807         return ret;
11808 #endif
11809 #ifdef TARGET_NR_setresgid32
11810     case TARGET_NR_setresgid32:
11811         return get_errno(sys_setresgid(arg1, arg2, arg3));
11812 #endif
11813 #ifdef TARGET_NR_getresgid32
11814     case TARGET_NR_getresgid32:
11815         {
11816             gid_t rgid, egid, sgid;
11817             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11818             if (!is_error(ret)) {
11819                 if (put_user_u32(rgid, arg1)
11820                     || put_user_u32(egid, arg2)
11821                     || put_user_u32(sgid, arg3))
11822                     return -TARGET_EFAULT;
11823             }
11824         }
11825         return ret;
11826 #endif
11827 #ifdef TARGET_NR_chown32
11828     case TARGET_NR_chown32:
11829         if (!(p = lock_user_string(arg1)))
11830             return -TARGET_EFAULT;
11831         ret = get_errno(chown(p, arg2, arg3));
11832         unlock_user(p, arg1, 0);
11833         return ret;
11834 #endif
11835 #ifdef TARGET_NR_setuid32
11836     case TARGET_NR_setuid32:
11837         return get_errno(sys_setuid(arg1));
11838 #endif
11839 #ifdef TARGET_NR_setgid32
11840     case TARGET_NR_setgid32:
11841         return get_errno(sys_setgid(arg1));
11842 #endif
11843 #ifdef TARGET_NR_setfsuid32
11844     case TARGET_NR_setfsuid32:
11845         return get_errno(setfsuid(arg1));
11846 #endif
11847 #ifdef TARGET_NR_setfsgid32
11848     case TARGET_NR_setfsgid32:
11849         return get_errno(setfsgid(arg1));
11850 #endif
11851 #ifdef TARGET_NR_mincore
11852     case TARGET_NR_mincore:
11853         {
11854             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11855             if (!a) {
11856                 return -TARGET_ENOMEM;
11857             }
11858             p = lock_user_string(arg3);
11859             if (!p) {
11860                 ret = -TARGET_EFAULT;
11861             } else {
11862                 ret = get_errno(mincore(a, arg2, p));
11863                 unlock_user(p, arg3, ret);
11864             }
11865             unlock_user(a, arg1, 0);
11866         }
11867         return ret;
11868 #endif
11869 #ifdef TARGET_NR_arm_fadvise64_64
11870     case TARGET_NR_arm_fadvise64_64:
11871         /* arm_fadvise64_64 looks like fadvise64_64 but
11872          * with different argument order: fd, advice, offset, len
11873          * rather than the usual fd, offset, len, advice.
11874          * Note that offset and len are both 64-bit so appear as
11875          * pairs of 32-bit registers.
11876          */
11877         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11878                             target_offset64(arg5, arg6), arg2);
11879         return -host_to_target_errno(ret);
11880 #endif
11881 
11882 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11883 
11884 #ifdef TARGET_NR_fadvise64_64
11885     case TARGET_NR_fadvise64_64:
11886 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11887         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11888         ret = arg2;
11889         arg2 = arg3;
11890         arg3 = arg4;
11891         arg4 = arg5;
11892         arg5 = arg6;
11893         arg6 = ret;
11894 #else
11895         /* 6 args: fd, offset (high, low), len (high, low), advice */
11896         if (regpairs_aligned(cpu_env, num)) {
11897             /* offset is in (3,4), len in (5,6) and advice in 7 */
11898             arg2 = arg3;
11899             arg3 = arg4;
11900             arg4 = arg5;
11901             arg5 = arg6;
11902             arg6 = arg7;
11903         }
11904 #endif
11905         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11906                             target_offset64(arg4, arg5), arg6);
11907         return -host_to_target_errno(ret);
11908 #endif
11909 
11910 #ifdef TARGET_NR_fadvise64
11911     case TARGET_NR_fadvise64:
11912         /* 5 args: fd, offset (high, low), len, advice */
11913         if (regpairs_aligned(cpu_env, num)) {
11914             /* offset is in (3,4), len in 5 and advice in 6 */
11915             arg2 = arg3;
11916             arg3 = arg4;
11917             arg4 = arg5;
11918             arg5 = arg6;
11919         }
11920         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11921         return -host_to_target_errno(ret);
11922 #endif
11923 
11924 #else /* not a 32-bit ABI */
11925 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11926 #ifdef TARGET_NR_fadvise64_64
11927     case TARGET_NR_fadvise64_64:
11928 #endif
11929 #ifdef TARGET_NR_fadvise64
11930     case TARGET_NR_fadvise64:
11931 #endif
11932 #ifdef TARGET_S390X
11933         switch (arg4) {
11934         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11935         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11936         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11937         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11938         default: break;
11939         }
11940 #endif
11941         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11942 #endif
11943 #endif /* end of 64-bit ABI fadvise handling */
11944 
11945 #ifdef TARGET_NR_madvise
11946     case TARGET_NR_madvise:
11947         return target_madvise(arg1, arg2, arg3);
11948 #endif
11949 #ifdef TARGET_NR_fcntl64
11950     case TARGET_NR_fcntl64:
11951     {
11952         int cmd;
11953         struct flock64 fl;
11954         from_flock64_fn *copyfrom = copy_from_user_flock64;
11955         to_flock64_fn *copyto = copy_to_user_flock64;
11956 
11957 #ifdef TARGET_ARM
11958         if (!cpu_env->eabi) {
11959             copyfrom = copy_from_user_oabi_flock64;
11960             copyto = copy_to_user_oabi_flock64;
11961         }
11962 #endif
11963 
11964         cmd = target_to_host_fcntl_cmd(arg2);
11965         if (cmd == -TARGET_EINVAL) {
11966             return cmd;
11967         }
11968 
11969         switch(arg2) {
11970         case TARGET_F_GETLK64:
11971             ret = copyfrom(&fl, arg3);
11972             if (ret) {
11973                 break;
11974             }
11975             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11976             if (ret == 0) {
11977                 ret = copyto(arg3, &fl);
11978             }
11979 	    break;
11980 
11981         case TARGET_F_SETLK64:
11982         case TARGET_F_SETLKW64:
11983             ret = copyfrom(&fl, arg3);
11984             if (ret) {
11985                 break;
11986             }
11987             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11988 	    break;
11989         default:
11990             ret = do_fcntl(arg1, arg2, arg3);
11991             break;
11992         }
11993         return ret;
11994     }
11995 #endif
11996 #ifdef TARGET_NR_cacheflush
11997     case TARGET_NR_cacheflush:
11998         /* self-modifying code is handled automatically, so nothing needed */
11999         return 0;
12000 #endif
12001 #ifdef TARGET_NR_getpagesize
12002     case TARGET_NR_getpagesize:
12003         return TARGET_PAGE_SIZE;
12004 #endif
12005     case TARGET_NR_gettid:
12006         return get_errno(sys_gettid());
12007 #ifdef TARGET_NR_readahead
12008     case TARGET_NR_readahead:
12009 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12010         if (regpairs_aligned(cpu_env, num)) {
12011             arg2 = arg3;
12012             arg3 = arg4;
12013             arg4 = arg5;
12014         }
12015         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12016 #else
12017         ret = get_errno(readahead(arg1, arg2, arg3));
12018 #endif
12019         return ret;
12020 #endif
12021 #ifdef CONFIG_ATTR
12022 #ifdef TARGET_NR_setxattr
12023     case TARGET_NR_listxattr:
12024     case TARGET_NR_llistxattr:
12025     {
12026         void *p, *b = 0;
12027         if (arg2) {
12028             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12029             if (!b) {
12030                 return -TARGET_EFAULT;
12031             }
12032         }
12033         p = lock_user_string(arg1);
12034         if (p) {
12035             if (num == TARGET_NR_listxattr) {
12036                 ret = get_errno(listxattr(p, b, arg3));
12037             } else {
12038                 ret = get_errno(llistxattr(p, b, arg3));
12039             }
12040         } else {
12041             ret = -TARGET_EFAULT;
12042         }
12043         unlock_user(p, arg1, 0);
12044         unlock_user(b, arg2, arg3);
12045         return ret;
12046     }
12047     case TARGET_NR_flistxattr:
12048     {
12049         void *b = 0;
12050         if (arg2) {
12051             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12052             if (!b) {
12053                 return -TARGET_EFAULT;
12054             }
12055         }
12056         ret = get_errno(flistxattr(arg1, b, arg3));
12057         unlock_user(b, arg2, arg3);
12058         return ret;
12059     }
12060     case TARGET_NR_setxattr:
12061     case TARGET_NR_lsetxattr:
12062         {
12063             void *p, *n, *v = 0;
12064             if (arg3) {
12065                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12066                 if (!v) {
12067                     return -TARGET_EFAULT;
12068                 }
12069             }
12070             p = lock_user_string(arg1);
12071             n = lock_user_string(arg2);
12072             if (p && n) {
12073                 if (num == TARGET_NR_setxattr) {
12074                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12075                 } else {
12076                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12077                 }
12078             } else {
12079                 ret = -TARGET_EFAULT;
12080             }
12081             unlock_user(p, arg1, 0);
12082             unlock_user(n, arg2, 0);
12083             unlock_user(v, arg3, 0);
12084         }
12085         return ret;
12086     case TARGET_NR_fsetxattr:
12087         {
12088             void *n, *v = 0;
12089             if (arg3) {
12090                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12091                 if (!v) {
12092                     return -TARGET_EFAULT;
12093                 }
12094             }
12095             n = lock_user_string(arg2);
12096             if (n) {
12097                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12098             } else {
12099                 ret = -TARGET_EFAULT;
12100             }
12101             unlock_user(n, arg2, 0);
12102             unlock_user(v, arg3, 0);
12103         }
12104         return ret;
12105     case TARGET_NR_getxattr:
12106     case TARGET_NR_lgetxattr:
12107         {
12108             void *p, *n, *v = 0;
12109             if (arg3) {
12110                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12111                 if (!v) {
12112                     return -TARGET_EFAULT;
12113                 }
12114             }
12115             p = lock_user_string(arg1);
12116             n = lock_user_string(arg2);
12117             if (p && n) {
12118                 if (num == TARGET_NR_getxattr) {
12119                     ret = get_errno(getxattr(p, n, v, arg4));
12120                 } else {
12121                     ret = get_errno(lgetxattr(p, n, v, arg4));
12122                 }
12123             } else {
12124                 ret = -TARGET_EFAULT;
12125             }
12126             unlock_user(p, arg1, 0);
12127             unlock_user(n, arg2, 0);
12128             unlock_user(v, arg3, arg4);
12129         }
12130         return ret;
12131     case TARGET_NR_fgetxattr:
12132         {
12133             void *n, *v = 0;
12134             if (arg3) {
12135                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12136                 if (!v) {
12137                     return -TARGET_EFAULT;
12138                 }
12139             }
12140             n = lock_user_string(arg2);
12141             if (n) {
12142                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12143             } else {
12144                 ret = -TARGET_EFAULT;
12145             }
12146             unlock_user(n, arg2, 0);
12147             unlock_user(v, arg3, arg4);
12148         }
12149         return ret;
12150     case TARGET_NR_removexattr:
12151     case TARGET_NR_lremovexattr:
12152         {
12153             void *p, *n;
12154             p = lock_user_string(arg1);
12155             n = lock_user_string(arg2);
12156             if (p && n) {
12157                 if (num == TARGET_NR_removexattr) {
12158                     ret = get_errno(removexattr(p, n));
12159                 } else {
12160                     ret = get_errno(lremovexattr(p, n));
12161                 }
12162             } else {
12163                 ret = -TARGET_EFAULT;
12164             }
12165             unlock_user(p, arg1, 0);
12166             unlock_user(n, arg2, 0);
12167         }
12168         return ret;
12169     case TARGET_NR_fremovexattr:
12170         {
12171             void *n;
12172             n = lock_user_string(arg2);
12173             if (n) {
12174                 ret = get_errno(fremovexattr(arg1, n));
12175             } else {
12176                 ret = -TARGET_EFAULT;
12177             }
12178             unlock_user(n, arg2, 0);
12179         }
12180         return ret;
12181 #endif
12182 #endif /* CONFIG_ATTR */
12183 #ifdef TARGET_NR_set_thread_area
12184     case TARGET_NR_set_thread_area:
12185 #if defined(TARGET_MIPS)
12186       cpu_env->active_tc.CP0_UserLocal = arg1;
12187       return 0;
12188 #elif defined(TARGET_CRIS)
12189       if (arg1 & 0xff)
12190           ret = -TARGET_EINVAL;
12191       else {
12192           cpu_env->pregs[PR_PID] = arg1;
12193           ret = 0;
12194       }
12195       return ret;
12196 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12197       return do_set_thread_area(cpu_env, arg1);
12198 #elif defined(TARGET_M68K)
12199       {
12200           TaskState *ts = cpu->opaque;
12201           ts->tp_value = arg1;
12202           return 0;
12203       }
12204 #else
12205       return -TARGET_ENOSYS;
12206 #endif
12207 #endif
12208 #ifdef TARGET_NR_get_thread_area
12209     case TARGET_NR_get_thread_area:
12210 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12211         return do_get_thread_area(cpu_env, arg1);
12212 #elif defined(TARGET_M68K)
12213         {
12214             TaskState *ts = cpu->opaque;
12215             return ts->tp_value;
12216         }
12217 #else
12218         return -TARGET_ENOSYS;
12219 #endif
12220 #endif
12221 #ifdef TARGET_NR_getdomainname
12222     case TARGET_NR_getdomainname:
12223         return -TARGET_ENOSYS;
12224 #endif
12225 
12226 #ifdef TARGET_NR_clock_settime
12227     case TARGET_NR_clock_settime:
12228     {
12229         struct timespec ts;
12230 
12231         ret = target_to_host_timespec(&ts, arg2);
12232         if (!is_error(ret)) {
12233             ret = get_errno(clock_settime(arg1, &ts));
12234         }
12235         return ret;
12236     }
12237 #endif
12238 #ifdef TARGET_NR_clock_settime64
12239     case TARGET_NR_clock_settime64:
12240     {
12241         struct timespec ts;
12242 
12243         ret = target_to_host_timespec64(&ts, arg2);
12244         if (!is_error(ret)) {
12245             ret = get_errno(clock_settime(arg1, &ts));
12246         }
12247         return ret;
12248     }
12249 #endif
12250 #ifdef TARGET_NR_clock_gettime
12251     case TARGET_NR_clock_gettime:
12252     {
12253         struct timespec ts;
12254         ret = get_errno(clock_gettime(arg1, &ts));
12255         if (!is_error(ret)) {
12256             ret = host_to_target_timespec(arg2, &ts);
12257         }
12258         return ret;
12259     }
12260 #endif
12261 #ifdef TARGET_NR_clock_gettime64
12262     case TARGET_NR_clock_gettime64:
12263     {
12264         struct timespec ts;
12265         ret = get_errno(clock_gettime(arg1, &ts));
12266         if (!is_error(ret)) {
12267             ret = host_to_target_timespec64(arg2, &ts);
12268         }
12269         return ret;
12270     }
12271 #endif
12272 #ifdef TARGET_NR_clock_getres
12273     case TARGET_NR_clock_getres:
12274     {
12275         struct timespec ts;
12276         ret = get_errno(clock_getres(arg1, &ts));
12277         if (!is_error(ret)) {
12278             host_to_target_timespec(arg2, &ts);
12279         }
12280         return ret;
12281     }
12282 #endif
12283 #ifdef TARGET_NR_clock_getres_time64
12284     case TARGET_NR_clock_getres_time64:
12285     {
12286         struct timespec ts;
12287         ret = get_errno(clock_getres(arg1, &ts));
12288         if (!is_error(ret)) {
12289             host_to_target_timespec64(arg2, &ts);
12290         }
12291         return ret;
12292     }
12293 #endif
12294 #ifdef TARGET_NR_clock_nanosleep
12295     case TARGET_NR_clock_nanosleep:
12296     {
12297         struct timespec ts;
12298         if (target_to_host_timespec(&ts, arg3)) {
12299             return -TARGET_EFAULT;
12300         }
12301         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12302                                              &ts, arg4 ? &ts : NULL));
12303         /*
12304          * if the call is interrupted by a signal handler, it fails
12305          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12306          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12307          */
12308         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12309             host_to_target_timespec(arg4, &ts)) {
12310               return -TARGET_EFAULT;
12311         }
12312 
12313         return ret;
12314     }
12315 #endif
12316 #ifdef TARGET_NR_clock_nanosleep_time64
12317     case TARGET_NR_clock_nanosleep_time64:
12318     {
12319         struct timespec ts;
12320 
12321         if (target_to_host_timespec64(&ts, arg3)) {
12322             return -TARGET_EFAULT;
12323         }
12324 
12325         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12326                                              &ts, arg4 ? &ts : NULL));
12327 
12328         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12329             host_to_target_timespec64(arg4, &ts)) {
12330             return -TARGET_EFAULT;
12331         }
12332         return ret;
12333     }
12334 #endif
12335 
12336 #if defined(TARGET_NR_set_tid_address)
12337     case TARGET_NR_set_tid_address:
12338     {
12339         TaskState *ts = cpu->opaque;
12340         ts->child_tidptr = arg1;
12341         /* do not call host set_tid_address() syscall, instead return tid() */
12342         return get_errno(sys_gettid());
12343     }
12344 #endif
12345 
12346     case TARGET_NR_tkill:
12347         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12348 
12349     case TARGET_NR_tgkill:
12350         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12351                          target_to_host_signal(arg3)));
12352 
12353 #ifdef TARGET_NR_set_robust_list
12354     case TARGET_NR_set_robust_list:
12355     case TARGET_NR_get_robust_list:
12356         /* The ABI for supporting robust futexes has userspace pass
12357          * the kernel a pointer to a linked list which is updated by
12358          * userspace after the syscall; the list is walked by the kernel
12359          * when the thread exits. Since the linked list in QEMU guest
12360          * memory isn't a valid linked list for the host and we have
12361          * no way to reliably intercept the thread-death event, we can't
12362          * support these. Silently return ENOSYS so that guest userspace
12363          * falls back to a non-robust futex implementation (which should
12364          * be OK except in the corner case of the guest crashing while
12365          * holding a mutex that is shared with another process via
12366          * shared memory).
12367          */
12368         return -TARGET_ENOSYS;
12369 #endif
12370 
12371 #if defined(TARGET_NR_utimensat)
12372     case TARGET_NR_utimensat:
12373         {
12374             struct timespec *tsp, ts[2];
12375             if (!arg3) {
12376                 tsp = NULL;
12377             } else {
12378                 if (target_to_host_timespec(ts, arg3)) {
12379                     return -TARGET_EFAULT;
12380                 }
12381                 if (target_to_host_timespec(ts + 1, arg3 +
12382                                             sizeof(struct target_timespec))) {
12383                     return -TARGET_EFAULT;
12384                 }
12385                 tsp = ts;
12386             }
12387             if (!arg2)
12388                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12389             else {
12390                 if (!(p = lock_user_string(arg2))) {
12391                     return -TARGET_EFAULT;
12392                 }
12393                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12394                 unlock_user(p, arg2, 0);
12395             }
12396         }
12397         return ret;
12398 #endif
12399 #ifdef TARGET_NR_utimensat_time64
12400     case TARGET_NR_utimensat_time64:
12401         {
12402             struct timespec *tsp, ts[2];
12403             if (!arg3) {
12404                 tsp = NULL;
12405             } else {
12406                 if (target_to_host_timespec64(ts, arg3)) {
12407                     return -TARGET_EFAULT;
12408                 }
12409                 if (target_to_host_timespec64(ts + 1, arg3 +
12410                                      sizeof(struct target__kernel_timespec))) {
12411                     return -TARGET_EFAULT;
12412                 }
12413                 tsp = ts;
12414             }
12415             if (!arg2)
12416                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12417             else {
12418                 p = lock_user_string(arg2);
12419                 if (!p) {
12420                     return -TARGET_EFAULT;
12421                 }
12422                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12423                 unlock_user(p, arg2, 0);
12424             }
12425         }
12426         return ret;
12427 #endif
12428 #ifdef TARGET_NR_futex
12429     case TARGET_NR_futex:
12430         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12431 #endif
12432 #ifdef TARGET_NR_futex_time64
12433     case TARGET_NR_futex_time64:
12434         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12435 #endif
12436 #ifdef CONFIG_INOTIFY
12437 #if defined(TARGET_NR_inotify_init)
12438     case TARGET_NR_inotify_init:
12439         ret = get_errno(inotify_init());
12440         if (ret >= 0) {
12441             fd_trans_register(ret, &target_inotify_trans);
12442         }
12443         return ret;
12444 #endif
12445 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12446     case TARGET_NR_inotify_init1:
12447         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12448                                           fcntl_flags_tbl)));
12449         if (ret >= 0) {
12450             fd_trans_register(ret, &target_inotify_trans);
12451         }
12452         return ret;
12453 #endif
12454 #if defined(TARGET_NR_inotify_add_watch)
12455     case TARGET_NR_inotify_add_watch:
12456         p = lock_user_string(arg2);
12457         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12458         unlock_user(p, arg2, 0);
12459         return ret;
12460 #endif
12461 #if defined(TARGET_NR_inotify_rm_watch)
12462     case TARGET_NR_inotify_rm_watch:
12463         return get_errno(inotify_rm_watch(arg1, arg2));
12464 #endif
12465 #endif
12466 
12467 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12468     case TARGET_NR_mq_open:
12469         {
12470             struct mq_attr posix_mq_attr;
12471             struct mq_attr *pposix_mq_attr;
12472             int host_flags;
12473 
12474             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12475             pposix_mq_attr = NULL;
12476             if (arg4) {
12477                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12478                     return -TARGET_EFAULT;
12479                 }
12480                 pposix_mq_attr = &posix_mq_attr;
12481             }
12482             p = lock_user_string(arg1 - 1);
12483             if (!p) {
12484                 return -TARGET_EFAULT;
12485             }
12486             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12487             unlock_user (p, arg1, 0);
12488         }
12489         return ret;
12490 
12491     case TARGET_NR_mq_unlink:
12492         p = lock_user_string(arg1 - 1);
12493         if (!p) {
12494             return -TARGET_EFAULT;
12495         }
12496         ret = get_errno(mq_unlink(p));
12497         unlock_user (p, arg1, 0);
12498         return ret;
12499 
12500 #ifdef TARGET_NR_mq_timedsend
12501     case TARGET_NR_mq_timedsend:
12502         {
12503             struct timespec ts;
12504 
12505             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12506             if (arg5 != 0) {
12507                 if (target_to_host_timespec(&ts, arg5)) {
12508                     return -TARGET_EFAULT;
12509                 }
12510                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12511                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12512                     return -TARGET_EFAULT;
12513                 }
12514             } else {
12515                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12516             }
12517             unlock_user (p, arg2, arg3);
12518         }
12519         return ret;
12520 #endif
12521 #ifdef TARGET_NR_mq_timedsend_time64
12522     case TARGET_NR_mq_timedsend_time64:
12523         {
12524             struct timespec ts;
12525 
12526             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12527             if (arg5 != 0) {
12528                 if (target_to_host_timespec64(&ts, arg5)) {
12529                     return -TARGET_EFAULT;
12530                 }
12531                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12532                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12533                     return -TARGET_EFAULT;
12534                 }
12535             } else {
12536                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12537             }
12538             unlock_user(p, arg2, arg3);
12539         }
12540         return ret;
12541 #endif
12542 
12543 #ifdef TARGET_NR_mq_timedreceive
12544     case TARGET_NR_mq_timedreceive:
12545         {
12546             struct timespec ts;
12547             unsigned int prio;
12548 
12549             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12550             if (arg5 != 0) {
12551                 if (target_to_host_timespec(&ts, arg5)) {
12552                     return -TARGET_EFAULT;
12553                 }
12554                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12555                                                      &prio, &ts));
12556                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12557                     return -TARGET_EFAULT;
12558                 }
12559             } else {
12560                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12561                                                      &prio, NULL));
12562             }
12563             unlock_user (p, arg2, arg3);
12564             if (arg4 != 0)
12565                 put_user_u32(prio, arg4);
12566         }
12567         return ret;
12568 #endif
12569 #ifdef TARGET_NR_mq_timedreceive_time64
12570     case TARGET_NR_mq_timedreceive_time64:
12571         {
12572             struct timespec ts;
12573             unsigned int prio;
12574 
12575             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12576             if (arg5 != 0) {
12577                 if (target_to_host_timespec64(&ts, arg5)) {
12578                     return -TARGET_EFAULT;
12579                 }
12580                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12581                                                      &prio, &ts));
12582                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12583                     return -TARGET_EFAULT;
12584                 }
12585             } else {
12586                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12587                                                      &prio, NULL));
12588             }
12589             unlock_user(p, arg2, arg3);
12590             if (arg4 != 0) {
12591                 put_user_u32(prio, arg4);
12592             }
12593         }
12594         return ret;
12595 #endif
12596 
12597     /* Not implemented for now... */
12598 /*     case TARGET_NR_mq_notify: */
12599 /*         break; */
12600 
12601     case TARGET_NR_mq_getsetattr:
12602         {
12603             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12604             ret = 0;
12605             if (arg2 != 0) {
12606                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12607                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12608                                            &posix_mq_attr_out));
12609             } else if (arg3 != 0) {
12610                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12611             }
12612             if (ret == 0 && arg3 != 0) {
12613                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12614             }
12615         }
12616         return ret;
12617 #endif
12618 
12619 #ifdef CONFIG_SPLICE
12620 #ifdef TARGET_NR_tee
12621     case TARGET_NR_tee:
12622         {
12623             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12624         }
12625         return ret;
12626 #endif
12627 #ifdef TARGET_NR_splice
12628     case TARGET_NR_splice:
12629         {
12630             loff_t loff_in, loff_out;
12631             loff_t *ploff_in = NULL, *ploff_out = NULL;
12632             if (arg2) {
12633                 if (get_user_u64(loff_in, arg2)) {
12634                     return -TARGET_EFAULT;
12635                 }
12636                 ploff_in = &loff_in;
12637             }
12638             if (arg4) {
12639                 if (get_user_u64(loff_out, arg4)) {
12640                     return -TARGET_EFAULT;
12641                 }
12642                 ploff_out = &loff_out;
12643             }
12644             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12645             if (arg2) {
12646                 if (put_user_u64(loff_in, arg2)) {
12647                     return -TARGET_EFAULT;
12648                 }
12649             }
12650             if (arg4) {
12651                 if (put_user_u64(loff_out, arg4)) {
12652                     return -TARGET_EFAULT;
12653                 }
12654             }
12655         }
12656         return ret;
12657 #endif
12658 #ifdef TARGET_NR_vmsplice
12659 	case TARGET_NR_vmsplice:
12660         {
12661             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12662             if (vec != NULL) {
12663                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12664                 unlock_iovec(vec, arg2, arg3, 0);
12665             } else {
12666                 ret = -host_to_target_errno(errno);
12667             }
12668         }
12669         return ret;
12670 #endif
12671 #endif /* CONFIG_SPLICE */
12672 #ifdef CONFIG_EVENTFD
12673 #if defined(TARGET_NR_eventfd)
12674     case TARGET_NR_eventfd:
12675         ret = get_errno(eventfd(arg1, 0));
12676         if (ret >= 0) {
12677             fd_trans_register(ret, &target_eventfd_trans);
12678         }
12679         return ret;
12680 #endif
12681 #if defined(TARGET_NR_eventfd2)
12682     case TARGET_NR_eventfd2:
12683     {
12684         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12685         if (arg2 & TARGET_O_NONBLOCK) {
12686             host_flags |= O_NONBLOCK;
12687         }
12688         if (arg2 & TARGET_O_CLOEXEC) {
12689             host_flags |= O_CLOEXEC;
12690         }
12691         ret = get_errno(eventfd(arg1, host_flags));
12692         if (ret >= 0) {
12693             fd_trans_register(ret, &target_eventfd_trans);
12694         }
12695         return ret;
12696     }
12697 #endif
12698 #endif /* CONFIG_EVENTFD  */
12699 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12700     case TARGET_NR_fallocate:
12701 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12702         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12703                                   target_offset64(arg5, arg6)));
12704 #else
12705         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12706 #endif
12707         return ret;
12708 #endif
12709 #if defined(CONFIG_SYNC_FILE_RANGE)
12710 #if defined(TARGET_NR_sync_file_range)
12711     case TARGET_NR_sync_file_range:
12712 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12713 #if defined(TARGET_MIPS)
12714         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12715                                         target_offset64(arg5, arg6), arg7));
12716 #else
12717         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12718                                         target_offset64(arg4, arg5), arg6));
12719 #endif /* !TARGET_MIPS */
12720 #else
12721         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12722 #endif
12723         return ret;
12724 #endif
12725 #if defined(TARGET_NR_sync_file_range2) || \
12726     defined(TARGET_NR_arm_sync_file_range)
12727 #if defined(TARGET_NR_sync_file_range2)
12728     case TARGET_NR_sync_file_range2:
12729 #endif
12730 #if defined(TARGET_NR_arm_sync_file_range)
12731     case TARGET_NR_arm_sync_file_range:
12732 #endif
12733         /* This is like sync_file_range but the arguments are reordered */
12734 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12735         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12736                                         target_offset64(arg5, arg6), arg2));
12737 #else
12738         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12739 #endif
12740         return ret;
12741 #endif
12742 #endif
12743 #if defined(TARGET_NR_signalfd4)
12744     case TARGET_NR_signalfd4:
12745         return do_signalfd4(arg1, arg2, arg4);
12746 #endif
12747 #if defined(TARGET_NR_signalfd)
12748     case TARGET_NR_signalfd:
12749         return do_signalfd4(arg1, arg2, 0);
12750 #endif
12751 #if defined(CONFIG_EPOLL)
12752 #if defined(TARGET_NR_epoll_create)
12753     case TARGET_NR_epoll_create:
12754         return get_errno(epoll_create(arg1));
12755 #endif
12756 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12757     case TARGET_NR_epoll_create1:
12758         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12759 #endif
12760 #if defined(TARGET_NR_epoll_ctl)
12761     case TARGET_NR_epoll_ctl:
12762     {
12763         struct epoll_event ep;
12764         struct epoll_event *epp = 0;
12765         if (arg4) {
12766             if (arg2 != EPOLL_CTL_DEL) {
12767                 struct target_epoll_event *target_ep;
12768                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12769                     return -TARGET_EFAULT;
12770                 }
12771                 ep.events = tswap32(target_ep->events);
12772                 /*
12773                  * The epoll_data_t union is just opaque data to the kernel,
12774                  * so we transfer all 64 bits across and need not worry what
12775                  * actual data type it is.
12776                  */
12777                 ep.data.u64 = tswap64(target_ep->data.u64);
12778                 unlock_user_struct(target_ep, arg4, 0);
12779             }
12780             /*
12781              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12782              * non-null pointer, even though this argument is ignored.
12783              *
12784              */
12785             epp = &ep;
12786         }
12787         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12788     }
12789 #endif
12790 
12791 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12792 #if defined(TARGET_NR_epoll_wait)
12793     case TARGET_NR_epoll_wait:
12794 #endif
12795 #if defined(TARGET_NR_epoll_pwait)
12796     case TARGET_NR_epoll_pwait:
12797 #endif
12798     {
12799         struct target_epoll_event *target_ep;
12800         struct epoll_event *ep;
12801         int epfd = arg1;
12802         int maxevents = arg3;
12803         int timeout = arg4;
12804 
12805         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12806             return -TARGET_EINVAL;
12807         }
12808 
12809         target_ep = lock_user(VERIFY_WRITE, arg2,
12810                               maxevents * sizeof(struct target_epoll_event), 1);
12811         if (!target_ep) {
12812             return -TARGET_EFAULT;
12813         }
12814 
12815         ep = g_try_new(struct epoll_event, maxevents);
12816         if (!ep) {
12817             unlock_user(target_ep, arg2, 0);
12818             return -TARGET_ENOMEM;
12819         }
12820 
12821         switch (num) {
12822 #if defined(TARGET_NR_epoll_pwait)
12823         case TARGET_NR_epoll_pwait:
12824         {
12825             sigset_t *set = NULL;
12826 
12827             if (arg5) {
12828                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12829                 if (ret != 0) {
12830                     break;
12831                 }
12832             }
12833 
12834             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12835                                              set, SIGSET_T_SIZE));
12836 
12837             if (set) {
12838                 finish_sigsuspend_mask(ret);
12839             }
12840             break;
12841         }
12842 #endif
12843 #if defined(TARGET_NR_epoll_wait)
12844         case TARGET_NR_epoll_wait:
12845             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12846                                              NULL, 0));
12847             break;
12848 #endif
12849         default:
12850             ret = -TARGET_ENOSYS;
12851         }
12852         if (!is_error(ret)) {
12853             int i;
12854             for (i = 0; i < ret; i++) {
12855                 target_ep[i].events = tswap32(ep[i].events);
12856                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12857             }
12858             unlock_user(target_ep, arg2,
12859                         ret * sizeof(struct target_epoll_event));
12860         } else {
12861             unlock_user(target_ep, arg2, 0);
12862         }
12863         g_free(ep);
12864         return ret;
12865     }
12866 #endif
12867 #endif
12868 #ifdef TARGET_NR_prlimit64
12869     case TARGET_NR_prlimit64:
12870     {
12871         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12872         struct target_rlimit64 *target_rnew, *target_rold;
12873         struct host_rlimit64 rnew, rold, *rnewp = 0;
12874         int resource = target_to_host_resource(arg2);
12875 
12876         if (arg3 && (resource != RLIMIT_AS &&
12877                      resource != RLIMIT_DATA &&
12878                      resource != RLIMIT_STACK)) {
12879             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12880                 return -TARGET_EFAULT;
12881             }
12882             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12883             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12884             unlock_user_struct(target_rnew, arg3, 0);
12885             rnewp = &rnew;
12886         }
12887 
12888         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12889         if (!is_error(ret) && arg4) {
12890             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12891                 return -TARGET_EFAULT;
12892             }
12893             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12894             target_rold->rlim_max = tswap64(rold.rlim_max);
12895             unlock_user_struct(target_rold, arg4, 1);
12896         }
12897         return ret;
12898     }
12899 #endif
12900 #ifdef TARGET_NR_gethostname
12901     case TARGET_NR_gethostname:
12902     {
12903         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12904         if (name) {
12905             ret = get_errno(gethostname(name, arg2));
12906             unlock_user(name, arg1, arg2);
12907         } else {
12908             ret = -TARGET_EFAULT;
12909         }
12910         return ret;
12911     }
12912 #endif
12913 #ifdef TARGET_NR_atomic_cmpxchg_32
12914     case TARGET_NR_atomic_cmpxchg_32:
12915     {
12916         /* should use start_exclusive from main.c */
12917         abi_ulong mem_value;
12918         if (get_user_u32(mem_value, arg6)) {
12919             target_siginfo_t info;
12920             info.si_signo = SIGSEGV;
12921             info.si_errno = 0;
12922             info.si_code = TARGET_SEGV_MAPERR;
12923             info._sifields._sigfault._addr = arg6;
12924             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12925             ret = 0xdeadbeef;
12926 
12927         }
12928         if (mem_value == arg2)
12929             put_user_u32(arg1, arg6);
12930         return mem_value;
12931     }
12932 #endif
12933 #ifdef TARGET_NR_atomic_barrier
12934     case TARGET_NR_atomic_barrier:
12935         /* Like the kernel implementation and the
12936            qemu arm barrier, no-op this? */
12937         return 0;
12938 #endif
12939 
12940 #ifdef TARGET_NR_timer_create
12941     case TARGET_NR_timer_create:
12942     {
12943         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12944 
12945         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12946 
12947         int clkid = arg1;
12948         int timer_index = next_free_host_timer();
12949 
12950         if (timer_index < 0) {
12951             ret = -TARGET_EAGAIN;
12952         } else {
12953             timer_t *phtimer = g_posix_timers  + timer_index;
12954 
12955             if (arg2) {
12956                 phost_sevp = &host_sevp;
12957                 ret = target_to_host_sigevent(phost_sevp, arg2);
12958                 if (ret != 0) {
12959                     free_host_timer_slot(timer_index);
12960                     return ret;
12961                 }
12962             }
12963 
12964             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12965             if (ret) {
12966                 free_host_timer_slot(timer_index);
12967             } else {
12968                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12969                     timer_delete(*phtimer);
12970                     free_host_timer_slot(timer_index);
12971                     return -TARGET_EFAULT;
12972                 }
12973             }
12974         }
12975         return ret;
12976     }
12977 #endif
12978 
12979 #ifdef TARGET_NR_timer_settime
12980     case TARGET_NR_timer_settime:
12981     {
12982         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12983          * struct itimerspec * old_value */
12984         target_timer_t timerid = get_timer_id(arg1);
12985 
12986         if (timerid < 0) {
12987             ret = timerid;
12988         } else if (arg3 == 0) {
12989             ret = -TARGET_EINVAL;
12990         } else {
12991             timer_t htimer = g_posix_timers[timerid];
12992             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12993 
12994             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12995                 return -TARGET_EFAULT;
12996             }
12997             ret = get_errno(
12998                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12999             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13000                 return -TARGET_EFAULT;
13001             }
13002         }
13003         return ret;
13004     }
13005 #endif
13006 
13007 #ifdef TARGET_NR_timer_settime64
13008     case TARGET_NR_timer_settime64:
13009     {
13010         target_timer_t timerid = get_timer_id(arg1);
13011 
13012         if (timerid < 0) {
13013             ret = timerid;
13014         } else if (arg3 == 0) {
13015             ret = -TARGET_EINVAL;
13016         } else {
13017             timer_t htimer = g_posix_timers[timerid];
13018             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13019 
13020             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13021                 return -TARGET_EFAULT;
13022             }
13023             ret = get_errno(
13024                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13025             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13026                 return -TARGET_EFAULT;
13027             }
13028         }
13029         return ret;
13030     }
13031 #endif
13032 
13033 #ifdef TARGET_NR_timer_gettime
13034     case TARGET_NR_timer_gettime:
13035     {
13036         /* args: timer_t timerid, struct itimerspec *curr_value */
13037         target_timer_t timerid = get_timer_id(arg1);
13038 
13039         if (timerid < 0) {
13040             ret = timerid;
13041         } else if (!arg2) {
13042             ret = -TARGET_EFAULT;
13043         } else {
13044             timer_t htimer = g_posix_timers[timerid];
13045             struct itimerspec hspec;
13046             ret = get_errno(timer_gettime(htimer, &hspec));
13047 
13048             if (host_to_target_itimerspec(arg2, &hspec)) {
13049                 ret = -TARGET_EFAULT;
13050             }
13051         }
13052         return ret;
13053     }
13054 #endif
13055 
13056 #ifdef TARGET_NR_timer_gettime64
13057     case TARGET_NR_timer_gettime64:
13058     {
13059         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13060         target_timer_t timerid = get_timer_id(arg1);
13061 
13062         if (timerid < 0) {
13063             ret = timerid;
13064         } else if (!arg2) {
13065             ret = -TARGET_EFAULT;
13066         } else {
13067             timer_t htimer = g_posix_timers[timerid];
13068             struct itimerspec hspec;
13069             ret = get_errno(timer_gettime(htimer, &hspec));
13070 
13071             if (host_to_target_itimerspec64(arg2, &hspec)) {
13072                 ret = -TARGET_EFAULT;
13073             }
13074         }
13075         return ret;
13076     }
13077 #endif
13078 
13079 #ifdef TARGET_NR_timer_getoverrun
13080     case TARGET_NR_timer_getoverrun:
13081     {
13082         /* args: timer_t timerid */
13083         target_timer_t timerid = get_timer_id(arg1);
13084 
13085         if (timerid < 0) {
13086             ret = timerid;
13087         } else {
13088             timer_t htimer = g_posix_timers[timerid];
13089             ret = get_errno(timer_getoverrun(htimer));
13090         }
13091         return ret;
13092     }
13093 #endif
13094 
13095 #ifdef TARGET_NR_timer_delete
13096     case TARGET_NR_timer_delete:
13097     {
13098         /* args: timer_t timerid */
13099         target_timer_t timerid = get_timer_id(arg1);
13100 
13101         if (timerid < 0) {
13102             ret = timerid;
13103         } else {
13104             timer_t htimer = g_posix_timers[timerid];
13105             ret = get_errno(timer_delete(htimer));
13106             free_host_timer_slot(timerid);
13107         }
13108         return ret;
13109     }
13110 #endif
13111 
13112 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13113     case TARGET_NR_timerfd_create:
13114         return get_errno(timerfd_create(arg1,
13115                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13116 #endif
13117 
13118 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13119     case TARGET_NR_timerfd_gettime:
13120         {
13121             struct itimerspec its_curr;
13122 
13123             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13124 
13125             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13126                 return -TARGET_EFAULT;
13127             }
13128         }
13129         return ret;
13130 #endif
13131 
13132 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13133     case TARGET_NR_timerfd_gettime64:
13134         {
13135             struct itimerspec its_curr;
13136 
13137             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13138 
13139             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13140                 return -TARGET_EFAULT;
13141             }
13142         }
13143         return ret;
13144 #endif
13145 
13146 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13147     case TARGET_NR_timerfd_settime:
13148         {
13149             struct itimerspec its_new, its_old, *p_new;
13150 
13151             if (arg3) {
13152                 if (target_to_host_itimerspec(&its_new, arg3)) {
13153                     return -TARGET_EFAULT;
13154                 }
13155                 p_new = &its_new;
13156             } else {
13157                 p_new = NULL;
13158             }
13159 
13160             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13161 
13162             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13163                 return -TARGET_EFAULT;
13164             }
13165         }
13166         return ret;
13167 #endif
13168 
13169 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13170     case TARGET_NR_timerfd_settime64:
13171         {
13172             struct itimerspec its_new, its_old, *p_new;
13173 
13174             if (arg3) {
13175                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13176                     return -TARGET_EFAULT;
13177                 }
13178                 p_new = &its_new;
13179             } else {
13180                 p_new = NULL;
13181             }
13182 
13183             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13184 
13185             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13186                 return -TARGET_EFAULT;
13187             }
13188         }
13189         return ret;
13190 #endif
13191 
13192 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13193     case TARGET_NR_ioprio_get:
13194         return get_errno(ioprio_get(arg1, arg2));
13195 #endif
13196 
13197 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13198     case TARGET_NR_ioprio_set:
13199         return get_errno(ioprio_set(arg1, arg2, arg3));
13200 #endif
13201 
13202 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13203     case TARGET_NR_setns:
13204         return get_errno(setns(arg1, arg2));
13205 #endif
13206 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13207     case TARGET_NR_unshare:
13208         return get_errno(unshare(arg1));
13209 #endif
13210 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13211     case TARGET_NR_kcmp:
13212         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13213 #endif
13214 #ifdef TARGET_NR_swapcontext
13215     case TARGET_NR_swapcontext:
13216         /* PowerPC specific.  */
13217         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13218 #endif
13219 #ifdef TARGET_NR_memfd_create
13220     case TARGET_NR_memfd_create:
13221         p = lock_user_string(arg1);
13222         if (!p) {
13223             return -TARGET_EFAULT;
13224         }
13225         ret = get_errno(memfd_create(p, arg2));
13226         fd_trans_unregister(ret);
13227         unlock_user(p, arg1, 0);
13228         return ret;
13229 #endif
13230 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13231     case TARGET_NR_membarrier:
13232         return get_errno(membarrier(arg1, arg2));
13233 #endif
13234 
13235 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13236     case TARGET_NR_copy_file_range:
13237         {
13238             loff_t inoff, outoff;
13239             loff_t *pinoff = NULL, *poutoff = NULL;
13240 
13241             if (arg2) {
13242                 if (get_user_u64(inoff, arg2)) {
13243                     return -TARGET_EFAULT;
13244                 }
13245                 pinoff = &inoff;
13246             }
13247             if (arg4) {
13248                 if (get_user_u64(outoff, arg4)) {
13249                     return -TARGET_EFAULT;
13250                 }
13251                 poutoff = &outoff;
13252             }
13253             /* Do not sign-extend the count parameter. */
13254             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13255                                                  (abi_ulong)arg5, arg6));
13256             if (!is_error(ret) && ret > 0) {
13257                 if (arg2) {
13258                     if (put_user_u64(inoff, arg2)) {
13259                         return -TARGET_EFAULT;
13260                     }
13261                 }
13262                 if (arg4) {
13263                     if (put_user_u64(outoff, arg4)) {
13264                         return -TARGET_EFAULT;
13265                     }
13266                 }
13267             }
13268         }
13269         return ret;
13270 #endif
13271 
13272 #if defined(TARGET_NR_pivot_root)
13273     case TARGET_NR_pivot_root:
13274         {
13275             void *p2;
13276             p = lock_user_string(arg1); /* new_root */
13277             p2 = lock_user_string(arg2); /* put_old */
13278             if (!p || !p2) {
13279                 ret = -TARGET_EFAULT;
13280             } else {
13281                 ret = get_errno(pivot_root(p, p2));
13282             }
13283             unlock_user(p2, arg2, 0);
13284             unlock_user(p, arg1, 0);
13285         }
13286         return ret;
13287 #endif
13288 
13289     default:
13290         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13291         return -TARGET_ENOSYS;
13292     }
13293     return ret;
13294 }
13295 
13296 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13297                     abi_long arg2, abi_long arg3, abi_long arg4,
13298                     abi_long arg5, abi_long arg6, abi_long arg7,
13299                     abi_long arg8)
13300 {
13301     CPUState *cpu = env_cpu(cpu_env);
13302     abi_long ret;
13303 
13304 #ifdef DEBUG_ERESTARTSYS
13305     /* Debug-only code for exercising the syscall-restart code paths
13306      * in the per-architecture cpu main loops: restart every syscall
13307      * the guest makes once before letting it through.
13308      */
13309     {
13310         static bool flag;
13311         flag = !flag;
13312         if (flag) {
13313             return -QEMU_ERESTARTSYS;
13314         }
13315     }
13316 #endif
13317 
13318     record_syscall_start(cpu, num, arg1,
13319                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13320 
13321     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13322         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13323     }
13324 
13325     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13326                       arg5, arg6, arg7, arg8);
13327 
13328     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13329         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13330                           arg3, arg4, arg5, arg6);
13331     }
13332 
13333     record_syscall_return(cpu, num, ret);
13334     return ret;
13335 }
13336