xref: /qemu/linux-user/syscall.c (revision 99174ce3)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include <elf.h>
30 #include <endian.h>
31 #include <grp.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/mount.h>
36 #include <sys/file.h>
37 #include <sys/fsuid.h>
38 #include <sys/personality.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
41 #include <sys/swap.h>
42 #include <linux/capability.h>
43 #include <sched.h>
44 #include <sys/timex.h>
45 #include <sys/socket.h>
46 #include <linux/sockios.h>
47 #include <sys/un.h>
48 #include <sys/uio.h>
49 #include <poll.h>
50 #include <sys/times.h>
51 #include <sys/shm.h>
52 #include <sys/sem.h>
53 #include <sys/statfs.h>
54 #include <utime.h>
55 #include <sys/sysinfo.h>
56 #include <sys/signalfd.h>
57 //#include <sys/user.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
61 #include <netinet/udp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include <linux/icmpv6.h>
65 #include <linux/if_tun.h>
66 #include <linux/in6.h>
67 #include <linux/errqueue.h>
68 #include <linux/random.h>
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84 #ifdef HAVE_SYS_KCOV_H
85 #include <sys/kcov.h>
86 #endif
87 
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
94 
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #include <linux/fd.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #if defined(CONFIG_USBFS)
109 #include <linux/usbdevice_fs.h>
110 #include <linux/usb/ch9.h>
111 #endif
112 #include <linux/vt.h>
113 #include <linux/dm-ioctl.h>
114 #include <linux/reboot.h>
115 #include <linux/route.h>
116 #include <linux/filter.h>
117 #include <linux/blkpg.h>
118 #include <netpacket/packet.h>
119 #include <linux/netlink.h>
120 #include <linux/if_alg.h>
121 #include <linux/rtc.h>
122 #include <sound/asound.h>
123 #ifdef HAVE_BTRFS_H
124 #include <linux/btrfs.h>
125 #endif
126 #ifdef HAVE_DRM_H
127 #include <libdrm/drm.h>
128 #include <libdrm/i915_drm.h>
129 #endif
130 #include "linux_loop.h"
131 #include "uname.h"
132 
133 #include "qemu.h"
134 #include "user-internals.h"
135 #include "strace.h"
136 #include "signal-common.h"
137 #include "loader.h"
138 #include "user-mmap.h"
139 #include "user/safe-syscall.h"
140 #include "qemu/guest-random.h"
141 #include "qemu/selfmap.h"
142 #include "user/syscall-trace.h"
143 #include "special-errno.h"
144 #include "qapi/error.h"
145 #include "fd-trans.h"
146 #include "cpu_loop-common.h"
147 
148 #ifndef CLONE_IO
149 #define CLONE_IO                0x80000000      /* Clone io context */
150 #endif
151 
152 /* We can't directly call the host clone syscall, because this will
153  * badly confuse libc (breaking mutexes, for example). So we must
154  * divide clone flags into:
155  *  * flag combinations that look like pthread_create()
156  *  * flag combinations that look like fork()
157  *  * flags we can implement within QEMU itself
158  *  * flags we can't support and will return an error for
159  */
160 /* For thread creation, all these flags must be present; for
161  * fork, none must be present.
162  */
163 #define CLONE_THREAD_FLAGS                              \
164     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
165      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
166 
167 /* These flags are ignored:
168  * CLONE_DETACHED is now ignored by the kernel;
169  * CLONE_IO is just an optimisation hint to the I/O scheduler
170  */
171 #define CLONE_IGNORED_FLAGS                     \
172     (CLONE_DETACHED | CLONE_IO)
173 
174 #ifndef CLONE_PIDFD
175 # define CLONE_PIDFD 0x00001000
176 #endif
177 
178 /* Flags for fork which we can implement within QEMU itself */
179 #define CLONE_OPTIONAL_FORK_FLAGS               \
180     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
181      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
182 
183 /* Flags for thread creation which we can implement within QEMU itself */
184 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
185     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
186      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
187 
188 #define CLONE_INVALID_FORK_FLAGS                                        \
189     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
190 
191 #define CLONE_INVALID_THREAD_FLAGS                                      \
192     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
193        CLONE_IGNORED_FLAGS))
194 
195 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
196  * have almost all been allocated. We cannot support any of
197  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
198  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
199  * The checks against the invalid thread masks above will catch these.
200  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
201  */
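/*
 * Editor's sketch (not in the original source): with the masks above, the
 * classification that do_fork() performs can be pictured roughly as the
 * hypothetical helper below. glibc's pthread_create() passes all of the
 * CLONE_THREAD_FLAGS bits (plus optional TLS/TID flags), while a plain
 * fork() passes none of them.
 *
 *   static bool looks_like_pthread_create(unsigned int flags)
 *   {
 *       return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
 *              !(flags & CLONE_INVALID_THREAD_FLAGS);
 *   }
 */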
202 
203 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
204  * once. This exercises the codepaths for restart.
205  */
206 //#define DEBUG_ERESTARTSYS
207 
208 //#include <linux/msdos_fs.h>
209 #define VFAT_IOCTL_READDIR_BOTH \
210     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
211 #define VFAT_IOCTL_READDIR_SHORT \
212     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
213 
214 #undef _syscall0
215 #undef _syscall1
216 #undef _syscall2
217 #undef _syscall3
218 #undef _syscall4
219 #undef _syscall5
220 #undef _syscall6
221 
222 #define _syscall0(type,name)		\
223 static type name (void)			\
224 {					\
225 	return syscall(__NR_##name);	\
226 }
227 
228 #define _syscall1(type,name,type1,arg1)		\
229 static type name (type1 arg1)			\
230 {						\
231 	return syscall(__NR_##name, arg1);	\
232 }
233 
234 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
235 static type name (type1 arg1,type2 arg2)		\
236 {							\
237 	return syscall(__NR_##name, arg1, arg2);	\
238 }
239 
240 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
241 static type name (type1 arg1,type2 arg2,type3 arg3)		\
242 {								\
243 	return syscall(__NR_##name, arg1, arg2, arg3);		\
244 }
245 
246 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
250 }
251 
252 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5)							\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
255 {										\
256 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
257 }
258 
259 
260 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
261 		  type5,arg5,type6,arg6)					\
262 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
263                   type6 arg6)							\
264 {										\
265 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
266 }
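/*
 * Editor's note: as an example of what these macros generate,
 * "_syscall0(int, sys_gettid)" further down expands to
 *
 *   static int sys_gettid(void)
 *   {
 *       return syscall(__NR_sys_gettid);
 *   }
 *
 * which is why each wrapper is preceded by a "#define __NR_sys_xxx
 * __NR_xxx" mapping onto the host syscall number.
 */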
267 
268 
269 #define __NR_sys_uname __NR_uname
270 #define __NR_sys_getcwd1 __NR_getcwd
271 #define __NR_sys_getdents __NR_getdents
272 #define __NR_sys_getdents64 __NR_getdents64
273 #define __NR_sys_getpriority __NR_getpriority
274 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
275 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
276 #define __NR_sys_syslog __NR_syslog
277 #if defined(__NR_futex)
278 # define __NR_sys_futex __NR_futex
279 #endif
280 #if defined(__NR_futex_time64)
281 # define __NR_sys_futex_time64 __NR_futex_time64
282 #endif
283 #define __NR_sys_statx __NR_statx
284 
285 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
286 #define __NR__llseek __NR_lseek
287 #endif
288 
289 /* Newer kernel ports have llseek() instead of _llseek() */
290 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
291 #define TARGET_NR__llseek TARGET_NR_llseek
292 #endif
293 
294 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
295 #ifndef TARGET_O_NONBLOCK_MASK
296 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
297 #endif
298 
299 #define __NR_sys_gettid __NR_gettid
300 _syscall0(int, sys_gettid)
301 
302 /* For the 64-bit guest on 32-bit host case we must emulate
303  * getdents using getdents64, because otherwise the host
304  * might hand us back more dirent records than we can fit
305  * into the guest buffer after structure format conversion.
306  * Otherwise we emulate getdents with getdents if the host has it.
307  */
308 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
309 #define EMULATE_GETDENTS_WITH_GETDENTS
310 #endif
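/*
 * Editor's note: for example, a 64-bit guest on a 32-bit host has
 * HOST_LONG_BITS (32) < TARGET_ABI_BITS (64), so the macro above stays
 * undefined and TARGET_NR_getdents is emulated via sys_getdents64(),
 * exactly as the comment above describes.
 */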
311 
312 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
313 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
314 #endif
315 #if (defined(TARGET_NR_getdents) && \
316       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
317     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
318 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
319 #endif
320 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
321 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
322           loff_t *, res, unsigned int, wh);
323 #endif
324 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
325 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
326           siginfo_t *, uinfo)
327 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
328 #ifdef __NR_exit_group
329 _syscall1(int,exit_group,int,error_code)
330 #endif
331 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
332 #define __NR_sys_close_range __NR_close_range
333 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
334 #ifndef CLOSE_RANGE_CLOEXEC
335 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
336 #endif
337 #endif
338 #if defined(__NR_futex)
339 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
340           const struct timespec *,timeout,int *,uaddr2,int,val3)
341 #endif
342 #if defined(__NR_futex_time64)
343 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
347 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
348 #endif
349 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
350 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
351                              unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
354 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
355 #endif
356 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
357 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
358           unsigned long *, user_mask_ptr);
359 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
360 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
361           unsigned long *, user_mask_ptr);
362 /* sched_attr is not defined in glibc */
363 struct sched_attr {
364     uint32_t size;
365     uint32_t sched_policy;
366     uint64_t sched_flags;
367     int32_t sched_nice;
368     uint32_t sched_priority;
369     uint64_t sched_runtime;
370     uint64_t sched_deadline;
371     uint64_t sched_period;
372     uint32_t sched_util_min;
373     uint32_t sched_util_max;
374 };
375 #define __NR_sys_sched_getattr __NR_sched_getattr
376 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
377           unsigned int, size, unsigned int, flags);
378 #define __NR_sys_sched_setattr __NR_sched_setattr
379 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
380           unsigned int, flags);
381 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
382 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
383 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
384 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
385           const struct sched_param *, param);
386 #define __NR_sys_sched_getparam __NR_sched_getparam
387 _syscall2(int, sys_sched_getparam, pid_t, pid,
388           struct sched_param *, param);
389 #define __NR_sys_sched_setparam __NR_sched_setparam
390 _syscall2(int, sys_sched_setparam, pid_t, pid,
391           const struct sched_param *, param);
392 #define __NR_sys_getcpu __NR_getcpu
393 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
394 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
395           void *, arg);
396 _syscall2(int, capget, struct __user_cap_header_struct *, header,
397           struct __user_cap_data_struct *, data);
398 _syscall2(int, capset, struct __user_cap_header_struct *, header,
399           struct __user_cap_data_struct *, data);
400 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
401 _syscall2(int, ioprio_get, int, which, int, who)
402 #endif
403 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
404 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
405 #endif
406 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
407 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
408 #endif
409 
410 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
411 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
412           unsigned long, idx1, unsigned long, idx2)
413 #endif
414 
415 /*
416  * It is assumed that struct statx is architecture independent.
417  */
418 #if defined(TARGET_NR_statx) && defined(__NR_statx)
419 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
420           unsigned int, mask, struct target_statx *, statxbuf)
421 #endif
422 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
423 _syscall2(int, membarrier, int, cmd, int, flags)
424 #endif
425 
426 static const bitmask_transtbl fcntl_flags_tbl[] = {
427   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
428   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
429   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
430   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
431   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
432   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
433   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
434   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
435   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
436   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
437   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
438   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
439   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
440 #if defined(O_DIRECT)
441   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
442 #endif
443 #if defined(O_NOATIME)
444   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
445 #endif
446 #if defined(O_CLOEXEC)
447   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
448 #endif
449 #if defined(O_PATH)
450   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
451 #endif
452 #if defined(O_TMPFILE)
453   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
454 #endif
455   /* Don't terminate the list prematurely on 64-bit host+guest.  */
456 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
457   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
458 #endif
459 };
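/*
 * Editor's note: each row of this table pairs a (mask, bits) test on the
 * target flag word with the corresponding host values. It is consumed by
 * the bitmask translation helpers used elsewhere for open()/fcntl() flag
 * conversion, roughly:
 *
 *   int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *   int tgt_flags  = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 */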
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485                          int newfd, const char *new, int flags)
486 {
487     if (flags == 0) {
488         return renameat(oldfd, old, newfd, new);
489     }
490     errno = ENOSYS;
491     return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
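/*
 * Editor's sketch (illustration only, not the original handler) of the
 * claim/release pattern these helpers are meant for; clkid, sev and the
 * EAGAIN choice are assumptions for the example:
 *
 *   int timerid = next_free_host_timer();
 *   if (timerid < 0) {
 *       return -TARGET_EAGAIN;                      // no free guest slot
 *   }
 *   ret = get_errno(timer_create(clkid, &sev, &g_posix_timers[timerid]));
 *   if (is_error(ret)) {
 *       free_host_timer_slot(timerid);              // give the slot back
 *   }
 */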
544 
545 static inline int host_to_target_errno(int host_errno)
546 {
547     switch (host_errno) {
548 #define E(X)  case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551     default:
552         return host_errno;
553     }
554 }
555 
556 static inline int target_to_host_errno(int target_errno)
557 {
558     switch (target_errno) {
559 #define E(X)  case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562     default:
563         return target_errno;
564     }
565 }
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
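/*
 * Editor's note: check_zeroed_user() follows the kernel's
 * check_zeroed_user()/copy_struct_from_user() convention for extensible
 * structs: it returns 1 if every guest byte past the known size ksize is
 * zero (or usize <= ksize), 0 if some unknown trailing byte is set, and
 * -TARGET_EFAULT if the guest memory cannot be read. A typical use,
 * sketched for an extensible struct such as sched_attr:
 *
 *   ret = check_zeroed_user(addr, sizeof(struct sched_attr), usize);
 *   if (ret < 0) {
 *       return ret;               // -TARGET_EFAULT
 *   }
 *   if (ret == 0) {
 *       return -TARGET_E2BIG;     // unknown trailing fields were non-zero
 *   }
 */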
604 
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
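/*
 * Editor's note: these safe_syscall*() macros mirror the _syscall*()
 * macros earlier in this file, but route through safe_syscall() from
 * "user/safe-syscall.h". That wrapper is designed so that a guest signal
 * arriving just before or while the host syscall blocks makes the call
 * fail with errno set to QEMU_ERESTARTSYS rather than being lost, e.g.:
 *
 *   ret = get_errno(safe_read(fd, host_buf, count));
 *   // ret may now be -QEMU_ERESTARTSYS; the syscall dispatch loop
 *   // restarts the guest syscall after delivering the signal.
 */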
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664               char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672               struct timespec *, tsp, const sigset_t *, sigmask,
673               size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676               int, maxevents, int, timeout, const sigset_t *, sigmask,
677               size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680               const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684               const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697               socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707               const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710               int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713               struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716     defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718               const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723               void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726               void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731               int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735               long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739               unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742     defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744               size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747     defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749               size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753               int, outfd, loff_t *, poutoff, size_t, length,
754               unsigned int, flags)
755 #endif
756 
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758  * "third argument might be integer or pointer or not present" behaviour of
759  * the libc function.
760  */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Since we always build with LFS enabled,
763  * we should be using the 64-bit structures automatically.
764  */
765 #ifdef __NR_fcntl64
766 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
767 #else
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
769 #endif
770 
771 static inline int host_to_target_sock_type(int host_type)
772 {
773     int target_type;
774 
775     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
776     case SOCK_DGRAM:
777         target_type = TARGET_SOCK_DGRAM;
778         break;
779     case SOCK_STREAM:
780         target_type = TARGET_SOCK_STREAM;
781         break;
782     default:
783         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
784         break;
785     }
786 
787 #if defined(SOCK_CLOEXEC)
788     if (host_type & SOCK_CLOEXEC) {
789         target_type |= TARGET_SOCK_CLOEXEC;
790     }
791 #endif
792 
793 #if defined(SOCK_NONBLOCK)
794     if (host_type & SOCK_NONBLOCK) {
795         target_type |= TARGET_SOCK_NONBLOCK;
796     }
797 #endif
798 
799     return target_type;
800 }
801 
802 static abi_ulong target_brk, initial_target_brk;
803 
804 void target_set_brk(abi_ulong new_brk)
805 {
806     target_brk = TARGET_PAGE_ALIGN(new_brk);
807     initial_target_brk = target_brk;
808 }
809 
810 /* do_brk() must return target values and target errnos. */
811 abi_long do_brk(abi_ulong brk_val)
812 {
813     abi_long mapped_addr;
814     abi_ulong new_brk;
815     abi_ulong old_brk;
816 
817     /* brk pointers are always untagged */
818 
819     /* do not allow to shrink below initial brk value */
820     if (brk_val < initial_target_brk) {
821         return target_brk;
822     }
823 
824     new_brk = TARGET_PAGE_ALIGN(brk_val);
825     old_brk = TARGET_PAGE_ALIGN(target_brk);
826 
827     /* new and old target_brk might be on the same page */
828     if (new_brk == old_brk) {
829         target_brk = brk_val;
830         return target_brk;
831     }
832 
833     /* Release heap if necessary */
834     if (new_brk < old_brk) {
835         target_munmap(new_brk, old_brk - new_brk);
836 
837         target_brk = brk_val;
838         return target_brk;
839     }
840 
841     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
842                               PROT_READ | PROT_WRITE,
843                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
844                               -1, 0);
845 
846     if (mapped_addr == old_brk) {
847         target_brk = brk_val;
848         return target_brk;
849     }
850 
851 #if defined(TARGET_ALPHA)
852     /* We (partially) emulate OSF/1 on Alpha, which requires we
853        return a proper errno, not an unchanged brk value.  */
854     return -TARGET_ENOMEM;
855 #endif
856     /* For everything else, return the previous break. */
857     return target_brk;
858 }
859 
860 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
861     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
862 static inline abi_long copy_from_user_fdset(fd_set *fds,
863                                             abi_ulong target_fds_addr,
864                                             int n)
865 {
866     int i, nw, j, k;
867     abi_ulong b, *target_fds;
868 
869     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
870     if (!(target_fds = lock_user(VERIFY_READ,
871                                  target_fds_addr,
872                                  sizeof(abi_ulong) * nw,
873                                  1)))
874         return -TARGET_EFAULT;
875 
876     FD_ZERO(fds);
877     k = 0;
878     for (i = 0; i < nw; i++) {
879         /* grab the abi_ulong */
880         __get_user(b, &target_fds[i]);
881         for (j = 0; j < TARGET_ABI_BITS; j++) {
882             /* check the bit inside the abi_ulong */
883             if ((b >> j) & 1)
884                 FD_SET(k, fds);
885             k++;
886         }
887     }
888 
889     unlock_user(target_fds, target_fds_addr, 0);
890 
891     return 0;
892 }
893 
894 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
895                                                  abi_ulong target_fds_addr,
896                                                  int n)
897 {
898     if (target_fds_addr) {
899         if (copy_from_user_fdset(fds, target_fds_addr, n))
900             return -TARGET_EFAULT;
901         *fds_ptr = fds;
902     } else {
903         *fds_ptr = NULL;
904     }
905     return 0;
906 }
907 
908 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
909                                           const fd_set *fds,
910                                           int n)
911 {
912     int i, nw, j, k;
913     abi_long v;
914     abi_ulong *target_fds;
915 
916     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
917     if (!(target_fds = lock_user(VERIFY_WRITE,
918                                  target_fds_addr,
919                                  sizeof(abi_ulong) * nw,
920                                  0)))
921         return -TARGET_EFAULT;
922 
923     k = 0;
924     for (i = 0; i < nw; i++) {
925         v = 0;
926         for (j = 0; j < TARGET_ABI_BITS; j++) {
927             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
928             k++;
929         }
930         __put_user(v, &target_fds[i]);
931     }
932 
933     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
934 
935     return 0;
936 }
937 #endif
938 
939 #if defined(__alpha__)
940 #define HOST_HZ 1024
941 #else
942 #define HOST_HZ 100
943 #endif
944 
945 static inline abi_long host_to_target_clock_t(long ticks)
946 {
947 #if HOST_HZ == TARGET_HZ
948     return ticks;
949 #else
950     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
951 #endif
952 }
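/*
 * Editor's note: the scaling above is plain proportional arithmetic,
 * e.g. on an Alpha host (HOST_HZ 1024) reporting to a 100 Hz target,
 * 2048 host ticks (2 seconds) become 2048 * 100 / 1024 = 200 target ticks.
 */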
953 
954 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
955                                              const struct rusage *rusage)
956 {
957     struct target_rusage *target_rusage;
958 
959     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
960         return -TARGET_EFAULT;
961     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
962     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
963     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
964     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
965     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
966     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
967     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
968     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
969     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
970     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
971     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
972     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
973     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
974     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
975     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
976     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
977     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
978     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
979     unlock_user_struct(target_rusage, target_addr, 1);
980 
981     return 0;
982 }
983 
984 #ifdef TARGET_NR_setrlimit
985 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
986 {
987     abi_ulong target_rlim_swap;
988     rlim_t result;
989 
990     target_rlim_swap = tswapal(target_rlim);
991     if (target_rlim_swap == TARGET_RLIM_INFINITY)
992         return RLIM_INFINITY;
993 
994     result = target_rlim_swap;
995     if (target_rlim_swap != (rlim_t)result)
996         return RLIM_INFINITY;
997 
998     return result;
999 }
1000 #endif
1001 
1002 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1003 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1004 {
1005     abi_ulong target_rlim_swap;
1006     abi_ulong result;
1007 
1008     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1009         target_rlim_swap = TARGET_RLIM_INFINITY;
1010     else
1011         target_rlim_swap = rlim;
1012     result = tswapal(target_rlim_swap);
1013 
1014     return result;
1015 }
1016 #endif
1017 
1018 static inline int target_to_host_resource(int code)
1019 {
1020     switch (code) {
1021     case TARGET_RLIMIT_AS:
1022         return RLIMIT_AS;
1023     case TARGET_RLIMIT_CORE:
1024         return RLIMIT_CORE;
1025     case TARGET_RLIMIT_CPU:
1026         return RLIMIT_CPU;
1027     case TARGET_RLIMIT_DATA:
1028         return RLIMIT_DATA;
1029     case TARGET_RLIMIT_FSIZE:
1030         return RLIMIT_FSIZE;
1031     case TARGET_RLIMIT_LOCKS:
1032         return RLIMIT_LOCKS;
1033     case TARGET_RLIMIT_MEMLOCK:
1034         return RLIMIT_MEMLOCK;
1035     case TARGET_RLIMIT_MSGQUEUE:
1036         return RLIMIT_MSGQUEUE;
1037     case TARGET_RLIMIT_NICE:
1038         return RLIMIT_NICE;
1039     case TARGET_RLIMIT_NOFILE:
1040         return RLIMIT_NOFILE;
1041     case TARGET_RLIMIT_NPROC:
1042         return RLIMIT_NPROC;
1043     case TARGET_RLIMIT_RSS:
1044         return RLIMIT_RSS;
1045     case TARGET_RLIMIT_RTPRIO:
1046         return RLIMIT_RTPRIO;
1047 #ifdef RLIMIT_RTTIME
1048     case TARGET_RLIMIT_RTTIME:
1049         return RLIMIT_RTTIME;
1050 #endif
1051     case TARGET_RLIMIT_SIGPENDING:
1052         return RLIMIT_SIGPENDING;
1053     case TARGET_RLIMIT_STACK:
1054         return RLIMIT_STACK;
1055     default:
1056         return code;
1057     }
1058 }
1059 
1060 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1061                                               abi_ulong target_tv_addr)
1062 {
1063     struct target_timeval *target_tv;
1064 
1065     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1066         return -TARGET_EFAULT;
1067     }
1068 
1069     __get_user(tv->tv_sec, &target_tv->tv_sec);
1070     __get_user(tv->tv_usec, &target_tv->tv_usec);
1071 
1072     unlock_user_struct(target_tv, target_tv_addr, 0);
1073 
1074     return 0;
1075 }
1076 
1077 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1078                                             const struct timeval *tv)
1079 {
1080     struct target_timeval *target_tv;
1081 
1082     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1083         return -TARGET_EFAULT;
1084     }
1085 
1086     __put_user(tv->tv_sec, &target_tv->tv_sec);
1087     __put_user(tv->tv_usec, &target_tv->tv_usec);
1088 
1089     unlock_user_struct(target_tv, target_tv_addr, 1);
1090 
1091     return 0;
1092 }
1093 
1094 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1095 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1096                                                 abi_ulong target_tv_addr)
1097 {
1098     struct target__kernel_sock_timeval *target_tv;
1099 
1100     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1101         return -TARGET_EFAULT;
1102     }
1103 
1104     __get_user(tv->tv_sec, &target_tv->tv_sec);
1105     __get_user(tv->tv_usec, &target_tv->tv_usec);
1106 
1107     unlock_user_struct(target_tv, target_tv_addr, 0);
1108 
1109     return 0;
1110 }
1111 #endif
1112 
1113 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1114                                               const struct timeval *tv)
1115 {
1116     struct target__kernel_sock_timeval *target_tv;
1117 
1118     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1119         return -TARGET_EFAULT;
1120     }
1121 
1122     __put_user(tv->tv_sec, &target_tv->tv_sec);
1123     __put_user(tv->tv_usec, &target_tv->tv_usec);
1124 
1125     unlock_user_struct(target_tv, target_tv_addr, 1);
1126 
1127     return 0;
1128 }
1129 
1130 #if defined(TARGET_NR_futex) || \
1131     defined(TARGET_NR_rt_sigtimedwait) || \
1132     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1133     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1134     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1135     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1136     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1137     defined(TARGET_NR_timer_settime) || \
1138     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1139 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1140                                                abi_ulong target_addr)
1141 {
1142     struct target_timespec *target_ts;
1143 
1144     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1145         return -TARGET_EFAULT;
1146     }
1147     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1148     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1149     unlock_user_struct(target_ts, target_addr, 0);
1150     return 0;
1151 }
1152 #endif
1153 
1154 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1155     defined(TARGET_NR_timer_settime64) || \
1156     defined(TARGET_NR_mq_timedsend_time64) || \
1157     defined(TARGET_NR_mq_timedreceive_time64) || \
1158     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1159     defined(TARGET_NR_clock_nanosleep_time64) || \
1160     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1161     defined(TARGET_NR_utimensat) || \
1162     defined(TARGET_NR_utimensat_time64) || \
1163     defined(TARGET_NR_semtimedop_time64) || \
1164     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1165 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1166                                                  abi_ulong target_addr)
1167 {
1168     struct target__kernel_timespec *target_ts;
1169 
1170     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1171         return -TARGET_EFAULT;
1172     }
1173     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1174     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1175     /* in 32bit mode, this drops the padding */
1176     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1177     unlock_user_struct(target_ts, target_addr, 0);
1178     return 0;
1179 }
1180 #endif
1181 
1182 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1183                                                struct timespec *host_ts)
1184 {
1185     struct target_timespec *target_ts;
1186 
1187     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1188         return -TARGET_EFAULT;
1189     }
1190     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1191     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1192     unlock_user_struct(target_ts, target_addr, 1);
1193     return 0;
1194 }
1195 
1196 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1197                                                  struct timespec *host_ts)
1198 {
1199     struct target__kernel_timespec *target_ts;
1200 
1201     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1202         return -TARGET_EFAULT;
1203     }
1204     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1205     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1206     unlock_user_struct(target_ts, target_addr, 1);
1207     return 0;
1208 }
1209 
1210 #if defined(TARGET_NR_gettimeofday)
1211 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1212                                              struct timezone *tz)
1213 {
1214     struct target_timezone *target_tz;
1215 
1216     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1217         return -TARGET_EFAULT;
1218     }
1219 
1220     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1221     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1222 
1223     unlock_user_struct(target_tz, target_tz_addr, 1);
1224 
1225     return 0;
1226 }
1227 #endif
1228 
1229 #if defined(TARGET_NR_settimeofday)
1230 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1231                                                abi_ulong target_tz_addr)
1232 {
1233     struct target_timezone *target_tz;
1234 
1235     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1236         return -TARGET_EFAULT;
1237     }
1238 
1239     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1240     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1241 
1242     unlock_user_struct(target_tz, target_tz_addr, 0);
1243 
1244     return 0;
1245 }
1246 #endif
1247 
1248 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1249 #include <mqueue.h>
1250 
1251 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1252                                               abi_ulong target_mq_attr_addr)
1253 {
1254     struct target_mq_attr *target_mq_attr;
1255 
1256     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1257                           target_mq_attr_addr, 1))
1258         return -TARGET_EFAULT;
1259 
1260     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1261     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1262     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1263     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1264 
1265     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1266 
1267     return 0;
1268 }
1269 
1270 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1271                                             const struct mq_attr *attr)
1272 {
1273     struct target_mq_attr *target_mq_attr;
1274 
1275     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1276                           target_mq_attr_addr, 0))
1277         return -TARGET_EFAULT;
1278 
1279     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1280     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1281     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1282     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1283 
1284     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1285 
1286     return 0;
1287 }
1288 #endif
1289 
1290 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1291 /* do_select() must return target values and target errnos. */
1292 static abi_long do_select(int n,
1293                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1294                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1295 {
1296     fd_set rfds, wfds, efds;
1297     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1298     struct timeval tv;
1299     struct timespec ts, *ts_ptr;
1300     abi_long ret;
1301 
1302     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1303     if (ret) {
1304         return ret;
1305     }
1306     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1307     if (ret) {
1308         return ret;
1309     }
1310     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1311     if (ret) {
1312         return ret;
1313     }
1314 
1315     if (target_tv_addr) {
1316         if (copy_from_user_timeval(&tv, target_tv_addr))
1317             return -TARGET_EFAULT;
1318         ts.tv_sec = tv.tv_sec;
1319         ts.tv_nsec = tv.tv_usec * 1000;
1320         ts_ptr = &ts;
1321     } else {
1322         ts_ptr = NULL;
1323     }
1324 
1325     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1326                                   ts_ptr, NULL));
1327 
1328     if (!is_error(ret)) {
1329         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1330             return -TARGET_EFAULT;
1331         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1332             return -TARGET_EFAULT;
1333         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1334             return -TARGET_EFAULT;
1335 
1336         if (target_tv_addr) {
1337             tv.tv_sec = ts.tv_sec;
1338             tv.tv_usec = ts.tv_nsec / 1000;
1339             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1340                 return -TARGET_EFAULT;
1341             }
1342         }
1343     }
1344 
1345     return ret;
1346 }
1347 
1348 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1349 static abi_long do_old_select(abi_ulong arg1)
1350 {
1351     struct target_sel_arg_struct *sel;
1352     abi_ulong inp, outp, exp, tvp;
1353     long nsel;
1354 
1355     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1356         return -TARGET_EFAULT;
1357     }
1358 
1359     nsel = tswapal(sel->n);
1360     inp = tswapal(sel->inp);
1361     outp = tswapal(sel->outp);
1362     exp = tswapal(sel->exp);
1363     tvp = tswapal(sel->tvp);
1364 
1365     unlock_user_struct(sel, arg1, 0);
1366 
1367     return do_select(nsel, inp, outp, exp, tvp);
1368 }
1369 #endif
1370 #endif
1371 
1372 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1373 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1374                             abi_long arg4, abi_long arg5, abi_long arg6,
1375                             bool time64)
1376 {
1377     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1378     fd_set rfds, wfds, efds;
1379     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1380     struct timespec ts, *ts_ptr;
1381     abi_long ret;
1382 
1383     /*
1384      * The 6th arg is actually two args smashed together,
1385      * so we cannot use the C library.
1386      */
1387     struct {
1388         sigset_t *set;
1389         size_t size;
1390     } sig, *sig_ptr;
1391 
1392     abi_ulong arg_sigset, arg_sigsize, *arg7;
1393 
1394     n = arg1;
1395     rfd_addr = arg2;
1396     wfd_addr = arg3;
1397     efd_addr = arg4;
1398     ts_addr = arg5;
1399 
1400     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1401     if (ret) {
1402         return ret;
1403     }
1404     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1405     if (ret) {
1406         return ret;
1407     }
1408     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1409     if (ret) {
1410         return ret;
1411     }
1412 
1413     /*
1414      * This takes a timespec, and not a timeval, so we cannot
1415      * use the do_select() helper ...
1416      */
1417     if (ts_addr) {
1418         if (time64) {
1419             if (target_to_host_timespec64(&ts, ts_addr)) {
1420                 return -TARGET_EFAULT;
1421             }
1422         } else {
1423             if (target_to_host_timespec(&ts, ts_addr)) {
1424                 return -TARGET_EFAULT;
1425             }
1426         }
1427             ts_ptr = &ts;
1428     } else {
1429         ts_ptr = NULL;
1430     }
1431 
1432     /* Extract the two packed args for the sigset */
1433     sig_ptr = NULL;
1434     if (arg6) {
1435         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1436         if (!arg7) {
1437             return -TARGET_EFAULT;
1438         }
1439         arg_sigset = tswapal(arg7[0]);
1440         arg_sigsize = tswapal(arg7[1]);
1441         unlock_user(arg7, arg6, 0);
1442 
1443         if (arg_sigset) {
1444             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1445             if (ret != 0) {
1446                 return ret;
1447             }
1448             sig_ptr = &sig;
1449             sig.size = SIGSET_T_SIZE;
1450         }
1451     }
1452 
1453     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1454                                   ts_ptr, sig_ptr));
1455 
1456     if (sig_ptr) {
1457         finish_sigsuspend_mask(ret);
1458     }
1459 
1460     if (!is_error(ret)) {
1461         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1462             return -TARGET_EFAULT;
1463         }
1464         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1465             return -TARGET_EFAULT;
1466         }
1467         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1468             return -TARGET_EFAULT;
1469         }
1470         if (time64) {
1471             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1472                 return -TARGET_EFAULT;
1473             }
1474         } else {
1475             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1476                 return -TARGET_EFAULT;
1477             }
1478         }
1479     }
1480     return ret;
1481 }
1482 #endif
1483 
1484 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1485     defined(TARGET_NR_ppoll_time64)
1486 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1487                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1488 {
1489     struct target_pollfd *target_pfd;
1490     unsigned int nfds = arg2;
1491     struct pollfd *pfd;
1492     unsigned int i;
1493     abi_long ret;
1494 
1495     pfd = NULL;
1496     target_pfd = NULL;
1497     if (nfds) {
1498         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1499             return -TARGET_EINVAL;
1500         }
1501         target_pfd = lock_user(VERIFY_WRITE, arg1,
1502                                sizeof(struct target_pollfd) * nfds, 1);
1503         if (!target_pfd) {
1504             return -TARGET_EFAULT;
1505         }
1506 
1507         pfd = alloca(sizeof(struct pollfd) * nfds);
1508         for (i = 0; i < nfds; i++) {
1509             pfd[i].fd = tswap32(target_pfd[i].fd);
1510             pfd[i].events = tswap16(target_pfd[i].events);
1511         }
1512     }
1513     if (ppoll) {
1514         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1515         sigset_t *set = NULL;
1516 
1517         if (arg3) {
1518             if (time64) {
1519                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1520                     unlock_user(target_pfd, arg1, 0);
1521                     return -TARGET_EFAULT;
1522                 }
1523             } else {
1524                 if (target_to_host_timespec(timeout_ts, arg3)) {
1525                     unlock_user(target_pfd, arg1, 0);
1526                     return -TARGET_EFAULT;
1527                 }
1528             }
1529         } else {
1530             timeout_ts = NULL;
1531         }
1532 
1533         if (arg4) {
1534             ret = process_sigsuspend_mask(&set, arg4, arg5);
1535             if (ret != 0) {
1536                 unlock_user(target_pfd, arg1, 0);
1537                 return ret;
1538             }
1539         }
1540 
1541         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1542                                    set, SIGSET_T_SIZE));
1543 
1544         if (set) {
1545             finish_sigsuspend_mask(ret);
1546         }
1547         if (!is_error(ret) && arg3) {
1548             if (time64) {
1549                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1550                     return -TARGET_EFAULT;
1551                 }
1552             } else {
1553                 if (host_to_target_timespec(arg3, timeout_ts)) {
1554                     return -TARGET_EFAULT;
1555                 }
1556             }
1557         }
1558     } else {
1559         struct timespec ts, *pts;
1560 
1561         if (arg3 >= 0) {
1562             /* Convert ms to secs, ns */
1563             ts.tv_sec = arg3 / 1000;
1564             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1565             pts = &ts;
1566         } else {
1567             /* A negative poll() timeout means "infinite" */
1568             pts = NULL;
1569         }
1570         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1571     }
1572 
1573     if (!is_error(ret)) {
1574         for (i = 0; i < nfds; i++) {
1575             target_pfd[i].revents = tswap16(pfd[i].revents);
1576         }
1577     }
1578     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1579     return ret;
1580 }
1581 #endif
1582 
1583 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1584                         int flags, int is_pipe2)
1585 {
1586     int host_pipe[2];
1587     abi_long ret;
1588     ret = pipe2(host_pipe, flags);
1589 
1590     if (is_error(ret))
1591         return get_errno(ret);
1592 
1593     /* Several targets have special calling conventions for the original
1594        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1595     if (!is_pipe2) {
1596 #if defined(TARGET_ALPHA)
1597         cpu_env->ir[IR_A4] = host_pipe[1];
1598         return host_pipe[0];
1599 #elif defined(TARGET_MIPS)
1600         cpu_env->active_tc.gpr[3] = host_pipe[1];
1601         return host_pipe[0];
1602 #elif defined(TARGET_SH4)
1603         cpu_env->gregs[1] = host_pipe[1];
1604         return host_pipe[0];
1605 #elif defined(TARGET_SPARC)
1606         cpu_env->regwptr[1] = host_pipe[1];
1607         return host_pipe[0];
1608 #endif
1609     }
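     /*
      * Illustrative note: on MIPS, for example, the guest expects pipe()
      * to return fd[0] as the syscall return value and fd[1] in v1
      * (gpr[3]), which is why the register is set above and nothing is
      * stored to pipedes on that path.
      */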
1610 
1611     if (put_user_s32(host_pipe[0], pipedes)
1612         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1613         return -TARGET_EFAULT;
1614     return get_errno(ret);
1615 }
1616 
1617 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1618                                                abi_ulong target_addr,
1619                                                socklen_t len)
1620 {
1621     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1622     sa_family_t sa_family;
1623     struct target_sockaddr *target_saddr;
1624 
1625     if (fd_trans_target_to_host_addr(fd)) {
1626         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1627     }
1628 
1629     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1630     if (!target_saddr)
1631         return -TARGET_EFAULT;
1632 
1633     sa_family = tswap16(target_saddr->sa_family);
1634 
1635     /* Oops. The caller might send an incomplete sun_path; sun_path
1636      * must be terminated by \0 (see the manual page), but
1637      * unfortunately it is quite common to specify the sockaddr_un
1638      * length as "strlen(x->sun_path)" when it should be
1639      * "strlen(...) + 1". We'll fix that here if needed.
1640      * The Linux kernel has a similar workaround.
1641      */
1642 
1643     if (sa_family == AF_UNIX) {
1644         if (len < unix_maxlen && len > 0) {
1645             char *cp = (char*)target_saddr;
1646 
1647             if ( cp[len-1] && !cp[len] )
1648                 len++;
1649         }
1650         if (len > unix_maxlen)
1651             len = unix_maxlen;
1652     }
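     /*
      * Worked example (illustrative): for sun_path = "/tmp/sock" a guest
      * may pass len = offsetof(struct sockaddr_un, sun_path) + 9, leaving
      * out the trailing '\0'; if the byte just past that length is the
      * terminating '\0', the check above extends len by one so the copied
      * path stays NUL-terminated.
      */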
1653 
1654     memcpy(addr, target_saddr, len);
1655     addr->sa_family = sa_family;
1656     if (sa_family == AF_NETLINK) {
1657         struct sockaddr_nl *nladdr;
1658 
1659         nladdr = (struct sockaddr_nl *)addr;
1660         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1661         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1662     } else if (sa_family == AF_PACKET) {
1663 	struct target_sockaddr_ll *lladdr;
1664 
1665 	lladdr = (struct target_sockaddr_ll *)addr;
1666 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1667 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1668     } else if (sa_family == AF_INET6) {
1669         struct sockaddr_in6 *in6addr;
1670 
1671         in6addr = (struct sockaddr_in6 *)addr;
1672         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1673     }
1674     unlock_user(target_saddr, target_addr, 0);
1675 
1676     return 0;
1677 }
1678 
1679 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1680                                                struct sockaddr *addr,
1681                                                socklen_t len)
1682 {
1683     struct target_sockaddr *target_saddr;
1684 
1685     if (len == 0) {
1686         return 0;
1687     }
1688     assert(addr);
1689 
1690     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1691     if (!target_saddr)
1692         return -TARGET_EFAULT;
1693     memcpy(target_saddr, addr, len);
1694     if (len >= offsetof(struct target_sockaddr, sa_family) +
1695         sizeof(target_saddr->sa_family)) {
1696         target_saddr->sa_family = tswap16(addr->sa_family);
1697     }
1698     if (addr->sa_family == AF_NETLINK &&
1699         len >= sizeof(struct target_sockaddr_nl)) {
1700         struct target_sockaddr_nl *target_nl =
1701                (struct target_sockaddr_nl *)target_saddr;
1702         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1703         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1704     } else if (addr->sa_family == AF_PACKET) {
1705         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1706         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1707         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1708     } else if (addr->sa_family == AF_INET6 &&
1709                len >= sizeof(struct target_sockaddr_in6)) {
1710         struct target_sockaddr_in6 *target_in6 =
1711                (struct target_sockaddr_in6 *)target_saddr;
1712         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1713     }
1714     unlock_user(target_saddr, target_addr, len);
1715 
1716     return 0;
1717 }
1718 
1719 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1720                                            struct target_msghdr *target_msgh)
1721 {
1722     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1723     abi_long msg_controllen;
1724     abi_ulong target_cmsg_addr;
1725     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1726     socklen_t space = 0;
1727 
1728     msg_controllen = tswapal(target_msgh->msg_controllen);
1729     if (msg_controllen < sizeof (struct target_cmsghdr))
1730         goto the_end;
1731     target_cmsg_addr = tswapal(target_msgh->msg_control);
1732     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1733     target_cmsg_start = target_cmsg;
1734     if (!target_cmsg)
1735         return -TARGET_EFAULT;
1736 
1737     while (cmsg && target_cmsg) {
1738         void *data = CMSG_DATA(cmsg);
1739         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1740 
1741         int len = tswapal(target_cmsg->cmsg_len)
1742             - sizeof(struct target_cmsghdr);
1743 
1744         space += CMSG_SPACE(len);
1745         if (space > msgh->msg_controllen) {
1746             space -= CMSG_SPACE(len);
1747             /* This is a QEMU bug, since we allocated the payload
1748              * area ourselves (unlike overflow in host-to-target
1749              * conversion, which is just the guest giving us a buffer
1750              * that's too small). It can't happen for the payload types
1751              * we currently support; if it becomes an issue in future
1752              * we would need to improve our allocation strategy to
1753              * something more intelligent than "twice the size of the
1754              * target buffer we're reading from".
1755              */
1756             qemu_log_mask(LOG_UNIMP,
1757                           ("Unsupported ancillary data %d/%d: "
1758                            "unhandled msg size\n"),
1759                           tswap32(target_cmsg->cmsg_level),
1760                           tswap32(target_cmsg->cmsg_type));
1761             break;
1762         }
1763 
1764         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1765             cmsg->cmsg_level = SOL_SOCKET;
1766         } else {
1767             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1768         }
1769         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1770         cmsg->cmsg_len = CMSG_LEN(len);
1771 
1772         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1773             int *fd = (int *)data;
1774             int *target_fd = (int *)target_data;
1775             int i, numfds = len / sizeof(int);
1776 
1777             for (i = 0; i < numfds; i++) {
1778                 __get_user(fd[i], target_fd + i);
1779             }
1780         } else if (cmsg->cmsg_level == SOL_SOCKET
1781                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1782             struct ucred *cred = (struct ucred *)data;
1783             struct target_ucred *target_cred =
1784                 (struct target_ucred *)target_data;
1785 
1786             __get_user(cred->pid, &target_cred->pid);
1787             __get_user(cred->uid, &target_cred->uid);
1788             __get_user(cred->gid, &target_cred->gid);
1789         } else if (cmsg->cmsg_level == SOL_ALG) {
1790             uint32_t *dst = (uint32_t *)data;
1791 
1792             memcpy(dst, target_data, len);
1793             /* fix endianness of first 32-bit word */
1794             if (len >= sizeof(uint32_t)) {
1795                 *dst = tswap32(*dst);
1796             }
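             /*
              * Illustrative note: for AF_ALG the first 32-bit word of the
              * payload is a value such as the ALG_SET_OP operation code or
              * the ALG_SET_IV iv length, hence the swap above; the rest of
              * the payload is opaque key/IV material and is copied as-is.
              */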
1797         } else {
1798             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1799                           cmsg->cmsg_level, cmsg->cmsg_type);
1800             memcpy(data, target_data, len);
1801         }
1802 
1803         cmsg = CMSG_NXTHDR(msgh, cmsg);
1804         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1805                                          target_cmsg_start);
1806     }
1807     unlock_user(target_cmsg, target_cmsg_addr, 0);
1808  the_end:
1809     msgh->msg_controllen = space;
1810     return 0;
1811 }
1812 
1813 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1814                                            struct msghdr *msgh)
1815 {
1816     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1817     abi_long msg_controllen;
1818     abi_ulong target_cmsg_addr;
1819     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1820     socklen_t space = 0;
1821 
1822     msg_controllen = tswapal(target_msgh->msg_controllen);
1823     if (msg_controllen < sizeof (struct target_cmsghdr))
1824         goto the_end;
1825     target_cmsg_addr = tswapal(target_msgh->msg_control);
1826     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1827     target_cmsg_start = target_cmsg;
1828     if (!target_cmsg)
1829         return -TARGET_EFAULT;
1830 
1831     while (cmsg && target_cmsg) {
1832         void *data = CMSG_DATA(cmsg);
1833         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1834 
1835         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1836         int tgt_len, tgt_space;
1837 
1838         /* We never copy a half-header but may copy half-data;
1839          * this is Linux's behaviour in put_cmsg(). Note that
1840          * truncation here is a guest problem (which we report
1841          * to the guest via the CTRUNC bit), unlike truncation
1842          * in target_to_host_cmsg, which is a QEMU bug.
1843          */
1844         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1845             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1846             break;
1847         }
1848 
1849         if (cmsg->cmsg_level == SOL_SOCKET) {
1850             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1851         } else {
1852             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1853         }
1854         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1855 
1856         /* Payload types which need a different size of payload on
1857          * the target must adjust tgt_len here.
1858          */
1859         tgt_len = len;
1860         switch (cmsg->cmsg_level) {
1861         case SOL_SOCKET:
1862             switch (cmsg->cmsg_type) {
1863             case SO_TIMESTAMP:
1864                 tgt_len = sizeof(struct target_timeval);
1865                 break;
1866             default:
1867                 break;
1868             }
1869             break;
1870         default:
1871             break;
1872         }
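         /*
          * Worked example (illustrative): for SO_TIMESTAMP the host payload
          * is a struct timeval (16 bytes on a 64-bit host) while a 32-bit
          * guest expects a struct target_timeval of 8 bytes, so tgt_len is
          * adjusted above before the space check below.
          */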
1873 
1874         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1875             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1876             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1877         }
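         /*
          * Worked example (illustrative, assuming a 32-bit guest): an
          * SCM_RIGHTS message carrying three file descriptors has len = 12,
          * so TARGET_CMSG_LEN(12) = 24; if only 20 bytes of control space
          * remain, MSG_CTRUNC is set above and tgt_len becomes 20 - 12 = 8,
          * i.e. just two descriptors are delivered to the guest.
          */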
1878 
1879         /* We must now copy-and-convert len bytes of payload
1880          * into tgt_len bytes of destination space. Bear in mind
1881          * that in both source and destination we may be dealing
1882          * with a truncated value!
1883          */
1884         switch (cmsg->cmsg_level) {
1885         case SOL_SOCKET:
1886             switch (cmsg->cmsg_type) {
1887             case SCM_RIGHTS:
1888             {
1889                 int *fd = (int *)data;
1890                 int *target_fd = (int *)target_data;
1891                 int i, numfds = tgt_len / sizeof(int);
1892 
1893                 for (i = 0; i < numfds; i++) {
1894                     __put_user(fd[i], target_fd + i);
1895                 }
1896                 break;
1897             }
1898             case SO_TIMESTAMP:
1899             {
1900                 struct timeval *tv = (struct timeval *)data;
1901                 struct target_timeval *target_tv =
1902                     (struct target_timeval *)target_data;
1903 
1904                 if (len != sizeof(struct timeval) ||
1905                     tgt_len != sizeof(struct target_timeval)) {
1906                     goto unimplemented;
1907                 }
1908 
1909                 /* copy struct timeval to target */
1910                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1911                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1912                 break;
1913             }
1914             case SCM_CREDENTIALS:
1915             {
1916                 struct ucred *cred = (struct ucred *)data;
1917                 struct target_ucred *target_cred =
1918                     (struct target_ucred *)target_data;
1919 
1920                 __put_user(cred->pid, &target_cred->pid);
1921                 __put_user(cred->uid, &target_cred->uid);
1922                 __put_user(cred->gid, &target_cred->gid);
1923                 break;
1924             }
1925             default:
1926                 goto unimplemented;
1927             }
1928             break;
1929 
1930         case SOL_IP:
1931             switch (cmsg->cmsg_type) {
1932             case IP_TTL:
1933             {
1934                 uint32_t *v = (uint32_t *)data;
1935                 uint32_t *t_int = (uint32_t *)target_data;
1936 
1937                 if (len != sizeof(uint32_t) ||
1938                     tgt_len != sizeof(uint32_t)) {
1939                     goto unimplemented;
1940                 }
1941                 __put_user(*v, t_int);
1942                 break;
1943             }
1944             case IP_RECVERR:
1945             {
1946                 struct errhdr_t {
1947                    struct sock_extended_err ee;
1948                    struct sockaddr_in offender;
1949                 };
1950                 struct errhdr_t *errh = (struct errhdr_t *)data;
1951                 struct errhdr_t *target_errh =
1952                     (struct errhdr_t *)target_data;
1953 
1954                 if (len != sizeof(struct errhdr_t) ||
1955                     tgt_len != sizeof(struct errhdr_t)) {
1956                     goto unimplemented;
1957                 }
1958                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1959                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1960                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1961                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1962                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1963                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1964                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1965                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1966                     (void *) &errh->offender, sizeof(errh->offender));
1967                 break;
1968             }
1969             default:
1970                 goto unimplemented;
1971             }
1972             break;
1973 
1974         case SOL_IPV6:
1975             switch (cmsg->cmsg_type) {
1976             case IPV6_HOPLIMIT:
1977             {
1978                 uint32_t *v = (uint32_t *)data;
1979                 uint32_t *t_int = (uint32_t *)target_data;
1980 
1981                 if (len != sizeof(uint32_t) ||
1982                     tgt_len != sizeof(uint32_t)) {
1983                     goto unimplemented;
1984                 }
1985                 __put_user(*v, t_int);
1986                 break;
1987             }
1988             case IPV6_RECVERR:
1989             {
1990                 struct errhdr6_t {
1991                    struct sock_extended_err ee;
1992                    struct sockaddr_in6 offender;
1993                 };
1994                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1995                 struct errhdr6_t *target_errh =
1996                     (struct errhdr6_t *)target_data;
1997 
1998                 if (len != sizeof(struct errhdr6_t) ||
1999                     tgt_len != sizeof(struct errhdr6_t)) {
2000                     goto unimplemented;
2001                 }
2002                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2003                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2004                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2005                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2006                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2007                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2008                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2009                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2010                     (void *) &errh->offender, sizeof(errh->offender));
2011                 break;
2012             }
2013             default:
2014                 goto unimplemented;
2015             }
2016             break;
2017 
2018         default:
2019         unimplemented:
2020             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2021                           cmsg->cmsg_level, cmsg->cmsg_type);
2022             memcpy(target_data, data, MIN(len, tgt_len));
2023             if (tgt_len > len) {
2024                 memset(target_data + len, 0, tgt_len - len);
2025             }
2026         }
2027 
2028         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2029         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2030         if (msg_controllen < tgt_space) {
2031             tgt_space = msg_controllen;
2032         }
2033         msg_controllen -= tgt_space;
2034         space += tgt_space;
2035         cmsg = CMSG_NXTHDR(msgh, cmsg);
2036         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2037                                          target_cmsg_start);
2038     }
2039     unlock_user(target_cmsg, target_cmsg_addr, space);
2040  the_end:
2041     target_msgh->msg_controllen = tswapal(space);
2042     return 0;
2043 }
2044 
2045 /* do_setsockopt() must return target values and target errnos. */
2046 static abi_long do_setsockopt(int sockfd, int level, int optname,
2047                               abi_ulong optval_addr, socklen_t optlen)
2048 {
2049     abi_long ret;
2050     int val;
2051 
2052     switch(level) {
2053     case SOL_TCP:
2054     case SOL_UDP:
2055         /* TCP and UDP options all take an 'int' value.  */
2056         if (optlen < sizeof(uint32_t))
2057             return -TARGET_EINVAL;
2058 
2059         if (get_user_u32(val, optval_addr))
2060             return -TARGET_EFAULT;
2061         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2062         break;
2063     case SOL_IP:
2064         switch(optname) {
2065         case IP_TOS:
2066         case IP_TTL:
2067         case IP_HDRINCL:
2068         case IP_ROUTER_ALERT:
2069         case IP_RECVOPTS:
2070         case IP_RETOPTS:
2071         case IP_PKTINFO:
2072         case IP_MTU_DISCOVER:
2073         case IP_RECVERR:
2074         case IP_RECVTTL:
2075         case IP_RECVTOS:
2076 #ifdef IP_FREEBIND
2077         case IP_FREEBIND:
2078 #endif
2079         case IP_MULTICAST_TTL:
2080         case IP_MULTICAST_LOOP:
2081             val = 0;
2082             if (optlen >= sizeof(uint32_t)) {
2083                 if (get_user_u32(val, optval_addr))
2084                     return -TARGET_EFAULT;
2085             } else if (optlen >= 1) {
2086                 if (get_user_u8(val, optval_addr))
2087                     return -TARGET_EFAULT;
2088             }
2089             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2090             break;
2091         case IP_ADD_MEMBERSHIP:
2092         case IP_DROP_MEMBERSHIP:
2093         {
2094             struct ip_mreqn ip_mreq;
2095             struct target_ip_mreqn *target_smreqn;
2096 
2097             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2098                               sizeof(struct target_ip_mreq));
2099 
2100             if (optlen < sizeof (struct target_ip_mreq) ||
2101                 optlen > sizeof (struct target_ip_mreqn)) {
2102                 return -TARGET_EINVAL;
2103             }
2104 
2105             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2106             if (!target_smreqn) {
2107                 return -TARGET_EFAULT;
2108             }
2109             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2110             ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2111             if (optlen == sizeof(struct target_ip_mreqn)) {
2112                 ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2113                 optlen = sizeof(struct ip_mreqn);
2114             }
2115             unlock_user(target_smreqn, optval_addr, 0);
2116 
2117             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2118             break;
2119         }
2120         case IP_BLOCK_SOURCE:
2121         case IP_UNBLOCK_SOURCE:
2122         case IP_ADD_SOURCE_MEMBERSHIP:
2123         case IP_DROP_SOURCE_MEMBERSHIP:
2124         {
2125             struct ip_mreq_source *ip_mreq_source;
2126 
2127             if (optlen != sizeof (struct target_ip_mreq_source))
2128                 return -TARGET_EINVAL;
2129 
2130             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2131             if (!ip_mreq_source) {
2132                 return -TARGET_EFAULT;
2133             }
2134             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2135             unlock_user (ip_mreq_source, optval_addr, 0);
2136             break;
2137         }
2138         default:
2139             goto unimplemented;
2140         }
2141         break;
2142     case SOL_IPV6:
2143         switch (optname) {
2144         case IPV6_MTU_DISCOVER:
2145         case IPV6_MTU:
2146         case IPV6_V6ONLY:
2147         case IPV6_RECVPKTINFO:
2148         case IPV6_UNICAST_HOPS:
2149         case IPV6_MULTICAST_HOPS:
2150         case IPV6_MULTICAST_LOOP:
2151         case IPV6_RECVERR:
2152         case IPV6_RECVHOPLIMIT:
2153         case IPV6_2292HOPLIMIT:
2154         case IPV6_CHECKSUM:
2155         case IPV6_ADDRFORM:
2156         case IPV6_2292PKTINFO:
2157         case IPV6_RECVTCLASS:
2158         case IPV6_RECVRTHDR:
2159         case IPV6_2292RTHDR:
2160         case IPV6_RECVHOPOPTS:
2161         case IPV6_2292HOPOPTS:
2162         case IPV6_RECVDSTOPTS:
2163         case IPV6_2292DSTOPTS:
2164         case IPV6_TCLASS:
2165         case IPV6_ADDR_PREFERENCES:
2166 #ifdef IPV6_RECVPATHMTU
2167         case IPV6_RECVPATHMTU:
2168 #endif
2169 #ifdef IPV6_TRANSPARENT
2170         case IPV6_TRANSPARENT:
2171 #endif
2172 #ifdef IPV6_FREEBIND
2173         case IPV6_FREEBIND:
2174 #endif
2175 #ifdef IPV6_RECVORIGDSTADDR
2176         case IPV6_RECVORIGDSTADDR:
2177 #endif
2178             val = 0;
2179             if (optlen < sizeof(uint32_t)) {
2180                 return -TARGET_EINVAL;
2181             }
2182             if (get_user_u32(val, optval_addr)) {
2183                 return -TARGET_EFAULT;
2184             }
2185             ret = get_errno(setsockopt(sockfd, level, optname,
2186                                        &val, sizeof(val)));
2187             break;
2188         case IPV6_PKTINFO:
2189         {
2190             struct in6_pktinfo pki;
2191 
2192             if (optlen < sizeof(pki)) {
2193                 return -TARGET_EINVAL;
2194             }
2195 
2196             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2197                 return -TARGET_EFAULT;
2198             }
2199 
2200             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2201 
2202             ret = get_errno(setsockopt(sockfd, level, optname,
2203                                        &pki, sizeof(pki)));
2204             break;
2205         }
2206         case IPV6_ADD_MEMBERSHIP:
2207         case IPV6_DROP_MEMBERSHIP:
2208         {
2209             struct ipv6_mreq ipv6mreq;
2210 
2211             if (optlen < sizeof(ipv6mreq)) {
2212                 return -TARGET_EINVAL;
2213             }
2214 
2215             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2216                 return -TARGET_EFAULT;
2217             }
2218 
2219             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2220 
2221             ret = get_errno(setsockopt(sockfd, level, optname,
2222                                        &ipv6mreq, sizeof(ipv6mreq)));
2223             break;
2224         }
2225         default:
2226             goto unimplemented;
2227         }
2228         break;
2229     case SOL_ICMPV6:
2230         switch (optname) {
2231         case ICMPV6_FILTER:
2232         {
2233             struct icmp6_filter icmp6f;
2234 
2235             if (optlen > sizeof(icmp6f)) {
2236                 optlen = sizeof(icmp6f);
2237             }
2238 
2239             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2240                 return -TARGET_EFAULT;
2241             }
2242 
2243             for (val = 0; val < 8; val++) {
2244                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2245             }
2246 
2247             ret = get_errno(setsockopt(sockfd, level, optname,
2248                                        &icmp6f, optlen));
2249             break;
2250         }
2251         default:
2252             goto unimplemented;
2253         }
2254         break;
2255     case SOL_RAW:
2256         switch (optname) {
2257         case ICMP_FILTER:
2258         case IPV6_CHECKSUM:
2259             /* These take a u32 value. */
2260             if (optlen < sizeof(uint32_t)) {
2261                 return -TARGET_EINVAL;
2262             }
2263 
2264             if (get_user_u32(val, optval_addr)) {
2265                 return -TARGET_EFAULT;
2266             }
2267             ret = get_errno(setsockopt(sockfd, level, optname,
2268                                        &val, sizeof(val)));
2269             break;
2270 
2271         default:
2272             goto unimplemented;
2273         }
2274         break;
2275 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2276     case SOL_ALG:
2277         switch (optname) {
2278         case ALG_SET_KEY:
2279         {
2280             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2281             if (!alg_key) {
2282                 return -TARGET_EFAULT;
2283             }
2284             ret = get_errno(setsockopt(sockfd, level, optname,
2285                                        alg_key, optlen));
2286             unlock_user(alg_key, optval_addr, optlen);
2287             break;
2288         }
2289         case ALG_SET_AEAD_AUTHSIZE:
2290         {
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        NULL, optlen));
2293             break;
2294         }
2295         default:
2296             goto unimplemented;
2297         }
2298         break;
2299 #endif
2300     case TARGET_SOL_SOCKET:
2301         switch (optname) {
2302         case TARGET_SO_RCVTIMEO:
2303         case TARGET_SO_SNDTIMEO:
2304         {
2305                 struct timeval tv;
2306 
2307                 if (optlen != sizeof(struct target_timeval)) {
2308                     return -TARGET_EINVAL;
2309                 }
2310 
2311                 if (copy_from_user_timeval(&tv, optval_addr)) {
2312                     return -TARGET_EFAULT;
2313                 }
2314 
2315                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2316                                 optname == TARGET_SO_RCVTIMEO ?
2317                                     SO_RCVTIMEO : SO_SNDTIMEO,
2318                                 &tv, sizeof(tv)));
2319                 return ret;
2320         }
2321         case TARGET_SO_ATTACH_FILTER:
2322         {
2323                 struct target_sock_fprog *tfprog;
2324                 struct target_sock_filter *tfilter;
2325                 struct sock_fprog fprog;
2326                 struct sock_filter *filter;
2327                 int i;
2328 
2329                 if (optlen != sizeof(*tfprog)) {
2330                     return -TARGET_EINVAL;
2331                 }
2332                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2333                     return -TARGET_EFAULT;
2334                 }
2335                 if (!lock_user_struct(VERIFY_READ, tfilter,
2336                                       tswapal(tfprog->filter), 0)) {
2337                     unlock_user_struct(tfprog, optval_addr, 1);
2338                     return -TARGET_EFAULT;
2339                 }
2340 
2341                 fprog.len = tswap16(tfprog->len);
2342                 filter = g_try_new(struct sock_filter, fprog.len);
2343                 if (filter == NULL) {
2344                     unlock_user_struct(tfilter, tfprog->filter, 1);
2345                     unlock_user_struct(tfprog, optval_addr, 1);
2346                     return -TARGET_ENOMEM;
2347                 }
2348                 for (i = 0; i < fprog.len; i++) {
2349                     filter[i].code = tswap16(tfilter[i].code);
2350                     filter[i].jt = tfilter[i].jt;
2351                     filter[i].jf = tfilter[i].jf;
2352                     filter[i].k = tswap32(tfilter[i].k);
2353                 }
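                 /*
                  * Note (illustrative): struct sock_filter packs code as a
                  * 16-bit field and k as 32 bits, so only those are
                  * byte-swapped above; jt and jf are single bytes and can
                  * be copied as-is.
                  */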
2354                 fprog.filter = filter;
2355 
2356                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2357                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2358                 g_free(filter);
2359 
2360                 unlock_user_struct(tfilter, tfprog->filter, 1);
2361                 unlock_user_struct(tfprog, optval_addr, 1);
2362                 return ret;
2363         }
2364 	case TARGET_SO_BINDTODEVICE:
2365 	{
2366 		char *dev_ifname, *addr_ifname;
2367 
2368 		if (optlen > IFNAMSIZ - 1) {
2369 		    optlen = IFNAMSIZ - 1;
2370 		}
2371 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2372 		if (!dev_ifname) {
2373 		    return -TARGET_EFAULT;
2374 		}
2375 		optname = SO_BINDTODEVICE;
2376 		addr_ifname = alloca(IFNAMSIZ);
2377 		memcpy(addr_ifname, dev_ifname, optlen);
2378 		addr_ifname[optlen] = 0;
2379 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2380                                            addr_ifname, optlen));
2381 		unlock_user (dev_ifname, optval_addr, 0);
2382 		return ret;
2383 	}
2384         case TARGET_SO_LINGER:
2385         {
2386                 struct linger lg;
2387                 struct target_linger *tlg;
2388 
2389                 if (optlen != sizeof(struct target_linger)) {
2390                     return -TARGET_EINVAL;
2391                 }
2392                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2393                     return -TARGET_EFAULT;
2394                 }
2395                 __get_user(lg.l_onoff, &tlg->l_onoff);
2396                 __get_user(lg.l_linger, &tlg->l_linger);
2397                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2398                                 &lg, sizeof(lg)));
2399                 unlock_user_struct(tlg, optval_addr, 0);
2400                 return ret;
2401         }
2402             /* Options with 'int' argument.  */
2403         case TARGET_SO_DEBUG:
2404 		optname = SO_DEBUG;
2405 		break;
2406         case TARGET_SO_REUSEADDR:
2407 		optname = SO_REUSEADDR;
2408 		break;
2409 #ifdef SO_REUSEPORT
2410         case TARGET_SO_REUSEPORT:
2411                 optname = SO_REUSEPORT;
2412                 break;
2413 #endif
2414         case TARGET_SO_TYPE:
2415 		optname = SO_TYPE;
2416 		break;
2417         case TARGET_SO_ERROR:
2418 		optname = SO_ERROR;
2419 		break;
2420         case TARGET_SO_DONTROUTE:
2421 		optname = SO_DONTROUTE;
2422 		break;
2423         case TARGET_SO_BROADCAST:
2424 		optname = SO_BROADCAST;
2425 		break;
2426         case TARGET_SO_SNDBUF:
2427 		optname = SO_SNDBUF;
2428 		break;
2429         case TARGET_SO_SNDBUFFORCE:
2430                 optname = SO_SNDBUFFORCE;
2431                 break;
2432         case TARGET_SO_RCVBUF:
2433 		optname = SO_RCVBUF;
2434 		break;
2435         case TARGET_SO_RCVBUFFORCE:
2436                 optname = SO_RCVBUFFORCE;
2437                 break;
2438         case TARGET_SO_KEEPALIVE:
2439 		optname = SO_KEEPALIVE;
2440 		break;
2441         case TARGET_SO_OOBINLINE:
2442 		optname = SO_OOBINLINE;
2443 		break;
2444         case TARGET_SO_NO_CHECK:
2445 		optname = SO_NO_CHECK;
2446 		break;
2447         case TARGET_SO_PRIORITY:
2448 		optname = SO_PRIORITY;
2449 		break;
2450 #ifdef SO_BSDCOMPAT
2451         case TARGET_SO_BSDCOMPAT:
2452 		optname = SO_BSDCOMPAT;
2453 		break;
2454 #endif
2455         case TARGET_SO_PASSCRED:
2456 		optname = SO_PASSCRED;
2457 		break;
2458         case TARGET_SO_PASSSEC:
2459                 optname = SO_PASSSEC;
2460                 break;
2461         case TARGET_SO_TIMESTAMP:
2462 		optname = SO_TIMESTAMP;
2463 		break;
2464         case TARGET_SO_RCVLOWAT:
2465 		optname = SO_RCVLOWAT;
2466 		break;
2467         default:
2468             goto unimplemented;
2469         }
2470 	if (optlen < sizeof(uint32_t))
2471             return -TARGET_EINVAL;
2472 
2473 	if (get_user_u32(val, optval_addr))
2474             return -TARGET_EFAULT;
2475 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2476         break;
2477 #ifdef SOL_NETLINK
2478     case SOL_NETLINK:
2479         switch (optname) {
2480         case NETLINK_PKTINFO:
2481         case NETLINK_ADD_MEMBERSHIP:
2482         case NETLINK_DROP_MEMBERSHIP:
2483         case NETLINK_BROADCAST_ERROR:
2484         case NETLINK_NO_ENOBUFS:
2485 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2486         case NETLINK_LISTEN_ALL_NSID:
2487         case NETLINK_CAP_ACK:
2488 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2489 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2490         case NETLINK_EXT_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2493         case NETLINK_GET_STRICT_CHK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2495             break;
2496         default:
2497             goto unimplemented;
2498         }
2499         val = 0;
2500         if (optlen < sizeof(uint32_t)) {
2501             return -TARGET_EINVAL;
2502         }
2503         if (get_user_u32(val, optval_addr)) {
2504             return -TARGET_EFAULT;
2505         }
2506         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2507                                    sizeof(val)));
2508         break;
2509 #endif /* SOL_NETLINK */
2510     default:
2511     unimplemented:
2512         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2513                       level, optname);
2514         ret = -TARGET_ENOPROTOOPT;
2515     }
2516     return ret;
2517 }
2518 
2519 /* do_getsockopt() must return target values and target errnos. */
2520 static abi_long do_getsockopt(int sockfd, int level, int optname,
2521                               abi_ulong optval_addr, abi_ulong optlen)
2522 {
2523     abi_long ret;
2524     int len, val;
2525     socklen_t lv;
2526 
2527     switch(level) {
2528     case TARGET_SOL_SOCKET:
2529         level = SOL_SOCKET;
2530         switch (optname) {
2531         /* These don't just return a single integer */
2532         case TARGET_SO_PEERNAME:
2533             goto unimplemented;
2534         case TARGET_SO_RCVTIMEO: {
2535             struct timeval tv;
2536             socklen_t tvlen;
2537 
2538             optname = SO_RCVTIMEO;
2539 
2540 get_timeout:
2541             if (get_user_u32(len, optlen)) {
2542                 return -TARGET_EFAULT;
2543             }
2544             if (len < 0) {
2545                 return -TARGET_EINVAL;
2546             }
2547 
2548             tvlen = sizeof(tv);
2549             ret = get_errno(getsockopt(sockfd, level, optname,
2550                                        &tv, &tvlen));
2551             if (ret < 0) {
2552                 return ret;
2553             }
2554             if (len > sizeof(struct target_timeval)) {
2555                 len = sizeof(struct target_timeval);
2556             }
2557             if (copy_to_user_timeval(optval_addr, &tv)) {
2558                 return -TARGET_EFAULT;
2559             }
2560             if (put_user_u32(len, optlen)) {
2561                 return -TARGET_EFAULT;
2562             }
2563             break;
2564         }
2565         case TARGET_SO_SNDTIMEO:
2566             optname = SO_SNDTIMEO;
2567             goto get_timeout;
2568         case TARGET_SO_PEERCRED: {
2569             struct ucred cr;
2570             socklen_t crlen;
2571             struct target_ucred *tcr;
2572 
2573             if (get_user_u32(len, optlen)) {
2574                 return -TARGET_EFAULT;
2575             }
2576             if (len < 0) {
2577                 return -TARGET_EINVAL;
2578             }
2579 
2580             crlen = sizeof(cr);
2581             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2582                                        &cr, &crlen));
2583             if (ret < 0) {
2584                 return ret;
2585             }
2586             if (len > crlen) {
2587                 len = crlen;
2588             }
2589             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             __put_user(cr.pid, &tcr->pid);
2593             __put_user(cr.uid, &tcr->uid);
2594             __put_user(cr.gid, &tcr->gid);
2595             unlock_user_struct(tcr, optval_addr, 1);
2596             if (put_user_u32(len, optlen)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             break;
2600         }
2601         case TARGET_SO_PEERSEC: {
2602             char *name;
2603 
2604             if (get_user_u32(len, optlen)) {
2605                 return -TARGET_EFAULT;
2606             }
2607             if (len < 0) {
2608                 return -TARGET_EINVAL;
2609             }
2610             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2611             if (!name) {
2612                 return -TARGET_EFAULT;
2613             }
2614             lv = len;
2615             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2616                                        name, &lv));
2617             if (put_user_u32(lv, optlen)) {
2618                 ret = -TARGET_EFAULT;
2619             }
2620             unlock_user(name, optval_addr, lv);
2621             break;
2622         }
2623         case TARGET_SO_LINGER:
2624         {
2625             struct linger lg;
2626             socklen_t lglen;
2627             struct target_linger *tlg;
2628 
2629             if (get_user_u32(len, optlen)) {
2630                 return -TARGET_EFAULT;
2631             }
2632             if (len < 0) {
2633                 return -TARGET_EINVAL;
2634             }
2635 
2636             lglen = sizeof(lg);
2637             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2638                                        &lg, &lglen));
2639             if (ret < 0) {
2640                 return ret;
2641             }
2642             if (len > lglen) {
2643                 len = lglen;
2644             }
2645             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2646                 return -TARGET_EFAULT;
2647             }
2648             __put_user(lg.l_onoff, &tlg->l_onoff);
2649             __put_user(lg.l_linger, &tlg->l_linger);
2650             unlock_user_struct(tlg, optval_addr, 1);
2651             if (put_user_u32(len, optlen)) {
2652                 return -TARGET_EFAULT;
2653             }
2654             break;
2655         }
2656         /* Options with 'int' argument.  */
2657         case TARGET_SO_DEBUG:
2658             optname = SO_DEBUG;
2659             goto int_case;
2660         case TARGET_SO_REUSEADDR:
2661             optname = SO_REUSEADDR;
2662             goto int_case;
2663 #ifdef SO_REUSEPORT
2664         case TARGET_SO_REUSEPORT:
2665             optname = SO_REUSEPORT;
2666             goto int_case;
2667 #endif
2668         case TARGET_SO_TYPE:
2669             optname = SO_TYPE;
2670             goto int_case;
2671         case TARGET_SO_ERROR:
2672             optname = SO_ERROR;
2673             goto int_case;
2674         case TARGET_SO_DONTROUTE:
2675             optname = SO_DONTROUTE;
2676             goto int_case;
2677         case TARGET_SO_BROADCAST:
2678             optname = SO_BROADCAST;
2679             goto int_case;
2680         case TARGET_SO_SNDBUF:
2681             optname = SO_SNDBUF;
2682             goto int_case;
2683         case TARGET_SO_RCVBUF:
2684             optname = SO_RCVBUF;
2685             goto int_case;
2686         case TARGET_SO_KEEPALIVE:
2687             optname = SO_KEEPALIVE;
2688             goto int_case;
2689         case TARGET_SO_OOBINLINE:
2690             optname = SO_OOBINLINE;
2691             goto int_case;
2692         case TARGET_SO_NO_CHECK:
2693             optname = SO_NO_CHECK;
2694             goto int_case;
2695         case TARGET_SO_PRIORITY:
2696             optname = SO_PRIORITY;
2697             goto int_case;
2698 #ifdef SO_BSDCOMPAT
2699         case TARGET_SO_BSDCOMPAT:
2700             optname = SO_BSDCOMPAT;
2701             goto int_case;
2702 #endif
2703         case TARGET_SO_PASSCRED:
2704             optname = SO_PASSCRED;
2705             goto int_case;
2706         case TARGET_SO_TIMESTAMP:
2707             optname = SO_TIMESTAMP;
2708             goto int_case;
2709         case TARGET_SO_RCVLOWAT:
2710             optname = SO_RCVLOWAT;
2711             goto int_case;
2712         case TARGET_SO_ACCEPTCONN:
2713             optname = SO_ACCEPTCONN;
2714             goto int_case;
2715         case TARGET_SO_PROTOCOL:
2716             optname = SO_PROTOCOL;
2717             goto int_case;
2718         case TARGET_SO_DOMAIN:
2719             optname = SO_DOMAIN;
2720             goto int_case;
2721         default:
2722             goto int_case;
2723         }
2724         break;
2725     case SOL_TCP:
2726     case SOL_UDP:
2727         /* TCP and UDP options all take an 'int' value.  */
2728     int_case:
2729         if (get_user_u32(len, optlen))
2730             return -TARGET_EFAULT;
2731         if (len < 0)
2732             return -TARGET_EINVAL;
2733         lv = sizeof(lv);
2734         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2735         if (ret < 0)
2736             return ret;
2737         switch (optname) {
2738         case SO_TYPE:
2739             val = host_to_target_sock_type(val);
2740             break;
2741         case SO_ERROR:
2742             val = host_to_target_errno(val);
2743             break;
2744         }
2745         if (len > lv)
2746             len = lv;
2747         if (len == 4) {
2748             if (put_user_u32(val, optval_addr))
2749                 return -TARGET_EFAULT;
2750         } else {
2751             if (put_user_u8(val, optval_addr))
2752                 return -TARGET_EFAULT;
2753         }
2754         if (put_user_u32(len, optlen))
2755             return -TARGET_EFAULT;
2756         break;
2757     case SOL_IP:
2758         switch(optname) {
2759         case IP_TOS:
2760         case IP_TTL:
2761         case IP_HDRINCL:
2762         case IP_ROUTER_ALERT:
2763         case IP_RECVOPTS:
2764         case IP_RETOPTS:
2765         case IP_PKTINFO:
2766         case IP_MTU_DISCOVER:
2767         case IP_RECVERR:
2768         case IP_RECVTOS:
2769 #ifdef IP_FREEBIND
2770         case IP_FREEBIND:
2771 #endif
2772         case IP_MULTICAST_TTL:
2773         case IP_MULTICAST_LOOP:
2774             if (get_user_u32(len, optlen))
2775                 return -TARGET_EFAULT;
2776             if (len < 0)
2777                 return -TARGET_EINVAL;
2778             lv = sizeof(lv);
2779             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2780             if (ret < 0)
2781                 return ret;
2782             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2783                 len = 1;
2784                 if (put_user_u32(len, optlen)
2785                     || put_user_u8(val, optval_addr))
2786                     return -TARGET_EFAULT;
2787             } else {
2788                 if (len > sizeof(int))
2789                     len = sizeof(int);
2790                 if (put_user_u32(len, optlen)
2791                     || put_user_u32(val, optval_addr))
2792                     return -TARGET_EFAULT;
2793             }
2794             break;
2795         default:
2796             ret = -TARGET_ENOPROTOOPT;
2797             break;
2798         }
2799         break;
2800     case SOL_IPV6:
2801         switch (optname) {
2802         case IPV6_MTU_DISCOVER:
2803         case IPV6_MTU:
2804         case IPV6_V6ONLY:
2805         case IPV6_RECVPKTINFO:
2806         case IPV6_UNICAST_HOPS:
2807         case IPV6_MULTICAST_HOPS:
2808         case IPV6_MULTICAST_LOOP:
2809         case IPV6_RECVERR:
2810         case IPV6_RECVHOPLIMIT:
2811         case IPV6_2292HOPLIMIT:
2812         case IPV6_CHECKSUM:
2813         case IPV6_ADDRFORM:
2814         case IPV6_2292PKTINFO:
2815         case IPV6_RECVTCLASS:
2816         case IPV6_RECVRTHDR:
2817         case IPV6_2292RTHDR:
2818         case IPV6_RECVHOPOPTS:
2819         case IPV6_2292HOPOPTS:
2820         case IPV6_RECVDSTOPTS:
2821         case IPV6_2292DSTOPTS:
2822         case IPV6_TCLASS:
2823         case IPV6_ADDR_PREFERENCES:
2824 #ifdef IPV6_RECVPATHMTU
2825         case IPV6_RECVPATHMTU:
2826 #endif
2827 #ifdef IPV6_TRANSPARENT
2828         case IPV6_TRANSPARENT:
2829 #endif
2830 #ifdef IPV6_FREEBIND
2831         case IPV6_FREEBIND:
2832 #endif
2833 #ifdef IPV6_RECVORIGDSTADDR
2834         case IPV6_RECVORIGDSTADDR:
2835 #endif
2836             if (get_user_u32(len, optlen))
2837                 return -TARGET_EFAULT;
2838             if (len < 0)
2839                 return -TARGET_EINVAL;
2840             lv = sizeof(lv);
2841             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2842             if (ret < 0)
2843                 return ret;
2844             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2845                 len = 1;
2846                 if (put_user_u32(len, optlen)
2847                     || put_user_u8(val, optval_addr))
2848                     return -TARGET_EFAULT;
2849             } else {
2850                 if (len > sizeof(int))
2851                     len = sizeof(int);
2852                 if (put_user_u32(len, optlen)
2853                     || put_user_u32(val, optval_addr))
2854                     return -TARGET_EFAULT;
2855             }
2856             break;
2857         default:
2858             ret = -TARGET_ENOPROTOOPT;
2859             break;
2860         }
2861         break;
2862 #ifdef SOL_NETLINK
2863     case SOL_NETLINK:
2864         switch (optname) {
2865         case NETLINK_PKTINFO:
2866         case NETLINK_BROADCAST_ERROR:
2867         case NETLINK_NO_ENOBUFS:
2868 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2869         case NETLINK_LISTEN_ALL_NSID:
2870         case NETLINK_CAP_ACK:
2871 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2872 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2873         case NETLINK_EXT_ACK:
2874 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2875 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2876         case NETLINK_GET_STRICT_CHK:
2877 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2878             if (get_user_u32(len, optlen)) {
2879                 return -TARGET_EFAULT;
2880             }
2881             if (len != sizeof(val)) {
2882                 return -TARGET_EINVAL;
2883             }
2884             lv = len;
2885             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2886             if (ret < 0) {
2887                 return ret;
2888             }
2889             if (put_user_u32(lv, optlen)
2890                 || put_user_u32(val, optval_addr)) {
2891                 return -TARGET_EFAULT;
2892             }
2893             break;
2894 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2895         case NETLINK_LIST_MEMBERSHIPS:
2896         {
2897             uint32_t *results;
2898             int i;
2899             if (get_user_u32(len, optlen)) {
2900                 return -TARGET_EFAULT;
2901             }
2902             if (len < 0) {
2903                 return -TARGET_EINVAL;
2904             }
2905             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2906             if (!results && len > 0) {
2907                 return -TARGET_EFAULT;
2908             }
2909             lv = len;
2910             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2911             if (ret < 0) {
2912                 unlock_user(results, optval_addr, 0);
2913                 return ret;
2914             }
2915             /* swap host endianness to target endianness. */
2916             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2917                 results[i] = tswap32(results[i]);
2918             }
2919             if (put_user_u32(lv, optlen)) {
2920                 return -TARGET_EFAULT;
2921             }
2922             unlock_user(results, optval_addr, 0);
2923             break;
2924         }
2925 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2926         default:
2927             goto unimplemented;
2928         }
2929         break;
2930 #endif /* SOL_NETLINK */
2931     default:
2932     unimplemented:
2933         qemu_log_mask(LOG_UNIMP,
2934                       "getsockopt level=%d optname=%d not yet supported\n",
2935                       level, optname);
2936         ret = -TARGET_EOPNOTSUPP;
2937         break;
2938     }
2939     return ret;
2940 }
2941 
2942 /* Convert target low/high pair representing file offset into the host
2943  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2944  * as the kernel doesn't handle them either.
2945  */
2946 static void target_to_host_low_high(abi_ulong tlow,
2947                                     abi_ulong thigh,
2948                                     unsigned long *hlow,
2949                                     unsigned long *hhigh)
2950 {
2951     uint64_t off = tlow |
2952         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2953         TARGET_LONG_BITS / 2;
2954 
2955     *hlow = off;
2956     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2957 }
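     /*
      * Worked example (illustrative): for a 32-bit guest on a 64-bit host,
      * tlow = 0x00001000 and thigh = 0x2 combine into off = 0x200001000,
      * so *hlow = 0x200001000 and *hhigh = 0.  On a 32-bit host the same
      * offset would split back into *hlow = 0x00001000 and *hhigh = 0x2.
      */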
2958 
2959 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2960                                 abi_ulong count, int copy)
2961 {
2962     struct target_iovec *target_vec;
2963     struct iovec *vec;
2964     abi_ulong total_len, max_len;
2965     int i;
2966     int err = 0;
2967     bool bad_address = false;
2968 
2969     if (count == 0) {
2970         errno = 0;
2971         return NULL;
2972     }
2973     if (count > IOV_MAX) {
2974         errno = EINVAL;
2975         return NULL;
2976     }
2977 
2978     vec = g_try_new0(struct iovec, count);
2979     if (vec == NULL) {
2980         errno = ENOMEM;
2981         return NULL;
2982     }
2983 
2984     target_vec = lock_user(VERIFY_READ, target_addr,
2985                            count * sizeof(struct target_iovec), 1);
2986     if (target_vec == NULL) {
2987         err = EFAULT;
2988         goto fail2;
2989     }
2990 
2991     /* ??? If host page size > target page size, this will result in a
2992        value larger than what we can actually support.  */
2993     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2994     total_len = 0;
2995 
2996     for (i = 0; i < count; i++) {
2997         abi_ulong base = tswapal(target_vec[i].iov_base);
2998         abi_long len = tswapal(target_vec[i].iov_len);
2999 
3000         if (len < 0) {
3001             err = EINVAL;
3002             goto fail;
3003         } else if (len == 0) {
3004             /* Zero length pointer is ignored.  */
3005             vec[i].iov_base = 0;
3006         } else {
3007             vec[i].iov_base = lock_user(type, base, len, copy);
3008             /* If the first buffer pointer is bad, this is a fault.  But
3009              * subsequent bad buffers will result in a partial write; this
3010              * is realized by filling the vector with null pointers and
3011              * zero lengths. */
3012             if (!vec[i].iov_base) {
3013                 if (i == 0) {
3014                     err = EFAULT;
3015                     goto fail;
3016                 } else {
3017                     bad_address = true;
3018                 }
3019             }
3020             if (bad_address) {
3021                 len = 0;
3022             }
3023             if (len > max_len - total_len) {
3024                 len = max_len - total_len;
3025             }
3026         }
3027         vec[i].iov_len = len;
3028         total_len += len;
3029     }
3030 
3031     unlock_user(target_vec, target_addr, 0);
3032     return vec;
3033 
3034  fail:
3035     while (--i >= 0) {
3036         if (tswapal(target_vec[i].iov_len) > 0) {
3037             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3038         }
3039     }
3040     unlock_user(target_vec, target_addr, 0);
3041  fail2:
3042     g_free(vec);
3043     errno = err;
3044     return NULL;
3045 }
3046 
3047 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3048                          abi_ulong count, int copy)
3049 {
3050     struct target_iovec *target_vec;
3051     int i;
3052 
3053     target_vec = lock_user(VERIFY_READ, target_addr,
3054                            count * sizeof(struct target_iovec), 1);
3055     if (target_vec) {
3056         for (i = 0; i < count; i++) {
3057             abi_ulong base = tswapal(target_vec[i].iov_base);
3058             abi_long len = tswapal(target_vec[i].iov_len);
3059             if (len < 0) {
3060                 break;
3061             }
3062             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3063         }
3064         unlock_user(target_vec, target_addr, 0);
3065     }
3066 
3067     g_free(vec);
3068 }
3069 
3070 static inline int target_to_host_sock_type(int *type)
3071 {
3072     int host_type = 0;
3073     int target_type = *type;
3074 
3075     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3076     case TARGET_SOCK_DGRAM:
3077         host_type = SOCK_DGRAM;
3078         break;
3079     case TARGET_SOCK_STREAM:
3080         host_type = SOCK_STREAM;
3081         break;
3082     default:
3083         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3084         break;
3085     }
3086     if (target_type & TARGET_SOCK_CLOEXEC) {
3087 #if defined(SOCK_CLOEXEC)
3088         host_type |= SOCK_CLOEXEC;
3089 #else
3090         return -TARGET_EINVAL;
3091 #endif
3092     }
3093     if (target_type & TARGET_SOCK_NONBLOCK) {
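             /* Prefer an atomic SOCK_NONBLOCK; without it, sock_flags_fixup()
              * emulates the flag with fcntl(O_NONBLOCK) after the socket exists. */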
3094 #if defined(SOCK_NONBLOCK)
3095         host_type |= SOCK_NONBLOCK;
3096 #elif !defined(O_NONBLOCK)
3097         return -TARGET_EINVAL;
3098 #endif
3099     }
3100     *type = host_type;
3101     return 0;
3102 }
3103 
3104 /* Try to emulate socket type flags after socket creation.  */
3105 static int sock_flags_fixup(int fd, int target_type)
3106 {
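         /* Only needed when the host lacks an atomic SOCK_NONBLOCK; there is
          * no equivalent fallback for SOCK_CLOEXEC, which fails earlier with
          * EINVAL in target_to_host_sock_type(). */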
3107 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3108     if (target_type & TARGET_SOCK_NONBLOCK) {
3109         int flags = fcntl(fd, F_GETFL);
3110         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3111             close(fd);
3112             return -TARGET_EINVAL;
3113         }
3114     }
3115 #endif
3116     return fd;
3117 }
3118 
3119 /* do_socket() Must return target values and target errnos. */
3120 static abi_long do_socket(int domain, int type, int protocol)
3121 {
3122     int target_type = type;
3123     int ret;
3124 
3125     ret = target_to_host_sock_type(&type);
3126     if (ret) {
3127         return ret;
3128     }
3129 
3130     if (domain == PF_NETLINK && !(
3131 #ifdef CONFIG_RTNETLINK
3132          protocol == NETLINK_ROUTE ||
3133 #endif
3134          protocol == NETLINK_KOBJECT_UEVENT ||
3135          protocol == NETLINK_AUDIT)) {
3136         return -TARGET_EPROTONOSUPPORT;
3137     }
3138 
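         /* Packet socket protocol numbers (ETH_P_*) are specified in network
          * byte order, so convert the guest's value to the host representation. */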
3139     if (domain == AF_PACKET ||
3140         (domain == AF_INET && type == SOCK_PACKET)) {
3141         protocol = tswap16(protocol);
3142     }
3143 
3144     ret = get_errno(socket(domain, type, protocol));
3145     if (ret >= 0) {
3146         ret = sock_flags_fixup(ret, target_type);
3147         if (type == SOCK_PACKET) {
3148             /* Handle the obsolete SOCK_PACKET case:
3149              * such sockets bind by device name.
3150              */
3151             fd_trans_register(ret, &target_packet_trans);
3152         } else if (domain == PF_NETLINK) {
3153             switch (protocol) {
3154 #ifdef CONFIG_RTNETLINK
3155             case NETLINK_ROUTE:
3156                 fd_trans_register(ret, &target_netlink_route_trans);
3157                 break;
3158 #endif
3159             case NETLINK_KOBJECT_UEVENT:
3160                 /* nothing to do: messages are strings */
3161                 break;
3162             case NETLINK_AUDIT:
3163                 fd_trans_register(ret, &target_netlink_audit_trans);
3164                 break;
3165             default:
3166                 g_assert_not_reached();
3167             }
3168         }
3169     }
3170     return ret;
3171 }
3172 
3173 /* do_bind() Must return target values and target errnos. */
3174 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3175                         socklen_t addrlen)
3176 {
3177     void *addr;
3178     abi_long ret;
3179 
3180     if ((int)addrlen < 0) {
3181         return -TARGET_EINVAL;
3182     }
3183 
3184     addr = alloca(addrlen+1);
3185 
3186     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3187     if (ret)
3188         return ret;
3189 
3190     return get_errno(bind(sockfd, addr, addrlen));
3191 }
3192 
3193 /* do_connect() Must return target values and target errnos. */
3194 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3195                            socklen_t addrlen)
3196 {
3197     void *addr;
3198     abi_long ret;
3199 
3200     if ((int)addrlen < 0) {
3201         return -TARGET_EINVAL;
3202     }
3203 
3204     addr = alloca(addrlen+1);
3205 
3206     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3207     if (ret)
3208         return ret;
3209 
3210     return get_errno(safe_connect(sockfd, addr, addrlen));
3211 }
3212 
3213 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3214 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3215                                       int flags, int send)
3216 {
3217     abi_long ret, len;
3218     struct msghdr msg;
3219     abi_ulong count;
3220     struct iovec *vec;
3221     abi_ulong target_vec;
3222 
3223     if (msgp->msg_name) {
3224         msg.msg_namelen = tswap32(msgp->msg_namelen);
3225         msg.msg_name = alloca(msg.msg_namelen+1);
3226         ret = target_to_host_sockaddr(fd, msg.msg_name,
3227                                       tswapal(msgp->msg_name),
3228                                       msg.msg_namelen);
3229         if (ret == -TARGET_EFAULT) {
3230             /* For connected sockets msg_name and msg_namelen must
3231              * be ignored, so returning EFAULT immediately is wrong.
3232              * Instead, pass a bad msg_name to the host kernel, and
3233              * let it decide whether to return EFAULT or not.
3234              */
3235             msg.msg_name = (void *)-1;
3236         } else if (ret) {
3237             goto out2;
3238         }
3239     } else {
3240         msg.msg_name = NULL;
3241         msg.msg_namelen = 0;
3242     }
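         /* Allocate twice the target control length so there is headroom for
          * host cmsg headers, which can be larger than the target's. */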
3243     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3244     msg.msg_control = alloca(msg.msg_controllen);
3245     memset(msg.msg_control, 0, msg.msg_controllen);
3246 
3247     msg.msg_flags = tswap32(msgp->msg_flags);
3248 
3249     count = tswapal(msgp->msg_iovlen);
3250     target_vec = tswapal(msgp->msg_iov);
3251 
3252     if (count > IOV_MAX) {
3253         /* sendmsg/recvmsg return a different errno for this condition than
3254          * readv/writev, so we must catch it here before lock_iovec() does.
3255          */
3256         ret = -TARGET_EMSGSIZE;
3257         goto out2;
3258     }
3259 
3260     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3261                      target_vec, count, send);
3262     if (vec == NULL) {
3263         ret = -host_to_target_errno(errno);
3264         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3265         if (!send || ret) {
3266             goto out2;
3267         }
3268     }
3269     msg.msg_iovlen = count;
3270     msg.msg_iov = vec;
3271 
3272     if (send) {
3273         if (fd_trans_target_to_host_data(fd)) {
3274             void *host_msg;
3275 
3276             host_msg = g_malloc(msg.msg_iov->iov_len);
3277             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3278             ret = fd_trans_target_to_host_data(fd)(host_msg,
3279                                                    msg.msg_iov->iov_len);
3280             if (ret >= 0) {
3281                 msg.msg_iov->iov_base = host_msg;
3282                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3283             }
3284             g_free(host_msg);
3285         } else {
3286             ret = target_to_host_cmsg(&msg, msgp);
3287             if (ret == 0) {
3288                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3289             }
3290         }
3291     } else {
3292         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3293         if (!is_error(ret)) {
3294             len = ret;
3295             if (fd_trans_host_to_target_data(fd)) {
3296                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3297                                                MIN(msg.msg_iov->iov_len, len));
3298             }
3299             if (!is_error(ret)) {
3300                 ret = host_to_target_cmsg(msgp, &msg);
3301             }
3302             if (!is_error(ret)) {
3303                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3304                 msgp->msg_flags = tswap32(msg.msg_flags);
3305                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3306                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3307                                     msg.msg_name, msg.msg_namelen);
3308                     if (ret) {
3309                         goto out;
3310                     }
3311                 }
3312 
3313                 ret = len;
3314             }
3315         }
3316     }
3317 
3318 out:
3319     if (vec) {
3320         unlock_iovec(vec, target_vec, count, !send);
3321     }
3322 out2:
3323     return ret;
3324 }
3325 
3326 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3327                                int flags, int send)
3328 {
3329     abi_long ret;
3330     struct target_msghdr *msgp;
3331 
3332     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3333                           msgp,
3334                           target_msg,
3335                           send ? 1 : 0)) {
3336         return -TARGET_EFAULT;
3337     }
3338     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3339     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3340     return ret;
3341 }
3342 
3343 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3344  * so it might not have this *mmsg-specific flag either.
3345  */
3346 #ifndef MSG_WAITFORONE
3347 #define MSG_WAITFORONE 0x10000
3348 #endif
3349 
3350 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3351                                 unsigned int vlen, unsigned int flags,
3352                                 int send)
3353 {
3354     struct target_mmsghdr *mmsgp;
3355     abi_long ret = 0;
3356     int i;
3357 
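         /* Like the kernel, silently cap vlen at UIO_MAXIOV instead of failing. */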
3358     if (vlen > UIO_MAXIOV) {
3359         vlen = UIO_MAXIOV;
3360     }
3361 
3362     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3363     if (!mmsgp) {
3364         return -TARGET_EFAULT;
3365     }
3366 
3367     for (i = 0; i < vlen; i++) {
3368         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3369         if (is_error(ret)) {
3370             break;
3371         }
3372         mmsgp[i].msg_len = tswap32(ret);
3373         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3374         if (flags & MSG_WAITFORONE) {
3375             flags |= MSG_DONTWAIT;
3376         }
3377     }
3378 
3379     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3380 
3381     /* Return number of datagrams sent if we sent any at all;
3382      * otherwise return the error.
3383      */
3384     if (i) {
3385         return i;
3386     }
3387     return ret;
3388 }
3389 
3390 /* do_accept4() Must return target values and target errnos. */
3391 static abi_long do_accept4(int fd, abi_ulong target_addr,
3392                            abi_ulong target_addrlen_addr, int flags)
3393 {
3394     socklen_t addrlen, ret_addrlen;
3395     void *addr;
3396     abi_long ret;
3397     int host_flags;
3398 
3399     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3400         return -TARGET_EINVAL;
3401     }
3402 
3403     host_flags = 0;
3404     if (flags & TARGET_SOCK_NONBLOCK) {
3405         host_flags |= SOCK_NONBLOCK;
3406     }
3407     if (flags & TARGET_SOCK_CLOEXEC) {
3408         host_flags |= SOCK_CLOEXEC;
3409     }
3410 
3411     if (target_addr == 0) {
3412         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3413     }
3414 
3415     /* linux returns EFAULT if addrlen pointer is invalid */
3416     if (get_user_u32(addrlen, target_addrlen_addr))
3417         return -TARGET_EFAULT;
3418 
3419     if ((int)addrlen < 0) {
3420         return -TARGET_EINVAL;
3421     }
3422 
3423     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3424         return -TARGET_EFAULT;
3425     }
3426 
3427     addr = alloca(addrlen);
3428 
3429     ret_addrlen = addrlen;
3430     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3431     if (!is_error(ret)) {
3432         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3433         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3434             ret = -TARGET_EFAULT;
3435         }
3436     }
3437     return ret;
3438 }
3439 
3440 /* do_getpeername() Must return target values and target errnos. */
3441 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3442                                abi_ulong target_addrlen_addr)
3443 {
3444     socklen_t addrlen, ret_addrlen;
3445     void *addr;
3446     abi_long ret;
3447 
3448     if (get_user_u32(addrlen, target_addrlen_addr))
3449         return -TARGET_EFAULT;
3450 
3451     if ((int)addrlen < 0) {
3452         return -TARGET_EINVAL;
3453     }
3454 
3455     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3456         return -TARGET_EFAULT;
3457     }
3458 
3459     addr = alloca(addrlen);
3460 
3461     ret_addrlen = addrlen;
3462     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3463     if (!is_error(ret)) {
3464         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3465         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3466             ret = -TARGET_EFAULT;
3467         }
3468     }
3469     return ret;
3470 }
3471 
3472 /* do_getsockname() Must return target values and target errnos. */
3473 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3474                                abi_ulong target_addrlen_addr)
3475 {
3476     socklen_t addrlen, ret_addrlen;
3477     void *addr;
3478     abi_long ret;
3479 
3480     if (get_user_u32(addrlen, target_addrlen_addr))
3481         return -TARGET_EFAULT;
3482 
3483     if ((int)addrlen < 0) {
3484         return -TARGET_EINVAL;
3485     }
3486 
3487     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3488         return -TARGET_EFAULT;
3489     }
3490 
3491     addr = alloca(addrlen);
3492 
3493     ret_addrlen = addrlen;
3494     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3495     if (!is_error(ret)) {
3496         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3497         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3498             ret = -TARGET_EFAULT;
3499         }
3500     }
3501     return ret;
3502 }
3503 
3504 /* do_socketpair() Must return target values and target errnos. */
3505 static abi_long do_socketpair(int domain, int type, int protocol,
3506                               abi_ulong target_tab_addr)
3507 {
3508     int tab[2];
3509     abi_long ret;
3510 
3511     target_to_host_sock_type(&type);
3512 
3513     ret = get_errno(socketpair(domain, type, protocol, tab));
3514     if (!is_error(ret)) {
3515         if (put_user_s32(tab[0], target_tab_addr)
3516             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3517             ret = -TARGET_EFAULT;
3518     }
3519     return ret;
3520 }
3521 
3522 /* do_sendto() Must return target values and target errnos. */
3523 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3524                           abi_ulong target_addr, socklen_t addrlen)
3525 {
3526     void *addr;
3527     void *host_msg;
3528     void *copy_msg = NULL;
3529     abi_long ret;
3530 
3531     if ((int)addrlen < 0) {
3532         return -TARGET_EINVAL;
3533     }
3534 
3535     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3536     if (!host_msg)
3537         return -TARGET_EFAULT;
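         /* When this fd has a data translator registered (e.g. a netlink fd),
          * rewrite the payload in a private copy so the guest buffer stays intact. */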
3538     if (fd_trans_target_to_host_data(fd)) {
3539         copy_msg = host_msg;
3540         host_msg = g_malloc(len);
3541         memcpy(host_msg, copy_msg, len);
3542         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3543         if (ret < 0) {
3544             goto fail;
3545         }
3546     }
3547     if (target_addr) {
3548         addr = alloca(addrlen+1);
3549         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3550         if (ret) {
3551             goto fail;
3552         }
3553         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3554     } else {
3555         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3556     }
3557 fail:
3558     if (copy_msg) {
3559         g_free(host_msg);
3560         host_msg = copy_msg;
3561     }
3562     unlock_user(host_msg, msg, 0);
3563     return ret;
3564 }
3565 
3566 /* do_recvfrom() Must return target values and target errnos. */
3567 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3568                             abi_ulong target_addr,
3569                             abi_ulong target_addrlen)
3570 {
3571     socklen_t addrlen, ret_addrlen;
3572     void *addr;
3573     void *host_msg;
3574     abi_long ret;
3575 
3576     if (!msg) {
3577         host_msg = NULL;
3578     } else {
3579         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3580         if (!host_msg) {
3581             return -TARGET_EFAULT;
3582         }
3583     }
3584     if (target_addr) {
3585         if (get_user_u32(addrlen, target_addrlen)) {
3586             ret = -TARGET_EFAULT;
3587             goto fail;
3588         }
3589         if ((int)addrlen < 0) {
3590             ret = -TARGET_EINVAL;
3591             goto fail;
3592         }
3593         addr = alloca(addrlen);
3594         ret_addrlen = addrlen;
3595         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3596                                       addr, &ret_addrlen));
3597     } else {
3598         addr = NULL; /* To keep compiler quiet.  */
3599         addrlen = 0; /* To keep compiler quiet.  */
3600         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3601     }
3602     if (!is_error(ret)) {
3603         if (fd_trans_host_to_target_data(fd)) {
3604             abi_long trans;
3605             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3606             if (is_error(trans)) {
3607                 ret = trans;
3608                 goto fail;
3609             }
3610         }
3611         if (target_addr) {
3612             host_to_target_sockaddr(target_addr, addr,
3613                                     MIN(addrlen, ret_addrlen));
3614             if (put_user_u32(ret_addrlen, target_addrlen)) {
3615                 ret = -TARGET_EFAULT;
3616                 goto fail;
3617             }
3618         }
3619         unlock_user(host_msg, msg, len);
3620     } else {
3621 fail:
3622         unlock_user(host_msg, msg, 0);
3623     }
3624     return ret;
3625 }
3626 
3627 #ifdef TARGET_NR_socketcall
3628 /* do_socketcall() must return target values and target errnos. */
3629 static abi_long do_socketcall(int num, abi_ulong vptr)
3630 {
3631     static const unsigned nargs[] = { /* number of arguments per operation */
3632         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3633         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3634         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3635         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3636         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3637         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3638         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3639         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3640         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3641         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3642         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3643         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3644         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3645         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3646         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3647         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3648         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3649         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3650         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3651         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3652     };
3653     abi_long a[6]; /* max 6 args */
3654     unsigned i;
3655 
3656     /* check the range of the first argument num */
3657     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3658     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3659         return -TARGET_EINVAL;
3660     }
3661     /* ensure we have space for args */
3662     if (nargs[num] > ARRAY_SIZE(a)) {
3663         return -TARGET_EINVAL;
3664     }
3665     /* collect the arguments in a[] according to nargs[] */
3666     for (i = 0; i < nargs[num]; ++i) {
3667         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3668             return -TARGET_EFAULT;
3669         }
3670     }
3671     /* now when we have the args, invoke the appropriate underlying function */
3672     switch (num) {
3673     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3674         return do_socket(a[0], a[1], a[2]);
3675     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3676         return do_bind(a[0], a[1], a[2]);
3677     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3678         return do_connect(a[0], a[1], a[2]);
3679     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3680         return get_errno(listen(a[0], a[1]));
3681     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3682         return do_accept4(a[0], a[1], a[2], 0);
3683     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3684         return do_getsockname(a[0], a[1], a[2]);
3685     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3686         return do_getpeername(a[0], a[1], a[2]);
3687     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3688         return do_socketpair(a[0], a[1], a[2], a[3]);
3689     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3690         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3691     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3692         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3693     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3694         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3695     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3696         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3697     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3698         return get_errno(shutdown(a[0], a[1]));
3699     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3700         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3701     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3702         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3703     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3704         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3705     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3706         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3707     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3708         return do_accept4(a[0], a[1], a[2], a[3]);
3709     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3710         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3711     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3712         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3713     default:
3714         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3715         return -TARGET_EINVAL;
3716     }
3717 }
3718 #endif
3719 
3720 #ifndef TARGET_SEMID64_DS
3721 /* asm-generic version of this struct */
3722 struct target_semid64_ds
3723 {
3724   struct target_ipc_perm sem_perm;
3725   abi_ulong sem_otime;
3726 #if TARGET_ABI_BITS == 32
3727   abi_ulong __unused1;
3728 #endif
3729   abi_ulong sem_ctime;
3730 #if TARGET_ABI_BITS == 32
3731   abi_ulong __unused2;
3732 #endif
3733   abi_ulong sem_nsems;
3734   abi_ulong __unused3;
3735   abi_ulong __unused4;
3736 };
3737 #endif
3738 
3739 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3740                                                abi_ulong target_addr)
3741 {
3742     struct target_ipc_perm *target_ip;
3743     struct target_semid64_ds *target_sd;
3744 
3745     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3746         return -TARGET_EFAULT;
3747     target_ip = &(target_sd->sem_perm);
3748     host_ip->__key = tswap32(target_ip->__key);
3749     host_ip->uid = tswap32(target_ip->uid);
3750     host_ip->gid = tswap32(target_ip->gid);
3751     host_ip->cuid = tswap32(target_ip->cuid);
3752     host_ip->cgid = tswap32(target_ip->cgid);
3753 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3754     host_ip->mode = tswap32(target_ip->mode);
3755 #else
3756     host_ip->mode = tswap16(target_ip->mode);
3757 #endif
3758 #if defined(TARGET_PPC)
3759     host_ip->__seq = tswap32(target_ip->__seq);
3760 #else
3761     host_ip->__seq = tswap16(target_ip->__seq);
3762 #endif
3763     unlock_user_struct(target_sd, target_addr, 0);
3764     return 0;
3765 }
3766 
3767 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3768                                                struct ipc_perm *host_ip)
3769 {
3770     struct target_ipc_perm *target_ip;
3771     struct target_semid64_ds *target_sd;
3772 
3773     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3774         return -TARGET_EFAULT;
3775     target_ip = &(target_sd->sem_perm);
3776     target_ip->__key = tswap32(host_ip->__key);
3777     target_ip->uid = tswap32(host_ip->uid);
3778     target_ip->gid = tswap32(host_ip->gid);
3779     target_ip->cuid = tswap32(host_ip->cuid);
3780     target_ip->cgid = tswap32(host_ip->cgid);
3781 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3782     target_ip->mode = tswap32(host_ip->mode);
3783 #else
3784     target_ip->mode = tswap16(host_ip->mode);
3785 #endif
3786 #if defined(TARGET_PPC)
3787     target_ip->__seq = tswap32(host_ip->__seq);
3788 #else
3789     target_ip->__seq = tswap16(host_ip->__seq);
3790 #endif
3791     unlock_user_struct(target_sd, target_addr, 1);
3792     return 0;
3793 }
3794 
3795 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3796                                                abi_ulong target_addr)
3797 {
3798     struct target_semid64_ds *target_sd;
3799 
3800     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3801         return -TARGET_EFAULT;
3802     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3803         return -TARGET_EFAULT;
3804     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3805     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3806     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3807     unlock_user_struct(target_sd, target_addr, 0);
3808     return 0;
3809 }
3810 
3811 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3812                                                struct semid_ds *host_sd)
3813 {
3814     struct target_semid64_ds *target_sd;
3815 
3816     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3817         return -TARGET_EFAULT;
3818     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3819         return -TARGET_EFAULT;
3820     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3821     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3822     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3823     unlock_user_struct(target_sd, target_addr, 1);
3824     return 0;
3825 }
3826 
3827 struct target_seminfo {
3828     int semmap;
3829     int semmni;
3830     int semmns;
3831     int semmnu;
3832     int semmsl;
3833     int semopm;
3834     int semume;
3835     int semusz;
3836     int semvmx;
3837     int semaem;
3838 };
3839 
3840 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3841                                               struct seminfo *host_seminfo)
3842 {
3843     struct target_seminfo *target_seminfo;
3844     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3845         return -TARGET_EFAULT;
3846     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3847     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3848     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3849     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3850     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3851     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3852     __put_user(host_seminfo->semume, &target_seminfo->semume);
3853     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3854     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3855     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3856     unlock_user_struct(target_seminfo, target_addr, 1);
3857     return 0;
3858 }
3859 
3860 union semun {
3861 	int val;
3862 	struct semid_ds *buf;
3863 	unsigned short *array;
3864 	struct seminfo *__buf;
3865 };
3866 
3867 union target_semun {
3868 	int val;
3869 	abi_ulong buf;
3870 	abi_ulong array;
3871 	abi_ulong __buf;
3872 };
3873 
3874 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3875                                                abi_ulong target_addr)
3876 {
3877     int nsems;
3878     unsigned short *array;
3879     union semun semun;
3880     struct semid_ds semid_ds;
3881     int i, ret;
3882 
3883     semun.buf = &semid_ds;
3884 
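         /* Query the set with IPC_STAT first to learn how many semaphores
          * (sem_nsems) need to be copied. */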
3885     ret = semctl(semid, 0, IPC_STAT, semun);
3886     if (ret == -1)
3887         return get_errno(ret);
3888 
3889     nsems = semid_ds.sem_nsems;
3890 
3891     *host_array = g_try_new(unsigned short, nsems);
3892     if (!*host_array) {
3893         return -TARGET_ENOMEM;
3894     }
3895     array = lock_user(VERIFY_READ, target_addr,
3896                       nsems*sizeof(unsigned short), 1);
3897     if (!array) {
3898         g_free(*host_array);
3899         return -TARGET_EFAULT;
3900     }
3901 
3902     for(i=0; i<nsems; i++) {
3903         __get_user((*host_array)[i], &array[i]);
3904     }
3905     unlock_user(array, target_addr, 0);
3906 
3907     return 0;
3908 }
3909 
3910 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3911                                                unsigned short **host_array)
3912 {
3913     int nsems;
3914     unsigned short *array;
3915     union semun semun;
3916     struct semid_ds semid_ds;
3917     int i, ret;
3918 
3919     semun.buf = &semid_ds;
3920 
3921     ret = semctl(semid, 0, IPC_STAT, semun);
3922     if (ret == -1)
3923         return get_errno(ret);
3924 
3925     nsems = semid_ds.sem_nsems;
3926 
3927     array = lock_user(VERIFY_WRITE, target_addr,
3928                       nsems*sizeof(unsigned short), 0);
3929     if (!array)
3930         return -TARGET_EFAULT;
3931 
3932     for(i=0; i<nsems; i++) {
3933         __put_user((*host_array)[i], &array[i]);
3934     }
3935     g_free(*host_array);
3936     unlock_user(array, target_addr, 1);
3937 
3938     return 0;
3939 }
3940 
3941 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3942                                  abi_ulong target_arg)
3943 {
3944     union target_semun target_su = { .buf = target_arg };
3945     union semun arg;
3946     struct semid_ds dsarg;
3947     unsigned short *array = NULL;
3948     struct seminfo seminfo;
3949     abi_long ret = -TARGET_EINVAL;
3950     abi_long err;
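         /* Strip the IPC_64 and other version bits; the host libc selects
          * the structure layout itself. */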
3951     cmd &= 0xff;
3952 
3953     switch( cmd ) {
3954 	case GETVAL:
3955 	case SETVAL:
3956             /* In 64 bit cross-endian situations, we will erroneously pick up
3957              * the wrong half of the union for the "val" element.  To rectify
3958              * this, the entire 8-byte structure is byteswapped, followed by
3959 	     * a swap of the 4 byte val field. In other cases, the data is
3960 	     * already in proper host byte order. */
3961 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3962 		target_su.buf = tswapal(target_su.buf);
3963 		arg.val = tswap32(target_su.val);
3964 	    } else {
3965 		arg.val = target_su.val;
3966 	    }
3967             ret = get_errno(semctl(semid, semnum, cmd, arg));
3968             break;
3969 	case GETALL:
3970 	case SETALL:
3971             err = target_to_host_semarray(semid, &array, target_su.array);
3972             if (err)
3973                 return err;
3974             arg.array = array;
3975             ret = get_errno(semctl(semid, semnum, cmd, arg));
3976             err = host_to_target_semarray(semid, target_su.array, &array);
3977             if (err)
3978                 return err;
3979             break;
3980 	case IPC_STAT:
3981 	case IPC_SET:
3982 	case SEM_STAT:
3983             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3984             if (err)
3985                 return err;
3986             arg.buf = &dsarg;
3987             ret = get_errno(semctl(semid, semnum, cmd, arg));
3988             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3989             if (err)
3990                 return err;
3991             break;
3992 	case IPC_INFO:
3993 	case SEM_INFO:
3994             arg.__buf = &seminfo;
3995             ret = get_errno(semctl(semid, semnum, cmd, arg));
3996             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3997             if (err)
3998                 return err;
3999             break;
4000 	case IPC_RMID:
4001 	case GETPID:
4002 	case GETNCNT:
4003 	case GETZCNT:
4004             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4005             break;
4006     }
4007 
4008     return ret;
4009 }
4010 
4011 struct target_sembuf {
4012     unsigned short sem_num;
4013     short sem_op;
4014     short sem_flg;
4015 };
4016 
4017 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4018                                              abi_ulong target_addr,
4019                                              unsigned nsops)
4020 {
4021     struct target_sembuf *target_sembuf;
4022     int i;
4023 
4024     target_sembuf = lock_user(VERIFY_READ, target_addr,
4025                               nsops*sizeof(struct target_sembuf), 1);
4026     if (!target_sembuf)
4027         return -TARGET_EFAULT;
4028 
4029     for(i=0; i<nsops; i++) {
4030         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4031         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4032         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4033     }
4034 
4035     unlock_user(target_sembuf, target_addr, 0);
4036 
4037     return 0;
4038 }
4039 
4040 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4041     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4042 
4043 /*
4044  * This macro is required to handle the s390 variant, which passes the
4045  * arguments in a different order than the default.
4046  */
4047 #ifdef __s390x__
4048 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4049   (__nsops), (__timeout), (__sops)
4050 #else
4051 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4052   (__nsops), 0, (__sops), (__timeout)
4053 #endif
4054 
4055 static inline abi_long do_semtimedop(int semid,
4056                                      abi_long ptr,
4057                                      unsigned nsops,
4058                                      abi_long timeout, bool time64)
4059 {
4060     struct sembuf *sops;
4061     struct timespec ts, *pts = NULL;
4062     abi_long ret;
4063 
4064     if (timeout) {
4065         pts = &ts;
4066         if (time64) {
4067             if (target_to_host_timespec64(pts, timeout)) {
4068                 return -TARGET_EFAULT;
4069             }
4070         } else {
4071             if (target_to_host_timespec(pts, timeout)) {
4072                 return -TARGET_EFAULT;
4073             }
4074         }
4075     }
4076 
4077     if (nsops > TARGET_SEMOPM) {
4078         return -TARGET_E2BIG;
4079     }
4080 
4081     sops = g_new(struct sembuf, nsops);
4082 
4083     if (target_to_host_sembuf(sops, ptr, nsops)) {
4084         g_free(sops);
4085         return -TARGET_EFAULT;
4086     }
4087 
4088     ret = -TARGET_ENOSYS;
4089 #ifdef __NR_semtimedop
4090     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4091 #endif
4092 #ifdef __NR_ipc
4093     if (ret == -TARGET_ENOSYS) {
4094         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4095                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4096     }
4097 #endif
4098     g_free(sops);
4099     return ret;
4100 }
4101 #endif
4102 
4103 struct target_msqid_ds
4104 {
4105     struct target_ipc_perm msg_perm;
4106     abi_ulong msg_stime;
4107 #if TARGET_ABI_BITS == 32
4108     abi_ulong __unused1;
4109 #endif
4110     abi_ulong msg_rtime;
4111 #if TARGET_ABI_BITS == 32
4112     abi_ulong __unused2;
4113 #endif
4114     abi_ulong msg_ctime;
4115 #if TARGET_ABI_BITS == 32
4116     abi_ulong __unused3;
4117 #endif
4118     abi_ulong __msg_cbytes;
4119     abi_ulong msg_qnum;
4120     abi_ulong msg_qbytes;
4121     abi_ulong msg_lspid;
4122     abi_ulong msg_lrpid;
4123     abi_ulong __unused4;
4124     abi_ulong __unused5;
4125 };
4126 
4127 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4128                                                abi_ulong target_addr)
4129 {
4130     struct target_msqid_ds *target_md;
4131 
4132     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4133         return -TARGET_EFAULT;
4134     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4135         return -TARGET_EFAULT;
4136     host_md->msg_stime = tswapal(target_md->msg_stime);
4137     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4138     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4139     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4140     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4141     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4142     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4143     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4144     unlock_user_struct(target_md, target_addr, 0);
4145     return 0;
4146 }
4147 
4148 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4149                                                struct msqid_ds *host_md)
4150 {
4151     struct target_msqid_ds *target_md;
4152 
4153     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4154         return -TARGET_EFAULT;
4155     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4156         return -TARGET_EFAULT;
4157     target_md->msg_stime = tswapal(host_md->msg_stime);
4158     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4159     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4160     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4161     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4162     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4163     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4164     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4165     unlock_user_struct(target_md, target_addr, 1);
4166     return 0;
4167 }
4168 
4169 struct target_msginfo {
4170     int msgpool;
4171     int msgmap;
4172     int msgmax;
4173     int msgmnb;
4174     int msgmni;
4175     int msgssz;
4176     int msgtql;
4177     unsigned short int msgseg;
4178 };
4179 
4180 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4181                                               struct msginfo *host_msginfo)
4182 {
4183     struct target_msginfo *target_msginfo;
4184     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4185         return -TARGET_EFAULT;
4186     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4187     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4188     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4189     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4190     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4191     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4192     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4193     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4194     unlock_user_struct(target_msginfo, target_addr, 1);
4195     return 0;
4196 }
4197 
4198 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4199 {
4200     struct msqid_ds dsarg;
4201     struct msginfo msginfo;
4202     abi_long ret = -TARGET_EINVAL;
4203 
4204     cmd &= 0xff;
4205 
4206     switch (cmd) {
4207     case IPC_STAT:
4208     case IPC_SET:
4209     case MSG_STAT:
4210         if (target_to_host_msqid_ds(&dsarg,ptr))
4211             return -TARGET_EFAULT;
4212         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4213         if (host_to_target_msqid_ds(ptr,&dsarg))
4214             return -TARGET_EFAULT;
4215         break;
4216     case IPC_RMID:
4217         ret = get_errno(msgctl(msgid, cmd, NULL));
4218         break;
4219     case IPC_INFO:
4220     case MSG_INFO:
4221         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4222         if (host_to_target_msginfo(ptr, &msginfo))
4223             return -TARGET_EFAULT;
4224         break;
4225     }
4226 
4227     return ret;
4228 }
4229 
4230 struct target_msgbuf {
4231     abi_long mtype;
4232     char	mtext[1];
4233 };
4234 
4235 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4236                                  ssize_t msgsz, int msgflg)
4237 {
4238     struct target_msgbuf *target_mb;
4239     struct msgbuf *host_mb;
4240     abi_long ret = 0;
4241 
4242     if (msgsz < 0) {
4243         return -TARGET_EINVAL;
4244     }
4245 
4246     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4247         return -TARGET_EFAULT;
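         /* sizeof(long) accounts for the host msgbuf's native mtype field
          * that precedes the message text. */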
4248     host_mb = g_try_malloc(msgsz + sizeof(long));
4249     if (!host_mb) {
4250         unlock_user_struct(target_mb, msgp, 0);
4251         return -TARGET_ENOMEM;
4252     }
4253     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4254     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4255     ret = -TARGET_ENOSYS;
4256 #ifdef __NR_msgsnd
4257     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4258 #endif
4259 #ifdef __NR_ipc
4260     if (ret == -TARGET_ENOSYS) {
4261 #ifdef __s390x__
4262         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4263                                  host_mb));
4264 #else
4265         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4266                                  host_mb, 0));
4267 #endif
4268     }
4269 #endif
4270     g_free(host_mb);
4271     unlock_user_struct(target_mb, msgp, 0);
4272 
4273     return ret;
4274 }
4275 
4276 #ifdef __NR_ipc
4277 #if defined(__sparc__)
4278 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4279 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4280 #elif defined(__s390x__)
4281 /* The s390 sys_ipc variant has only five parameters.  */
4282 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4283     ((long int[]){(long int)__msgp, __msgtyp})
4284 #else
4285 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4286     ((long int[]){(long int)__msgp, __msgtyp}), 0
4287 #endif
4288 #endif
4289 
4290 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4291                                  ssize_t msgsz, abi_long msgtyp,
4292                                  int msgflg)
4293 {
4294     struct target_msgbuf *target_mb;
4295     char *target_mtext;
4296     struct msgbuf *host_mb;
4297     abi_long ret = 0;
4298 
4299     if (msgsz < 0) {
4300         return -TARGET_EINVAL;
4301     }
4302 
4303     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4304         return -TARGET_EFAULT;
4305 
4306     host_mb = g_try_malloc(msgsz + sizeof(long));
4307     if (!host_mb) {
4308         ret = -TARGET_ENOMEM;
4309         goto end;
4310     }
4311     ret = -TARGET_ENOSYS;
4312 #ifdef __NR_msgrcv
4313     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4314 #endif
4315 #ifdef __NR_ipc
4316     if (ret == -TARGET_ENOSYS) {
4317         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4318                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4319     }
4320 #endif
4321 
4322     if (ret > 0) {
4323         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4324         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4325         if (!target_mtext) {
4326             ret = -TARGET_EFAULT;
4327             goto end;
4328         }
4329         memcpy(target_mb->mtext, host_mb->mtext, ret);
4330         unlock_user(target_mtext, target_mtext_addr, ret);
4331     }
4332 
4333     target_mb->mtype = tswapal(host_mb->mtype);
4334 
4335 end:
4336     if (target_mb)
4337         unlock_user_struct(target_mb, msgp, 1);
4338     g_free(host_mb);
4339     return ret;
4340 }
4341 
4342 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4343                                                abi_ulong target_addr)
4344 {
4345     struct target_shmid_ds *target_sd;
4346 
4347     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4348         return -TARGET_EFAULT;
4349     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4350         return -TARGET_EFAULT;
4351     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4352     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4353     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4354     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4355     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4356     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4357     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4358     unlock_user_struct(target_sd, target_addr, 0);
4359     return 0;
4360 }
4361 
4362 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4363                                                struct shmid_ds *host_sd)
4364 {
4365     struct target_shmid_ds *target_sd;
4366 
4367     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4368         return -TARGET_EFAULT;
4369     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4370         return -TARGET_EFAULT;
4371     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4372     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4373     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4374     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4375     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4376     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4377     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4378     unlock_user_struct(target_sd, target_addr, 1);
4379     return 0;
4380 }
4381 
4382 struct  target_shminfo {
4383     abi_ulong shmmax;
4384     abi_ulong shmmin;
4385     abi_ulong shmmni;
4386     abi_ulong shmseg;
4387     abi_ulong shmall;
4388 };
4389 
4390 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4391                                               struct shminfo *host_shminfo)
4392 {
4393     struct target_shminfo *target_shminfo;
4394     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4395         return -TARGET_EFAULT;
4396     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4397     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4398     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4399     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4400     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4401     unlock_user_struct(target_shminfo, target_addr, 1);
4402     return 0;
4403 }
4404 
4405 struct target_shm_info {
4406     int used_ids;
4407     abi_ulong shm_tot;
4408     abi_ulong shm_rss;
4409     abi_ulong shm_swp;
4410     abi_ulong swap_attempts;
4411     abi_ulong swap_successes;
4412 };
4413 
4414 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4415                                                struct shm_info *host_shm_info)
4416 {
4417     struct target_shm_info *target_shm_info;
4418     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4419         return -TARGET_EFAULT;
4420     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4421     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4422     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4423     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4424     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4425     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4426     unlock_user_struct(target_shm_info, target_addr, 1);
4427     return 0;
4428 }
4429 
4430 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4431 {
4432     struct shmid_ds dsarg;
4433     struct shminfo shminfo;
4434     struct shm_info shm_info;
4435     abi_long ret = -TARGET_EINVAL;
4436 
4437     cmd &= 0xff;
4438 
4439     switch(cmd) {
4440     case IPC_STAT:
4441     case IPC_SET:
4442     case SHM_STAT:
4443         if (target_to_host_shmid_ds(&dsarg, buf))
4444             return -TARGET_EFAULT;
4445         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4446         if (host_to_target_shmid_ds(buf, &dsarg))
4447             return -TARGET_EFAULT;
4448         break;
4449     case IPC_INFO:
4450         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4451         if (host_to_target_shminfo(buf, &shminfo))
4452             return -TARGET_EFAULT;
4453         break;
4454     case SHM_INFO:
4455         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4456         if (host_to_target_shm_info(buf, &shm_info))
4457             return -TARGET_EFAULT;
4458         break;
4459     case IPC_RMID:
4460     case SHM_LOCK:
4461     case SHM_UNLOCK:
4462         ret = get_errno(shmctl(shmid, cmd, NULL));
4463         break;
4464     }
4465 
4466     return ret;
4467 }
4468 
4469 #ifdef TARGET_NR_ipc
4470 /* ??? This only works with linear mappings.  */
4471 /* do_ipc() must return target values and target errnos. */
4472 static abi_long do_ipc(CPUArchState *cpu_env,
4473                        unsigned int call, abi_long first,
4474                        abi_long second, abi_long third,
4475                        abi_long ptr, abi_long fifth)
4476 {
4477     int version;
4478     abi_long ret = 0;
4479 
4480     version = call >> 16;
4481     call &= 0xffff;
4482 
4483     switch (call) {
4484     case IPCOP_semop:
4485         ret = do_semtimedop(first, ptr, second, 0, false);
4486         break;
4487     case IPCOP_semtimedop:
4488     /*
4489      * The s390 sys_ipc variant has only five parameters instead of six
4490      * (as in the default variant). The only difference is the handling of
4491      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4492      * to a struct timespec, while the generic variant uses the fifth parameter.
4493      */
4494 #if defined(TARGET_S390X)
4495         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4496 #else
4497         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4498 #endif
4499         break;
4500 
4501     case IPCOP_semget:
4502         ret = get_errno(semget(first, second, third));
4503         break;
4504 
4505     case IPCOP_semctl: {
4506         /* The semun argument to semctl is passed by value, so dereference the
4507          * ptr argument. */
4508         abi_ulong atptr;
4509         get_user_ual(atptr, ptr);
4510         ret = do_semctl(first, second, third, atptr);
4511         break;
4512     }
4513 
4514     case IPCOP_msgget:
4515         ret = get_errno(msgget(first, second));
4516         break;
4517 
4518     case IPCOP_msgsnd:
4519         ret = do_msgsnd(first, ptr, second, third);
4520         break;
4521 
4522     case IPCOP_msgctl:
4523         ret = do_msgctl(first, second, ptr);
4524         break;
4525 
4526     case IPCOP_msgrcv:
4527         switch (version) {
4528         case 0:
4529             {
4530                 struct target_ipc_kludge {
4531                     abi_long msgp;
4532                     abi_long msgtyp;
4533                 } *tmp;
4534 
4535                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4536                     ret = -TARGET_EFAULT;
4537                     break;
4538                 }
4539 
4540                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4541 
4542                 unlock_user_struct(tmp, ptr, 0);
4543                 break;
4544             }
4545         default:
4546             ret = do_msgrcv(first, ptr, second, fifth, third);
4547         }
4548         break;
4549 
4550     case IPCOP_shmat:
4551         switch (version) {
4552         default:
4553         {
4554             abi_ulong raddr;
4555             raddr = target_shmat(cpu_env, first, ptr, second);
4556             if (is_error(raddr))
4557                 return get_errno(raddr);
4558             if (put_user_ual(raddr, third))
4559                 return -TARGET_EFAULT;
4560             break;
4561         }
4562         case 1:
4563             ret = -TARGET_EINVAL;
4564             break;
4565         }
4566 	break;
4567     case IPCOP_shmdt:
4568         ret = target_shmdt(ptr);
4569 	break;
4570 
4571     case IPCOP_shmget:
4572 	/* IPC_* flag values are the same on all linux platforms */
4573 	ret = get_errno(shmget(first, second, third));
4574 	break;
4575 
4576 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4577     case IPCOP_shmctl:
4578         ret = do_shmctl(first, second, ptr);
4579         break;
4580     default:
4581         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4582                       call, version);
4583 	ret = -TARGET_ENOSYS;
4584 	break;
4585     }
4586     return ret;
4587 }
4588 #endif
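
/*
 * Illustrative guest-side sketch (not part of syscall.c): a semaphore "V"
 * operation of the kind that reaches IPCOP_semop above when the target only
 * provides the multiplexed ipc(2) syscall.  Assumes a Linux guest libc with
 * <sys/sem.h>.
 *
 *     #include <sys/ipc.h>
 *     #include <sys/sem.h>
 *
 *     static int post_one(void)
 *     {
 *         int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
 *         struct sembuf post = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
 *
 *         if (semid < 0) {
 *             return -1;
 *         }
 *         return semop(semid, &post, 1);  /* never blocks: it only increments */
 *     }
 */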
4589 
4590 /* kernel structure types definitions */
4591 
4592 #define STRUCT(name, ...) STRUCT_ ## name,
4593 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4594 enum {
4595 #include "syscall_types.h"
4596 STRUCT_MAX
4597 };
4598 #undef STRUCT
4599 #undef STRUCT_SPECIAL
4600 
4601 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4602 #define STRUCT_SPECIAL(name)
4603 #include "syscall_types.h"
4604 #undef STRUCT
4605 #undef STRUCT_SPECIAL
4606 
4607 #define MAX_STRUCT_SIZE 4096
4608 
4609 #ifdef CONFIG_FIEMAP
4610 /* So fiemap access checks don't overflow on 32 bit systems.
4611  * This is very slightly smaller than the limit imposed by
4612  * the underlying kernel.
4613  */
4614 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4615                             / sizeof(struct fiemap_extent))
4616 
4617 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4618                                        int fd, int cmd, abi_long arg)
4619 {
4620     /* The parameter for this ioctl is a struct fiemap followed
4621      * by an array of struct fiemap_extent whose size is set
4622      * in fiemap->fm_extent_count. The array is filled in by the
4623      * ioctl.
4624      */
4625     int target_size_in, target_size_out;
4626     struct fiemap *fm;
4627     const argtype *arg_type = ie->arg_type;
4628     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4629     void *argptr, *p;
4630     abi_long ret;
4631     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4632     uint32_t outbufsz;
4633     int free_fm = 0;
4634 
4635     assert(arg_type[0] == TYPE_PTR);
4636     assert(ie->access == IOC_RW);
4637     arg_type++;
4638     target_size_in = thunk_type_size(arg_type, 0);
4639     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4640     if (!argptr) {
4641         return -TARGET_EFAULT;
4642     }
4643     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4644     unlock_user(argptr, arg, 0);
4645     fm = (struct fiemap *)buf_temp;
4646     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4647         return -TARGET_EINVAL;
4648     }
4649 
4650     outbufsz = sizeof (*fm) +
4651         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4652 
4653     if (outbufsz > MAX_STRUCT_SIZE) {
4654         /* We can't fit all the extents into the fixed size buffer.
4655          * Allocate one that is large enough and use it instead.
4656          */
4657         fm = g_try_malloc(outbufsz);
4658         if (!fm) {
4659             return -TARGET_ENOMEM;
4660         }
4661         memcpy(fm, buf_temp, sizeof(struct fiemap));
4662         free_fm = 1;
4663     }
4664     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4665     if (!is_error(ret)) {
4666         target_size_out = target_size_in;
4667         /* An extent_count of 0 means we were only counting the extents
4668          * so there are no structs to copy
4669          */
4670         if (fm->fm_extent_count != 0) {
4671             target_size_out += fm->fm_mapped_extents * extent_size;
4672         }
4673         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4674         if (!argptr) {
4675             ret = -TARGET_EFAULT;
4676         } else {
4677             /* Convert the struct fiemap */
4678             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4679             if (fm->fm_extent_count != 0) {
4680                 p = argptr + target_size_in;
4681                 /* ...and then all the struct fiemap_extents */
4682                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4683                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4684                                   THUNK_TARGET);
4685                     p += extent_size;
4686                 }
4687             }
4688             unlock_user(argptr, arg, target_size_out);
4689         }
4690     }
4691     if (free_fm) {
4692         g_free(fm);
4693     }
4694     return ret;
4695 }
4696 #endif
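
/*
 * Illustrative guest-side sketch (not part of syscall.c): the FS_IOC_FIEMAP
 * request that do_ioctl_fs_ioc_fiemap() above marshals, i.e. a struct fiemap
 * header followed by fm_extent_count extent slots.  Assumes Linux guest
 * headers <linux/fs.h> and <linux/fiemap.h>.
 *
 *     #include <stdio.h>
 *     #include <stdlib.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/fs.h>
 *     #include <linux/fiemap.h>
 *
 *     static int dump_extents(int fd)
 *     {
 *         unsigned int slots = 32;
 *         struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                       slots * sizeof(struct fiemap_extent));
 *
 *         if (!fm) {
 *             return -1;
 *         }
 *         fm->fm_start = 0;
 *         fm->fm_length = FIEMAP_MAX_OFFSET;  /* whole file */
 *         fm->fm_extent_count = slots;        /* 0 would only count extents */
 *         if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
 *             for (unsigned int i = 0; i < fm->fm_mapped_extents; i++) {
 *                 printf("extent %u: logical %llu len %llu\n", i,
 *                        (unsigned long long)fm->fm_extents[i].fe_logical,
 *                        (unsigned long long)fm->fm_extents[i].fe_length);
 *             }
 *         }
 *         free(fm);
 *         return 0;
 *     }
 */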
4697 
4698 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4699                                 int fd, int cmd, abi_long arg)
4700 {
4701     const argtype *arg_type = ie->arg_type;
4702     int target_size;
4703     void *argptr;
4704     int ret;
4705     struct ifconf *host_ifconf;
4706     uint32_t outbufsz;
4707     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4708     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4709     int target_ifreq_size;
4710     int nb_ifreq;
4711     int free_buf = 0;
4712     int i;
4713     int target_ifc_len;
4714     abi_long target_ifc_buf;
4715     int host_ifc_len;
4716     char *host_ifc_buf;
4717 
4718     assert(arg_type[0] == TYPE_PTR);
4719     assert(ie->access == IOC_RW);
4720 
4721     arg_type++;
4722     target_size = thunk_type_size(arg_type, 0);
4723 
4724     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4725     if (!argptr)
4726         return -TARGET_EFAULT;
4727     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4728     unlock_user(argptr, arg, 0);
4729 
4730     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4731     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4732     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4733 
4734     if (target_ifc_buf != 0) {
4735         target_ifc_len = host_ifconf->ifc_len;
4736         nb_ifreq = target_ifc_len / target_ifreq_size;
4737         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4738 
4739         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4740         if (outbufsz > MAX_STRUCT_SIZE) {
4741             /*
4742              * We can't fit all the ifreq entries into the fixed size buffer.
4743              * Allocate one that is large enough and use it instead.
4744              */
4745             host_ifconf = g_try_malloc(outbufsz);
4746             if (!host_ifconf) {
4747                 return -TARGET_ENOMEM;
4748             }
4749             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4750             free_buf = 1;
4751         }
4752         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4753 
4754         host_ifconf->ifc_len = host_ifc_len;
4755     } else {
4756       host_ifc_buf = NULL;
4757     }
4758     host_ifconf->ifc_buf = host_ifc_buf;
4759 
4760     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4761     if (!is_error(ret)) {
4762 	/* convert host ifc_len to target ifc_len */
4763 
4764         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4765         target_ifc_len = nb_ifreq * target_ifreq_size;
4766         host_ifconf->ifc_len = target_ifc_len;
4767 
4768 	/* restore target ifc_buf */
4769 
4770         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4771 
4772 	/* copy struct ifconf to target user */
4773 
4774         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4775         if (!argptr)
4776             return -TARGET_EFAULT;
4777         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4778         unlock_user(argptr, arg, target_size);
4779 
4780         if (target_ifc_buf != 0) {
4781             /* copy ifreq[] to target user */
4782             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4783             for (i = 0; i < nb_ifreq ; i++) {
4784                 thunk_convert(argptr + i * target_ifreq_size,
4785                               host_ifc_buf + i * sizeof(struct ifreq),
4786                               ifreq_arg_type, THUNK_TARGET);
4787             }
4788             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4789         }
4790     }
4791 
4792     if (free_buf) {
4793         g_free(host_ifconf);
4794     }
4795 
4796     return ret;
4797 }
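
/*
 * Illustrative guest-side sketch (not part of syscall.c): the SIOCGIFCONF
 * request that do_ioctl_ifconf() above resizes and converts, including the
 * ifc_len-in/ifc_len-out convention.  Assumes a Linux guest with <net/if.h>
 * and <sys/ioctl.h>.
 *
 *     #include <stdio.h>
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <sys/socket.h>
 *     #include <net/if.h>
 *     #include <unistd.h>
 *
 *     static int list_interfaces(void)
 *     {
 *         struct ifreq reqs[16];
 *         struct ifconf ifc;
 *         int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *         if (s < 0) {
 *             return -1;
 *         }
 *         memset(&ifc, 0, sizeof(ifc));
 *         ifc.ifc_len = sizeof(reqs);      /* buffer size going in */
 *         ifc.ifc_buf = (char *)reqs;
 *         if (ioctl(s, SIOCGIFCONF, &ifc) == 0) {
 *             int n = ifc.ifc_len / sizeof(struct ifreq);  /* bytes used coming out */
 *             for (int i = 0; i < n; i++) {
 *                 puts(reqs[i].ifr_name);
 *             }
 *         }
 *         close(s);
 *         return 0;
 *     }
 */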
4798 
4799 #if defined(CONFIG_USBFS)
4800 #if HOST_LONG_BITS > 64
4801 #error USBDEVFS thunks do not support >64 bit hosts yet.
4802 #endif
4803 struct live_urb {
4804     uint64_t target_urb_adr;
4805     uint64_t target_buf_adr;
4806     char *target_buf_ptr;
4807     struct usbdevfs_urb host_urb;
4808 };
4809 
4810 static GHashTable *usbdevfs_urb_hashtable(void)
4811 {
4812     static GHashTable *urb_hashtable;
4813 
4814     if (!urb_hashtable) {
4815         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4816     }
4817     return urb_hashtable;
4818 }
4819 
4820 static void urb_hashtable_insert(struct live_urb *urb)
4821 {
4822     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4823     g_hash_table_insert(urb_hashtable, urb, urb);
4824 }
4825 
4826 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4827 {
4828     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4829     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4830 }
4831 
4832 static void urb_hashtable_remove(struct live_urb *urb)
4833 {
4834     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4835     g_hash_table_remove(urb_hashtable, urb);
4836 }
4837 
4838 static abi_long
4839 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4840                           int fd, int cmd, abi_long arg)
4841 {
4842     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4843     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4844     struct live_urb *lurb;
4845     void *argptr;
4846     uint64_t hurb;
4847     int target_size;
4848     uintptr_t target_urb_adr;
4849     abi_long ret;
4850 
4851     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4852 
4853     memset(buf_temp, 0, sizeof(uint64_t));
4854     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4855     if (is_error(ret)) {
4856         return ret;
4857     }
4858 
4859     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4860     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4861     if (!lurb->target_urb_adr) {
4862         return -TARGET_EFAULT;
4863     }
4864     urb_hashtable_remove(lurb);
4865     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4866         lurb->host_urb.buffer_length);
4867     lurb->target_buf_ptr = NULL;
4868 
4869     /* restore the guest buffer pointer */
4870     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4871 
4872     /* update the guest urb struct */
4873     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4874     if (!argptr) {
4875         g_free(lurb);
4876         return -TARGET_EFAULT;
4877     }
4878     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4879     unlock_user(argptr, lurb->target_urb_adr, target_size);
4880 
4881     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4882     /* write back the urb handle */
4883     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4884     if (!argptr) {
4885         g_free(lurb);
4886         return -TARGET_EFAULT;
4887     }
4888 
4889     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4890     target_urb_adr = lurb->target_urb_adr;
4891     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4892     unlock_user(argptr, arg, target_size);
4893 
4894     g_free(lurb);
4895     return ret;
4896 }
4897 
4898 static abi_long
4899 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4900                              uint8_t *buf_temp __attribute__((unused)),
4901                              int fd, int cmd, abi_long arg)
4902 {
4903     struct live_urb *lurb;
4904 
4905     /* map target address back to host URB with metadata. */
4906     lurb = urb_hashtable_lookup(arg);
4907     if (!lurb) {
4908         return -TARGET_EFAULT;
4909     }
4910     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4911 }
4912 
4913 static abi_long
4914 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4915                             int fd, int cmd, abi_long arg)
4916 {
4917     const argtype *arg_type = ie->arg_type;
4918     int target_size;
4919     abi_long ret;
4920     void *argptr;
4921     int rw_dir;
4922     struct live_urb *lurb;
4923 
4924     /*
4925      * Each submitted URB needs to map to a unique ID for the
4926      * kernel, and that unique ID needs to be a pointer to
4927      * host memory.  Hence, we need to malloc for each URB.
4928      * Isochronous transfers have a variable-length struct.
4929      */
4930     arg_type++;
4931     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4932 
4933     /* construct host copy of urb and metadata */
4934     lurb = g_try_new0(struct live_urb, 1);
4935     if (!lurb) {
4936         return -TARGET_ENOMEM;
4937     }
4938 
4939     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4940     if (!argptr) {
4941         g_free(lurb);
4942         return -TARGET_EFAULT;
4943     }
4944     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4945     unlock_user(argptr, arg, 0);
4946 
4947     lurb->target_urb_adr = arg;
4948     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4949 
4950     /* buffer space used depends on endpoint type so lock the entire buffer */
4951     /* control type urbs should check the buffer contents for true direction */
4952     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4953     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4954         lurb->host_urb.buffer_length, 1);
4955     if (lurb->target_buf_ptr == NULL) {
4956         g_free(lurb);
4957         return -TARGET_EFAULT;
4958     }
4959 
4960     /* update buffer pointer in host copy */
4961     lurb->host_urb.buffer = lurb->target_buf_ptr;
4962 
4963     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4964     if (is_error(ret)) {
4965         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4966         g_free(lurb);
4967     } else {
4968         urb_hashtable_insert(lurb);
4969     }
4970 
4971     return ret;
4972 }
4973 #endif /* CONFIG_USBFS */
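
/*
 * Illustrative guest-side sketch (not part of syscall.c): the asynchronous
 * USBDEVFS_SUBMITURB/USBDEVFS_REAPURB pair handled above, where the kernel
 * hands back the same urb pointer that was submitted -- the reason the
 * emulation keeps a live_urb hash table.  Assumes a Linux guest with
 * <linux/usbdevice_fs.h>; the fd and endpoint number are up to the caller.
 *
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/usbdevice_fs.h>
 *
 *     static int bulk_out(int usbfd, unsigned char ep, void *data, int len)
 *     {
 *         struct usbdevfs_urb urb, *done = NULL;
 *
 *         memset(&urb, 0, sizeof(urb));
 *         urb.type = USBDEVFS_URB_TYPE_BULK;
 *         urb.endpoint = ep;
 *         urb.buffer = data;
 *         urb.buffer_length = len;
 *         if (ioctl(usbfd, USBDEVFS_SUBMITURB, &urb) < 0) {
 *             return -1;
 *         }
 *         if (ioctl(usbfd, USBDEVFS_REAPURB, &done) < 0) {
 *             return -1;
 *         }
 *         return done == &urb ? done->actual_length : -1;
 *     }
 */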
4974 
4975 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4976                             int cmd, abi_long arg)
4977 {
4978     void *argptr;
4979     struct dm_ioctl *host_dm;
4980     abi_long guest_data;
4981     uint32_t guest_data_size;
4982     int target_size;
4983     const argtype *arg_type = ie->arg_type;
4984     abi_long ret;
4985     void *big_buf = NULL;
4986     char *host_data;
4987 
4988     arg_type++;
4989     target_size = thunk_type_size(arg_type, 0);
4990     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4991     if (!argptr) {
4992         ret = -TARGET_EFAULT;
4993         goto out;
4994     }
4995     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4996     unlock_user(argptr, arg, 0);
4997 
4998     /* buf_temp is too small, so fetch things into a bigger buffer */
4999     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5000     memcpy(big_buf, buf_temp, target_size);
5001     buf_temp = big_buf;
5002     host_dm = big_buf;
5003 
5004     guest_data = arg + host_dm->data_start;
5005     if ((guest_data - arg) < 0) {
5006         ret = -TARGET_EINVAL;
5007         goto out;
5008     }
5009     guest_data_size = host_dm->data_size - host_dm->data_start;
5010     host_data = (char*)host_dm + host_dm->data_start;
5011 
5012     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5013     if (!argptr) {
5014         ret = -TARGET_EFAULT;
5015         goto out;
5016     }
5017 
5018     switch (ie->host_cmd) {
5019     case DM_REMOVE_ALL:
5020     case DM_LIST_DEVICES:
5021     case DM_DEV_CREATE:
5022     case DM_DEV_REMOVE:
5023     case DM_DEV_SUSPEND:
5024     case DM_DEV_STATUS:
5025     case DM_DEV_WAIT:
5026     case DM_TABLE_STATUS:
5027     case DM_TABLE_CLEAR:
5028     case DM_TABLE_DEPS:
5029     case DM_LIST_VERSIONS:
5030         /* no input data */
5031         break;
5032     case DM_DEV_RENAME:
5033     case DM_DEV_SET_GEOMETRY:
5034         /* data contains only strings */
5035         memcpy(host_data, argptr, guest_data_size);
5036         break;
5037     case DM_TARGET_MSG:
5038         memcpy(host_data, argptr, guest_data_size);
5039         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5040         break;
5041     case DM_TABLE_LOAD:
5042     {
5043         void *gspec = argptr;
5044         void *cur_data = host_data;
5045         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5046         int spec_size = thunk_type_size(dm_arg_type, 0);
5047         int i;
5048 
5049         for (i = 0; i < host_dm->target_count; i++) {
5050             struct dm_target_spec *spec = cur_data;
5051             uint32_t next;
5052             int slen;
5053 
5054             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5055             slen = strlen((char*)gspec + spec_size) + 1;
5056             next = spec->next;
5057             spec->next = sizeof(*spec) + slen;
5058             strcpy((char*)&spec[1], gspec + spec_size);
5059             gspec += next;
5060             cur_data += spec->next;
5061         }
5062         break;
5063     }
5064     default:
5065         ret = -TARGET_EINVAL;
5066         unlock_user(argptr, guest_data, 0);
5067         goto out;
5068     }
5069     unlock_user(argptr, guest_data, 0);
5070 
5071     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5072     if (!is_error(ret)) {
5073         guest_data = arg + host_dm->data_start;
5074         guest_data_size = host_dm->data_size - host_dm->data_start;
5075         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5076         switch (ie->host_cmd) {
5077         case DM_REMOVE_ALL:
5078         case DM_DEV_CREATE:
5079         case DM_DEV_REMOVE:
5080         case DM_DEV_RENAME:
5081         case DM_DEV_SUSPEND:
5082         case DM_DEV_STATUS:
5083         case DM_TABLE_LOAD:
5084         case DM_TABLE_CLEAR:
5085         case DM_TARGET_MSG:
5086         case DM_DEV_SET_GEOMETRY:
5087             /* no return data */
5088             break;
5089         case DM_LIST_DEVICES:
5090         {
5091             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5092             uint32_t remaining_data = guest_data_size;
5093             void *cur_data = argptr;
5094             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5095             int nl_size = 12; /* can't use thunk_size due to alignment */
5096 
5097             while (1) {
5098                 uint32_t next = nl->next;
5099                 if (next) {
5100                     nl->next = nl_size + (strlen(nl->name) + 1);
5101                 }
5102                 if (remaining_data < nl->next) {
5103                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5104                     break;
5105                 }
5106                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5107                 strcpy(cur_data + nl_size, nl->name);
5108                 cur_data += nl->next;
5109                 remaining_data -= nl->next;
5110                 if (!next) {
5111                     break;
5112                 }
5113                 nl = (void*)nl + next;
5114             }
5115             break;
5116         }
5117         case DM_DEV_WAIT:
5118         case DM_TABLE_STATUS:
5119         {
5120             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5121             void *cur_data = argptr;
5122             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5123             int spec_size = thunk_type_size(dm_arg_type, 0);
5124             int i;
5125 
5126             for (i = 0; i < host_dm->target_count; i++) {
5127                 uint32_t next = spec->next;
5128                 int slen = strlen((char*)&spec[1]) + 1;
5129                 spec->next = (cur_data - argptr) + spec_size + slen;
5130                 if (guest_data_size < spec->next) {
5131                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5132                     break;
5133                 }
5134                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5135                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5136                 cur_data = argptr + spec->next;
5137                 spec = (void*)host_dm + host_dm->data_start + next;
5138             }
5139             break;
5140         }
5141         case DM_TABLE_DEPS:
5142         {
5143             void *hdata = (void*)host_dm + host_dm->data_start;
5144             int count = *(uint32_t*)hdata;
5145             uint64_t *hdev = hdata + 8;
5146             uint64_t *gdev = argptr + 8;
5147             int i;
5148 
5149             *(uint32_t*)argptr = tswap32(count);
5150             for (i = 0; i < count; i++) {
5151                 *gdev = tswap64(*hdev);
5152                 gdev++;
5153                 hdev++;
5154             }
5155             break;
5156         }
5157         case DM_LIST_VERSIONS:
5158         {
5159             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5160             uint32_t remaining_data = guest_data_size;
5161             void *cur_data = argptr;
5162             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5163             int vers_size = thunk_type_size(dm_arg_type, 0);
5164 
5165             while (1) {
5166                 uint32_t next = vers->next;
5167                 if (next) {
5168                     vers->next = vers_size + (strlen(vers->name) + 1);
5169                 }
5170                 if (remaining_data < vers->next) {
5171                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5172                     break;
5173                 }
5174                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5175                 strcpy(cur_data + vers_size, vers->name);
5176                 cur_data += vers->next;
5177                 remaining_data -= vers->next;
5178                 if (!next) {
5179                     break;
5180                 }
5181                 vers = (void*)vers + next;
5182             }
5183             break;
5184         }
5185         default:
5186             unlock_user(argptr, guest_data, 0);
5187             ret = -TARGET_EINVAL;
5188             goto out;
5189         }
5190         unlock_user(argptr, guest_data, guest_data_size);
5191 
5192         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5193         if (!argptr) {
5194             ret = -TARGET_EFAULT;
5195             goto out;
5196         }
5197         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5198         unlock_user(argptr, arg, target_size);
5199     }
5200 out:
5201     g_free(big_buf);
5202     return ret;
5203 }
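
/*
 * Illustrative guest-side sketch (not part of syscall.c): the layout that
 * do_ioctl_dm() above has to juggle -- a struct dm_ioctl header whose
 * data_start/data_size fields delimit a variable-sized payload in the same
 * buffer.  Assumes a Linux guest with <linux/dm-ioctl.h> and access to
 * /dev/mapper/control.
 *
 *     #include <fcntl.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/dm-ioctl.h>
 *
 *     static int list_dm_devices(void)
 *     {
 *         char buf[16384];
 *         struct dm_ioctl *dmi = (struct dm_ioctl *)buf;
 *         int fd = open("/dev/mapper/control", O_RDWR);
 *
 *         if (fd < 0) {
 *             return -1;
 *         }
 *         memset(buf, 0, sizeof(buf));
 *         dmi->version[0] = DM_VERSION_MAJOR;
 *         dmi->version[1] = DM_VERSION_MINOR;
 *         dmi->version[2] = DM_VERSION_PATCHLEVEL;
 *         dmi->data_size = sizeof(buf);
 *         dmi->data_start = sizeof(*dmi);
 *         if (ioctl(fd, DM_LIST_DEVICES, dmi) == 0) {
 *             /* struct dm_name_list records start at buf + dmi->data_start */
 *         }
 *         close(fd);
 *         return 0;
 *     }
 */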
5204 
5205 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5206                                int cmd, abi_long arg)
5207 {
5208     void *argptr;
5209     int target_size;
5210     const argtype *arg_type = ie->arg_type;
5211     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5212     abi_long ret;
5213 
5214     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5215     struct blkpg_partition host_part;
5216 
5217     /* Read and convert blkpg */
5218     arg_type++;
5219     target_size = thunk_type_size(arg_type, 0);
5220     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5221     if (!argptr) {
5222         ret = -TARGET_EFAULT;
5223         goto out;
5224     }
5225     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5226     unlock_user(argptr, arg, 0);
5227 
5228     switch (host_blkpg->op) {
5229     case BLKPG_ADD_PARTITION:
5230     case BLKPG_DEL_PARTITION:
5231         /* payload is struct blkpg_partition */
5232         break;
5233     default:
5234         /* Unknown opcode */
5235         ret = -TARGET_EINVAL;
5236         goto out;
5237     }
5238 
5239     /* Read and convert blkpg->data */
5240     arg = (abi_long)(uintptr_t)host_blkpg->data;
5241     target_size = thunk_type_size(part_arg_type, 0);
5242     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5243     if (!argptr) {
5244         ret = -TARGET_EFAULT;
5245         goto out;
5246     }
5247     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5248     unlock_user(argptr, arg, 0);
5249 
5250     /* Swizzle the data pointer to our local copy and call! */
5251     host_blkpg->data = &host_part;
5252     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5253 
5254 out:
5255     return ret;
5256 }
5257 
5258 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5259                                 int fd, int cmd, abi_long arg)
5260 {
5261     const argtype *arg_type = ie->arg_type;
5262     const StructEntry *se;
5263     const argtype *field_types;
5264     const int *dst_offsets, *src_offsets;
5265     int target_size;
5266     void *argptr;
5267     abi_ulong *target_rt_dev_ptr = NULL;
5268     unsigned long *host_rt_dev_ptr = NULL;
5269     abi_long ret;
5270     int i;
5271 
5272     assert(ie->access == IOC_W);
5273     assert(*arg_type == TYPE_PTR);
5274     arg_type++;
5275     assert(*arg_type == TYPE_STRUCT);
5276     target_size = thunk_type_size(arg_type, 0);
5277     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5278     if (!argptr) {
5279         return -TARGET_EFAULT;
5280     }
5281     arg_type++;
5282     assert(*arg_type == (int)STRUCT_rtentry);
5283     se = struct_entries + *arg_type++;
5284     assert(se->convert[0] == NULL);
5285     /* convert struct here to be able to catch rt_dev string */
5286     field_types = se->field_types;
5287     dst_offsets = se->field_offsets[THUNK_HOST];
5288     src_offsets = se->field_offsets[THUNK_TARGET];
5289     for (i = 0; i < se->nb_fields; i++) {
5290         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5291             assert(*field_types == TYPE_PTRVOID);
5292             target_rt_dev_ptr = argptr + src_offsets[i];
5293             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5294             if (*target_rt_dev_ptr != 0) {
5295                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5296                                                   tswapal(*target_rt_dev_ptr));
5297                 if (!*host_rt_dev_ptr) {
5298                     unlock_user(argptr, arg, 0);
5299                     return -TARGET_EFAULT;
5300                 }
5301             } else {
5302                 *host_rt_dev_ptr = 0;
5303             }
5304             field_types++;
5305             continue;
5306         }
5307         field_types = thunk_convert(buf_temp + dst_offsets[i],
5308                                     argptr + src_offsets[i],
5309                                     field_types, THUNK_HOST);
5310     }
5311     unlock_user(argptr, arg, 0);
5312 
5313     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5314 
5315     assert(host_rt_dev_ptr != NULL);
5316     assert(target_rt_dev_ptr != NULL);
5317     if (*host_rt_dev_ptr != 0) {
5318         unlock_user((void *)*host_rt_dev_ptr,
5319                     *target_rt_dev_ptr, 0);
5320     }
5321     return ret;
5322 }
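
/*
 * Illustrative guest-side sketch (not part of syscall.c): an SIOCADDRT
 * request of the shape do_ioctl_rt() above handles, where rt_dev is a
 * pointer to a device-name string that must be relocated separately from
 * the rest of struct rtentry.  The address and device name are placeholders.
 *
 *     #include <string.h>
 *     #include <unistd.h>
 *     #include <sys/ioctl.h>
 *     #include <sys/socket.h>
 *     #include <netinet/in.h>
 *     #include <arpa/inet.h>
 *     #include <net/route.h>
 *
 *     static int add_default_route(void)
 *     {
 *         struct rtentry rt;
 *         struct sockaddr_in *sin;
 *         int s = socket(AF_INET, SOCK_DGRAM, 0);
 *         int ret;
 *
 *         if (s < 0) {
 *             return -1;
 *         }
 *         memset(&rt, 0, sizeof(rt));
 *         sin = (struct sockaddr_in *)&rt.rt_gateway;
 *         sin->sin_family = AF_INET;
 *         sin->sin_addr.s_addr = inet_addr("192.168.1.1");
 *         sin = (struct sockaddr_in *)&rt.rt_dst;
 *         sin->sin_family = AF_INET;          /* 0.0.0.0: default route */
 *         sin = (struct sockaddr_in *)&rt.rt_genmask;
 *         sin->sin_family = AF_INET;
 *         rt.rt_flags = RTF_UP | RTF_GATEWAY;
 *         rt.rt_dev = (char *)"eth0";         /* the string the thunk relocates */
 *         ret = ioctl(s, SIOCADDRT, &rt);
 *         close(s);
 *         return ret;
 *     }
 */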
5323 
5324 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5325                                      int fd, int cmd, abi_long arg)
5326 {
5327     int sig = target_to_host_signal(arg);
5328     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5329 }
5330 
5331 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5332                                     int fd, int cmd, abi_long arg)
5333 {
5334     struct timeval tv;
5335     abi_long ret;
5336 
5337     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5338     if (is_error(ret)) {
5339         return ret;
5340     }
5341 
5342     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5343         if (copy_to_user_timeval(arg, &tv)) {
5344             return -TARGET_EFAULT;
5345         }
5346     } else {
5347         if (copy_to_user_timeval64(arg, &tv)) {
5348             return -TARGET_EFAULT;
5349         }
5350     }
5351 
5352     return ret;
5353 }
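
/*
 * Illustrative guest-side sketch (not part of syscall.c): reading the
 * last-packet timestamp that do_ioctl_SIOCGSTAMP() above copies out in
 * either the old or the 64-bit time format.  Assumes a socket fd that has
 * already received a packet.
 *
 *     #include <stdio.h>
 *     #include <sys/ioctl.h>
 *     #include <sys/time.h>
 *     #include <linux/sockios.h>
 *
 *     static void print_rx_stamp(int sock)
 *     {
 *         struct timeval tv;
 *
 *         if (ioctl(sock, SIOCGSTAMP, &tv) == 0) {
 *             printf("last packet at %lld.%06ld\n",
 *                    (long long)tv.tv_sec, (long)tv.tv_usec);
 *         }
 *     }
 */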
5354 
5355 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5356                                       int fd, int cmd, abi_long arg)
5357 {
5358     struct timespec ts;
5359     abi_long ret;
5360 
5361     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5362     if (is_error(ret)) {
5363         return ret;
5364     }
5365 
5366     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5367         if (host_to_target_timespec(arg, &ts)) {
5368             return -TARGET_EFAULT;
5369         }
5370     } else {
5371         if (host_to_target_timespec64(arg, &ts)) {
5372             return -TARGET_EFAULT;
5373         }
5374     }
5375 
5376     return ret;
5377 }
5378 
5379 #ifdef TIOCGPTPEER
5380 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5381                                      int fd, int cmd, abi_long arg)
5382 {
5383     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5384     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5385 }
5386 #endif
5387 
5388 #ifdef HAVE_DRM_H
5389 
5390 static void unlock_drm_version(struct drm_version *host_ver,
5391                                struct target_drm_version *target_ver,
5392                                bool copy)
5393 {
5394     unlock_user(host_ver->name, target_ver->name,
5395                                 copy ? host_ver->name_len : 0);
5396     unlock_user(host_ver->date, target_ver->date,
5397                                 copy ? host_ver->date_len : 0);
5398     unlock_user(host_ver->desc, target_ver->desc,
5399                                 copy ? host_ver->desc_len : 0);
5400 }
5401 
5402 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5403                                           struct target_drm_version *target_ver)
5404 {
5405     memset(host_ver, 0, sizeof(*host_ver));
5406 
5407     __get_user(host_ver->name_len, &target_ver->name_len);
5408     if (host_ver->name_len) {
5409         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5410                                    target_ver->name_len, 0);
5411         if (!host_ver->name) {
5412             return -EFAULT;
5413         }
5414     }
5415 
5416     __get_user(host_ver->date_len, &target_ver->date_len);
5417     if (host_ver->date_len) {
5418         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5419                                    target_ver->date_len, 0);
5420         if (!host_ver->date) {
5421             goto err;
5422         }
5423     }
5424 
5425     __get_user(host_ver->desc_len, &target_ver->desc_len);
5426     if (host_ver->desc_len) {
5427         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5428                                    target_ver->desc_len, 0);
5429         if (!host_ver->desc) {
5430             goto err;
5431         }
5432     }
5433 
5434     return 0;
5435 err:
5436     unlock_drm_version(host_ver, target_ver, false);
5437     return -EFAULT;
5438 }
5439 
5440 static inline void host_to_target_drmversion(
5441                                           struct target_drm_version *target_ver,
5442                                           struct drm_version *host_ver)
5443 {
5444     __put_user(host_ver->version_major, &target_ver->version_major);
5445     __put_user(host_ver->version_minor, &target_ver->version_minor);
5446     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5447     __put_user(host_ver->name_len, &target_ver->name_len);
5448     __put_user(host_ver->date_len, &target_ver->date_len);
5449     __put_user(host_ver->desc_len, &target_ver->desc_len);
5450     unlock_drm_version(host_ver, target_ver, true);
5451 }
5452 
5453 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5454                              int fd, int cmd, abi_long arg)
5455 {
5456     struct drm_version *ver;
5457     struct target_drm_version *target_ver;
5458     abi_long ret;
5459 
5460     switch (ie->host_cmd) {
5461     case DRM_IOCTL_VERSION:
5462         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5463             return -TARGET_EFAULT;
5464         }
5465         ver = (struct drm_version *)buf_temp;
5466         ret = target_to_host_drmversion(ver, target_ver);
5467         if (!is_error(ret)) {
5468             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5469             if (is_error(ret)) {
5470                 unlock_drm_version(ver, target_ver, false);
5471             } else {
5472                 host_to_target_drmversion(target_ver, ver);
5473             }
5474         }
5475         unlock_user_struct(target_ver, arg, 0);
5476         return ret;
5477     }
5478     return -TARGET_ENOSYS;
5479 }
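
/*
 * Illustrative guest-side sketch (not part of syscall.c): the two-pass
 * DRM_IOCTL_VERSION pattern that do_ioctl_drm() above supports -- the first
 * call reports the string lengths, the second fills the buffers locked by
 * target_to_host_drmversion().  Assumes a Linux guest with <drm/drm.h>
 * (e.g. from libdrm) and an open DRM node fd.
 *
 *     #include <stdlib.h>
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <drm/drm.h>
 *
 *     static int query_driver_name(int fd, char **name_out)
 *     {
 *         struct drm_version ver;
 *
 *         memset(&ver, 0, sizeof(ver));
 *         if (ioctl(fd, DRM_IOCTL_VERSION, &ver) < 0) {   /* lengths only */
 *             return -1;
 *         }
 *         ver.name = calloc(1, ver.name_len + 1);
 *         ver.date = calloc(1, ver.date_len + 1);
 *         ver.desc = calloc(1, ver.desc_len + 1);
 *         if (!ver.name || !ver.date || !ver.desc ||
 *             ioctl(fd, DRM_IOCTL_VERSION, &ver) < 0) {   /* fill strings */
 *             free(ver.name);
 *             free(ver.date);
 *             free(ver.desc);
 *             return -1;
 *         }
 *         *name_out = ver.name;
 *         free(ver.date);
 *         free(ver.desc);
 *         return 0;
 *     }
 */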
5480 
5481 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5482                                            struct drm_i915_getparam *gparam,
5483                                            int fd, abi_long arg)
5484 {
5485     abi_long ret;
5486     int value;
5487     struct target_drm_i915_getparam *target_gparam;
5488 
5489     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5490         return -TARGET_EFAULT;
5491     }
5492 
5493     __get_user(gparam->param, &target_gparam->param);
5494     gparam->value = &value;
5495     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5496     put_user_s32(value, target_gparam->value);
5497 
5498     unlock_user_struct(target_gparam, arg, 0);
5499     return ret;
5500 }
5501 
5502 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5503                                   int fd, int cmd, abi_long arg)
5504 {
5505     switch (ie->host_cmd) {
5506     case DRM_IOCTL_I915_GETPARAM:
5507         return do_ioctl_drm_i915_getparam(ie,
5508                                           (struct drm_i915_getparam *)buf_temp,
5509                                           fd, arg);
5510     default:
5511         return -TARGET_ENOSYS;
5512     }
5513 }
5514 
5515 #endif
5516 
5517 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5518                                         int fd, int cmd, abi_long arg)
5519 {
5520     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5521     struct tun_filter *target_filter;
5522     char *target_addr;
5523 
5524     assert(ie->access == IOC_W);
5525 
5526     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5527     if (!target_filter) {
5528         return -TARGET_EFAULT;
5529     }
5530     filter->flags = tswap16(target_filter->flags);
5531     filter->count = tswap16(target_filter->count);
5532     unlock_user(target_filter, arg, 0);
5533 
5534     if (filter->count) {
5535         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5536             MAX_STRUCT_SIZE) {
5537             return -TARGET_EFAULT;
5538         }
5539 
5540         target_addr = lock_user(VERIFY_READ,
5541                                 arg + offsetof(struct tun_filter, addr),
5542                                 filter->count * ETH_ALEN, 1);
5543         if (!target_addr) {
5544             return -TARGET_EFAULT;
5545         }
5546         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5547         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5548     }
5549 
5550     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5551 }
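
/*
 * Illustrative guest-side sketch (not part of syscall.c): a TUNSETTXFILTER
 * request as converted above -- a struct tun_filter header followed by
 * "count" MAC addresses.  Assumes a Linux guest with <linux/if_tun.h> and an
 * already-configured TAP fd; the MAC address below is a placeholder.
 *
 *     #include <stdlib.h>
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/if_tun.h>
 *     #include <linux/if_ether.h>
 *
 *     static int allow_one_mac(int tapfd)
 *     {
 *         static const unsigned char mac[ETH_ALEN] = {
 *             0x52, 0x54, 0x00, 0x12, 0x34, 0x56
 *         };
 *         size_t sz = sizeof(struct tun_filter) + ETH_ALEN;
 *         struct tun_filter *flt = calloc(1, sz);
 *         int ret;
 *
 *         if (!flt) {
 *             return -1;
 *         }
 *         flt->count = 1;
 *         memcpy(flt->addr, mac, ETH_ALEN);
 *         ret = ioctl(tapfd, TUNSETTXFILTER, flt);
 *         free(flt);
 *         return ret;
 *     }
 */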
5552 
5553 IOCTLEntry ioctl_entries[] = {
5554 #define IOCTL(cmd, access, ...) \
5555     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5556 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5557     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5558 #define IOCTL_IGNORE(cmd) \
5559     { TARGET_ ## cmd, 0, #cmd },
5560 #include "ioctls.h"
5561     { 0, 0, },
5562 };
5563 
5564 /* ??? Implement proper locking for ioctls.  */
5565 /* do_ioctl() must return target values and target errnos. */
5566 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5567 {
5568     const IOCTLEntry *ie;
5569     const argtype *arg_type;
5570     abi_long ret;
5571     uint8_t buf_temp[MAX_STRUCT_SIZE];
5572     int target_size;
5573     void *argptr;
5574 
5575     ie = ioctl_entries;
5576     for(;;) {
5577         if (ie->target_cmd == 0) {
5578             qemu_log_mask(
5579                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5580             return -TARGET_ENOTTY;
5581         }
5582         if (ie->target_cmd == cmd)
5583             break;
5584         ie++;
5585     }
5586     arg_type = ie->arg_type;
5587     if (ie->do_ioctl) {
5588         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5589     } else if (!ie->host_cmd) {
5590         /* Some architectures define BSD ioctls in their headers
5591            that are not implemented in Linux.  */
5592         return -TARGET_ENOTTY;
5593     }
5594 
5595     switch(arg_type[0]) {
5596     case TYPE_NULL:
5597         /* no argument */
5598         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5599         break;
5600     case TYPE_PTRVOID:
5601     case TYPE_INT:
5602     case TYPE_LONG:
5603     case TYPE_ULONG:
5604         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5605         break;
5606     case TYPE_PTR:
5607         arg_type++;
5608         target_size = thunk_type_size(arg_type, 0);
5609         switch(ie->access) {
5610         case IOC_R:
5611             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5612             if (!is_error(ret)) {
5613                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5614                 if (!argptr)
5615                     return -TARGET_EFAULT;
5616                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5617                 unlock_user(argptr, arg, target_size);
5618             }
5619             break;
5620         case IOC_W:
5621             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5622             if (!argptr)
5623                 return -TARGET_EFAULT;
5624             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5625             unlock_user(argptr, arg, 0);
5626             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5627             break;
5628         default:
5629         case IOC_RW:
5630             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5631             if (!argptr)
5632                 return -TARGET_EFAULT;
5633             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5634             unlock_user(argptr, arg, 0);
5635             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5636             if (!is_error(ret)) {
5637                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5638                 if (!argptr)
5639                     return -TARGET_EFAULT;
5640                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5641                 unlock_user(argptr, arg, target_size);
5642             }
5643             break;
5644         }
5645         break;
5646     default:
5647         qemu_log_mask(LOG_UNIMP,
5648                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5649                       (long)cmd, arg_type[0]);
5650         ret = -TARGET_ENOTTY;
5651         break;
5652     }
5653     return ret;
5654 }
5655 
5656 static const bitmask_transtbl iflag_tbl[] = {
5657         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5658         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5659         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5660         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5661         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5662         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5663         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5664         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5665         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5666         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5667         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5668         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5669         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5670         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5671         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5672 };
5673 
5674 static const bitmask_transtbl oflag_tbl[] = {
5675 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5676 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5677 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5678 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5679 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5680 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5681 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5682 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5683 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5684 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5685 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5686 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5687 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5688 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5689 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5690 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5691 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5692 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5693 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5694 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5695 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5696 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5697 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5698 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5699 };
5700 
5701 static const bitmask_transtbl cflag_tbl[] = {
5702 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5703 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5704 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5705 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5706 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5707 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5708 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5709 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5710 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5711 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5712 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5713 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5714 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5715 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5716 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5717 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5718 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5719 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5720 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5721 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5722 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5723 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5724 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5725 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5726 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5727 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5728 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5729 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5730 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5731 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5732 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5733 };
5734 
5735 static const bitmask_transtbl lflag_tbl[] = {
5736   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5737   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5738   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5739   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5740   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5741   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5742   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5743   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5744   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5745   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5746   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5747   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5748   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5749   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5750   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5751   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5752 };
5753 
5754 static void target_to_host_termios (void *dst, const void *src)
5755 {
5756     struct host_termios *host = dst;
5757     const struct target_termios *target = src;
5758 
5759     host->c_iflag =
5760         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5761     host->c_oflag =
5762         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5763     host->c_cflag =
5764         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5765     host->c_lflag =
5766         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5767     host->c_line = target->c_line;
5768 
5769     memset(host->c_cc, 0, sizeof(host->c_cc));
5770     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5771     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5772     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5773     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5774     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5775     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5776     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5777     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5778     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5779     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5780     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5781     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5782     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5783     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5784     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5785     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5786     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5787 }
5788 
5789 static void host_to_target_termios (void *dst, const void *src)
5790 {
5791     struct target_termios *target = dst;
5792     const struct host_termios *host = src;
5793 
5794     target->c_iflag =
5795         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5796     target->c_oflag =
5797         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5798     target->c_cflag =
5799         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5800     target->c_lflag =
5801         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5802     target->c_line = host->c_line;
5803 
5804     memset(target->c_cc, 0, sizeof(target->c_cc));
5805     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5806     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5807     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5808     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5809     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5810     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5811     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5812     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5813     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5814     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5815     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5816     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5817     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5818     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5819     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5820     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5821     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5822 }
5823 
5824 static const StructEntry struct_termios_def = {
5825     .convert = { host_to_target_termios, target_to_host_termios },
5826     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5827     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5828     .print = print_termios,
5829 };
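
/*
 * Illustrative guest-side sketch (not part of syscall.c): a typical
 * tcsetattr() call; the flag bits it toggles are exactly the ones the
 * iflag/oflag/cflag/lflag tables above translate between the target and
 * host encodings.
 *
 *     #include <termios.h>
 *     #include <unistd.h>
 *
 *     static int raw_input(void)
 *     {
 *         struct termios t;
 *
 *         if (tcgetattr(STDIN_FILENO, &t) < 0) {
 *             return -1;
 *         }
 *         t.c_lflag &= ~(ICANON | ECHO);  /* see lflag_tbl */
 *         t.c_cc[VMIN] = 1;               /* c_cc[] entries are remapped above */
 *         t.c_cc[VTIME] = 0;
 *         return tcsetattr(STDIN_FILENO, TCSANOW, &t);
 *     }
 */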
5830 
5831 /* If the host does not provide these bits, they may be safely discarded. */
5832 #ifndef MAP_SYNC
5833 #define MAP_SYNC 0
5834 #endif
5835 #ifndef MAP_UNINITIALIZED
5836 #define MAP_UNINITIALIZED 0
5837 #endif
5838 
5839 static const bitmask_transtbl mmap_flags_tbl[] = {
5840     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5841     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5842       MAP_ANONYMOUS, MAP_ANONYMOUS },
5843     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5844       MAP_GROWSDOWN, MAP_GROWSDOWN },
5845     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5846       MAP_DENYWRITE, MAP_DENYWRITE },
5847     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5848       MAP_EXECUTABLE, MAP_EXECUTABLE },
5849     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5850     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5851       MAP_NORESERVE, MAP_NORESERVE },
5852     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5853     /* MAP_STACK has been ignored by the kernel for quite some time.
5854        Recognize it for the target insofar as we do not want to pass
5855        it through to the host.  */
5856     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5857     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5858     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5859     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5860       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5861     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5862       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5863 };
5864 
5865 /*
5866  * Arrange for legacy / undefined architecture specific flags to be
5867  * ignored by mmap handling code.
5868  */
5869 #ifndef TARGET_MAP_32BIT
5870 #define TARGET_MAP_32BIT 0
5871 #endif
5872 #ifndef TARGET_MAP_HUGE_2MB
5873 #define TARGET_MAP_HUGE_2MB 0
5874 #endif
5875 #ifndef TARGET_MAP_HUGE_1GB
5876 #define TARGET_MAP_HUGE_1GB 0
5877 #endif
5878 
5879 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5880                         int target_flags, int fd, off_t offset)
5881 {
5882     /*
5883      * The historical set of flags that all mmap types implicitly support.
5884      */
5885     enum {
5886         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5887                                | TARGET_MAP_PRIVATE
5888                                | TARGET_MAP_FIXED
5889                                | TARGET_MAP_ANONYMOUS
5890                                | TARGET_MAP_DENYWRITE
5891                                | TARGET_MAP_EXECUTABLE
5892                                | TARGET_MAP_UNINITIALIZED
5893                                | TARGET_MAP_GROWSDOWN
5894                                | TARGET_MAP_LOCKED
5895                                | TARGET_MAP_NORESERVE
5896                                | TARGET_MAP_POPULATE
5897                                | TARGET_MAP_NONBLOCK
5898                                | TARGET_MAP_STACK
5899                                | TARGET_MAP_HUGETLB
5900                                | TARGET_MAP_32BIT
5901                                | TARGET_MAP_HUGE_2MB
5902                                | TARGET_MAP_HUGE_1GB
5903     };
5904     int host_flags;
5905 
5906     switch (target_flags & TARGET_MAP_TYPE) {
5907     case TARGET_MAP_PRIVATE:
5908         host_flags = MAP_PRIVATE;
5909         break;
5910     case TARGET_MAP_SHARED:
5911         host_flags = MAP_SHARED;
5912         break;
5913     case TARGET_MAP_SHARED_VALIDATE:
5914         /*
5915          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5916          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5917          */
5918         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5919             return -TARGET_EOPNOTSUPP;
5920         }
5921         host_flags = MAP_SHARED_VALIDATE;
5922         if (target_flags & TARGET_MAP_SYNC) {
5923             host_flags |= MAP_SYNC;
5924         }
5925         break;
5926     default:
5927         return -TARGET_EINVAL;
5928     }
5929     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5930 
5931     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5932 }
5933 
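/*
 * Illustrative sketch, not part of the original syscall.c: what a program
 * observes when it requests the MAP_SHARED_VALIDATE | MAP_SYNC combination
 * that do_mmap() above forwards MAP_SYNC for.  Assumes a libc that exposes
 * MAP_SHARED_VALIDATE and MAP_SYNC (otherwise they come from <linux/mman.h>).
 * On anything but a DAX-capable filesystem the kernel rejects the request
 * with EOPNOTSUPP rather than silently dropping the flag, which is exactly
 * the guarantee MAP_SHARED_VALIDATE exists to provide.
 */
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void try_synchronous_mapping(const char *path)
{
    int fd = open(path, O_RDWR);
    if (fd < 0) {
        perror("open");
        return;
    }
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
    if (p == MAP_FAILED) {
        /* EOPNOTSUPP unless the file lives on DAX-capable storage. */
        perror("mmap(MAP_SHARED_VALIDATE | MAP_SYNC)");
    } else {
        munmap(p, 4096);
    }
    close(fd);
}
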
5934 /*
5935  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5936  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5937  */
5938 #if defined(TARGET_I386)
5939 
5940 /* NOTE: there is really one LDT for all the threads */
5941 static uint8_t *ldt_table;
5942 
5943 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5944 {
5945     int size;
5946     void *p;
5947 
5948     if (!ldt_table)
5949         return 0;
5950     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5951     if (size > bytecount)
5952         size = bytecount;
5953     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5954     if (!p)
5955         return -TARGET_EFAULT;
5956     /* ??? Should this be byteswapped?  */
5957     memcpy(p, ldt_table, size);
5958     unlock_user(p, ptr, size);
5959     return size;
5960 }
5961 
5962 /* XXX: add locking support */
5963 static abi_long write_ldt(CPUX86State *env,
5964                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5965 {
5966     struct target_modify_ldt_ldt_s ldt_info;
5967     struct target_modify_ldt_ldt_s *target_ldt_info;
5968     int seg_32bit, contents, read_exec_only, limit_in_pages;
5969     int seg_not_present, useable, lm;
5970     uint32_t *lp, entry_1, entry_2;
5971 
5972     if (bytecount != sizeof(ldt_info))
5973         return -TARGET_EINVAL;
5974     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5975         return -TARGET_EFAULT;
5976     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5977     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5978     ldt_info.limit = tswap32(target_ldt_info->limit);
5979     ldt_info.flags = tswap32(target_ldt_info->flags);
5980     unlock_user_struct(target_ldt_info, ptr, 0);
5981 
5982     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5983         return -TARGET_EINVAL;
5984     seg_32bit = ldt_info.flags & 1;
5985     contents = (ldt_info.flags >> 1) & 3;
5986     read_exec_only = (ldt_info.flags >> 3) & 1;
5987     limit_in_pages = (ldt_info.flags >> 4) & 1;
5988     seg_not_present = (ldt_info.flags >> 5) & 1;
5989     useable = (ldt_info.flags >> 6) & 1;
5990 #ifdef TARGET_ABI32
5991     lm = 0;
5992 #else
5993     lm = (ldt_info.flags >> 7) & 1;
5994 #endif
5995     if (contents == 3) {
5996         if (oldmode)
5997             return -TARGET_EINVAL;
5998         if (seg_not_present == 0)
5999             return -TARGET_EINVAL;
6000     }
6001     /* allocate the LDT */
6002     if (!ldt_table) {
6003         env->ldt.base = target_mmap(0,
6004                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6005                                     PROT_READ|PROT_WRITE,
6006                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6007         if (env->ldt.base == -1)
6008             return -TARGET_ENOMEM;
6009         memset(g2h_untagged(env->ldt.base), 0,
6010                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6011         env->ldt.limit = 0xffff;
6012         ldt_table = g2h_untagged(env->ldt.base);
6013     }
6014 
6015     /* NOTE: same code as Linux kernel */
6016     /* Allow LDTs to be cleared by the user. */
6017     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6018         if (oldmode ||
6019             (contents == 0		&&
6020              read_exec_only == 1	&&
6021              seg_32bit == 0		&&
6022              limit_in_pages == 0	&&
6023              seg_not_present == 1	&&
6024              useable == 0 )) {
6025             entry_1 = 0;
6026             entry_2 = 0;
6027             goto install;
6028         }
6029     }
6030 
6031     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6032         (ldt_info.limit & 0x0ffff);
6033     entry_2 = (ldt_info.base_addr & 0xff000000) |
6034         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6035         (ldt_info.limit & 0xf0000) |
6036         ((read_exec_only ^ 1) << 9) |
6037         (contents << 10) |
6038         ((seg_not_present ^ 1) << 15) |
6039         (seg_32bit << 22) |
6040         (limit_in_pages << 23) |
6041         (lm << 21) |
6042         0x7000;
6043     if (!oldmode)
6044         entry_2 |= (useable << 20);
6045 
6046     /* Install the new entry ...  */
6047 install:
6048     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6049     lp[0] = tswap32(entry_1);
6050     lp[1] = tswap32(entry_2);
6051     return 0;
6052 }
6053 
6054 /* specific and weird i386 syscalls */
6055 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6056                               unsigned long bytecount)
6057 {
6058     abi_long ret;
6059 
6060     switch (func) {
6061     case 0:
6062         ret = read_ldt(ptr, bytecount);
6063         break;
6064     case 1:
6065         ret = write_ldt(env, ptr, bytecount, 1);
6066         break;
6067     case 0x11:
6068         ret = write_ldt(env, ptr, bytecount, 0);
6069         break;
6070     default:
6071         ret = -TARGET_ENOSYS;
6072         break;
6073     }
6074     return ret;
6075 }
6076 
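/*
 * Illustrative sketch, not part of the original syscall.c: the guest-side
 * call that do_modify_ldt() above emulates.  Only meaningful on x86; assumes
 * the host <asm/ldt.h> provides struct user_desc and uses func 0x11 ("write,
 * new format", the oldmode == 0 path of write_ldt()).
 */
#include <asm/ldt.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int install_example_ldt_entry(void)
{
    struct user_desc desc;

    memset(&desc, 0, sizeof(desc));
    desc.entry_number = 0;        /* first LDT slot */
    desc.base_addr = 0;           /* flat data segment ... */
    desc.limit = 0xfffff;         /* ... covering the full 4 GiB */
    desc.seg_32bit = 1;
    desc.limit_in_pages = 1;
    desc.useable = 1;

    /*
     * A segment register would select this entry with (0 << 3) | 0x7,
     * i.e. the LDT table indicator plus RPL 3.
     */
    return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
}
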
6077 #if defined(TARGET_ABI32)
6078 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6079 {
6080     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6081     struct target_modify_ldt_ldt_s ldt_info;
6082     struct target_modify_ldt_ldt_s *target_ldt_info;
6083     int seg_32bit, contents, read_exec_only, limit_in_pages;
6084     int seg_not_present, useable, lm;
6085     uint32_t *lp, entry_1, entry_2;
6086     int i;
6087 
6088     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6089     if (!target_ldt_info)
6090         return -TARGET_EFAULT;
6091     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6092     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6093     ldt_info.limit = tswap32(target_ldt_info->limit);
6094     ldt_info.flags = tswap32(target_ldt_info->flags);
6095     if (ldt_info.entry_number == -1) {
6096         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6097             if (gdt_table[i] == 0) {
6098                 ldt_info.entry_number = i;
6099                 target_ldt_info->entry_number = tswap32(i);
6100                 break;
6101             }
6102         }
6103     }
6104     unlock_user_struct(target_ldt_info, ptr, 1);
6105 
6106     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6107         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6108            return -TARGET_EINVAL;
6109     seg_32bit = ldt_info.flags & 1;
6110     contents = (ldt_info.flags >> 1) & 3;
6111     read_exec_only = (ldt_info.flags >> 3) & 1;
6112     limit_in_pages = (ldt_info.flags >> 4) & 1;
6113     seg_not_present = (ldt_info.flags >> 5) & 1;
6114     useable = (ldt_info.flags >> 6) & 1;
6115 #ifdef TARGET_ABI32
6116     lm = 0;
6117 #else
6118     lm = (ldt_info.flags >> 7) & 1;
6119 #endif
6120 
6121     if (contents == 3) {
6122         if (seg_not_present == 0)
6123             return -TARGET_EINVAL;
6124     }
6125 
6126     /* NOTE: same code as Linux kernel */
6127     /* Allow LDTs to be cleared by the user. */
6128     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6129         if ((contents == 0             &&
6130              read_exec_only == 1       &&
6131              seg_32bit == 0            &&
6132              limit_in_pages == 0       &&
6133              seg_not_present == 1      &&
6134              useable == 0 )) {
6135             entry_1 = 0;
6136             entry_2 = 0;
6137             goto install;
6138         }
6139     }
6140 
6141     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6142         (ldt_info.limit & 0x0ffff);
6143     entry_2 = (ldt_info.base_addr & 0xff000000) |
6144         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6145         (ldt_info.limit & 0xf0000) |
6146         ((read_exec_only ^ 1) << 9) |
6147         (contents << 10) |
6148         ((seg_not_present ^ 1) << 15) |
6149         (seg_32bit << 22) |
6150         (limit_in_pages << 23) |
6151         (useable << 20) |
6152         (lm << 21) |
6153         0x7000;
6154 
6155     /* Install the new entry ...  */
6156 install:
6157     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6158     lp[0] = tswap32(entry_1);
6159     lp[1] = tswap32(entry_2);
6160     return 0;
6161 }
6162 
6163 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6164 {
6165     struct target_modify_ldt_ldt_s *target_ldt_info;
6166     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6167     uint32_t base_addr, limit, flags;
6168     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6169     int seg_not_present, useable, lm;
6170     uint32_t *lp, entry_1, entry_2;
6171 
6172     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6173     if (!target_ldt_info)
6174         return -TARGET_EFAULT;
6175     idx = tswap32(target_ldt_info->entry_number);
6176     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6177         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6178         unlock_user_struct(target_ldt_info, ptr, 1);
6179         return -TARGET_EINVAL;
6180     }
6181     lp = (uint32_t *)(gdt_table + idx);
6182     entry_1 = tswap32(lp[0]);
6183     entry_2 = tswap32(lp[1]);
6184 
6185     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6186     contents = (entry_2 >> 10) & 3;
6187     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6188     seg_32bit = (entry_2 >> 22) & 1;
6189     limit_in_pages = (entry_2 >> 23) & 1;
6190     useable = (entry_2 >> 20) & 1;
6191 #ifdef TARGET_ABI32
6192     lm = 0;
6193 #else
6194     lm = (entry_2 >> 21) & 1;
6195 #endif
6196     flags = (seg_32bit << 0) | (contents << 1) |
6197         (read_exec_only << 3) | (limit_in_pages << 4) |
6198         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6199     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6200     base_addr = (entry_1 >> 16) |
6201         (entry_2 & 0xff000000) |
6202         ((entry_2 & 0xff) << 16);
6203     target_ldt_info->base_addr = tswapal(base_addr);
6204     target_ldt_info->limit = tswap32(limit);
6205     target_ldt_info->flags = tswap32(flags);
6206     unlock_user_struct(target_ldt_info, ptr, 1);
6207     return 0;
6208 }
6209 
6210 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6211 {
6212     return -TARGET_ENOSYS;
6213 }
6214 #else
6215 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6216 {
6217     abi_long ret = 0;
6218     abi_ulong val;
6219     int idx;
6220 
6221     switch(code) {
6222     case TARGET_ARCH_SET_GS:
6223     case TARGET_ARCH_SET_FS:
6224         if (code == TARGET_ARCH_SET_GS)
6225             idx = R_GS;
6226         else
6227             idx = R_FS;
6228         cpu_x86_load_seg(env, idx, 0);
6229         env->segs[idx].base = addr;
6230         break;
6231     case TARGET_ARCH_GET_GS:
6232     case TARGET_ARCH_GET_FS:
6233         if (code == TARGET_ARCH_GET_GS)
6234             idx = R_GS;
6235         else
6236             idx = R_FS;
6237         val = env->segs[idx].base;
6238         if (put_user(val, addr, abi_ulong))
6239             ret = -TARGET_EFAULT;
6240         break;
6241     default:
6242         ret = -TARGET_EINVAL;
6243         break;
6244     }
6245     return ret;
6246 }
6247 #endif /* defined(TARGET_ABI32) */
6248 #endif /* defined(TARGET_I386) */
6249 
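/*
 * Illustrative sketch, not part of the original syscall.c: the x86_64 call
 * that the TARGET_ARCH_GET_FS case of do_arch_prctl() above services.
 * Assumes an x86_64 host where <asm/prctl.h> defines ARCH_GET_FS and
 * <sys/syscall.h> defines SYS_arch_prctl.
 */
#include <asm/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static void show_fs_base(void)
{
    unsigned long fs_base = 0;

    if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) == 0) {
        /* Normally the TLS block installed by the C library. */
        printf("FS base: 0x%lx\n", fs_base);
    }
}
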
6250 /*
6251  * These constants are generic.  Supply any that are missing from the host.
6252  */
6253 #ifndef PR_SET_NAME
6254 # define PR_SET_NAME    15
6255 # define PR_GET_NAME    16
6256 #endif
6257 #ifndef PR_SET_FP_MODE
6258 # define PR_SET_FP_MODE 45
6259 # define PR_GET_FP_MODE 46
6260 # define PR_FP_MODE_FR   (1 << 0)
6261 # define PR_FP_MODE_FRE  (1 << 1)
6262 #endif
6263 #ifndef PR_SVE_SET_VL
6264 # define PR_SVE_SET_VL  50
6265 # define PR_SVE_GET_VL  51
6266 # define PR_SVE_VL_LEN_MASK  0xffff
6267 # define PR_SVE_VL_INHERIT   (1 << 17)
6268 #endif
6269 #ifndef PR_PAC_RESET_KEYS
6270 # define PR_PAC_RESET_KEYS  54
6271 # define PR_PAC_APIAKEY   (1 << 0)
6272 # define PR_PAC_APIBKEY   (1 << 1)
6273 # define PR_PAC_APDAKEY   (1 << 2)
6274 # define PR_PAC_APDBKEY   (1 << 3)
6275 # define PR_PAC_APGAKEY   (1 << 4)
6276 #endif
6277 #ifndef PR_SET_TAGGED_ADDR_CTRL
6278 # define PR_SET_TAGGED_ADDR_CTRL 55
6279 # define PR_GET_TAGGED_ADDR_CTRL 56
6280 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6281 #endif
6282 #ifndef PR_SET_IO_FLUSHER
6283 # define PR_SET_IO_FLUSHER 57
6284 # define PR_GET_IO_FLUSHER 58
6285 #endif
6286 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6287 # define PR_SET_SYSCALL_USER_DISPATCH 59
6288 #endif
6289 #ifndef PR_SME_SET_VL
6290 # define PR_SME_SET_VL  63
6291 # define PR_SME_GET_VL  64
6292 # define PR_SME_VL_LEN_MASK  0xffff
6293 # define PR_SME_VL_INHERIT   (1 << 17)
6294 #endif
6295 
6296 #include "target_prctl.h"
6297 
6298 static abi_long do_prctl_inval0(CPUArchState *env)
6299 {
6300     return -TARGET_EINVAL;
6301 }
6302 
6303 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6304 {
6305     return -TARGET_EINVAL;
6306 }
6307 
6308 #ifndef do_prctl_get_fp_mode
6309 #define do_prctl_get_fp_mode do_prctl_inval0
6310 #endif
6311 #ifndef do_prctl_set_fp_mode
6312 #define do_prctl_set_fp_mode do_prctl_inval1
6313 #endif
6314 #ifndef do_prctl_sve_get_vl
6315 #define do_prctl_sve_get_vl do_prctl_inval0
6316 #endif
6317 #ifndef do_prctl_sve_set_vl
6318 #define do_prctl_sve_set_vl do_prctl_inval1
6319 #endif
6320 #ifndef do_prctl_reset_keys
6321 #define do_prctl_reset_keys do_prctl_inval1
6322 #endif
6323 #ifndef do_prctl_set_tagged_addr_ctrl
6324 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6325 #endif
6326 #ifndef do_prctl_get_tagged_addr_ctrl
6327 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6328 #endif
6329 #ifndef do_prctl_get_unalign
6330 #define do_prctl_get_unalign do_prctl_inval1
6331 #endif
6332 #ifndef do_prctl_set_unalign
6333 #define do_prctl_set_unalign do_prctl_inval1
6334 #endif
6335 #ifndef do_prctl_sme_get_vl
6336 #define do_prctl_sme_get_vl do_prctl_inval0
6337 #endif
6338 #ifndef do_prctl_sme_set_vl
6339 #define do_prctl_sme_set_vl do_prctl_inval1
6340 #endif
6341 
6342 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6343                          abi_long arg3, abi_long arg4, abi_long arg5)
6344 {
6345     abi_long ret;
6346 
6347     switch (option) {
6348     case PR_GET_PDEATHSIG:
6349         {
6350             int deathsig;
6351             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6352                                   arg3, arg4, arg5));
6353             if (!is_error(ret) &&
6354                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6355                 return -TARGET_EFAULT;
6356             }
6357             return ret;
6358         }
6359     case PR_SET_PDEATHSIG:
6360         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6361                                arg3, arg4, arg5));
6362     case PR_GET_NAME:
6363         {
6364             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6365             if (!name) {
6366                 return -TARGET_EFAULT;
6367             }
6368             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6369                                   arg3, arg4, arg5));
6370             unlock_user(name, arg2, 16);
6371             return ret;
6372         }
6373     case PR_SET_NAME:
6374         {
6375             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6376             if (!name) {
6377                 return -TARGET_EFAULT;
6378             }
6379             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6380                                   arg3, arg4, arg5));
6381             unlock_user(name, arg2, 0);
6382             return ret;
6383         }
6384     case PR_GET_FP_MODE:
6385         return do_prctl_get_fp_mode(env);
6386     case PR_SET_FP_MODE:
6387         return do_prctl_set_fp_mode(env, arg2);
6388     case PR_SVE_GET_VL:
6389         return do_prctl_sve_get_vl(env);
6390     case PR_SVE_SET_VL:
6391         return do_prctl_sve_set_vl(env, arg2);
6392     case PR_SME_GET_VL:
6393         return do_prctl_sme_get_vl(env);
6394     case PR_SME_SET_VL:
6395         return do_prctl_sme_set_vl(env, arg2);
6396     case PR_PAC_RESET_KEYS:
6397         if (arg3 || arg4 || arg5) {
6398             return -TARGET_EINVAL;
6399         }
6400         return do_prctl_reset_keys(env, arg2);
6401     case PR_SET_TAGGED_ADDR_CTRL:
6402         if (arg3 || arg4 || arg5) {
6403             return -TARGET_EINVAL;
6404         }
6405         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6406     case PR_GET_TAGGED_ADDR_CTRL:
6407         if (arg2 || arg3 || arg4 || arg5) {
6408             return -TARGET_EINVAL;
6409         }
6410         return do_prctl_get_tagged_addr_ctrl(env);
6411 
6412     case PR_GET_UNALIGN:
6413         return do_prctl_get_unalign(env, arg2);
6414     case PR_SET_UNALIGN:
6415         return do_prctl_set_unalign(env, arg2);
6416 
6417     case PR_CAP_AMBIENT:
6418     case PR_CAPBSET_READ:
6419     case PR_CAPBSET_DROP:
6420     case PR_GET_DUMPABLE:
6421     case PR_SET_DUMPABLE:
6422     case PR_GET_KEEPCAPS:
6423     case PR_SET_KEEPCAPS:
6424     case PR_GET_SECUREBITS:
6425     case PR_SET_SECUREBITS:
6426     case PR_GET_TIMING:
6427     case PR_SET_TIMING:
6428     case PR_GET_TIMERSLACK:
6429     case PR_SET_TIMERSLACK:
6430     case PR_MCE_KILL:
6431     case PR_MCE_KILL_GET:
6432     case PR_GET_NO_NEW_PRIVS:
6433     case PR_SET_NO_NEW_PRIVS:
6434     case PR_GET_IO_FLUSHER:
6435     case PR_SET_IO_FLUSHER:
6436     case PR_SET_CHILD_SUBREAPER:
6437     case PR_GET_SPECULATION_CTRL:
6438     case PR_SET_SPECULATION_CTRL:
6439         /* These prctl options have no pointer arguments; pass them on. */
6440         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6441 
6442     case PR_GET_CHILD_SUBREAPER:
6443         {
6444             int val;
6445             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6446                                   arg3, arg4, arg5));
6447             if (!is_error(ret) && put_user_s32(val, arg2)) {
6448                 return -TARGET_EFAULT;
6449             }
6450             return ret;
6451         }
6452 
6453     case PR_GET_TID_ADDRESS:
6454         {
6455             TaskState *ts = get_task_state(env_cpu(env));
6456             return put_user_ual(ts->child_tidptr, arg2);
6457         }
6458 
6459     case PR_GET_FPEXC:
6460     case PR_SET_FPEXC:
6461         /* Was used for SPE on PowerPC. */
6462         return -TARGET_EINVAL;
6463 
6464     case PR_GET_ENDIAN:
6465     case PR_SET_ENDIAN:
6466     case PR_GET_FPEMU:
6467     case PR_SET_FPEMU:
6468     case PR_SET_MM:
6469     case PR_GET_SECCOMP:
6470     case PR_SET_SECCOMP:
6471     case PR_SET_SYSCALL_USER_DISPATCH:
6472     case PR_GET_THP_DISABLE:
6473     case PR_SET_THP_DISABLE:
6474     case PR_GET_TSC:
6475     case PR_SET_TSC:
6476         /* Refuse these so the target cannot disable features we rely on. */
6477         return -TARGET_EINVAL;
6478 
6479     default:
6480         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6481                       option);
6482         return -TARGET_EINVAL;
6483     }
6484 }
6485 
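/*
 * Illustrative sketch, not part of the original syscall.c: the fixed 16-byte
 * buffer that the PR_SET_NAME/PR_GET_NAME cases of do_prctl() above map with
 * lock_user().  The kernel truncates the name to 15 characters plus a NUL,
 * which is why exactly 16 bytes of guest memory are locked.
 */
#include <sys/prctl.h>
#include <stdio.h>
#include <string.h>

static void rename_thread_example(void)
{
    char name[16];

    prctl(PR_SET_NAME, "qemu-demo-thread"); /* silently truncated to 15 chars */
    memset(name, 0, sizeof(name));
    if (prctl(PR_GET_NAME, name) == 0) {
        printf("thread name: %s\n", name);  /* prints "qemu-demo-threa" */
    }
}
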
6486 #define NEW_STACK_SIZE 0x40000
6487 
6488 
6489 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6490 typedef struct {
6491     CPUArchState *env;
6492     pthread_mutex_t mutex;
6493     pthread_cond_t cond;
6494     pthread_t thread;
6495     uint32_t tid;
6496     abi_ulong child_tidptr;
6497     abi_ulong parent_tidptr;
6498     sigset_t sigmask;
6499 } new_thread_info;
6500 
6501 static void *clone_func(void *arg)
6502 {
6503     new_thread_info *info = arg;
6504     CPUArchState *env;
6505     CPUState *cpu;
6506     TaskState *ts;
6507 
6508     rcu_register_thread();
6509     tcg_register_thread();
6510     env = info->env;
6511     cpu = env_cpu(env);
6512     thread_cpu = cpu;
6513     ts = get_task_state(cpu);
6514     info->tid = sys_gettid();
6515     task_settid(ts);
6516     if (info->child_tidptr)
6517         put_user_u32(info->tid, info->child_tidptr);
6518     if (info->parent_tidptr)
6519         put_user_u32(info->tid, info->parent_tidptr);
6520     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6521     /* Enable signals.  */
6522     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6523     /* Signal to the parent that we're ready.  */
6524     pthread_mutex_lock(&info->mutex);
6525     pthread_cond_broadcast(&info->cond);
6526     pthread_mutex_unlock(&info->mutex);
6527     /* Wait until the parent has finished initializing the tls state.  */
6528     pthread_mutex_lock(&clone_lock);
6529     pthread_mutex_unlock(&clone_lock);
6530     cpu_loop(env);
6531     /* never exits */
6532     return NULL;
6533 }
6534 
6535 /* do_fork() must return host values and target errnos (unlike most
6536    do_*() functions). */
6537 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6538                    abi_ulong parent_tidptr, target_ulong newtls,
6539                    abi_ulong child_tidptr)
6540 {
6541     CPUState *cpu = env_cpu(env);
6542     int ret;
6543     TaskState *ts;
6544     CPUState *new_cpu;
6545     CPUArchState *new_env;
6546     sigset_t sigmask;
6547 
6548     flags &= ~CLONE_IGNORED_FLAGS;
6549 
6550     /* Emulate vfork() with fork() */
6551     if (flags & CLONE_VFORK)
6552         flags &= ~(CLONE_VFORK | CLONE_VM);
6553 
6554     if (flags & CLONE_VM) {
6555         TaskState *parent_ts = get_task_state(cpu);
6556         new_thread_info info;
6557         pthread_attr_t attr;
6558 
6559         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6560             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6561             return -TARGET_EINVAL;
6562         }
6563 
6564         ts = g_new0(TaskState, 1);
6565         init_task_state(ts);
6566 
6567         /* Grab a mutex so that thread setup appears atomic.  */
6568         pthread_mutex_lock(&clone_lock);
6569 
6570         /*
6571          * If this is our first additional thread, we need to ensure we
6572          * generate code for parallel execution and flush old translations.
6573          * Do this now so that the copy gets CF_PARALLEL too.
6574          */
6575         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6576             tcg_cflags_set(cpu, CF_PARALLEL);
6577             tb_flush(cpu);
6578         }
6579 
6580         /* we create a new CPU instance. */
6581         new_env = cpu_copy(env);
6582         /* Init regs that differ from the parent.  */
6583         cpu_clone_regs_child(new_env, newsp, flags);
6584         cpu_clone_regs_parent(env, flags);
6585         new_cpu = env_cpu(new_env);
6586         new_cpu->opaque = ts;
6587         ts->bprm = parent_ts->bprm;
6588         ts->info = parent_ts->info;
6589         ts->signal_mask = parent_ts->signal_mask;
6590 
6591         if (flags & CLONE_CHILD_CLEARTID) {
6592             ts->child_tidptr = child_tidptr;
6593         }
6594 
6595         if (flags & CLONE_SETTLS) {
6596             cpu_set_tls (new_env, newtls);
6597         }
6598 
6599         memset(&info, 0, sizeof(info));
6600         pthread_mutex_init(&info.mutex, NULL);
6601         pthread_mutex_lock(&info.mutex);
6602         pthread_cond_init(&info.cond, NULL);
6603         info.env = new_env;
6604         if (flags & CLONE_CHILD_SETTID) {
6605             info.child_tidptr = child_tidptr;
6606         }
6607         if (flags & CLONE_PARENT_SETTID) {
6608             info.parent_tidptr = parent_tidptr;
6609         }
6610 
6611         ret = pthread_attr_init(&attr);
6612         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6613         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6614         /* It is not safe to deliver signals until the child has finished
6615            initializing, so temporarily block all signals.  */
6616         sigfillset(&sigmask);
6617         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6618         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6619 
6620         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6621         /* TODO: Free new CPU state if thread creation failed.  */
6622 
6623         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6624         pthread_attr_destroy(&attr);
6625         if (ret == 0) {
6626             /* Wait for the child to initialize.  */
6627             pthread_cond_wait(&info.cond, &info.mutex);
6628             ret = info.tid;
6629         } else {
6630             ret = -1;
6631         }
6632         pthread_mutex_unlock(&info.mutex);
6633         pthread_cond_destroy(&info.cond);
6634         pthread_mutex_destroy(&info.mutex);
6635         pthread_mutex_unlock(&clone_lock);
6636     } else {
6637         /* if no CLONE_VM, we consider it is a fork */
6638         if (flags & CLONE_INVALID_FORK_FLAGS) {
6639             return -TARGET_EINVAL;
6640         }
6641 
6642         /* We can't support custom termination signals */
6643         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6644             return -TARGET_EINVAL;
6645         }
6646 
6647 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6648         if (flags & CLONE_PIDFD) {
6649             return -TARGET_EINVAL;
6650         }
6651 #endif
6652 
6653         /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6654         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6655             return -TARGET_EINVAL;
6656         }
6657 
6658         if (block_signals()) {
6659             return -QEMU_ERESTARTSYS;
6660         }
6661 
6662         fork_start();
6663         ret = fork();
6664         if (ret == 0) {
6665             /* Child Process.  */
6666             cpu_clone_regs_child(env, newsp, flags);
6667             fork_end(ret);
6668             /* There is a race condition here.  The parent process could
6669                theoretically read the TID in the child process before the child
6670                tid is set.  This would require using either ptrace
6671                (not implemented) or having *_tidptr to point at a shared memory
6672                mapping.  We can't repeat the spinlock hack used above because
6673                the child process gets its own copy of the lock.  */
6674             if (flags & CLONE_CHILD_SETTID)
6675                 put_user_u32(sys_gettid(), child_tidptr);
6676             if (flags & CLONE_PARENT_SETTID)
6677                 put_user_u32(sys_gettid(), parent_tidptr);
6678             ts = get_task_state(cpu);
6679             if (flags & CLONE_SETTLS)
6680                 cpu_set_tls (env, newtls);
6681             if (flags & CLONE_CHILD_CLEARTID)
6682                 ts->child_tidptr = child_tidptr;
6683         } else {
6684             cpu_clone_regs_parent(env, flags);
6685             if (flags & CLONE_PIDFD) {
6686                 int pid_fd = 0;
6687 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6688                 int pid_child = ret;
6689                 pid_fd = pidfd_open(pid_child, 0);
6690                 if (pid_fd >= 0) {
6691                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6692                                                | FD_CLOEXEC);
6693                 } else {
6694                         pid_fd = 0;
6695                 }
6696 #endif
6697                 put_user_u32(pid_fd, parent_tidptr);
6698             }
6699             fork_end(ret);
6700         }
6701         g_assert(!cpu_in_exclusive_context(cpu));
6702     }
6703     return ret;
6704 }
6705 
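/*
 * Illustrative sketch, not part of the original syscall.c: the
 * fork()-then-pidfd_open() pattern that do_fork() above uses to emulate
 * CLONE_PIDFD.  Assumes kernel headers that define __NR_pidfd_open
 * (Linux >= 5.3); the raw syscall is used because older libcs have no
 * wrapper, exactly as the emulation does.
 */
#include <sys/syscall.h>
#include <sys/wait.h>
#include <poll.h>
#include <unistd.h>

static int fork_with_pidfd(void)
{
    pid_t child = fork();

    if (child == 0) {
        _exit(0);                          /* child */
    }
    if (child < 0) {
        return -1;
    }

    int pidfd = syscall(__NR_pidfd_open, child, 0);
    if (pidfd >= 0) {
        struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
        poll(&pfd, 1, -1);                 /* readable once the child exits */
        close(pidfd);
    }
    waitpid(child, NULL, 0);
    return 0;
}
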
6706 /* Warning: does not handle Linux-specific flags... */
6707 static int target_to_host_fcntl_cmd(int cmd)
6708 {
6709     int ret;
6710 
6711     switch(cmd) {
6712     case TARGET_F_DUPFD:
6713     case TARGET_F_GETFD:
6714     case TARGET_F_SETFD:
6715     case TARGET_F_GETFL:
6716     case TARGET_F_SETFL:
6717     case TARGET_F_OFD_GETLK:
6718     case TARGET_F_OFD_SETLK:
6719     case TARGET_F_OFD_SETLKW:
6720         ret = cmd;
6721         break;
6722     case TARGET_F_GETLK:
6723         ret = F_GETLK;
6724         break;
6725     case TARGET_F_SETLK:
6726         ret = F_SETLK;
6727         break;
6728     case TARGET_F_SETLKW:
6729         ret = F_SETLKW;
6730         break;
6731     case TARGET_F_GETOWN:
6732         ret = F_GETOWN;
6733         break;
6734     case TARGET_F_SETOWN:
6735         ret = F_SETOWN;
6736         break;
6737     case TARGET_F_GETSIG:
6738         ret = F_GETSIG;
6739         break;
6740     case TARGET_F_SETSIG:
6741         ret = F_SETSIG;
6742         break;
6743 #if TARGET_ABI_BITS == 32
6744     case TARGET_F_GETLK64:
6745         ret = F_GETLK;
6746         break;
6747     case TARGET_F_SETLK64:
6748         ret = F_SETLK;
6749         break;
6750     case TARGET_F_SETLKW64:
6751         ret = F_SETLKW;
6752         break;
6753 #endif
6754     case TARGET_F_SETLEASE:
6755         ret = F_SETLEASE;
6756         break;
6757     case TARGET_F_GETLEASE:
6758         ret = F_GETLEASE;
6759         break;
6760 #ifdef F_DUPFD_CLOEXEC
6761     case TARGET_F_DUPFD_CLOEXEC:
6762         ret = F_DUPFD_CLOEXEC;
6763         break;
6764 #endif
6765     case TARGET_F_NOTIFY:
6766         ret = F_NOTIFY;
6767         break;
6768 #ifdef F_GETOWN_EX
6769     case TARGET_F_GETOWN_EX:
6770         ret = F_GETOWN_EX;
6771         break;
6772 #endif
6773 #ifdef F_SETOWN_EX
6774     case TARGET_F_SETOWN_EX:
6775         ret = F_SETOWN_EX;
6776         break;
6777 #endif
6778 #ifdef F_SETPIPE_SZ
6779     case TARGET_F_SETPIPE_SZ:
6780         ret = F_SETPIPE_SZ;
6781         break;
6782     case TARGET_F_GETPIPE_SZ:
6783         ret = F_GETPIPE_SZ;
6784         break;
6785 #endif
6786 #ifdef F_ADD_SEALS
6787     case TARGET_F_ADD_SEALS:
6788         ret = F_ADD_SEALS;
6789         break;
6790     case TARGET_F_GET_SEALS:
6791         ret = F_GET_SEALS;
6792         break;
6793 #endif
6794     default:
6795         ret = -TARGET_EINVAL;
6796         break;
6797     }
6798 
6799 #if defined(__powerpc64__)
6800     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6801      * 14, which the kernel does not support. The glibc fcntl() wrapper
6802      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6803      * the syscall directly, adjust to what the kernel supports.
6804      */
6805     if (ret >= F_GETLK && ret <= F_SETLKW) {
6806         ret -= F_GETLK - 5;
6807     }
6808 #endif
6809 
6810     return ret;
6811 }
6812 
6813 #define FLOCK_TRANSTBL \
6814     switch (type) { \
6815     TRANSTBL_CONVERT(F_RDLCK); \
6816     TRANSTBL_CONVERT(F_WRLCK); \
6817     TRANSTBL_CONVERT(F_UNLCK); \
6818     }
6819 
6820 static int target_to_host_flock(int type)
6821 {
6822 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6823     FLOCK_TRANSTBL
6824 #undef  TRANSTBL_CONVERT
6825     return -TARGET_EINVAL;
6826 }
6827 
6828 static int host_to_target_flock(int type)
6829 {
6830 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6831     FLOCK_TRANSTBL
6832 #undef  TRANSTBL_CONVERT
6833     /* If we don't know how to convert the value coming
6834      * from the host, we copy it to the target field as-is.
6835      */
6836     return type;
6837 }
6838 
6839 static inline abi_long copy_from_user_flock(struct flock *fl,
6840                                             abi_ulong target_flock_addr)
6841 {
6842     struct target_flock *target_fl;
6843     int l_type;
6844 
6845     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6846         return -TARGET_EFAULT;
6847     }
6848 
6849     __get_user(l_type, &target_fl->l_type);
6850     l_type = target_to_host_flock(l_type);
6851     if (l_type < 0) {
6852         return l_type;
6853     }
6854     fl->l_type = l_type;
6855     __get_user(fl->l_whence, &target_fl->l_whence);
6856     __get_user(fl->l_start, &target_fl->l_start);
6857     __get_user(fl->l_len, &target_fl->l_len);
6858     __get_user(fl->l_pid, &target_fl->l_pid);
6859     unlock_user_struct(target_fl, target_flock_addr, 0);
6860     return 0;
6861 }
6862 
6863 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6864                                           const struct flock *fl)
6865 {
6866     struct target_flock *target_fl;
6867     short l_type;
6868 
6869     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6870         return -TARGET_EFAULT;
6871     }
6872 
6873     l_type = host_to_target_flock(fl->l_type);
6874     __put_user(l_type, &target_fl->l_type);
6875     __put_user(fl->l_whence, &target_fl->l_whence);
6876     __put_user(fl->l_start, &target_fl->l_start);
6877     __put_user(fl->l_len, &target_fl->l_len);
6878     __put_user(fl->l_pid, &target_fl->l_pid);
6879     unlock_user_struct(target_fl, target_flock_addr, 1);
6880     return 0;
6881 }
6882 
6883 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6884 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6885 
6886 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6887 struct target_oabi_flock64 {
6888     abi_short l_type;
6889     abi_short l_whence;
6890     abi_llong l_start;
6891     abi_llong l_len;
6892     abi_int   l_pid;
6893 } QEMU_PACKED;
6894 
6895 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6896                                                    abi_ulong target_flock_addr)
6897 {
6898     struct target_oabi_flock64 *target_fl;
6899     int l_type;
6900 
6901     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6902         return -TARGET_EFAULT;
6903     }
6904 
6905     __get_user(l_type, &target_fl->l_type);
6906     l_type = target_to_host_flock(l_type);
6907     if (l_type < 0) {
6908         return l_type;
6909     }
6910     fl->l_type = l_type;
6911     __get_user(fl->l_whence, &target_fl->l_whence);
6912     __get_user(fl->l_start, &target_fl->l_start);
6913     __get_user(fl->l_len, &target_fl->l_len);
6914     __get_user(fl->l_pid, &target_fl->l_pid);
6915     unlock_user_struct(target_fl, target_flock_addr, 0);
6916     return 0;
6917 }
6918 
6919 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6920                                                  const struct flock *fl)
6921 {
6922     struct target_oabi_flock64 *target_fl;
6923     short l_type;
6924 
6925     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6926         return -TARGET_EFAULT;
6927     }
6928 
6929     l_type = host_to_target_flock(fl->l_type);
6930     __put_user(l_type, &target_fl->l_type);
6931     __put_user(fl->l_whence, &target_fl->l_whence);
6932     __put_user(fl->l_start, &target_fl->l_start);
6933     __put_user(fl->l_len, &target_fl->l_len);
6934     __put_user(fl->l_pid, &target_fl->l_pid);
6935     unlock_user_struct(target_fl, target_flock_addr, 1);
6936     return 0;
6937 }
6938 #endif
6939 
6940 static inline abi_long copy_from_user_flock64(struct flock *fl,
6941                                               abi_ulong target_flock_addr)
6942 {
6943     struct target_flock64 *target_fl;
6944     int l_type;
6945 
6946     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6947         return -TARGET_EFAULT;
6948     }
6949 
6950     __get_user(l_type, &target_fl->l_type);
6951     l_type = target_to_host_flock(l_type);
6952     if (l_type < 0) {
6953         return l_type;
6954     }
6955     fl->l_type = l_type;
6956     __get_user(fl->l_whence, &target_fl->l_whence);
6957     __get_user(fl->l_start, &target_fl->l_start);
6958     __get_user(fl->l_len, &target_fl->l_len);
6959     __get_user(fl->l_pid, &target_fl->l_pid);
6960     unlock_user_struct(target_fl, target_flock_addr, 0);
6961     return 0;
6962 }
6963 
6964 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6965                                             const struct flock *fl)
6966 {
6967     struct target_flock64 *target_fl;
6968     short l_type;
6969 
6970     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6971         return -TARGET_EFAULT;
6972     }
6973 
6974     l_type = host_to_target_flock(fl->l_type);
6975     __put_user(l_type, &target_fl->l_type);
6976     __put_user(fl->l_whence, &target_fl->l_whence);
6977     __put_user(fl->l_start, &target_fl->l_start);
6978     __put_user(fl->l_len, &target_fl->l_len);
6979     __put_user(fl->l_pid, &target_fl->l_pid);
6980     unlock_user_struct(target_fl, target_flock_addr, 1);
6981     return 0;
6982 }
6983 
6984 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6985 {
6986     struct flock fl;
6987 #ifdef F_GETOWN_EX
6988     struct f_owner_ex fox;
6989     struct target_f_owner_ex *target_fox;
6990 #endif
6991     abi_long ret;
6992     int host_cmd = target_to_host_fcntl_cmd(cmd);
6993 
6994     if (host_cmd == -TARGET_EINVAL)
6995         return host_cmd;
6996 
6997     switch(cmd) {
6998     case TARGET_F_GETLK:
6999         ret = copy_from_user_flock(&fl, arg);
7000         if (ret) {
7001             return ret;
7002         }
7003         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7004         if (ret == 0) {
7005             ret = copy_to_user_flock(arg, &fl);
7006         }
7007         break;
7008 
7009     case TARGET_F_SETLK:
7010     case TARGET_F_SETLKW:
7011         ret = copy_from_user_flock(&fl, arg);
7012         if (ret) {
7013             return ret;
7014         }
7015         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7016         break;
7017 
7018     case TARGET_F_GETLK64:
7019     case TARGET_F_OFD_GETLK:
7020         ret = copy_from_user_flock64(&fl, arg);
7021         if (ret) {
7022             return ret;
7023         }
7024         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7025         if (ret == 0) {
7026             ret = copy_to_user_flock64(arg, &fl);
7027         }
7028         break;
7029     case TARGET_F_SETLK64:
7030     case TARGET_F_SETLKW64:
7031     case TARGET_F_OFD_SETLK:
7032     case TARGET_F_OFD_SETLKW:
7033         ret = copy_from_user_flock64(&fl, arg);
7034         if (ret) {
7035             return ret;
7036         }
7037         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7038         break;
7039 
7040     case TARGET_F_GETFL:
7041         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7042         if (ret >= 0) {
7043             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7044             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7045             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7046                 ret |= TARGET_O_LARGEFILE;
7047             }
7048         }
7049         break;
7050 
7051     case TARGET_F_SETFL:
7052         ret = get_errno(safe_fcntl(fd, host_cmd,
7053                                    target_to_host_bitmask(arg,
7054                                                           fcntl_flags_tbl)));
7055         break;
7056 
7057 #ifdef F_GETOWN_EX
7058     case TARGET_F_GETOWN_EX:
7059         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7060         if (ret >= 0) {
7061             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7062                 return -TARGET_EFAULT;
7063             target_fox->type = tswap32(fox.type);
7064             target_fox->pid = tswap32(fox.pid);
7065             unlock_user_struct(target_fox, arg, 1);
7066         }
7067         break;
7068 #endif
7069 
7070 #ifdef F_SETOWN_EX
7071     case TARGET_F_SETOWN_EX:
7072         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7073             return -TARGET_EFAULT;
7074         fox.type = tswap32(target_fox->type);
7075         fox.pid = tswap32(target_fox->pid);
7076         unlock_user_struct(target_fox, arg, 0);
7077         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7078         break;
7079 #endif
7080 
7081     case TARGET_F_SETSIG:
7082         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7083         break;
7084 
7085     case TARGET_F_GETSIG:
7086         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7087         break;
7088 
7089     case TARGET_F_SETOWN:
7090     case TARGET_F_GETOWN:
7091     case TARGET_F_SETLEASE:
7092     case TARGET_F_GETLEASE:
7093     case TARGET_F_SETPIPE_SZ:
7094     case TARGET_F_GETPIPE_SZ:
7095     case TARGET_F_ADD_SEALS:
7096     case TARGET_F_GET_SEALS:
7097         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7098         break;
7099 
7100     default:
7101         ret = get_errno(safe_fcntl(fd, cmd, arg));
7102         break;
7103     }
7104     return ret;
7105 }
7106 
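/*
 * Illustrative sketch, not part of the original syscall.c: an
 * open-file-description lock of the kind the TARGET_F_OFD_SETLK/GETLK cases
 * of do_fcntl() above translate.  OFD commands require l_pid == 0; the
 * struct flock layout is the one copy_from_user_flock64() fills in.
 * Assumes _GNU_SOURCE so that <fcntl.h> exposes F_OFD_SETLK.
 */
#include <fcntl.h>
#include <unistd.h>

static int lock_first_page(int fd)
{
    struct flock fl = {
        .l_type   = F_WRLCK,
        .l_whence = SEEK_SET,
        .l_start  = 0,
        .l_len    = 4096,
        .l_pid    = 0,        /* mandatory for OFD locks */
    };

    /* 0 on success; -1 with EAGAIN/EACCES if a conflicting lock is held. */
    return fcntl(fd, F_OFD_SETLK, &fl);
}
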
7107 #ifdef USE_UID16
7108 
7109 static inline int high2lowuid(int uid)
7110 {
7111     if (uid > 65535)
7112         return 65534;
7113     else
7114         return uid;
7115 }
7116 
7117 static inline int high2lowgid(int gid)
7118 {
7119     if (gid > 65535)
7120         return 65534;
7121     else
7122         return gid;
7123 }
7124 
7125 static inline int low2highuid(int uid)
7126 {
7127     if ((int16_t)uid == -1)
7128         return -1;
7129     else
7130         return uid;
7131 }
7132 
7133 static inline int low2highgid(int gid)
7134 {
7135     if ((int16_t)gid == -1)
7136         return -1;
7137     else
7138         return gid;
7139 }
7140 static inline int tswapid(int id)
7141 {
7142     return tswap16(id);
7143 }
7144 
7145 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7146 
7147 #else /* !USE_UID16 */
7148 static inline int high2lowuid(int uid)
7149 {
7150     return uid;
7151 }
7152 static inline int high2lowgid(int gid)
7153 {
7154     return gid;
7155 }
7156 static inline int low2highuid(int uid)
7157 {
7158     return uid;
7159 }
7160 static inline int low2highgid(int gid)
7161 {
7162     return gid;
7163 }
7164 static inline int tswapid(int id)
7165 {
7166     return tswap32(id);
7167 }
7168 
7169 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7170 
7171 #endif /* USE_UID16 */
7172 
7173 /* We must do direct syscalls for setting UID/GID, because we want to
7174  * implement the Linux system call semantics of "change only for this thread",
7175  * not the libc/POSIX semantics of "change for all threads in process".
7176  * (See http://ewontfix.com/17/ for more details.)
7177  * We use the 32-bit version of the syscalls if present; if it is not
7178  * then either the host architecture supports 32-bit UIDs natively with
7179  * the standard syscall, or the 16-bit UID is the best we can do.
7180  */
7181 #ifdef __NR_setuid32
7182 #define __NR_sys_setuid __NR_setuid32
7183 #else
7184 #define __NR_sys_setuid __NR_setuid
7185 #endif
7186 #ifdef __NR_setgid32
7187 #define __NR_sys_setgid __NR_setgid32
7188 #else
7189 #define __NR_sys_setgid __NR_setgid
7190 #endif
7191 #ifdef __NR_setresuid32
7192 #define __NR_sys_setresuid __NR_setresuid32
7193 #else
7194 #define __NR_sys_setresuid __NR_setresuid
7195 #endif
7196 #ifdef __NR_setresgid32
7197 #define __NR_sys_setresgid __NR_setresgid32
7198 #else
7199 #define __NR_sys_setresgid __NR_setresgid
7200 #endif
7201 #ifdef __NR_setgroups32
7202 #define __NR_sys_setgroups __NR_setgroups32
7203 #else
7204 #define __NR_sys_setgroups __NR_setgroups
7205 #endif
7206 
7207 _syscall1(int, sys_setuid, uid_t, uid)
7208 _syscall1(int, sys_setgid, gid_t, gid)
7209 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7210 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7211 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7212 
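/*
 * Illustrative sketch, not part of the original syscall.c: the distinction
 * the comment above the __NR_sys_* definitions is about.  glibc's setuid()
 * broadcasts the change to every thread to provide the POSIX process-wide
 * semantics; the raw syscall, as issued by sys_setuid() here, affects only
 * the calling thread, which is what the guest's own C library expects when
 * it performs that broadcast itself.
 */
#include <sys/syscall.h>
#include <unistd.h>

static int set_uid_this_thread_only(uid_t uid)
{
    /* Uses the 32-bit variant of the syscall when the host provides one. */
    return syscall(__NR_sys_setuid, uid);
}
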
7213 void syscall_init(void)
7214 {
7215     IOCTLEntry *ie;
7216     const argtype *arg_type;
7217     int size;
7218 
7219     thunk_init(STRUCT_MAX);
7220 
7221 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7222 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7223 #include "syscall_types.h"
7224 #undef STRUCT
7225 #undef STRUCT_SPECIAL
7226 
7227     /* We patch the ioctl size if necessary. We rely on the fact that
7228        no ioctl has all the bits set to '1' in the size field. */
7229     ie = ioctl_entries;
7230     while (ie->target_cmd != 0) {
7231         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7232             TARGET_IOC_SIZEMASK) {
7233             arg_type = ie->arg_type;
7234             if (arg_type[0] != TYPE_PTR) {
7235                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7236                         ie->target_cmd);
7237                 exit(1);
7238             }
7239             arg_type++;
7240             size = thunk_type_size(arg_type, 0);
7241             ie->target_cmd = (ie->target_cmd &
7242                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7243                 (size << TARGET_IOC_SIZESHIFT);
7244         }
7245 
7246         /* automatic consistency check if same arch */
7247 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7248     (defined(__x86_64__) && defined(TARGET_X86_64))
7249         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7250             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7251                     ie->name, ie->target_cmd, ie->host_cmd);
7252         }
7253 #endif
7254         ie++;
7255     }
7256 }
7257 
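/*
 * Illustrative sketch, not part of the original syscall.c: the layout of the
 * size field that syscall_init() above patches, using the standard Linux
 * _IOC encoding macros.  The ioctl table entries put an all-ones sentinel in
 * this field when the size has to be computed from the thunk type instead.
 * The struct and ioctl number below are made up purely for illustration.
 */
#include <linux/ioctl.h>
#include <assert.h>

static void show_ioc_size_field(void)
{
    struct example_arg { int a; int b; };
#define EXAMPLE_IOC _IOR('E', 0x01, struct example_arg)

    assert(_IOC_SIZE(EXAMPLE_IOC) == sizeof(struct example_arg));
    assert(_IOC_TYPE(EXAMPLE_IOC) == 'E');
    assert(_IOC_NR(EXAMPLE_IOC) == 0x01);
#undef EXAMPLE_IOC
}
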
7258 #ifdef TARGET_NR_truncate64
7259 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7260                                          abi_long arg2,
7261                                          abi_long arg3,
7262                                          abi_long arg4)
7263 {
7264     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7265         arg2 = arg3;
7266         arg3 = arg4;
7267     }
7268     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7269 }
7270 #endif
7271 
7272 #ifdef TARGET_NR_ftruncate64
7273 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7274                                           abi_long arg2,
7275                                           abi_long arg3,
7276                                           abi_long arg4)
7277 {
7278     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7279         arg2 = arg3;
7280         arg3 = arg4;
7281     }
7282     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7283 }
7284 #endif
7285 
7286 #if defined(TARGET_NR_timer_settime) || \
7287     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7288 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7289                                                  abi_ulong target_addr)
7290 {
7291     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7292                                 offsetof(struct target_itimerspec,
7293                                          it_interval)) ||
7294         target_to_host_timespec(&host_its->it_value, target_addr +
7295                                 offsetof(struct target_itimerspec,
7296                                          it_value))) {
7297         return -TARGET_EFAULT;
7298     }
7299 
7300     return 0;
7301 }
7302 #endif
7303 
7304 #if defined(TARGET_NR_timer_settime64) || \
7305     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7306 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7307                                                    abi_ulong target_addr)
7308 {
7309     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7310                                   offsetof(struct target__kernel_itimerspec,
7311                                            it_interval)) ||
7312         target_to_host_timespec64(&host_its->it_value, target_addr +
7313                                   offsetof(struct target__kernel_itimerspec,
7314                                            it_value))) {
7315         return -TARGET_EFAULT;
7316     }
7317 
7318     return 0;
7319 }
7320 #endif
7321 
7322 #if ((defined(TARGET_NR_timerfd_gettime) || \
7323       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7324       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7325 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7326                                                  struct itimerspec *host_its)
7327 {
7328     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7329                                                        it_interval),
7330                                 &host_its->it_interval) ||
7331         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7332                                                        it_value),
7333                                 &host_its->it_value)) {
7334         return -TARGET_EFAULT;
7335     }
7336     return 0;
7337 }
7338 #endif
7339 
7340 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7341       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7342       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7343 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7344                                                    struct itimerspec *host_its)
7345 {
7346     if (host_to_target_timespec64(target_addr +
7347                                   offsetof(struct target__kernel_itimerspec,
7348                                            it_interval),
7349                                   &host_its->it_interval) ||
7350         host_to_target_timespec64(target_addr +
7351                                   offsetof(struct target__kernel_itimerspec,
7352                                            it_value),
7353                                   &host_its->it_value)) {
7354         return -TARGET_EFAULT;
7355     }
7356     return 0;
7357 }
7358 #endif
7359 
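/*
 * Illustrative sketch, not part of the original syscall.c: the timerfd calls
 * whose struct itimerspec argument the conversion helpers above marshal.
 * Assumes host timerfd support, i.e. <sys/timerfd.h> (CONFIG_TIMERFD).
 */
#include <sys/timerfd.h>
#include <stdint.h>
#include <unistd.h>

static int periodic_tick_example(void)
{
    int fd = timerfd_create(CLOCK_MONOTONIC, 0);
    if (fd < 0) {
        return -1;
    }

    struct itimerspec its = {
        .it_value    = { .tv_sec = 1, .tv_nsec = 0 },  /* first expiry */
        .it_interval = { .tv_sec = 1, .tv_nsec = 0 },  /* then every second */
    };
    timerfd_settime(fd, 0, &its, NULL);

    uint64_t expirations;
    read(fd, &expirations, sizeof(expirations));       /* blocks until expiry */

    close(fd);
    return 0;
}
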
7360 #if defined(TARGET_NR_adjtimex) || \
7361     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7362 static inline abi_long target_to_host_timex(struct timex *host_tx,
7363                                             abi_long target_addr)
7364 {
7365     struct target_timex *target_tx;
7366 
7367     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7368         return -TARGET_EFAULT;
7369     }
7370 
7371     __get_user(host_tx->modes, &target_tx->modes);
7372     __get_user(host_tx->offset, &target_tx->offset);
7373     __get_user(host_tx->freq, &target_tx->freq);
7374     __get_user(host_tx->maxerror, &target_tx->maxerror);
7375     __get_user(host_tx->esterror, &target_tx->esterror);
7376     __get_user(host_tx->status, &target_tx->status);
7377     __get_user(host_tx->constant, &target_tx->constant);
7378     __get_user(host_tx->precision, &target_tx->precision);
7379     __get_user(host_tx->tolerance, &target_tx->tolerance);
7380     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7381     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7382     __get_user(host_tx->tick, &target_tx->tick);
7383     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7384     __get_user(host_tx->jitter, &target_tx->jitter);
7385     __get_user(host_tx->shift, &target_tx->shift);
7386     __get_user(host_tx->stabil, &target_tx->stabil);
7387     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7388     __get_user(host_tx->calcnt, &target_tx->calcnt);
7389     __get_user(host_tx->errcnt, &target_tx->errcnt);
7390     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7391     __get_user(host_tx->tai, &target_tx->tai);
7392 
7393     unlock_user_struct(target_tx, target_addr, 0);
7394     return 0;
7395 }
7396 
7397 static inline abi_long host_to_target_timex(abi_long target_addr,
7398                                             struct timex *host_tx)
7399 {
7400     struct target_timex *target_tx;
7401 
7402     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7403         return -TARGET_EFAULT;
7404     }
7405 
7406     __put_user(host_tx->modes, &target_tx->modes);
7407     __put_user(host_tx->offset, &target_tx->offset);
7408     __put_user(host_tx->freq, &target_tx->freq);
7409     __put_user(host_tx->maxerror, &target_tx->maxerror);
7410     __put_user(host_tx->esterror, &target_tx->esterror);
7411     __put_user(host_tx->status, &target_tx->status);
7412     __put_user(host_tx->constant, &target_tx->constant);
7413     __put_user(host_tx->precision, &target_tx->precision);
7414     __put_user(host_tx->tolerance, &target_tx->tolerance);
7415     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7416     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7417     __put_user(host_tx->tick, &target_tx->tick);
7418     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7419     __put_user(host_tx->jitter, &target_tx->jitter);
7420     __put_user(host_tx->shift, &target_tx->shift);
7421     __put_user(host_tx->stabil, &target_tx->stabil);
7422     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7423     __put_user(host_tx->calcnt, &target_tx->calcnt);
7424     __put_user(host_tx->errcnt, &target_tx->errcnt);
7425     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7426     __put_user(host_tx->tai, &target_tx->tai);
7427 
7428     unlock_user_struct(target_tx, target_addr, 1);
7429     return 0;
7430 }
7431 #endif
7432 
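/*
 * Illustrative sketch, not part of the original syscall.c: a read-only
 * adjtimex() call of the kind the struct timex conversion helpers above
 * exist for.  With modes == 0 the kernel only reports the current clock
 * discipline state, so no privileges are required.
 */
#include <sys/timex.h>
#include <stdio.h>

static void report_clock_state(void)
{
    struct timex tx = { .modes = 0 };   /* query only, change nothing */
    int state = adjtimex(&tx);

    if (state >= 0) {
        /* freq is scaled ppm (16-bit fraction); maxerror is in microseconds. */
        printf("state=%d freq=%ld maxerror=%ld\n",
               state, tx.freq, tx.maxerror);
    }
}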
7433 
7434 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7435 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7436                                               abi_long target_addr)
7437 {
7438     struct target__kernel_timex *target_tx;
7439 
7440     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7441                                  offsetof(struct target__kernel_timex,
7442                                           time))) {
7443         return -TARGET_EFAULT;
7444     }
7445 
7446     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7447         return -TARGET_EFAULT;
7448     }
7449 
7450     __get_user(host_tx->modes, &target_tx->modes);
7451     __get_user(host_tx->offset, &target_tx->offset);
7452     __get_user(host_tx->freq, &target_tx->freq);
7453     __get_user(host_tx->maxerror, &target_tx->maxerror);
7454     __get_user(host_tx->esterror, &target_tx->esterror);
7455     __get_user(host_tx->status, &target_tx->status);
7456     __get_user(host_tx->constant, &target_tx->constant);
7457     __get_user(host_tx->precision, &target_tx->precision);
7458     __get_user(host_tx->tolerance, &target_tx->tolerance);
7459     __get_user(host_tx->tick, &target_tx->tick);
7460     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7461     __get_user(host_tx->jitter, &target_tx->jitter);
7462     __get_user(host_tx->shift, &target_tx->shift);
7463     __get_user(host_tx->stabil, &target_tx->stabil);
7464     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7465     __get_user(host_tx->calcnt, &target_tx->calcnt);
7466     __get_user(host_tx->errcnt, &target_tx->errcnt);
7467     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7468     __get_user(host_tx->tai, &target_tx->tai);
7469 
7470     unlock_user_struct(target_tx, target_addr, 0);
7471     return 0;
7472 }
7473 
7474 static inline abi_long host_to_target_timex64(abi_long target_addr,
7475                                               struct timex *host_tx)
7476 {
7477     struct target__kernel_timex *target_tx;
7478 
7479     if (copy_to_user_timeval64(target_addr +
7480                                offsetof(struct target__kernel_timex, time),
7481                                &host_tx->time)) {
7482         return -TARGET_EFAULT;
7483     }
7484 
7485     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7486         return -TARGET_EFAULT;
7487     }
7488 
7489     __put_user(host_tx->modes, &target_tx->modes);
7490     __put_user(host_tx->offset, &target_tx->offset);
7491     __put_user(host_tx->freq, &target_tx->freq);
7492     __put_user(host_tx->maxerror, &target_tx->maxerror);
7493     __put_user(host_tx->esterror, &target_tx->esterror);
7494     __put_user(host_tx->status, &target_tx->status);
7495     __put_user(host_tx->constant, &target_tx->constant);
7496     __put_user(host_tx->precision, &target_tx->precision);
7497     __put_user(host_tx->tolerance, &target_tx->tolerance);
7498     __put_user(host_tx->tick, &target_tx->tick);
7499     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7500     __put_user(host_tx->jitter, &target_tx->jitter);
7501     __put_user(host_tx->shift, &target_tx->shift);
7502     __put_user(host_tx->stabil, &target_tx->stabil);
7503     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7504     __put_user(host_tx->calcnt, &target_tx->calcnt);
7505     __put_user(host_tx->errcnt, &target_tx->errcnt);
7506     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7507     __put_user(host_tx->tai, &target_tx->tai);
7508 
7509     unlock_user_struct(target_tx, target_addr, 1);
7510     return 0;
7511 }
7512 #endif
7513 
7514 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7515 #define sigev_notify_thread_id _sigev_un._tid
7516 #endif
7517 
7518 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7519                                                abi_ulong target_addr)
7520 {
7521     struct target_sigevent *target_sevp;
7522 
7523     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7524         return -TARGET_EFAULT;
7525     }
7526 
7527     /* This union is awkward on 64 bit systems because it has a 32 bit
7528      * integer and a pointer in it; we follow the conversion approach
7529      * used for handling sigval types in signal.c so the guest should get
7530      * the correct value back even if we did a 64 bit byteswap and it's
7531      * using the 32 bit integer.
7532      */
7533     host_sevp->sigev_value.sival_ptr =
7534         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7535     host_sevp->sigev_signo =
7536         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7537     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7538     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7539 
7540     unlock_user_struct(target_sevp, target_addr, 1);
7541     return 0;
7542 }
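/*
 * Illustrative sketch, not part of the original source: for a 32-bit guest,
 * abi_ulong is 32 bits wide, so the tswapal() above swaps exactly the bytes
 * the guest wrote.  A guest sigev_value of 0x12345678 therefore arrives on
 * the host as sival_ptr == (void *)0x12345678, and on a little-endian host
 * the low 32 bits read back as sival_int == 0x12345678 as well, so the
 * value survives regardless of which union member the guest actually meant.
 */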
7543 
7544 #if defined(TARGET_NR_mlockall)
7545 static inline int target_to_host_mlockall_arg(int arg)
7546 {
7547     int result = 0;
7548 
7549     if (arg & TARGET_MCL_CURRENT) {
7550         result |= MCL_CURRENT;
7551     }
7552     if (arg & TARGET_MCL_FUTURE) {
7553         result |= MCL_FUTURE;
7554     }
7555 #ifdef MCL_ONFAULT
7556     if (arg & TARGET_MCL_ONFAULT) {
7557         result |= MCL_ONFAULT;
7558     }
7559 #endif
7560 
7561     return result;
7562 }
7563 #endif
7564 
7565 static inline int target_to_host_msync_arg(abi_long arg)
7566 {
7567     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7568            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7569            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7570            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7571 }
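/*
 * Illustrative sketch, not part of the original source: because unknown bits
 * are passed through unchanged, a guest request of (TARGET_MS_SYNC | 0x80)
 * becomes (MS_SYNC | 0x80) on the host, and it is the host msync() that
 * rejects the bogus 0x80 bit with EINVAL.  A well-formed request simply
 * remaps, e.g.:
 *
 *     target_to_host_msync_arg(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE)
 *         == MS_ASYNC | MS_INVALIDATE
 */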
7572 
7573 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7574      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7575      defined(TARGET_NR_newfstatat))
7576 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7577                                              abi_ulong target_addr,
7578                                              struct stat *host_st)
7579 {
7580 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7581     if (cpu_env->eabi) {
7582         struct target_eabi_stat64 *target_st;
7583 
7584         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7585             return -TARGET_EFAULT;
7586         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7587         __put_user(host_st->st_dev, &target_st->st_dev);
7588         __put_user(host_st->st_ino, &target_st->st_ino);
7589 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7590         __put_user(host_st->st_ino, &target_st->__st_ino);
7591 #endif
7592         __put_user(host_st->st_mode, &target_st->st_mode);
7593         __put_user(host_st->st_nlink, &target_st->st_nlink);
7594         __put_user(host_st->st_uid, &target_st->st_uid);
7595         __put_user(host_st->st_gid, &target_st->st_gid);
7596         __put_user(host_st->st_rdev, &target_st->st_rdev);
7597         __put_user(host_st->st_size, &target_st->st_size);
7598         __put_user(host_st->st_blksize, &target_st->st_blksize);
7599         __put_user(host_st->st_blocks, &target_st->st_blocks);
7600         __put_user(host_st->st_atime, &target_st->target_st_atime);
7601         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7602         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7603 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7604         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7605         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7606         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7607 #endif
7608         unlock_user_struct(target_st, target_addr, 1);
7609     } else
7610 #endif
7611     {
7612 #if defined(TARGET_HAS_STRUCT_STAT64)
7613         struct target_stat64 *target_st;
7614 #else
7615         struct target_stat *target_st;
7616 #endif
7617 
7618         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7619             return -TARGET_EFAULT;
7620         memset(target_st, 0, sizeof(*target_st));
7621         __put_user(host_st->st_dev, &target_st->st_dev);
7622         __put_user(host_st->st_ino, &target_st->st_ino);
7623 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7624         __put_user(host_st->st_ino, &target_st->__st_ino);
7625 #endif
7626         __put_user(host_st->st_mode, &target_st->st_mode);
7627         __put_user(host_st->st_nlink, &target_st->st_nlink);
7628         __put_user(host_st->st_uid, &target_st->st_uid);
7629         __put_user(host_st->st_gid, &target_st->st_gid);
7630         __put_user(host_st->st_rdev, &target_st->st_rdev);
7631         /* XXX: better use of kernel struct */
7632         __put_user(host_st->st_size, &target_st->st_size);
7633         __put_user(host_st->st_blksize, &target_st->st_blksize);
7634         __put_user(host_st->st_blocks, &target_st->st_blocks);
7635         __put_user(host_st->st_atime, &target_st->target_st_atime);
7636         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7637         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7638 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7639         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7640         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7641         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7642 #endif
7643         unlock_user_struct(target_st, target_addr, 1);
7644     }
7645 
7646     return 0;
7647 }
7648 #endif
7649 
7650 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7651 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7652                                             abi_ulong target_addr)
7653 {
7654     struct target_statx *target_stx;
7655 
7656     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7657         return -TARGET_EFAULT;
7658     }
7659     memset(target_stx, 0, sizeof(*target_stx));
7660 
7661     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7662     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7663     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7664     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7665     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7666     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7667     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7668     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7669     __put_user(host_stx->stx_size, &target_stx->stx_size);
7670     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7671     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7672     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7673     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7674     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7675     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7676     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7677     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7678     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7679     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7680     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7681     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7682     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7683     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7684 
7685     unlock_user_struct(target_stx, target_addr, 1);
7686 
7687     return 0;
7688 }
7689 #endif
7690 
7691 static int do_sys_futex(int *uaddr, int op, int val,
7692                          const struct timespec *timeout, int *uaddr2,
7693                          int val3)
7694 {
7695 #if HOST_LONG_BITS == 64
7696 #if defined(__NR_futex)
7697     /* The host time_t is always 64-bit, so no _time64 variant is defined. */
7698     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7699 
7700 #endif
7701 #else /* HOST_LONG_BITS == 64 */
7702 #if defined(__NR_futex_time64)
7703     if (sizeof(timeout->tv_sec) == 8) {
7704         /* _time64 function on 32bit arch */
7705         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7706     }
7707 #endif
7708 #if defined(__NR_futex)
7709     /* old function on 32bit arch */
7710     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7711 #endif
7712 #endif /* HOST_LONG_BITS == 64 */
7713     g_assert_not_reached();
7714 }
7715 
7716 static int do_safe_futex(int *uaddr, int op, int val,
7717                          const struct timespec *timeout, int *uaddr2,
7718                          int val3)
7719 {
7720 #if HOST_LONG_BITS == 64
7721 #if defined(__NR_futex)
7722     /* The host time_t is always 64-bit, so no _time64 variant is defined. */
7723     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7724 #endif
7725 #else /* HOST_LONG_BITS == 64 */
7726 #if defined(__NR_futex_time64)
7727     if (sizeof(timeout->tv_sec) == 8) {
7728         /* _time64 function on 32bit arch */
7729         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7730                                            val3));
7731     }
7732 #endif
7733 #if defined(__NR_futex)
7734     /* old function on 32bit arch */
7735     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7736 #endif
7737 #endif /* HOST_LONG_BITS == 64 */
7738     return -TARGET_ENOSYS;
7739 }
7740 
7741 /* ??? Using host futex calls even when target atomic operations
7742    are not really atomic probably breaks things.  However, implementing
7743    futexes locally would make it hard to share futexes between multiple
7744    processes, and shared futexes are probably useless anyway because
7745    guest atomic operations won't work either.  */
7746 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7747 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7748                     int op, int val, target_ulong timeout,
7749                     target_ulong uaddr2, int val3)
7750 {
7751     struct timespec ts, *pts = NULL;
7752     void *haddr2 = NULL;
7753     int base_op;
7754 
7755     /* We assume FUTEX_* constants are the same on both host and target. */
7756 #ifdef FUTEX_CMD_MASK
7757     base_op = op & FUTEX_CMD_MASK;
7758 #else
7759     base_op = op;
7760 #endif
7761     switch (base_op) {
7762     case FUTEX_WAIT:
7763     case FUTEX_WAIT_BITSET:
7764         val = tswap32(val);
7765         break;
7766     case FUTEX_WAIT_REQUEUE_PI:
7767         val = tswap32(val);
7768         haddr2 = g2h(cpu, uaddr2);
7769         break;
7770     case FUTEX_LOCK_PI:
7771     case FUTEX_LOCK_PI2:
7772         break;
7773     case FUTEX_WAKE:
7774     case FUTEX_WAKE_BITSET:
7775     case FUTEX_TRYLOCK_PI:
7776     case FUTEX_UNLOCK_PI:
7777         timeout = 0;
7778         break;
7779     case FUTEX_FD:
7780         val = target_to_host_signal(val);
7781         timeout = 0;
7782         break;
7783     case FUTEX_CMP_REQUEUE:
7784     case FUTEX_CMP_REQUEUE_PI:
7785         val3 = tswap32(val3);
7786         /* fall through */
7787     case FUTEX_REQUEUE:
7788     case FUTEX_WAKE_OP:
7789         /*
7790          * For these, the 4th argument is not TIMEOUT, but VAL2.
7791          * But the prototype of do_safe_futex takes a pointer, so
7792          * insert casts to satisfy the compiler.  We do not need
7793          * to tswap VAL2 since it's not compared to guest memory.
7794          */
7795         pts = (struct timespec *)(uintptr_t)timeout;
7796         timeout = 0;
7797         haddr2 = g2h(cpu, uaddr2);
7798         break;
7799     default:
7800         return -TARGET_ENOSYS;
7801     }
7802     if (timeout) {
7803         pts = &ts;
7804         if (time64
7805             ? target_to_host_timespec64(pts, timeout)
7806             : target_to_host_timespec(pts, timeout)) {
7807             return -TARGET_EFAULT;
7808         }
7809     }
7810     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7811 }
7812 #endif
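/*
 * Illustrative sketch, not part of the original source: for FUTEX_WAKE_OP
 * the guest's 4th argument is a plain count (VAL2), not a timespec pointer,
 * so do_futex() smuggles it through the timeout parameter as an integer
 * cast to a pointer.  Roughly, a guest call such as
 *
 *     futex(uaddr, FUTEX_WAKE_OP, 1, 4, uaddr2, val3);
 *
 * reaches the host as
 *
 *     safe_futex(g2h(uaddr), FUTEX_WAKE_OP, 1,
 *                (struct timespec *)(uintptr_t)4, g2h(uaddr2), val3);
 *
 * with no tswap applied to the count, as the in-function comment describes.
 */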
7813 
7814 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7815 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7816                                      abi_long handle, abi_long mount_id,
7817                                      abi_long flags)
7818 {
7819     struct file_handle *target_fh;
7820     struct file_handle *fh;
7821     int mid = 0;
7822     abi_long ret;
7823     char *name;
7824     unsigned int size, total_size;
7825 
7826     if (get_user_s32(size, handle)) {
7827         return -TARGET_EFAULT;
7828     }
7829 
7830     name = lock_user_string(pathname);
7831     if (!name) {
7832         return -TARGET_EFAULT;
7833     }
7834 
7835     total_size = sizeof(struct file_handle) + size;
7836     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7837     if (!target_fh) {
7838         unlock_user(name, pathname, 0);
7839         return -TARGET_EFAULT;
7840     }
7841 
7842     fh = g_malloc0(total_size);
7843     fh->handle_bytes = size;
7844 
7845     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7846     unlock_user(name, pathname, 0);
7847 
7848     /* man name_to_handle_at(2):
7849      * Other than the use of the handle_bytes field, the caller should treat
7850      * the file_handle structure as an opaque data type
7851      */
7852 
7853     memcpy(target_fh, fh, total_size);
7854     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7855     target_fh->handle_type = tswap32(fh->handle_type);
7856     g_free(fh);
7857     unlock_user(target_fh, handle, total_size);
7858 
7859     if (put_user_s32(mid, mount_id)) {
7860         return -TARGET_EFAULT;
7861     }
7862 
7863     return ret;
7864 
7865 }
7866 #endif
7867 
7868 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7869 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7870                                      abi_long flags)
7871 {
7872     struct file_handle *target_fh;
7873     struct file_handle *fh;
7874     unsigned int size, total_size;
7875     abi_long ret;
7876 
7877     if (get_user_s32(size, handle)) {
7878         return -TARGET_EFAULT;
7879     }
7880 
7881     total_size = sizeof(struct file_handle) + size;
7882     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7883     if (!target_fh) {
7884         return -TARGET_EFAULT;
7885     }
7886 
7887     fh = g_memdup(target_fh, total_size);
7888     fh->handle_bytes = size;
7889     fh->handle_type = tswap32(target_fh->handle_type);
7890 
7891     ret = get_errno(open_by_handle_at(mount_fd, fh,
7892                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7893 
7894     g_free(fh);
7895 
7896     unlock_user(target_fh, handle, total_size);
7897 
7898     return ret;
7899 }
7900 #endif
7901 
7902 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7903 
7904 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7905 {
7906     int host_flags;
7907     target_sigset_t *target_mask;
7908     sigset_t host_mask;
7909     abi_long ret;
7910 
7911     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7912         return -TARGET_EINVAL;
7913     }
7914     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7915         return -TARGET_EFAULT;
7916     }
7917 
7918     target_to_host_sigset(&host_mask, target_mask);
7919 
7920     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7921 
7922     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7923     if (ret >= 0) {
7924         fd_trans_register(ret, &target_signalfd_trans);
7925     }
7926 
7927     unlock_user_struct(target_mask, mask, 0);
7928 
7929     return ret;
7930 }
7931 #endif
7932 
7933 /* Map host to target signal numbers for the wait family of syscalls.
7934    Assume all other status bits are the same.  */
7935 int host_to_target_waitstatus(int status)
7936 {
7937     if (WIFSIGNALED(status)) {
7938         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7939     }
7940     if (WIFSTOPPED(status)) {
7941         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7942                | (status & 0xff);
7943     }
7944     return status;
7945 }
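/*
 * Illustrative sketch, not part of the original source: the classic wait
 * status layout keeps the terminating signal in the low 7 bits and a stop
 * signal in bits 8-15, so only those fields need renumbering.  Assuming
 * host and target both use 11 for SIGSEGV and 19 for SIGSTOP:
 *
 *     0x0000008b  (killed by SIGSEGV, core dumped)
 *         -> host_to_target_signal(11) | 0x80        == 0x8b
 *     0x0000137f  (stopped by SIGSTOP)
 *         -> (host_to_target_signal(19) << 8) | 0x7f == 0x137f
 */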
7946 
7947 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7948 {
7949     CPUState *cpu = env_cpu(cpu_env);
7950     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7951     int i;
7952 
7953     for (i = 0; i < bprm->argc; i++) {
7954         size_t len = strlen(bprm->argv[i]) + 1;
7955 
7956         if (write(fd, bprm->argv[i], len) != len) {
7957             return -1;
7958         }
7959     }
7960 
7961     return 0;
7962 }
7963 
7964 struct open_self_maps_data {
7965     TaskState *ts;
7966     IntervalTreeRoot *host_maps;
7967     int fd;
7968     bool smaps;
7969 };
7970 
7971 /*
7972  * Subroutine to output one line of /proc/self/maps,
7973  * or one region of /proc/self/smaps.
7974  */
7975 
7976 #ifdef TARGET_HPPA
7977 # define test_stack(S, E, L)  (E == L)
7978 #else
7979 # define test_stack(S, E, L)  (S == L)
7980 #endif
7981 
7982 static void open_self_maps_4(const struct open_self_maps_data *d,
7983                              const MapInfo *mi, abi_ptr start,
7984                              abi_ptr end, unsigned flags)
7985 {
7986     const struct image_info *info = d->ts->info;
7987     const char *path = mi->path;
7988     uint64_t offset;
7989     int fd = d->fd;
7990     int count;
7991 
7992     if (test_stack(start, end, info->stack_limit)) {
7993         path = "[stack]";
7994     } else if (start == info->brk) {
7995         path = "[heap]";
7996     } else if (start == info->vdso) {
7997         path = "[vdso]";
7998 #ifdef TARGET_X86_64
7999     } else if (start == TARGET_VSYSCALL_PAGE) {
8000         path = "[vsyscall]";
8001 #endif
8002     }
8003 
8004     /* Except for the null device (MAP_ANON), adjust offset for this fragment. */
8005     offset = mi->offset;
8006     if (mi->dev) {
8007         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8008         offset += hstart - mi->itree.start;
8009     }
8010 
8011     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8012                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8013                     start, end,
8014                     (flags & PAGE_READ) ? 'r' : '-',
8015                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8016                     (flags & PAGE_EXEC) ? 'x' : '-',
8017                     mi->is_priv ? 'p' : 's',
8018                     offset, major(mi->dev), minor(mi->dev),
8019                     (uint64_t)mi->inode);
8020     if (path) {
8021         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8022     } else {
8023         dprintf(fd, "\n");
8024     }
8025 
8026     if (d->smaps) {
8027         unsigned long size = end - start;
8028         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8029         unsigned long size_kb = size >> 10;
8030 
8031         dprintf(fd, "Size:                  %lu kB\n"
8032                 "KernelPageSize:        %lu kB\n"
8033                 "MMUPageSize:           %lu kB\n"
8034                 "Rss:                   0 kB\n"
8035                 "Pss:                   0 kB\n"
8036                 "Pss_Dirty:             0 kB\n"
8037                 "Shared_Clean:          0 kB\n"
8038                 "Shared_Dirty:          0 kB\n"
8039                 "Private_Clean:         0 kB\n"
8040                 "Private_Dirty:         0 kB\n"
8041                 "Referenced:            0 kB\n"
8042                 "Anonymous:             %lu kB\n"
8043                 "LazyFree:              0 kB\n"
8044                 "AnonHugePages:         0 kB\n"
8045                 "ShmemPmdMapped:        0 kB\n"
8046                 "FilePmdMapped:         0 kB\n"
8047                 "Shared_Hugetlb:        0 kB\n"
8048                 "Private_Hugetlb:       0 kB\n"
8049                 "Swap:                  0 kB\n"
8050                 "SwapPss:               0 kB\n"
8051                 "Locked:                0 kB\n"
8052                 "THPeligible:    0\n"
8053                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8054                 size_kb, page_size_kb, page_size_kb,
8055                 (flags & PAGE_ANON ? size_kb : 0),
8056                 (flags & PAGE_READ) ? " rd" : "",
8057                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8058                 (flags & PAGE_EXEC) ? " ex" : "",
8059                 mi->is_priv ? "" : " sh",
8060                 (flags & PAGE_READ) ? " mr" : "",
8061                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8062                 (flags & PAGE_EXEC) ? " me" : "",
8063                 mi->is_priv ? "" : " ms");
8064     }
8065 }
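/*
 * Illustrative sketch, not part of the original source: with the format
 * above, a private read/exec mapping of a guest binary comes out much like
 * a native /proc/<pid>/maps line, e.g. (padding between the inode and the
 * path elided here):
 *
 *     00400000-0049d000 r-xp 00000000 08:01 1234567  /usr/bin/guest-app
 *
 * i.e. start-end, permissions, file offset, device major:minor, inode,
 * then the pathname padded so that it begins after column 73.
 */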
8066 
8067 /*
8068  * Callback for walk_memory_regions, when read_self_maps() fails.
8069  * Proceed without the benefit of host /proc/self/maps cross-check.
8070  */
8071 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8072                             target_ulong guest_end, unsigned long flags)
8073 {
8074     static const MapInfo mi = { .is_priv = true };
8075 
8076     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8077     return 0;
8078 }
8079 
8080 /*
8081  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8082  */
8083 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8084                             target_ulong guest_end, unsigned long flags)
8085 {
8086     const struct open_self_maps_data *d = opaque;
8087     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8088     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8089 
8090 #ifdef TARGET_X86_64
8091     /*
8092      * Because of the extremely high position of the page within the guest
8093      * virtual address space, this is not backed by host memory at all.
8094      * Therefore the loop below would fail.  This is the only instance
8095      * of not having host backing memory.
8096      */
8097     if (guest_start == TARGET_VSYSCALL_PAGE) {
8098         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8099     }
8100 #endif
8101 
8102     while (1) {
8103         IntervalTreeNode *n =
8104             interval_tree_iter_first(d->host_maps, host_start, host_start);
8105         MapInfo *mi = container_of(n, MapInfo, itree);
8106         uintptr_t this_hlast = MIN(host_last, n->last);
8107         target_ulong this_gend = h2g(this_hlast) + 1;
8108 
8109         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8110 
8111         if (this_hlast == host_last) {
8112             return 0;
8113         }
8114         host_start = this_hlast + 1;
8115         guest_start = h2g(host_start);
8116     }
8117 }
8118 
8119 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8120 {
8121     struct open_self_maps_data d = {
8122         .ts = get_task_state(env_cpu(env)),
8123         .host_maps = read_self_maps(),
8124         .fd = fd,
8125         .smaps = smaps
8126     };
8127 
8128     if (d.host_maps) {
8129         walk_memory_regions(&d, open_self_maps_2);
8130         free_self_maps(d.host_maps);
8131     } else {
8132         walk_memory_regions(&d, open_self_maps_3);
8133     }
8134     return 0;
8135 }
8136 
8137 static int open_self_maps(CPUArchState *cpu_env, int fd)
8138 {
8139     return open_self_maps_1(cpu_env, fd, false);
8140 }
8141 
8142 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8143 {
8144     return open_self_maps_1(cpu_env, fd, true);
8145 }
8146 
8147 static int open_self_stat(CPUArchState *cpu_env, int fd)
8148 {
8149     CPUState *cpu = env_cpu(cpu_env);
8150     TaskState *ts = get_task_state(cpu);
8151     g_autoptr(GString) buf = g_string_new(NULL);
8152     int i;
8153 
8154     for (i = 0; i < 44; i++) {
8155         if (i == 0) {
8156             /* pid */
8157             g_string_printf(buf, FMT_pid " ", getpid());
8158         } else if (i == 1) {
8159             /* app name */
8160             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8161             bin = bin ? bin + 1 : ts->bprm->argv[0];
8162             g_string_printf(buf, "(%.15s) ", bin);
8163         } else if (i == 2) {
8164             /* task state */
8165             g_string_assign(buf, "R "); /* we are running right now */
8166         } else if (i == 3) {
8167             /* ppid */
8168             g_string_printf(buf, FMT_pid " ", getppid());
8169         } else if (i == 19) {
8170             /* num_threads */
8171             int cpus = 0;
8172             WITH_RCU_READ_LOCK_GUARD() {
8173                 CPUState *cpu_iter;
8174                 CPU_FOREACH(cpu_iter) {
8175                     cpus++;
8176                 }
8177             }
8178             g_string_printf(buf, "%d ", cpus);
8179         } else if (i == 21) {
8180             /* starttime */
8181             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8182         } else if (i == 27) {
8183             /* stack bottom */
8184             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8185         } else {
8186             /* for the rest, there is MasterCard */
8187             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8188         }
8189 
8190         if (write(fd, buf->str, buf->len) != buf->len) {
8191             return -1;
8192         }
8193     }
8194 
8195     return 0;
8196 }
8197 
8198 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8199 {
8200     CPUState *cpu = env_cpu(cpu_env);
8201     TaskState *ts = get_task_state(cpu);
8202     abi_ulong auxv = ts->info->saved_auxv;
8203     abi_ulong len = ts->info->auxv_len;
8204     char *ptr;
8205 
8206     /*
8207      * The auxiliary vector is stored on the target process stack;
8208      * read in the whole auxv vector and copy it to the file.
8209      */
8210     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8211     if (ptr != NULL) {
8212         while (len > 0) {
8213             ssize_t r;
8214             r = write(fd, ptr, len);
8215             if (r <= 0) {
8216                 break;
8217             }
8218             len -= r;
8219             ptr += r;
8220         }
8221         lseek(fd, 0, SEEK_SET);
8222         unlock_user(ptr, auxv, len);
8223     }
8224 
8225     return 0;
8226 }
8227 
8228 static int is_proc_myself(const char *filename, const char *entry)
8229 {
8230     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8231         filename += strlen("/proc/");
8232         if (!strncmp(filename, "self/", strlen("self/"))) {
8233             filename += strlen("self/");
8234         } else if (*filename >= '1' && *filename <= '9') {
8235             char myself[80];
8236             snprintf(myself, sizeof(myself), "%d/", getpid());
8237             if (!strncmp(filename, myself, strlen(myself))) {
8238                 filename += strlen(myself);
8239             } else {
8240                 return 0;
8241             }
8242         } else {
8243             return 0;
8244         }
8245         if (!strcmp(filename, entry)) {
8246             return 1;
8247         }
8248     }
8249     return 0;
8250 }
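/*
 * Illustrative sketch, not part of the original source: assuming the
 * emulator's own pid is 1234,
 *
 *     is_proc_myself("/proc/self/exe", "exe")  -> 1
 *     is_proc_myself("/proc/1234/exe", "exe")  -> 1
 *     is_proc_myself("/proc/4321/exe", "exe")  -> 0   (someone else's pid)
 *     is_proc_myself("/proc/self/maps", "exe") -> 0   (wrong entry)
 */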
8251 
8252 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8253                       const char *fmt, int code)
8254 {
8255     if (logfile) {
8256         CPUState *cs = env_cpu(env);
8257 
8258         fprintf(logfile, fmt, code);
8259         fprintf(logfile, "Failing executable: %s\n", exec_path);
8260         cpu_dump_state(cs, logfile, 0);
8261         open_self_maps(env, fileno(logfile));
8262     }
8263 }
8264 
8265 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8266 {
8267     /* dump to console */
8268     excp_dump_file(stderr, env, fmt, code);
8269 
8270     /* dump to log file */
8271     if (qemu_log_separate()) {
8272         FILE *logfile = qemu_log_trylock();
8273 
8274         excp_dump_file(logfile, env, fmt, code);
8275         qemu_log_unlock(logfile);
8276     }
8277 }
8278 
8279 #include "target_proc.h"
8280 
8281 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8282     defined(HAVE_ARCH_PROC_CPUINFO) || \
8283     defined(HAVE_ARCH_PROC_HARDWARE)
8284 static int is_proc(const char *filename, const char *entry)
8285 {
8286     return strcmp(filename, entry) == 0;
8287 }
8288 #endif
8289 
8290 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8291 static int open_net_route(CPUArchState *cpu_env, int fd)
8292 {
8293     FILE *fp;
8294     char *line = NULL;
8295     size_t len = 0;
8296     ssize_t read;
8297 
8298     fp = fopen("/proc/net/route", "r");
8299     if (fp == NULL) {
8300         return -1;
8301     }
8302 
8303     /* read header */
8304 
8305     read = getline(&line, &len, fp);
8306     dprintf(fd, "%s", line);
8307 
8308     /* read routes */
8309 
8310     while ((read = getline(&line, &len, fp)) != -1) {
8311         char iface[16];
8312         uint32_t dest, gw, mask;
8313         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8314         int fields;
8315 
8316         fields = sscanf(line,
8317                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8318                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8319                         &mask, &mtu, &window, &irtt);
8320         if (fields != 11) {
8321             continue;
8322         }
8323         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8324                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8325                 metric, tswap32(mask), mtu, window, irtt);
8326     }
8327 
8328     free(line);
8329     fclose(fp);
8330 
8331     return 0;
8332 }
8333 #endif
8334 
8335 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
8336                     int flags, mode_t mode, bool safe)
8337 {
8338     g_autofree char *proc_name = NULL;
8339     const char *pathname;
8340     struct fake_open {
8341         const char *filename;
8342         int (*fill)(CPUArchState *cpu_env, int fd);
8343         int (*cmp)(const char *s1, const char *s2);
8344     };
8345     const struct fake_open *fake_open;
8346     static const struct fake_open fakes[] = {
8347         { "maps", open_self_maps, is_proc_myself },
8348         { "smaps", open_self_smaps, is_proc_myself },
8349         { "stat", open_self_stat, is_proc_myself },
8350         { "auxv", open_self_auxv, is_proc_myself },
8351         { "cmdline", open_self_cmdline, is_proc_myself },
8352 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8353         { "/proc/net/route", open_net_route, is_proc },
8354 #endif
8355 #if defined(HAVE_ARCH_PROC_CPUINFO)
8356         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8357 #endif
8358 #if defined(HAVE_ARCH_PROC_HARDWARE)
8359         { "/proc/hardware", open_hardware, is_proc },
8360 #endif
8361         { NULL, NULL, NULL }
8362     };
8363 
8364     /* if this is a file from the /proc/ filesystem, expand to the full name */
8365     proc_name = realpath(fname, NULL);
8366     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8367         pathname = proc_name;
8368     } else {
8369         pathname = fname;
8370     }
8371 
8372     if (is_proc_myself(pathname, "exe")) {
8373         if (safe) {
8374             return safe_openat(dirfd, exec_path, flags, mode);
8375         } else {
8376             return openat(dirfd, exec_path, flags, mode);
8377         }
8378     }
8379 
8380     for (fake_open = fakes; fake_open->filename; fake_open++) {
8381         if (fake_open->cmp(pathname, fake_open->filename)) {
8382             break;
8383         }
8384     }
8385 
8386     if (fake_open->filename) {
8387         const char *tmpdir;
8388         char filename[PATH_MAX];
8389         int fd, r;
8390 
8391         fd = memfd_create("qemu-open", 0);
8392         if (fd < 0) {
8393             if (errno != ENOSYS) {
8394                 return fd;
8395             }
8396             /* create temporary file to map stat to */
8397             tmpdir = getenv("TMPDIR");
8398             if (!tmpdir)
8399                 tmpdir = "/tmp";
8400             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8401             fd = mkstemp(filename);
8402             if (fd < 0) {
8403                 return fd;
8404             }
8405             unlink(filename);
8406         }
8407 
8408         if ((r = fake_open->fill(cpu_env, fd))) {
8409             int e = errno;
8410             close(fd);
8411             errno = e;
8412             return r;
8413         }
8414         lseek(fd, 0, SEEK_SET);
8415 
8416         return fd;
8417     }
8418 
8419     if (safe) {
8420         return safe_openat(dirfd, path(pathname), flags, mode);
8421     } else {
8422         return openat(dirfd, path(pathname), flags, mode);
8423     }
8424 }
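/*
 * Illustrative sketch, not part of the original source: a guest
 * open("/proc/self/maps", O_RDONLY) never reaches the host /proc at all.
 * It matches the "maps" entry of the fake_open table above, so the code
 * creates an anonymous memfd (falling back to an unlinked mkstemp file),
 * fills it via open_self_maps(), rewinds it with lseek(), and returns that
 * descriptor to the guest as the result of the open.
 */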
8425 
8426 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8427 {
8428     ssize_t ret;
8429 
8430     if (!pathname || !buf) {
8431         errno = EFAULT;
8432         return -1;
8433     }
8434 
8435     if (!bufsiz) {
8436         /* Short circuit this for the magic exe check. */
8437         errno = EINVAL;
8438         return -1;
8439     }
8440 
8441     if (is_proc_myself((const char *)pathname, "exe")) {
8442         /*
8443          * Don't worry about sign mismatch as earlier mapping
8444          * logic would have thrown a bad address error.
8445          */
8446         ret = MIN(strlen(exec_path), bufsiz);
8447         /* We cannot NUL terminate the string. */
8448         memcpy(buf, exec_path, ret);
8449     } else {
8450         ret = readlink(path(pathname), buf, bufsiz);
8451     }
8452 
8453     return ret;
8454 }
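/*
 * Illustrative sketch, not part of the original source: if exec_path were
 * "/usr/bin/guest-app" (18 bytes), then
 *
 *     do_guest_readlink("/proc/self/exe", buf, 64)  -> 18
 *     do_guest_readlink("/proc/self/exe", buf, 10)  -> 10 (silently truncated)
 *
 * and in neither case is buf NUL-terminated, matching readlink(2) semantics.
 */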
8455 
8456 static int do_execv(CPUArchState *cpu_env, int dirfd,
8457                     abi_long pathname, abi_long guest_argp,
8458                     abi_long guest_envp, int flags, bool is_execveat)
8459 {
8460     int ret;
8461     char **argp, **envp;
8462     int argc, envc;
8463     abi_ulong gp;
8464     abi_ulong addr;
8465     char **q;
8466     void *p;
8467 
8468     argc = 0;
8469 
8470     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8471         if (get_user_ual(addr, gp)) {
8472             return -TARGET_EFAULT;
8473         }
8474         if (!addr) {
8475             break;
8476         }
8477         argc++;
8478     }
8479     envc = 0;
8480     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8481         if (get_user_ual(addr, gp)) {
8482             return -TARGET_EFAULT;
8483         }
8484         if (!addr) {
8485             break;
8486         }
8487         envc++;
8488     }
8489 
8490     argp = g_new0(char *, argc + 1);
8491     envp = g_new0(char *, envc + 1);
8492 
8493     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8494         if (get_user_ual(addr, gp)) {
8495             goto execve_efault;
8496         }
8497         if (!addr) {
8498             break;
8499         }
8500         *q = lock_user_string(addr);
8501         if (!*q) {
8502             goto execve_efault;
8503         }
8504     }
8505     *q = NULL;
8506 
8507     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8508         if (get_user_ual(addr, gp)) {
8509             goto execve_efault;
8510         }
8511         if (!addr) {
8512             break;
8513         }
8514         *q = lock_user_string(addr);
8515         if (!*q) {
8516             goto execve_efault;
8517         }
8518     }
8519     *q = NULL;
8520 
8521     /*
8522      * Although execve() is not an interruptible syscall it is
8523      * a special case where we must use the safe_syscall wrapper:
8524      * if we allow a signal to happen before we make the host
8525      * syscall then we will 'lose' it, because at the point of
8526      * execve the process leaves QEMU's control. So we use the
8527      * safe syscall wrapper to ensure that we either take the
8528      * signal as a guest signal, or else it does not happen
8529      * before the execve completes and makes it the other
8530      * program's problem.
8531      */
8532     p = lock_user_string(pathname);
8533     if (!p) {
8534         goto execve_efault;
8535     }
8536 
8537     const char *exe = p;
8538     if (is_proc_myself(p, "exe")) {
8539         exe = exec_path;
8540     }
8541     ret = is_execveat
8542         ? safe_execveat(dirfd, exe, argp, envp, flags)
8543         : safe_execve(exe, argp, envp);
8544     ret = get_errno(ret);
8545 
8546     unlock_user(p, pathname, 0);
8547 
8548     goto execve_end;
8549 
8550 execve_efault:
8551     ret = -TARGET_EFAULT;
8552 
8553 execve_end:
8554     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8555         if (get_user_ual(addr, gp) || !addr) {
8556             break;
8557         }
8558         unlock_user(*q, addr, 0);
8559     }
8560     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8561         if (get_user_ual(addr, gp) || !addr) {
8562             break;
8563         }
8564         unlock_user(*q, addr, 0);
8565     }
8566 
8567     g_free(argp);
8568     g_free(envp);
8569     return ret;
8570 }
8571 
8572 #define TIMER_MAGIC 0x0caf0000
8573 #define TIMER_MAGIC_MASK 0xffff0000
8574 
8575 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8576 static target_timer_t get_timer_id(abi_long arg)
8577 {
8578     target_timer_t timerid = arg;
8579 
8580     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8581         return -TARGET_EINVAL;
8582     }
8583 
8584     timerid &= 0xffff;
8585 
8586     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8587         return -TARGET_EINVAL;
8588     }
8589 
8590     return timerid;
8591 }
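/*
 * Illustrative sketch, not part of the original source: guest-visible timer
 * IDs are the g_posix_timers slot index with TIMER_MAGIC in the upper
 * 16 bits, so (assuming the table has at least four slots and far fewer
 * than 0x10000):
 *
 *     get_timer_id(0x0caf0003) ->  3
 *     get_timer_id(0x00000003) -> -TARGET_EINVAL   (magic missing)
 *     get_timer_id(0x0caf9999) -> -TARGET_EINVAL   (index out of range)
 */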
8592 
8593 static int target_to_host_cpu_mask(unsigned long *host_mask,
8594                                    size_t host_size,
8595                                    abi_ulong target_addr,
8596                                    size_t target_size)
8597 {
8598     unsigned target_bits = sizeof(abi_ulong) * 8;
8599     unsigned host_bits = sizeof(*host_mask) * 8;
8600     abi_ulong *target_mask;
8601     unsigned i, j;
8602 
8603     assert(host_size >= target_size);
8604 
8605     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8606     if (!target_mask) {
8607         return -TARGET_EFAULT;
8608     }
8609     memset(host_mask, 0, host_size);
8610 
8611     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8612         unsigned bit = i * target_bits;
8613         abi_ulong val;
8614 
8615         __get_user(val, &target_mask[i]);
8616         for (j = 0; j < target_bits; j++, bit++) {
8617             if (val & (1UL << j)) {
8618                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8619             }
8620         }
8621     }
8622 
8623     unlock_user(target_mask, target_addr, 0);
8624     return 0;
8625 }
8626 
8627 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8628                                    size_t host_size,
8629                                    abi_ulong target_addr,
8630                                    size_t target_size)
8631 {
8632     unsigned target_bits = sizeof(abi_ulong) * 8;
8633     unsigned host_bits = sizeof(*host_mask) * 8;
8634     abi_ulong *target_mask;
8635     unsigned i, j;
8636 
8637     assert(host_size >= target_size);
8638 
8639     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8640     if (!target_mask) {
8641         return -TARGET_EFAULT;
8642     }
8643 
8644     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8645         unsigned bit = i * target_bits;
8646         abi_ulong val = 0;
8647 
8648         for (j = 0; j < target_bits; j++, bit++) {
8649             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8650                 val |= 1UL << j;
8651             }
8652         }
8653         __put_user(val, &target_mask[i]);
8654     }
8655 
8656     unlock_user(target_mask, target_addr, target_size);
8657     return 0;
8658 }
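/*
 * Illustrative sketch, not part of the original source: for a 32-bit guest
 * (abi_ulong is 32 bits) on a host whose unsigned long is 64 bits, target
 * mask words {0x00000001, 0x80000000} describe CPUs 0 and 63.
 * target_to_host_cpu_mask() folds them into a single host word,
 *
 *     host_mask[0] = (1UL << 0) | (1UL << 63)
 *
 * and host_to_target_cpu_mask() above performs the inverse split.
 */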
8659 
8660 #ifdef TARGET_NR_getdents
8661 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8662 {
8663     g_autofree void *hdirp = NULL;
8664     void *tdirp;
8665     int hlen, hoff, toff;
8666     int hreclen, treclen;
8667     off_t prev_diroff = 0;
8668 
8669     hdirp = g_try_malloc(count);
8670     if (!hdirp) {
8671         return -TARGET_ENOMEM;
8672     }
8673 
8674 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8675     hlen = sys_getdents(dirfd, hdirp, count);
8676 #else
8677     hlen = sys_getdents64(dirfd, hdirp, count);
8678 #endif
8679 
8680     hlen = get_errno(hlen);
8681     if (is_error(hlen)) {
8682         return hlen;
8683     }
8684 
8685     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8686     if (!tdirp) {
8687         return -TARGET_EFAULT;
8688     }
8689 
8690     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8691 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8692         struct linux_dirent *hde = hdirp + hoff;
8693 #else
8694         struct linux_dirent64 *hde = hdirp + hoff;
8695 #endif
8696         struct target_dirent *tde = tdirp + toff;
8697         int namelen;
8698         uint8_t type;
8699 
8700         namelen = strlen(hde->d_name);
8701         hreclen = hde->d_reclen;
8702         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8703         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8704 
8705         if (toff + treclen > count) {
8706             /*
8707              * If the host struct is smaller than the target struct, or
8708              * requires less alignment and thus packs into less space,
8709              * then the host can return more entries than we can pass
8710              * on to the guest.
8711              */
8712             if (toff == 0) {
8713                 toff = -TARGET_EINVAL; /* result buffer is too small */
8714                 break;
8715             }
8716             /*
8717              * Return what we have, resetting the file pointer to the
8718              * location of the first record not returned.
8719              */
8720             lseek(dirfd, prev_diroff, SEEK_SET);
8721             break;
8722         }
8723 
8724         prev_diroff = hde->d_off;
8725         tde->d_ino = tswapal(hde->d_ino);
8726         tde->d_off = tswapal(hde->d_off);
8727         tde->d_reclen = tswap16(treclen);
8728         memcpy(tde->d_name, hde->d_name, namelen + 1);
8729 
8730         /*
8731          * The getdents type is in what was formerly a padding byte at the
8732          * end of the structure.
8733          */
8734 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8735         type = *((uint8_t *)hde + hreclen - 1);
8736 #else
8737         type = hde->d_type;
8738 #endif
8739         *((uint8_t *)tde + treclen - 1) = type;
8740     }
8741 
8742     unlock_user(tdirp, arg2, toff);
8743     return toff;
8744 }
8745 #endif /* TARGET_NR_getdents */
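/*
 * Illustrative sketch, not part of the original source: for a directory
 * entry named "file1" (5 characters), the target record length computed
 * above is
 *
 *     offsetof(struct target_dirent, d_name) + 5 + 2
 *
 * rounded up to the alignment of struct target_dirent; the extra 2 bytes
 * hold the trailing NUL of d_name and the d_type byte that getdents stores
 * in the final byte of each record.
 */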
8746 
8747 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8748 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8749 {
8750     g_autofree void *hdirp = NULL;
8751     void *tdirp;
8752     int hlen, hoff, toff;
8753     int hreclen, treclen;
8754     off_t prev_diroff = 0;
8755 
8756     hdirp = g_try_malloc(count);
8757     if (!hdirp) {
8758         return -TARGET_ENOMEM;
8759     }
8760 
8761     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8762     if (is_error(hlen)) {
8763         return hlen;
8764     }
8765 
8766     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8767     if (!tdirp) {
8768         return -TARGET_EFAULT;
8769     }
8770 
8771     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8772         struct linux_dirent64 *hde = hdirp + hoff;
8773         struct target_dirent64 *tde = tdirp + toff;
8774         int namelen;
8775 
8776         namelen = strlen(hde->d_name) + 1;
8777         hreclen = hde->d_reclen;
8778         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8779         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8780 
8781         if (toff + treclen > count) {
8782             /*
8783              * If the host struct is smaller than the target struct, or
8784              * requires less alignment and thus packs into less space,
8785              * then the host can return more entries than we can pass
8786              * on to the guest.
8787              */
8788             if (toff == 0) {
8789                 toff = -TARGET_EINVAL; /* result buffer is too small */
8790                 break;
8791             }
8792             /*
8793              * Return what we have, resetting the file pointer to the
8794              * location of the first record not returned.
8795              */
8796             lseek(dirfd, prev_diroff, SEEK_SET);
8797             break;
8798         }
8799 
8800         prev_diroff = hde->d_off;
8801         tde->d_ino = tswap64(hde->d_ino);
8802         tde->d_off = tswap64(hde->d_off);
8803         tde->d_reclen = tswap16(treclen);
8804         tde->d_type = hde->d_type;
8805         memcpy(tde->d_name, hde->d_name, namelen);
8806     }
8807 
8808     unlock_user(tdirp, arg2, toff);
8809     return toff;
8810 }
8811 #endif /* TARGET_NR_getdents64 */
8812 
8813 #if defined(TARGET_NR_riscv_hwprobe)
8814 
8815 #define RISCV_HWPROBE_KEY_MVENDORID     0
8816 #define RISCV_HWPROBE_KEY_MARCHID       1
8817 #define RISCV_HWPROBE_KEY_MIMPID        2
8818 
8819 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8820 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8821 
8822 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8823 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8824 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8825 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8826 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8827 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8828 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8829 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8830 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8831 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8832 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8833 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8834 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8835 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8836 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8837 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8838 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8839 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8840 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8841 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8842 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8843 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8844 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8845 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8846 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8847 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8848 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8849 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8850 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8851 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8852 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8853 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8854 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1 << 31)
8855 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8856 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8857 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8858 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8859 
8860 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8861 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8862 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8863 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8864 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8865 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8866 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8867 
8868 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8869 
8870 struct riscv_hwprobe {
8871     abi_llong  key;
8872     abi_ullong value;
8873 };
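/*
 * Illustrative sketch, not part of the original source: conceptually the
 * guest hands over an array of these pairs with only the keys filled in,
 * e.g.
 *
 *     struct riscv_hwprobe pairs[] = {
 *         { .key = RISCV_HWPROBE_KEY_MVENDORID },
 *         { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *     };
 *
 * and risc_hwprobe_fill_pairs() below writes each recognised value back
 * into pair->value, while an unrecognised key is rewritten to -1 so the
 * guest can tell it was not understood.
 */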
8874 
8875 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8876                                     struct riscv_hwprobe *pair,
8877                                     size_t pair_count)
8878 {
8879     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8880 
8881     for (; pair_count > 0; pair_count--, pair++) {
8882         abi_llong key;
8883         abi_ullong value;
8884         __put_user(0, &pair->value);
8885         __get_user(key, &pair->key);
8886         switch (key) {
8887         case RISCV_HWPROBE_KEY_MVENDORID:
8888             __put_user(cfg->mvendorid, &pair->value);
8889             break;
8890         case RISCV_HWPROBE_KEY_MARCHID:
8891             __put_user(cfg->marchid, &pair->value);
8892             break;
8893         case RISCV_HWPROBE_KEY_MIMPID:
8894             __put_user(cfg->mimpid, &pair->value);
8895             break;
8896         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
8897             value = riscv_has_ext(env, RVI) &&
8898                     riscv_has_ext(env, RVM) &&
8899                     riscv_has_ext(env, RVA) ?
8900                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
8901             __put_user(value, &pair->value);
8902             break;
8903         case RISCV_HWPROBE_KEY_IMA_EXT_0:
8904             value = riscv_has_ext(env, RVF) &&
8905                     riscv_has_ext(env, RVD) ?
8906                     RISCV_HWPROBE_IMA_FD : 0;
8907             value |= riscv_has_ext(env, RVC) ?
8908                      RISCV_HWPROBE_IMA_C : 0;
8909             value |= riscv_has_ext(env, RVV) ?
8910                      RISCV_HWPROBE_IMA_V : 0;
8911             value |= cfg->ext_zba ?
8912                      RISCV_HWPROBE_EXT_ZBA : 0;
8913             value |= cfg->ext_zbb ?
8914                      RISCV_HWPROBE_EXT_ZBB : 0;
8915             value |= cfg->ext_zbs ?
8916                      RISCV_HWPROBE_EXT_ZBS : 0;
8917             value |= cfg->ext_zicboz ?
8918                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
8919             value |= cfg->ext_zbc ?
8920                      RISCV_HWPROBE_EXT_ZBC : 0;
8921             value |= cfg->ext_zbkb ?
8922                      RISCV_HWPROBE_EXT_ZBKB : 0;
8923             value |= cfg->ext_zbkc ?
8924                      RISCV_HWPROBE_EXT_ZBKC : 0;
8925             value |= cfg->ext_zbkx ?
8926                      RISCV_HWPROBE_EXT_ZBKX : 0;
8927             value |= cfg->ext_zknd ?
8928                      RISCV_HWPROBE_EXT_ZKND : 0;
8929             value |= cfg->ext_zkne ?
8930                      RISCV_HWPROBE_EXT_ZKNE : 0;
8931             value |= cfg->ext_zknh ?
8932                      RISCV_HWPROBE_EXT_ZKNH : 0;
8933             value |= cfg->ext_zksed ?
8934                      RISCV_HWPROBE_EXT_ZKSED : 0;
8935             value |= cfg->ext_zksh ?
8936                      RISCV_HWPROBE_EXT_ZKSH : 0;
8937             value |= cfg->ext_zkt ?
8938                      RISCV_HWPROBE_EXT_ZKT : 0;
8939             value |= cfg->ext_zvbb ?
8940                      RISCV_HWPROBE_EXT_ZVBB : 0;
8941             value |= cfg->ext_zvbc ?
8942                      RISCV_HWPROBE_EXT_ZVBC : 0;
8943             value |= cfg->ext_zvkb ?
8944                      RISCV_HWPROBE_EXT_ZVKB : 0;
8945             value |= cfg->ext_zvkg ?
8946                      RISCV_HWPROBE_EXT_ZVKG : 0;
8947             value |= cfg->ext_zvkned ?
8948                      RISCV_HWPROBE_EXT_ZVKNED : 0;
8949             value |= cfg->ext_zvknha ?
8950                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
8951             value |= cfg->ext_zvknhb ?
8952                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
8953             value |= cfg->ext_zvksed ?
8954                      RISCV_HWPROBE_EXT_ZVKSED : 0;
8955             value |= cfg->ext_zvksh ?
8956                      RISCV_HWPROBE_EXT_ZVKSH : 0;
8957             value |= cfg->ext_zvkt ?
8958                      RISCV_HWPROBE_EXT_ZVKT : 0;
8959             value |= cfg->ext_zfh ?
8960                      RISCV_HWPROBE_EXT_ZFH : 0;
8961             value |= cfg->ext_zfhmin ?
8962                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
8963             value |= cfg->ext_zihintntl ?
8964                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
8965             value |= cfg->ext_zvfh ?
8966                      RISCV_HWPROBE_EXT_ZVFH : 0;
8967             value |= cfg->ext_zvfhmin ?
8968                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
8969             value |= cfg->ext_zfa ?
8970                      RISCV_HWPROBE_EXT_ZFA : 0;
8971             value |= cfg->ext_ztso ?
8972                      RISCV_HWPROBE_EXT_ZTSO : 0;
8973             value |= cfg->ext_zacas ?
8974                      RISCV_HWPROBE_EXT_ZACAS : 0;
8975             value |= cfg->ext_zicond ?
8976                      RISCV_HWPROBE_EXT_ZICOND : 0;
8977             __put_user(value, &pair->value);
8978             break;
8979         case RISCV_HWPROBE_KEY_CPUPERF_0:
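            /*
             * QEMU reports misaligned accesses as fast, since TCG handles
             * them on behalf of the guest rather than trapping.
             */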
8980             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
8981             break;
8982         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
8983             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
8984             __put_user(value, &pair->value);
8985             break;
8986         default:
8987             __put_user(-1, &pair->key);
8988             break;
8989         }
8990     }
8991 }
8992 
8993 static int cpu_set_valid(abi_long arg3, abi_long arg4)
8994 {
8995     int ret, i, tmp;
8996     size_t host_mask_size, target_mask_size;
8997     unsigned long *host_mask;
8998 
8999     /*
9000      * cpu_set_t represents CPU masks as bit masks stored in arrays of unsigned long.
9001      * arg3 contains the cpu count.
9002      */
9003     tmp = (8 * sizeof(abi_ulong));
9004     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9005     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9006                      ~(sizeof(*host_mask) - 1);
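    /*
     * Worked example, assuming a 32-bit guest (abi_ulong == 4 bytes) on a
     * 64-bit host: arg3 == 33 CPUs gives tmp == 32, so target_mask_size ==
     * ((33 + 31) / 32) * 4 == 8 bytes, which already meets the host's
     * sizeof(unsigned long) rounding, i.e. host_mask_size == 8 as well.
     */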
9007 
9008     host_mask = alloca(host_mask_size);
9009 
9010     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9011                                   arg4, target_mask_size);
9012     if (ret != 0) {
9013         return ret;
9014     }
9015 
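    /*
     * Accept any mask with at least one CPU bit set; an all-zero mask is
     * rejected with -TARGET_EINVAL, as an empty cpuset is not a valid
     * hwprobe request.
     */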
9016     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9017         if (host_mask[i] != 0) {
9018             return 0;
9019         }
9020     }
9021     return -TARGET_EINVAL;
9022 }
9023 
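/*
 * Emulation of the riscv_hwprobe(2) syscall: arg1 = pairs, arg2 = pair_count,
 * arg3/arg4 describe an optional cpu set, arg5 = flags.
 *
 * Illustrative sketch only (not part of the emulation): a guest program
 * would typically query the extension bitmap along these lines, assuming
 * its libc exposes __NR_riscv_hwprobe:
 *
 *     struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *     if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *         (pair.value & RISCV_HWPROBE_IMA_V)) {
 *         ... use the vector extension ...
 *     }
 */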
9024 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9025                                  abi_long arg2, abi_long arg3,
9026                                  abi_long arg4, abi_long arg5)
9027 {
9028     int ret;
9029     struct riscv_hwprobe *host_pairs;
9030 
9031     /* flags must be 0 */
9032     if (arg5 != 0) {
9033         return -TARGET_EINVAL;
9034     }
9035 
9036     /* check cpu_set */
9037     if (arg3 != 0) {
9038         ret = cpu_set_valid(arg3, arg4);
9039         if (ret != 0) {
9040             return ret;
9041         }
9042     } else if (arg4 != 0) {
9043         return -TARGET_EINVAL;
9044     }
9045 
9046     /* no pairs */
9047     if (arg2 == 0) {
9048         return 0;
9049     }
9050 
9051     host_pairs = lock_user(VERIFY_WRITE, arg1,
9052                            sizeof(*host_pairs) * (size_t)arg2, 0);
9053     if (host_pairs == NULL) {
9054         return -TARGET_EFAULT;
9055     }
9056     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9057     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9058     return 0;
9059 }
9060 #endif /* TARGET_NR_riscv_hwprobe */
9061 
9062 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9063 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9064 #endif
9065 
9066 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9067 #define __NR_sys_open_tree __NR_open_tree
9068 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9069           unsigned int, __flags)
9070 #endif
9071 
9072 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9073 #define __NR_sys_move_mount __NR_move_mount
9074 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9075            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9076 #endif
9077 
9078 /* This is an internal helper for do_syscall so that there is a single
9079  * return point, which makes it easier to perform actions such as
9080  * logging of syscall results.
9081  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9082  */
9083 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9084                             abi_long arg2, abi_long arg3, abi_long arg4,
9085                             abi_long arg5, abi_long arg6, abi_long arg7,
9086                             abi_long arg8)
9087 {
9088     CPUState *cpu = env_cpu(cpu_env);
9089     abi_long ret;
9090 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9091     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9092     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9093     || defined(TARGET_NR_statx)
9094     struct stat st;
9095 #endif
9096 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9097     || defined(TARGET_NR_fstatfs)
9098     struct statfs stfs;
9099 #endif
9100     void *p;
9101 
9102     switch(num) {
9103     case TARGET_NR_exit:
9104         /* In old applications this may be used to implement _exit(2).
9105            However in threaded applications it is used for thread termination,
9106            and _exit_group is used for application termination.
9107            Do thread termination if we have more than one thread.  */
9108 
9109         if (block_signals()) {
9110             return -QEMU_ERESTARTSYS;
9111         }
9112 
9113         pthread_mutex_lock(&clone_lock);
9114 
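        /*
         * If more than one CPU (i.e. guest thread) is still registered,
         * only this thread is exiting: clear and wake child_tidptr, tear
         * down this vCPU and end the host thread.  Otherwise fall through
         * to terminate the whole process below.
         */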
9115         if (CPU_NEXT(first_cpu)) {
9116             TaskState *ts = get_task_state(cpu);
9117 
9118             if (ts->child_tidptr) {
9119                 put_user_u32(0, ts->child_tidptr);
9120                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9121                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9122             }
9123 
9124             object_unparent(OBJECT(cpu));
9125             object_unref(OBJECT(cpu));
9126             /*
9127              * At this point the CPU should be unrealized and removed
9128              * from cpu lists. We can clean-up the rest of the thread
9129              * data without the lock held.
9130              */
9131 
9132             pthread_mutex_unlock(&clone_lock);
9133 
9134             thread_cpu = NULL;
9135             g_free(ts);
9136             rcu_unregister_thread();
9137             pthread_exit(NULL);
9138         }
9139 
9140         pthread_mutex_unlock(&clone_lock);
9141         preexit_cleanup(cpu_env, arg1);
9142         _exit(arg1);
9143         return 0; /* avoid warning */
9144     case TARGET_NR_read:
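        /*
         * Pass a zero-length read with a NULL buffer straight to the host:
         * locking a NULL guest pointer would yield -TARGET_EFAULT, whereas
         * the kernel accepts read(fd, NULL, 0).
         */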
9145         if (arg2 == 0 && arg3 == 0) {
9146             return get_errno(safe_read(arg1, 0, 0));
9147         } else {
9148             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9149                 return -TARGET_EFAULT;
9150             ret = get_errno(safe_read(arg1, p, arg3));
9151             if (ret >= 0 &&
9152                 fd_trans_host_to_target_data(arg1)) {
9153                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9154             }
9155             unlock_user(p, arg2, ret);
9156         }
9157         return ret;
9158     case TARGET_NR_write:
9159         if (arg2 == 0 && arg3 == 0) {
9160             return get_errno(safe_write(arg1, 0, 0));
9161         }
9162         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9163             return -TARGET_EFAULT;
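        /*
         * If an fd translator is registered for this descriptor, run it on
         * a scratch copy of the data and write out the translated bytes,
         * leaving the guest's locked buffer unmodified.
         */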
9164         if (fd_trans_target_to_host_data(arg1)) {
9165             void *copy = g_malloc(arg3);
9166             memcpy(copy, p, arg3);
9167             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9168             if (ret >= 0) {
9169                 ret = get_errno(safe_write(arg1, copy, ret));
9170             }
9171             g_free(copy);
9172         } else {
9173             ret = get_errno(safe_write(arg1, p, arg3));
9174         }
9175         unlock_user(p, arg2, 0);
9176         return ret;
9177 
9178 #ifdef TARGET_NR_open
9179     case TARGET_NR_open:
9180         if (!(p = lock_user_string(arg1)))
9181             return -TARGET_EFAULT;
9182         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9183                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9184                                   arg3, true));
9185         fd_trans_unregister(ret);
9186         unlock_user(p, arg1, 0);
9187         return ret;
9188 #endif
9189     case TARGET_NR_openat:
9190         if (!(p = lock_user_string(arg2)))
9191             return -TARGET_EFAULT;
9192         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9193                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9194                                   arg4, true));
9195         fd_trans_unregister(ret);
9196         unlock_user(p, arg2, 0);
9197         return ret;
9198 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9199     case TARGET_NR_name_to_handle_at:
9200         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9201         return ret;
9202 #endif
9203 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9204     case TARGET_NR_open_by_handle_at:
9205         ret = do_open_by_handle_at(arg1, arg2, arg3);
9206         fd_trans_unregister(ret);
9207         return ret;
9208 #endif
9209 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9210     case TARGET_NR_pidfd_open:
9211         return get_errno(pidfd_open(arg1, arg2));
9212 #endif
9213 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9214     case TARGET_NR_pidfd_send_signal:
9215         {
9216             siginfo_t uinfo, *puinfo;
9217 
9218             if (arg3) {
9219                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9220                 if (!p) {
9221                     return -TARGET_EFAULT;
9222                 }
9223                 target_to_host_siginfo(&uinfo, p);
9224                 unlock_user(p, arg3, 0);
9225                 puinfo = &uinfo;
9226             } else {
9227                 puinfo = NULL;
9228             }
9229             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9230                                               puinfo, arg4));
9231         }
9232         return ret;
9233 #endif
9234 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9235     case TARGET_NR_pidfd_getfd:
9236         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9237 #endif
9238     case TARGET_NR_close:
9239         fd_trans_unregister(arg1);
9240         return get_errno(close(arg1));
9241 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9242     case TARGET_NR_close_range:
9243         ret = get_errno(sys_close_range(arg1, arg2, arg3));
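        /*
         * On success, drop the fd translators for descriptors that were
         * actually closed.  With CLOSE_RANGE_CLOEXEC the descriptors stay
         * open (they are only marked close-on-exec), so keep the
         * translators in that case.
         */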
9244         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9245             abi_long fd, maxfd;
9246             maxfd = MIN(arg2, target_fd_max);
9247             for (fd = arg1; fd < maxfd; fd++) {
9248                 fd_trans_unregister(fd);
9249             }
9250         }
9251         return ret;
9252 #endif
9253 
9254     case TARGET_NR_brk:
9255         return do_brk(arg1);
9256 #ifdef TARGET_NR_fork
9257     case TARGET_NR_fork:
9258         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9259 #endif
9260 #ifdef TARGET_NR_waitpid
9261     case TARGET_NR_waitpid:
9262         {
9263             int status;
9264             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9265             if (!is_error(ret) && arg2 && ret
9266                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9267                 return -TARGET_EFAULT;
9268         }
9269         return ret;
9270 #endif
9271 #ifdef TARGET_NR_waitid
9272     case TARGET_NR_waitid:
9273         {
9274             struct rusage ru;
9275             siginfo_t info;
9276 
9277             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9278                                         arg4, (arg5 ? &ru : NULL)));
9279             if (!is_error(ret)) {
9280                 if (arg3) {
9281                     p = lock_user(VERIFY_WRITE, arg3,
9282                                   sizeof(target_siginfo_t), 0);
9283                     if (!p) {
9284                         return -TARGET_EFAULT;
9285                     }
9286                     host_to_target_siginfo(p, &info);
9287                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9288                 }
9289                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9290                     return -TARGET_EFAULT;
9291                 }
9292             }
9293         }
9294         return ret;
9295 #endif
9296 #ifdef TARGET_NR_creat /* not on alpha */
9297     case TARGET_NR_creat:
9298         if (!(p = lock_user_string(arg1)))
9299             return -TARGET_EFAULT;
9300         ret = get_errno(creat(p, arg2));
9301         fd_trans_unregister(ret);
9302         unlock_user(p, arg1, 0);
9303         return ret;
9304 #endif
9305 #ifdef TARGET_NR_link
9306     case TARGET_NR_link:
9307         {
9308             void * p2;
9309             p = lock_user_string(arg1);
9310             p2 = lock_user_string(arg2);
9311             if (!p || !p2)
9312                 ret = -TARGET_EFAULT;
9313             else
9314                 ret = get_errno(link(p, p2));
9315             unlock_user(p2, arg2, 0);
9316             unlock_user(p, arg1, 0);
9317         }
9318         return ret;
9319 #endif
9320 #if defined(TARGET_NR_linkat)
9321     case TARGET_NR_linkat:
9322         {
9323             void * p2 = NULL;
9324             if (!arg2 || !arg4)
9325                 return -TARGET_EFAULT;
9326             p  = lock_user_string(arg2);
9327             p2 = lock_user_string(arg4);
9328             if (!p || !p2)
9329                 ret = -TARGET_EFAULT;
9330             else
9331                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9332             unlock_user(p, arg2, 0);
9333             unlock_user(p2, arg4, 0);
9334         }
9335         return ret;
9336 #endif
9337 #ifdef TARGET_NR_unlink
9338     case TARGET_NR_unlink:
9339         if (!(p = lock_user_string(arg1)))
9340             return -TARGET_EFAULT;
9341         ret = get_errno(unlink(p));
9342         unlock_user(p, arg1, 0);
9343         return ret;
9344 #endif
9345 #if defined(TARGET_NR_unlinkat)
9346     case TARGET_NR_unlinkat:
9347         if (!(p = lock_user_string(arg2)))
9348             return -TARGET_EFAULT;
9349         ret = get_errno(unlinkat(arg1, p, arg3));
9350         unlock_user(p, arg2, 0);
9351         return ret;
9352 #endif
9353     case TARGET_NR_execveat:
9354         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9355     case TARGET_NR_execve:
9356         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9357     case TARGET_NR_chdir:
9358         if (!(p = lock_user_string(arg1)))
9359             return -TARGET_EFAULT;
9360         ret = get_errno(chdir(p));
9361         unlock_user(p, arg1, 0);
9362         return ret;
9363 #ifdef TARGET_NR_time
9364     case TARGET_NR_time:
9365         {
9366             time_t host_time;
9367             ret = get_errno(time(&host_time));
9368             if (!is_error(ret)
9369                 && arg1
9370                 && put_user_sal(host_time, arg1))
9371                 return -TARGET_EFAULT;
9372         }
9373         return ret;
9374 #endif
9375 #ifdef TARGET_NR_mknod
9376     case TARGET_NR_mknod:
9377         if (!(p = lock_user_string(arg1)))
9378             return -TARGET_EFAULT;
9379         ret = get_errno(mknod(p, arg2, arg3));
9380         unlock_user(p, arg1, 0);
9381         return ret;
9382 #endif
9383 #if defined(TARGET_NR_mknodat)
9384     case TARGET_NR_mknodat:
9385         if (!(p = lock_user_string(arg2)))
9386             return -TARGET_EFAULT;
9387         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9388         unlock_user(p, arg2, 0);
9389         return ret;
9390 #endif
9391 #ifdef TARGET_NR_chmod
9392     case TARGET_NR_chmod:
9393         if (!(p = lock_user_string(arg1)))
9394             return -TARGET_EFAULT;
9395         ret = get_errno(chmod(p, arg2));
9396         unlock_user(p, arg1, 0);
9397         return ret;
9398 #endif
9399 #ifdef TARGET_NR_lseek
9400     case TARGET_NR_lseek:
9401         return get_errno(lseek(arg1, arg2, arg3));
9402 #endif
9403 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9404     /* Alpha specific */
9405     case TARGET_NR_getxpid:
9406         cpu_env->ir[IR_A4] = getppid();
9407         return get_errno(getpid());
9408 #endif
9409 #ifdef TARGET_NR_getpid
9410     case TARGET_NR_getpid:
9411         return get_errno(getpid());
9412 #endif
9413     case TARGET_NR_mount:
9414         {
9415             /* need to look at the data field */
9416             void *p2, *p3;
9417 
9418             if (arg1) {
9419                 p = lock_user_string(arg1);
9420                 if (!p) {
9421                     return -TARGET_EFAULT;
9422                 }
9423             } else {
9424                 p = NULL;
9425             }
9426 
9427             p2 = lock_user_string(arg2);
9428             if (!p2) {
9429                 if (arg1) {
9430                     unlock_user(p, arg1, 0);
9431                 }
9432                 return -TARGET_EFAULT;
9433             }
9434 
9435             if (arg3) {
9436                 p3 = lock_user_string(arg3);
9437                 if (!p3) {
9438                     if (arg1) {
9439                         unlock_user(p, arg1, 0);
9440                     }
9441                     unlock_user(p2, arg2, 0);
9442                     return -TARGET_EFAULT;
9443                 }
9444             } else {
9445                 p3 = NULL;
9446             }
9447 
9448             /* FIXME - arg5 should be locked, but it isn't clear how to
9449              * do that since it's not guaranteed to be a NULL-terminated
9450              * string.
9451              */
9452             if (!arg5) {
9453                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9454             } else {
9455                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9456             }
9457             ret = get_errno(ret);
9458 
9459             if (arg1) {
9460                 unlock_user(p, arg1, 0);
9461             }
9462             unlock_user(p2, arg2, 0);
9463             if (arg3) {
9464                 unlock_user(p3, arg3, 0);
9465             }
9466         }
9467         return ret;
9468 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9469 #if defined(TARGET_NR_umount)
9470     case TARGET_NR_umount:
9471 #endif
9472 #if defined(TARGET_NR_oldumount)
9473     case TARGET_NR_oldumount:
9474 #endif
9475         if (!(p = lock_user_string(arg1)))
9476             return -TARGET_EFAULT;
9477         ret = get_errno(umount(p));
9478         unlock_user(p, arg1, 0);
9479         return ret;
9480 #endif
9481 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9482     case TARGET_NR_move_mount:
9483         {
9484             void *p2, *p4;
9485 
9486             if (!arg2 || !arg4) {
9487                 return -TARGET_EFAULT;
9488             }
9489 
9490             p2 = lock_user_string(arg2);
9491             if (!p2) {
9492                 return -TARGET_EFAULT;
9493             }
9494 
9495             p4 = lock_user_string(arg4);
9496             if (!p4) {
9497                 unlock_user(p2, arg2, 0);
9498                 return -TARGET_EFAULT;
9499             }
9500             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9501 
9502             unlock_user(p2, arg2, 0);
9503             unlock_user(p4, arg4, 0);
9504 
9505             return ret;
9506         }
9507 #endif
9508 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9509     case TARGET_NR_open_tree:
9510         {
9511             void *p2;
9512             int host_flags;
9513 
9514             if (!arg2) {
9515                 return -TARGET_EFAULT;
9516             }
9517 
9518             p2 = lock_user_string(arg2);
9519             if (!p2) {
9520                 return -TARGET_EFAULT;
9521             }
9522 
9523             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9524             if (arg3 & TARGET_O_CLOEXEC) {
9525                 host_flags |= O_CLOEXEC;
9526             }
9527 
9528             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9529 
9530             unlock_user(p2, arg2, 0);
9531 
9532             return ret;
9533         }
9534 #endif
9535 #ifdef TARGET_NR_stime /* not on alpha */
9536     case TARGET_NR_stime:
9537         {
9538             struct timespec ts;
9539             ts.tv_nsec = 0;
9540             if (get_user_sal(ts.tv_sec, arg1)) {
9541                 return -TARGET_EFAULT;
9542             }
9543             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9544         }
9545 #endif
9546 #ifdef TARGET_NR_alarm /* not on alpha */
9547     case TARGET_NR_alarm:
9548         return alarm(arg1);
9549 #endif
9550 #ifdef TARGET_NR_pause /* not on alpha */
9551     case TARGET_NR_pause:
9552         if (!block_signals()) {
9553             sigsuspend(&get_task_state(cpu)->signal_mask);
9554         }
9555         return -TARGET_EINTR;
9556 #endif
9557 #ifdef TARGET_NR_utime
9558     case TARGET_NR_utime:
9559         {
9560             struct utimbuf tbuf, *host_tbuf;
9561             struct target_utimbuf *target_tbuf;
9562             if (arg2) {
9563                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9564                     return -TARGET_EFAULT;
9565                 tbuf.actime = tswapal(target_tbuf->actime);
9566                 tbuf.modtime = tswapal(target_tbuf->modtime);
9567                 unlock_user_struct(target_tbuf, arg2, 0);
9568                 host_tbuf = &tbuf;
9569             } else {
9570                 host_tbuf = NULL;
9571             }
9572             if (!(p = lock_user_string(arg1)))
9573                 return -TARGET_EFAULT;
9574             ret = get_errno(utime(p, host_tbuf));
9575             unlock_user(p, arg1, 0);
9576         }
9577         return ret;
9578 #endif
9579 #ifdef TARGET_NR_utimes
9580     case TARGET_NR_utimes:
9581         {
9582             struct timeval *tvp, tv[2];
9583             if (arg2) {
9584                 if (copy_from_user_timeval(&tv[0], arg2)
9585                     || copy_from_user_timeval(&tv[1],
9586                                               arg2 + sizeof(struct target_timeval)))
9587                     return -TARGET_EFAULT;
9588                 tvp = tv;
9589             } else {
9590                 tvp = NULL;
9591             }
9592             if (!(p = lock_user_string(arg1)))
9593                 return -TARGET_EFAULT;
9594             ret = get_errno(utimes(p, tvp));
9595             unlock_user(p, arg1, 0);
9596         }
9597         return ret;
9598 #endif
9599 #if defined(TARGET_NR_futimesat)
9600     case TARGET_NR_futimesat:
9601         {
9602             struct timeval *tvp, tv[2];
9603             if (arg3) {
9604                 if (copy_from_user_timeval(&tv[0], arg3)
9605                     || copy_from_user_timeval(&tv[1],
9606                                               arg3 + sizeof(struct target_timeval)))
9607                     return -TARGET_EFAULT;
9608                 tvp = tv;
9609             } else {
9610                 tvp = NULL;
9611             }
9612             if (!(p = lock_user_string(arg2))) {
9613                 return -TARGET_EFAULT;
9614             }
9615             ret = get_errno(futimesat(arg1, path(p), tvp));
9616             unlock_user(p, arg2, 0);
9617         }
9618         return ret;
9619 #endif
9620 #ifdef TARGET_NR_access
9621     case TARGET_NR_access:
9622         if (!(p = lock_user_string(arg1))) {
9623             return -TARGET_EFAULT;
9624         }
9625         ret = get_errno(access(path(p), arg2));
9626         unlock_user(p, arg1, 0);
9627         return ret;
9628 #endif
9629 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9630     case TARGET_NR_faccessat:
9631         if (!(p = lock_user_string(arg2))) {
9632             return -TARGET_EFAULT;
9633         }
9634         ret = get_errno(faccessat(arg1, p, arg3, 0));
9635         unlock_user(p, arg2, 0);
9636         return ret;
9637 #endif
9638 #if defined(TARGET_NR_faccessat2)
9639     case TARGET_NR_faccessat2:
9640         if (!(p = lock_user_string(arg2))) {
9641             return -TARGET_EFAULT;
9642         }
9643         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9644         unlock_user(p, arg2, 0);
9645         return ret;
9646 #endif
9647 #ifdef TARGET_NR_nice /* not on alpha */
9648     case TARGET_NR_nice:
9649         return get_errno(nice(arg1));
9650 #endif
9651     case TARGET_NR_sync:
9652         sync();
9653         return 0;
9654 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9655     case TARGET_NR_syncfs:
9656         return get_errno(syncfs(arg1));
9657 #endif
9658     case TARGET_NR_kill:
9659         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9660 #ifdef TARGET_NR_rename
9661     case TARGET_NR_rename:
9662         {
9663             void *p2;
9664             p = lock_user_string(arg1);
9665             p2 = lock_user_string(arg2);
9666             if (!p || !p2)
9667                 ret = -TARGET_EFAULT;
9668             else
9669                 ret = get_errno(rename(p, p2));
9670             unlock_user(p2, arg2, 0);
9671             unlock_user(p, arg1, 0);
9672         }
9673         return ret;
9674 #endif
9675 #if defined(TARGET_NR_renameat)
9676     case TARGET_NR_renameat:
9677         {
9678             void *p2;
9679             p  = lock_user_string(arg2);
9680             p2 = lock_user_string(arg4);
9681             if (!p || !p2)
9682                 ret = -TARGET_EFAULT;
9683             else
9684                 ret = get_errno(renameat(arg1, p, arg3, p2));
9685             unlock_user(p2, arg4, 0);
9686             unlock_user(p, arg2, 0);
9687         }
9688         return ret;
9689 #endif
9690 #if defined(TARGET_NR_renameat2)
9691     case TARGET_NR_renameat2:
9692         {
9693             void *p2;
9694             p  = lock_user_string(arg2);
9695             p2 = lock_user_string(arg4);
9696             if (!p || !p2) {
9697                 ret = -TARGET_EFAULT;
9698             } else {
9699                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9700             }
9701             unlock_user(p2, arg4, 0);
9702             unlock_user(p, arg2, 0);
9703         }
9704         return ret;
9705 #endif
9706 #ifdef TARGET_NR_mkdir
9707     case TARGET_NR_mkdir:
9708         if (!(p = lock_user_string(arg1)))
9709             return -TARGET_EFAULT;
9710         ret = get_errno(mkdir(p, arg2));
9711         unlock_user(p, arg1, 0);
9712         return ret;
9713 #endif
9714 #if defined(TARGET_NR_mkdirat)
9715     case TARGET_NR_mkdirat:
9716         if (!(p = lock_user_string(arg2)))
9717             return -TARGET_EFAULT;
9718         ret = get_errno(mkdirat(arg1, p, arg3));
9719         unlock_user(p, arg2, 0);
9720         return ret;
9721 #endif
9722 #ifdef TARGET_NR_rmdir
9723     case TARGET_NR_rmdir:
9724         if (!(p = lock_user_string(arg1)))
9725             return -TARGET_EFAULT;
9726         ret = get_errno(rmdir(p));
9727         unlock_user(p, arg1, 0);
9728         return ret;
9729 #endif
9730     case TARGET_NR_dup:
9731         ret = get_errno(dup(arg1));
9732         if (ret >= 0) {
9733             fd_trans_dup(arg1, ret);
9734         }
9735         return ret;
9736 #ifdef TARGET_NR_pipe
9737     case TARGET_NR_pipe:
9738         return do_pipe(cpu_env, arg1, 0, 0);
9739 #endif
9740 #ifdef TARGET_NR_pipe2
9741     case TARGET_NR_pipe2:
9742         return do_pipe(cpu_env, arg1,
9743                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9744 #endif
9745     case TARGET_NR_times:
9746         {
9747             struct target_tms *tmsp;
9748             struct tms tms;
9749             ret = get_errno(times(&tms));
9750             if (arg1) {
9751                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9752                 if (!tmsp)
9753                     return -TARGET_EFAULT;
9754                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9755                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9756                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9757                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9758             }
9759             if (!is_error(ret))
9760                 ret = host_to_target_clock_t(ret);
9761         }
9762         return ret;
9763     case TARGET_NR_acct:
9764         if (arg1 == 0) {
9765             ret = get_errno(acct(NULL));
9766         } else {
9767             if (!(p = lock_user_string(arg1))) {
9768                 return -TARGET_EFAULT;
9769             }
9770             ret = get_errno(acct(path(p)));
9771             unlock_user(p, arg1, 0);
9772         }
9773         return ret;
9774 #ifdef TARGET_NR_umount2
9775     case TARGET_NR_umount2:
9776         if (!(p = lock_user_string(arg1)))
9777             return -TARGET_EFAULT;
9778         ret = get_errno(umount2(p, arg2));
9779         unlock_user(p, arg1, 0);
9780         return ret;
9781 #endif
9782     case TARGET_NR_ioctl:
9783         return do_ioctl(arg1, arg2, arg3);
9784 #ifdef TARGET_NR_fcntl
9785     case TARGET_NR_fcntl:
9786         return do_fcntl(arg1, arg2, arg3);
9787 #endif
9788     case TARGET_NR_setpgid:
9789         return get_errno(setpgid(arg1, arg2));
9790     case TARGET_NR_umask:
9791         return get_errno(umask(arg1));
9792     case TARGET_NR_chroot:
9793         if (!(p = lock_user_string(arg1)))
9794             return -TARGET_EFAULT;
9795         ret = get_errno(chroot(p));
9796         unlock_user(p, arg1, 0);
9797         return ret;
9798 #ifdef TARGET_NR_dup2
9799     case TARGET_NR_dup2:
9800         ret = get_errno(dup2(arg1, arg2));
9801         if (ret >= 0) {
9802             fd_trans_dup(arg1, arg2);
9803         }
9804         return ret;
9805 #endif
9806 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9807     case TARGET_NR_dup3:
9808     {
9809         int host_flags;
9810 
9811         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9812             return -EINVAL;
9813         }
9814         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9815         ret = get_errno(dup3(arg1, arg2, host_flags));
9816         if (ret >= 0) {
9817             fd_trans_dup(arg1, arg2);
9818         }
9819         return ret;
9820     }
9821 #endif
9822 #ifdef TARGET_NR_getppid /* not on alpha */
9823     case TARGET_NR_getppid:
9824         return get_errno(getppid());
9825 #endif
9826 #ifdef TARGET_NR_getpgrp
9827     case TARGET_NR_getpgrp:
9828         return get_errno(getpgrp());
9829 #endif
9830     case TARGET_NR_setsid:
9831         return get_errno(setsid());
9832 #ifdef TARGET_NR_sigaction
9833     case TARGET_NR_sigaction:
9834         {
9835 #if defined(TARGET_MIPS)
9836             struct target_sigaction act, oact, *pact, *old_act;
9837 
9838             if (arg2) {
9839                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9840                     return -TARGET_EFAULT;
9841                 act._sa_handler = old_act->_sa_handler;
9842                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9843                 act.sa_flags = old_act->sa_flags;
9844                 unlock_user_struct(old_act, arg2, 0);
9845                 pact = &act;
9846             } else {
9847                 pact = NULL;
9848             }
9849 
9850             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9851 
9852             if (!is_error(ret) && arg3) {
9853                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9854                     return -TARGET_EFAULT;
9855                 old_act->_sa_handler = oact._sa_handler;
9856                 old_act->sa_flags = oact.sa_flags;
9857                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9858                 old_act->sa_mask.sig[1] = 0;
9859                 old_act->sa_mask.sig[2] = 0;
9860                 old_act->sa_mask.sig[3] = 0;
9861                 unlock_user_struct(old_act, arg3, 1);
9862             }
9863 #else
9864             struct target_old_sigaction *old_act;
9865             struct target_sigaction act, oact, *pact;
9866             if (arg2) {
9867                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9868                     return -TARGET_EFAULT;
9869                 act._sa_handler = old_act->_sa_handler;
9870                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9871                 act.sa_flags = old_act->sa_flags;
9872 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9873                 act.sa_restorer = old_act->sa_restorer;
9874 #endif
9875                 unlock_user_struct(old_act, arg2, 0);
9876                 pact = &act;
9877             } else {
9878                 pact = NULL;
9879             }
9880             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9881             if (!is_error(ret) && arg3) {
9882                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9883                     return -TARGET_EFAULT;
9884                 old_act->_sa_handler = oact._sa_handler;
9885                 old_act->sa_mask = oact.sa_mask.sig[0];
9886                 old_act->sa_flags = oact.sa_flags;
9887 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9888                 old_act->sa_restorer = oact.sa_restorer;
9889 #endif
9890                 unlock_user_struct(old_act, arg3, 1);
9891             }
9892 #endif
9893         }
9894         return ret;
9895 #endif
9896     case TARGET_NR_rt_sigaction:
9897         {
9898             /*
9899              * For Alpha and SPARC this is a 5 argument syscall, with
9900              * a 'restorer' parameter which must be copied into the
9901              * sa_restorer field of the sigaction struct.
9902              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9903              * and arg5 is the sigsetsize.
9904              */
9905 #if defined(TARGET_ALPHA)
9906             target_ulong sigsetsize = arg4;
9907             target_ulong restorer = arg5;
9908 #elif defined(TARGET_SPARC)
9909             target_ulong restorer = arg4;
9910             target_ulong sigsetsize = arg5;
9911 #else
9912             target_ulong sigsetsize = arg4;
9913             target_ulong restorer = 0;
9914 #endif
9915             struct target_sigaction *act = NULL;
9916             struct target_sigaction *oact = NULL;
9917 
9918             if (sigsetsize != sizeof(target_sigset_t)) {
9919                 return -TARGET_EINVAL;
9920             }
9921             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9922                 return -TARGET_EFAULT;
9923             }
9924             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9925                 ret = -TARGET_EFAULT;
9926             } else {
9927                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9928                 if (oact) {
9929                     unlock_user_struct(oact, arg3, 1);
9930                 }
9931             }
9932             if (act) {
9933                 unlock_user_struct(act, arg2, 0);
9934             }
9935         }
9936         return ret;
9937 #ifdef TARGET_NR_sgetmask /* not on alpha */
9938     case TARGET_NR_sgetmask:
9939         {
9940             sigset_t cur_set;
9941             abi_ulong target_set;
9942             ret = do_sigprocmask(0, NULL, &cur_set);
9943             if (!ret) {
9944                 host_to_target_old_sigset(&target_set, &cur_set);
9945                 ret = target_set;
9946             }
9947         }
9948         return ret;
9949 #endif
9950 #ifdef TARGET_NR_ssetmask /* not on alpha */
9951     case TARGET_NR_ssetmask:
9952         {
9953             sigset_t set, oset;
9954             abi_ulong target_set = arg1;
9955             target_to_host_old_sigset(&set, &target_set);
9956             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9957             if (!ret) {
9958                 host_to_target_old_sigset(&target_set, &oset);
9959                 ret = target_set;
9960             }
9961         }
9962         return ret;
9963 #endif
9964 #ifdef TARGET_NR_sigprocmask
9965     case TARGET_NR_sigprocmask:
9966         {
9967 #if defined(TARGET_ALPHA)
9968             sigset_t set, oldset;
9969             abi_ulong mask;
9970             int how;
9971 
9972             switch (arg1) {
9973             case TARGET_SIG_BLOCK:
9974                 how = SIG_BLOCK;
9975                 break;
9976             case TARGET_SIG_UNBLOCK:
9977                 how = SIG_UNBLOCK;
9978                 break;
9979             case TARGET_SIG_SETMASK:
9980                 how = SIG_SETMASK;
9981                 break;
9982             default:
9983                 return -TARGET_EINVAL;
9984             }
9985             mask = arg2;
9986             target_to_host_old_sigset(&set, &mask);
9987 
9988             ret = do_sigprocmask(how, &set, &oldset);
9989             if (!is_error(ret)) {
9990                 host_to_target_old_sigset(&mask, &oldset);
9991                 ret = mask;
9992                 cpu_env->ir[IR_V0] = 0; /* force no error */
9993             }
9994 #else
9995             sigset_t set, oldset, *set_ptr;
9996             int how;
9997 
9998             if (arg2) {
9999                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10000                 if (!p) {
10001                     return -TARGET_EFAULT;
10002                 }
10003                 target_to_host_old_sigset(&set, p);
10004                 unlock_user(p, arg2, 0);
10005                 set_ptr = &set;
10006                 switch (arg1) {
10007                 case TARGET_SIG_BLOCK:
10008                     how = SIG_BLOCK;
10009                     break;
10010                 case TARGET_SIG_UNBLOCK:
10011                     how = SIG_UNBLOCK;
10012                     break;
10013                 case TARGET_SIG_SETMASK:
10014                     how = SIG_SETMASK;
10015                     break;
10016                 default:
10017                     return -TARGET_EINVAL;
10018                 }
10019             } else {
10020                 how = 0;
10021                 set_ptr = NULL;
10022             }
10023             ret = do_sigprocmask(how, set_ptr, &oldset);
10024             if (!is_error(ret) && arg3) {
10025                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10026                     return -TARGET_EFAULT;
10027                 host_to_target_old_sigset(p, &oldset);
10028                 unlock_user(p, arg3, sizeof(target_sigset_t));
10029             }
10030 #endif
10031         }
10032         return ret;
10033 #endif
10034     case TARGET_NR_rt_sigprocmask:
10035         {
10036             int how = arg1;
10037             sigset_t set, oldset, *set_ptr;
10038 
10039             if (arg4 != sizeof(target_sigset_t)) {
10040                 return -TARGET_EINVAL;
10041             }
10042 
10043             if (arg2) {
10044                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10045                 if (!p) {
10046                     return -TARGET_EFAULT;
10047                 }
10048                 target_to_host_sigset(&set, p);
10049                 unlock_user(p, arg2, 0);
10050                 set_ptr = &set;
10051                 switch(how) {
10052                 case TARGET_SIG_BLOCK:
10053                     how = SIG_BLOCK;
10054                     break;
10055                 case TARGET_SIG_UNBLOCK:
10056                     how = SIG_UNBLOCK;
10057                     break;
10058                 case TARGET_SIG_SETMASK:
10059                     how = SIG_SETMASK;
10060                     break;
10061                 default:
10062                     return -TARGET_EINVAL;
10063                 }
10064             } else {
10065                 how = 0;
10066                 set_ptr = NULL;
10067             }
10068             ret = do_sigprocmask(how, set_ptr, &oldset);
10069             if (!is_error(ret) && arg3) {
10070                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10071                     return -TARGET_EFAULT;
10072                 host_to_target_sigset(p, &oldset);
10073                 unlock_user(p, arg3, sizeof(target_sigset_t));
10074             }
10075         }
10076         return ret;
10077 #ifdef TARGET_NR_sigpending
10078     case TARGET_NR_sigpending:
10079         {
10080             sigset_t set;
10081             ret = get_errno(sigpending(&set));
10082             if (!is_error(ret)) {
10083                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10084                     return -TARGET_EFAULT;
10085                 host_to_target_old_sigset(p, &set);
10086                 unlock_user(p, arg1, sizeof(target_sigset_t));
10087             }
10088         }
10089         return ret;
10090 #endif
10091     case TARGET_NR_rt_sigpending:
10092         {
10093             sigset_t set;
10094 
10095             /* Yes, this check is >, not != like most. We follow the kernel's
10096              * logic and it does it like this because it implements
10097              * NR_sigpending through the same code path, and in that case
10098              * the old_sigset_t is smaller in size.
10099              */
10100             if (arg2 > sizeof(target_sigset_t)) {
10101                 return -TARGET_EINVAL;
10102             }
10103 
10104             ret = get_errno(sigpending(&set));
10105             if (!is_error(ret)) {
10106                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10107                     return -TARGET_EFAULT;
10108                 host_to_target_sigset(p, &set);
10109                 unlock_user(p, arg1, sizeof(target_sigset_t));
10110             }
10111         }
10112         return ret;
10113 #ifdef TARGET_NR_sigsuspend
10114     case TARGET_NR_sigsuspend:
10115         {
10116             sigset_t *set;
10117 
10118 #if defined(TARGET_ALPHA)
10119             TaskState *ts = get_task_state(cpu);
10120             /* target_to_host_old_sigset will bswap back */
10121             abi_ulong mask = tswapal(arg1);
10122             set = &ts->sigsuspend_mask;
10123             target_to_host_old_sigset(set, &mask);
10124 #else
10125             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10126             if (ret != 0) {
10127                 return ret;
10128             }
10129 #endif
10130             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10131             finish_sigsuspend_mask(ret);
10132         }
10133         return ret;
10134 #endif
10135     case TARGET_NR_rt_sigsuspend:
10136         {
10137             sigset_t *set;
10138 
10139             ret = process_sigsuspend_mask(&set, arg1, arg2);
10140             if (ret != 0) {
10141                 return ret;
10142             }
10143             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10144             finish_sigsuspend_mask(ret);
10145         }
10146         return ret;
10147 #ifdef TARGET_NR_rt_sigtimedwait
10148     case TARGET_NR_rt_sigtimedwait:
10149         {
10150             sigset_t set;
10151             struct timespec uts, *puts;
10152             siginfo_t uinfo;
10153 
10154             if (arg4 != sizeof(target_sigset_t)) {
10155                 return -TARGET_EINVAL;
10156             }
10157 
10158             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10159                 return -TARGET_EFAULT;
10160             target_to_host_sigset(&set, p);
10161             unlock_user(p, arg1, 0);
10162             if (arg3) {
10163                 puts = &uts;
10164                 if (target_to_host_timespec(puts, arg3)) {
10165                     return -TARGET_EFAULT;
10166                 }
10167             } else {
10168                 puts = NULL;
10169             }
10170             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10171                                                  SIGSET_T_SIZE));
10172             if (!is_error(ret)) {
10173                 if (arg2) {
10174                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10175                                   0);
10176                     if (!p) {
10177                         return -TARGET_EFAULT;
10178                     }
10179                     host_to_target_siginfo(p, &uinfo);
10180                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10181                 }
10182                 ret = host_to_target_signal(ret);
10183             }
10184         }
10185         return ret;
10186 #endif
10187 #ifdef TARGET_NR_rt_sigtimedwait_time64
10188     case TARGET_NR_rt_sigtimedwait_time64:
10189         {
10190             sigset_t set;
10191             struct timespec uts, *puts;
10192             siginfo_t uinfo;
10193 
10194             if (arg4 != sizeof(target_sigset_t)) {
10195                 return -TARGET_EINVAL;
10196             }
10197 
10198             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10199             if (!p) {
10200                 return -TARGET_EFAULT;
10201             }
10202             target_to_host_sigset(&set, p);
10203             unlock_user(p, arg1, 0);
10204             if (arg3) {
10205                 puts = &uts;
10206                 if (target_to_host_timespec64(puts, arg3)) {
10207                     return -TARGET_EFAULT;
10208                 }
10209             } else {
10210                 puts = NULL;
10211             }
10212             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10213                                                  SIGSET_T_SIZE));
10214             if (!is_error(ret)) {
10215                 if (arg2) {
10216                     p = lock_user(VERIFY_WRITE, arg2,
10217                                   sizeof(target_siginfo_t), 0);
10218                     if (!p) {
10219                         return -TARGET_EFAULT;
10220                     }
10221                     host_to_target_siginfo(p, &uinfo);
10222                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10223                 }
10224                 ret = host_to_target_signal(ret);
10225             }
10226         }
10227         return ret;
10228 #endif
10229     case TARGET_NR_rt_sigqueueinfo:
10230         {
10231             siginfo_t uinfo;
10232 
10233             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10234             if (!p) {
10235                 return -TARGET_EFAULT;
10236             }
10237             target_to_host_siginfo(&uinfo, p);
10238             unlock_user(p, arg3, 0);
10239             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10240         }
10241         return ret;
10242     case TARGET_NR_rt_tgsigqueueinfo:
10243         {
10244             siginfo_t uinfo;
10245 
10246             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10247             if (!p) {
10248                 return -TARGET_EFAULT;
10249             }
10250             target_to_host_siginfo(&uinfo, p);
10251             unlock_user(p, arg4, 0);
10252             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10253         }
10254         return ret;
10255 #ifdef TARGET_NR_sigreturn
10256     case TARGET_NR_sigreturn:
10257         if (block_signals()) {
10258             return -QEMU_ERESTARTSYS;
10259         }
10260         return do_sigreturn(cpu_env);
10261 #endif
10262     case TARGET_NR_rt_sigreturn:
10263         if (block_signals()) {
10264             return -QEMU_ERESTARTSYS;
10265         }
10266         return do_rt_sigreturn(cpu_env);
10267     case TARGET_NR_sethostname:
10268         if (!(p = lock_user_string(arg1)))
10269             return -TARGET_EFAULT;
10270         ret = get_errno(sethostname(p, arg2));
10271         unlock_user(p, arg1, 0);
10272         return ret;
10273 #ifdef TARGET_NR_setrlimit
10274     case TARGET_NR_setrlimit:
10275         {
10276             int resource = target_to_host_resource(arg1);
10277             struct target_rlimit *target_rlim;
10278             struct rlimit rlim;
10279             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10280                 return -TARGET_EFAULT;
10281             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10282             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10283             unlock_user_struct(target_rlim, arg2, 0);
10284             /*
10285              * If we just passed through resource limit settings for memory then
10286              * they would also apply to QEMU's own allocations, and QEMU will
10287              * crash or hang or die if its allocations fail. Ideally we would
10288              * track the guest allocations in QEMU and apply the limits ourselves.
10289              * For now, just tell the guest the call succeeded but don't actually
10290              * limit anything.
10291              */
10292             if (resource != RLIMIT_AS &&
10293                 resource != RLIMIT_DATA &&
10294                 resource != RLIMIT_STACK) {
10295                 return get_errno(setrlimit(resource, &rlim));
10296             } else {
10297                 return 0;
10298             }
10299         }
10300 #endif
10301 #ifdef TARGET_NR_getrlimit
10302     case TARGET_NR_getrlimit:
10303         {
10304             int resource = target_to_host_resource(arg1);
10305             struct target_rlimit *target_rlim;
10306             struct rlimit rlim;
10307 
10308             ret = get_errno(getrlimit(resource, &rlim));
10309             if (!is_error(ret)) {
10310                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10311                     return -TARGET_EFAULT;
10312                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10313                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10314                 unlock_user_struct(target_rlim, arg2, 1);
10315             }
10316         }
10317         return ret;
10318 #endif
10319     case TARGET_NR_getrusage:
10320         {
10321             struct rusage rusage;
10322             ret = get_errno(getrusage(arg1, &rusage));
10323             if (!is_error(ret)) {
10324                 ret = host_to_target_rusage(arg2, &rusage);
10325             }
10326         }
10327         return ret;
10328 #if defined(TARGET_NR_gettimeofday)
10329     case TARGET_NR_gettimeofday:
10330         {
10331             struct timeval tv;
10332             struct timezone tz;
10333 
10334             ret = get_errno(gettimeofday(&tv, &tz));
10335             if (!is_error(ret)) {
10336                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10337                     return -TARGET_EFAULT;
10338                 }
10339                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10340                     return -TARGET_EFAULT;
10341                 }
10342             }
10343         }
10344         return ret;
10345 #endif
10346 #if defined(TARGET_NR_settimeofday)
10347     case TARGET_NR_settimeofday:
10348         {
10349             struct timeval tv, *ptv = NULL;
10350             struct timezone tz, *ptz = NULL;
10351 
10352             if (arg1) {
10353                 if (copy_from_user_timeval(&tv, arg1)) {
10354                     return -TARGET_EFAULT;
10355                 }
10356                 ptv = &tv;
10357             }
10358 
10359             if (arg2) {
10360                 if (copy_from_user_timezone(&tz, arg2)) {
10361                     return -TARGET_EFAULT;
10362                 }
10363                 ptz = &tz;
10364             }
10365 
10366             return get_errno(settimeofday(ptv, ptz));
10367         }
10368 #endif
10369 #if defined(TARGET_NR_select)
10370     case TARGET_NR_select:
10371 #if defined(TARGET_WANT_NI_OLD_SELECT)
10372         /* some architectures used to have old_select here
10373          * but now return ENOSYS for it.
10374          */
10375         ret = -TARGET_ENOSYS;
10376 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10377         ret = do_old_select(arg1);
10378 #else
10379         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10380 #endif
10381         return ret;
10382 #endif
10383 #ifdef TARGET_NR_pselect6
10384     case TARGET_NR_pselect6:
10385         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10386 #endif
10387 #ifdef TARGET_NR_pselect6_time64
10388     case TARGET_NR_pselect6_time64:
10389         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10390 #endif
10391 #ifdef TARGET_NR_symlink
10392     case TARGET_NR_symlink:
10393         {
10394             void *p2;
10395             p = lock_user_string(arg1);
10396             p2 = lock_user_string(arg2);
10397             if (!p || !p2)
10398                 ret = -TARGET_EFAULT;
10399             else
10400                 ret = get_errno(symlink(p, p2));
10401             unlock_user(p2, arg2, 0);
10402             unlock_user(p, arg1, 0);
10403         }
10404         return ret;
10405 #endif
10406 #if defined(TARGET_NR_symlinkat)
10407     case TARGET_NR_symlinkat:
10408         {
10409             void *p2;
10410             p  = lock_user_string(arg1);
10411             p2 = lock_user_string(arg3);
10412             if (!p || !p2)
10413                 ret = -TARGET_EFAULT;
10414             else
10415                 ret = get_errno(symlinkat(p, arg2, p2));
10416             unlock_user(p2, arg3, 0);
10417             unlock_user(p, arg1, 0);
10418         }
10419         return ret;
10420 #endif
10421 #ifdef TARGET_NR_readlink
10422     case TARGET_NR_readlink:
10423         {
10424             void *p2;
10425             p = lock_user_string(arg1);
10426             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10427             ret = get_errno(do_guest_readlink(p, p2, arg3));
10428             unlock_user(p2, arg2, ret);
10429             unlock_user(p, arg1, 0);
10430         }
10431         return ret;
10432 #endif
10433 #if defined(TARGET_NR_readlinkat)
10434     case TARGET_NR_readlinkat:
10435         {
10436             void *p2;
10437             p  = lock_user_string(arg2);
10438             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10439             if (!p || !p2) {
10440                 ret = -TARGET_EFAULT;
10441             } else if (!arg4) {
10442                 /* Short circuit this for the magic exe check. */
10443                 ret = -TARGET_EINVAL;
10444             } else if (is_proc_myself((const char *)p, "exe")) {
10445                 /*
10446                  * Don't worry about sign mismatch as earlier mapping
10447                  * logic would have thrown a bad address error.
10448                  */
10449                 ret = MIN(strlen(exec_path), arg4);
10450                 /* We cannot NUL terminate the string. */
10451                 memcpy(p2, exec_path, ret);
10452             } else {
10453                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10454             }
10455             unlock_user(p2, arg3, ret);
10456             unlock_user(p, arg2, 0);
10457         }
10458         return ret;
10459 #endif
10460 #ifdef TARGET_NR_swapon
10461     case TARGET_NR_swapon:
10462         if (!(p = lock_user_string(arg1)))
10463             return -TARGET_EFAULT;
10464         ret = get_errno(swapon(p, arg2));
10465         unlock_user(p, arg1, 0);
10466         return ret;
10467 #endif
10468     case TARGET_NR_reboot:
10469         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10470             /* arg4 is only used for RESTART2; ignore it in all other cases */
10471             p = lock_user_string(arg4);
10472             if (!p) {
10473                 return -TARGET_EFAULT;
10474             }
10475             ret = get_errno(reboot(arg1, arg2, arg3, p));
10476             unlock_user(p, arg4, 0);
10477         } else {
10478             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10479         }
10480         return ret;
10481 #ifdef TARGET_NR_mmap
10482     case TARGET_NR_mmap:
10483 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10484     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10485     defined(TARGET_M68K) || defined(TARGET_MICROBLAZE) || \
10486     defined(TARGET_S390X)
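        /*
         * These 32-bit ABIs pass a single pointer to a six-word argument
         * block in guest memory rather than six separate arguments, so
         * unpack the block before calling do_mmap().
         */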
10487         {
10488             abi_ulong *v;
10489             abi_ulong v1, v2, v3, v4, v5, v6;
10490             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10491                 return -TARGET_EFAULT;
10492             v1 = tswapal(v[0]);
10493             v2 = tswapal(v[1]);
10494             v3 = tswapal(v[2]);
10495             v4 = tswapal(v[3]);
10496             v5 = tswapal(v[4]);
10497             v6 = tswapal(v[5]);
10498             unlock_user(v, arg1, 0);
10499             return do_mmap(v1, v2, v3, v4, v5, v6);
10500         }
10501 #else
10502         /* mmap pointers are always untagged */
10503         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10504 #endif
10505 #endif
10506 #ifdef TARGET_NR_mmap2
10507     case TARGET_NR_mmap2:
10508 #ifndef MMAP_SHIFT
10509 #define MMAP_SHIFT 12
10510 #endif
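        /*
         * mmap2 takes its file offset in units of (1 << MMAP_SHIFT)-byte
         * pages (4096 by default), so scale it up to a byte offset here.
         */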
10511         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10512                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10513 #endif
10514     case TARGET_NR_munmap:
10515         arg1 = cpu_untagged_addr(cpu, arg1);
10516         return get_errno(target_munmap(arg1, arg2));
10517     case TARGET_NR_mprotect:
10518         arg1 = cpu_untagged_addr(cpu, arg1);
10519         {
10520             TaskState *ts = get_task_state(cpu);
10521             /* Special hack to detect libc making the stack executable.  */
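            /*
             * PROT_GROWSDOWN is not passed through to the host; instead it
             * is emulated by widening the range down to the recorded guest
             * stack limit and dropping the flag.
             */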
10522             if ((arg3 & PROT_GROWSDOWN)
10523                 && arg1 >= ts->info->stack_limit
10524                 && arg1 <= ts->info->start_stack) {
10525                 arg3 &= ~PROT_GROWSDOWN;
10526                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10527                 arg1 = ts->info->stack_limit;
10528             }
10529         }
10530         return get_errno(target_mprotect(arg1, arg2, arg3));
10531 #ifdef TARGET_NR_mremap
10532     case TARGET_NR_mremap:
10533         arg1 = cpu_untagged_addr(cpu, arg1);
10534         /* mremap new_addr (arg5) is always untagged */
10535         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10536 #endif
10537         /* ??? msync/mlock/munlock are broken for softmmu.  */
10538 #ifdef TARGET_NR_msync
10539     case TARGET_NR_msync:
10540         return get_errno(msync(g2h(cpu, arg1), arg2,
10541                                target_to_host_msync_arg(arg3)));
10542 #endif
10543 #ifdef TARGET_NR_mlock
10544     case TARGET_NR_mlock:
10545         return get_errno(mlock(g2h(cpu, arg1), arg2));
10546 #endif
10547 #ifdef TARGET_NR_munlock
10548     case TARGET_NR_munlock:
10549         return get_errno(munlock(g2h(cpu, arg1), arg2));
10550 #endif
10551 #ifdef TARGET_NR_mlockall
10552     case TARGET_NR_mlockall:
10553         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10554 #endif
10555 #ifdef TARGET_NR_munlockall
10556     case TARGET_NR_munlockall:
10557         return get_errno(munlockall());
10558 #endif
10559 #ifdef TARGET_NR_truncate
10560     case TARGET_NR_truncate:
10561         if (!(p = lock_user_string(arg1)))
10562             return -TARGET_EFAULT;
10563         ret = get_errno(truncate(p, arg2));
10564         unlock_user(p, arg1, 0);
10565         return ret;
10566 #endif
10567 #ifdef TARGET_NR_ftruncate
10568     case TARGET_NR_ftruncate:
10569         return get_errno(ftruncate(arg1, arg2));
10570 #endif
10571     case TARGET_NR_fchmod:
10572         return get_errno(fchmod(arg1, arg2));
10573 #if defined(TARGET_NR_fchmodat)
10574     case TARGET_NR_fchmodat:
10575         if (!(p = lock_user_string(arg2)))
10576             return -TARGET_EFAULT;
10577         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10578         unlock_user(p, arg2, 0);
10579         return ret;
10580 #endif
10581     case TARGET_NR_getpriority:
10582         /* Note that negative values are valid for getpriority, so we must
10583            differentiate based on errno settings.  */
10584         errno = 0;
10585         ret = getpriority(arg1, arg2);
10586         if (ret == -1 && errno != 0) {
10587             return -host_to_target_errno(errno);
10588         }
10589 #ifdef TARGET_ALPHA
10590         /* Return value is the unbiased priority.  Signal no error.  */
10591         cpu_env->ir[IR_V0] = 0;
10592 #else
10593         /* Return value is a biased priority to avoid negative numbers.  */
10594         ret = 20 - ret;
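        /*
         * This matches the raw kernel syscall convention: the value is
         * biased so that it is never negative, and the guest's libc is
         * expected to convert it back to the usual -20..19 nice range.
         */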
10595 #endif
10596         return ret;
10597     case TARGET_NR_setpriority:
10598         return get_errno(setpriority(arg1, arg2, arg3));
10599 #ifdef TARGET_NR_statfs
10600     case TARGET_NR_statfs:
10601         if (!(p = lock_user_string(arg1))) {
10602             return -TARGET_EFAULT;
10603         }
10604         ret = get_errno(statfs(path(p), &stfs));
10605         unlock_user(p, arg1, 0);
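    /* TARGET_NR_fstatfs joins here with ret and stfs already filled in. */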
10606     convert_statfs:
10607         if (!is_error(ret)) {
10608             struct target_statfs *target_stfs;
10609 
10610             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10611                 return -TARGET_EFAULT;
10612             __put_user(stfs.f_type, &target_stfs->f_type);
10613             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10614             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10615             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10616             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10617             __put_user(stfs.f_files, &target_stfs->f_files);
10618             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10619             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10620             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10621             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10622             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10623 #ifdef _STATFS_F_FLAGS
10624             __put_user(stfs.f_flags, &target_stfs->f_flags);
10625 #else
10626             __put_user(0, &target_stfs->f_flags);
10627 #endif
10628             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10629             unlock_user_struct(target_stfs, arg2, 1);
10630         }
10631         return ret;
10632 #endif
10633 #ifdef TARGET_NR_fstatfs
10634     case TARGET_NR_fstatfs:
10635         ret = get_errno(fstatfs(arg1, &stfs));
10636         goto convert_statfs;
10637 #endif
10638 #ifdef TARGET_NR_statfs64
10639     case TARGET_NR_statfs64:
10640         if (!(p = lock_user_string(arg1))) {
10641             return -TARGET_EFAULT;
10642         }
10643         ret = get_errno(statfs(path(p), &stfs));
10644         unlock_user(p, arg1, 0);
10645     convert_statfs64:
10646         if (!is_error(ret)) {
10647             struct target_statfs64 *target_stfs;
10648 
10649             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10650                 return -TARGET_EFAULT;
10651             __put_user(stfs.f_type, &target_stfs->f_type);
10652             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10653             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10654             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10655             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10656             __put_user(stfs.f_files, &target_stfs->f_files);
10657             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10658             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10659             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10660             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10661             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10662 #ifdef _STATFS_F_FLAGS
10663             __put_user(stfs.f_flags, &target_stfs->f_flags);
10664 #else
10665             __put_user(0, &target_stfs->f_flags);
10666 #endif
10667             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10668             unlock_user_struct(target_stfs, arg3, 1);
10669         }
10670         return ret;
10671     case TARGET_NR_fstatfs64:
10672         ret = get_errno(fstatfs(arg1, &stfs));
10673         goto convert_statfs64;
10674 #endif
10675 #ifdef TARGET_NR_socketcall
10676     case TARGET_NR_socketcall:
10677         return do_socketcall(arg1, arg2);
10678 #endif
10679 #ifdef TARGET_NR_accept
10680     case TARGET_NR_accept:
10681         return do_accept4(arg1, arg2, arg3, 0);
10682 #endif
10683 #ifdef TARGET_NR_accept4
10684     case TARGET_NR_accept4:
10685         return do_accept4(arg1, arg2, arg3, arg4);
10686 #endif
10687 #ifdef TARGET_NR_bind
10688     case TARGET_NR_bind:
10689         return do_bind(arg1, arg2, arg3);
10690 #endif
10691 #ifdef TARGET_NR_connect
10692     case TARGET_NR_connect:
10693         return do_connect(arg1, arg2, arg3);
10694 #endif
10695 #ifdef TARGET_NR_getpeername
10696     case TARGET_NR_getpeername:
10697         return do_getpeername(arg1, arg2, arg3);
10698 #endif
10699 #ifdef TARGET_NR_getsockname
10700     case TARGET_NR_getsockname:
10701         return do_getsockname(arg1, arg2, arg3);
10702 #endif
10703 #ifdef TARGET_NR_getsockopt
10704     case TARGET_NR_getsockopt:
10705         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10706 #endif
10707 #ifdef TARGET_NR_listen
10708     case TARGET_NR_listen:
10709         return get_errno(listen(arg1, arg2));
10710 #endif
10711 #ifdef TARGET_NR_recv
10712     case TARGET_NR_recv:
10713         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10714 #endif
10715 #ifdef TARGET_NR_recvfrom
10716     case TARGET_NR_recvfrom:
10717         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10718 #endif
10719 #ifdef TARGET_NR_recvmsg
10720     case TARGET_NR_recvmsg:
10721         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10722 #endif
10723 #ifdef TARGET_NR_send
10724     case TARGET_NR_send:
10725         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10726 #endif
10727 #ifdef TARGET_NR_sendmsg
10728     case TARGET_NR_sendmsg:
10729         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10730 #endif
10731 #ifdef TARGET_NR_sendmmsg
10732     case TARGET_NR_sendmmsg:
10733         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10734 #endif
10735 #ifdef TARGET_NR_recvmmsg
10736     case TARGET_NR_recvmmsg:
10737         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10738 #endif
10739 #ifdef TARGET_NR_sendto
10740     case TARGET_NR_sendto:
10741         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10742 #endif
10743 #ifdef TARGET_NR_shutdown
10744     case TARGET_NR_shutdown:
10745         return get_errno(shutdown(arg1, arg2));
10746 #endif
10747 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10748     case TARGET_NR_getrandom:
10749         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10750         if (!p) {
10751             return -TARGET_EFAULT;
10752         }
10753         ret = get_errno(getrandom(p, arg2, arg3));
10754         unlock_user(p, arg1, ret);
10755         return ret;
10756 #endif
10757 #ifdef TARGET_NR_socket
10758     case TARGET_NR_socket:
10759         return do_socket(arg1, arg2, arg3);
10760 #endif
10761 #ifdef TARGET_NR_socketpair
10762     case TARGET_NR_socketpair:
10763         return do_socketpair(arg1, arg2, arg3, arg4);
10764 #endif
10765 #ifdef TARGET_NR_setsockopt
10766     case TARGET_NR_setsockopt:
10767         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10768 #endif
10769 #if defined(TARGET_NR_syslog)
10770     case TARGET_NR_syslog:
10771         {
10772             int len = arg3;
10773 
10774             switch (arg1) {
10775             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10776             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10777             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10778             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10779             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10780             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10781             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10782             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10783                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10784             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10785             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10786             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10787                 {
10788                     if (len < 0) {
10789                         return -TARGET_EINVAL;
10790                     }
10791                     if (len == 0) {
10792                         return 0;
10793                     }
10794                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10795                     if (!p) {
10796                         return -TARGET_EFAULT;
10797                     }
10798                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10799                     unlock_user(p, arg2, arg3);
10800                 }
10801                 return ret;
10802             default:
10803                 return -TARGET_EINVAL;
10804             }
10805         }
10806         break;
10807 #endif
10808     case TARGET_NR_setitimer:
10809         {
10810             struct itimerval value, ovalue, *pvalue;
10811 
10812             if (arg2) {
10813                 pvalue = &value;
10814                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10815                     || copy_from_user_timeval(&pvalue->it_value,
10816                                               arg2 + sizeof(struct target_timeval)))
10817                     return -TARGET_EFAULT;
10818             } else {
10819                 pvalue = NULL;
10820             }
10821             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10822             if (!is_error(ret) && arg3) {
10823                 if (copy_to_user_timeval(arg3,
10824                                          &ovalue.it_interval)
10825                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10826                                             &ovalue.it_value))
10827                     return -TARGET_EFAULT;
10828             }
10829         }
10830         return ret;
10831     case TARGET_NR_getitimer:
10832         {
10833             struct itimerval value;
10834 
10835             ret = get_errno(getitimer(arg1, &value));
10836             if (!is_error(ret) && arg2) {
10837                 if (copy_to_user_timeval(arg2,
10838                                          &value.it_interval)
10839                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10840                                             &value.it_value))
10841                     return -TARGET_EFAULT;
10842             }
10843         }
10844         return ret;
10845 #ifdef TARGET_NR_stat
10846     case TARGET_NR_stat:
10847         if (!(p = lock_user_string(arg1))) {
10848             return -TARGET_EFAULT;
10849         }
10850         ret = get_errno(stat(path(p), &st));
10851         unlock_user(p, arg1, 0);
10852         goto do_stat;
10853 #endif
10854 #ifdef TARGET_NR_lstat
10855     case TARGET_NR_lstat:
10856         if (!(p = lock_user_string(arg1))) {
10857             return -TARGET_EFAULT;
10858         }
10859         ret = get_errno(lstat(path(p), &st));
10860         unlock_user(p, arg1, 0);
10861         goto do_stat;
10862 #endif
10863 #ifdef TARGET_NR_fstat
10864     case TARGET_NR_fstat:
10865         {
10866             ret = get_errno(fstat(arg1, &st));
10867 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10868         do_stat:
10869 #endif
10870             if (!is_error(ret)) {
10871                 struct target_stat *target_st;
10872 
10873                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10874                     return -TARGET_EFAULT;
10875                 memset(target_st, 0, sizeof(*target_st));
10876                 __put_user(st.st_dev, &target_st->st_dev);
10877                 __put_user(st.st_ino, &target_st->st_ino);
10878                 __put_user(st.st_mode, &target_st->st_mode);
10879                 __put_user(st.st_uid, &target_st->st_uid);
10880                 __put_user(st.st_gid, &target_st->st_gid);
10881                 __put_user(st.st_nlink, &target_st->st_nlink);
10882                 __put_user(st.st_rdev, &target_st->st_rdev);
10883                 __put_user(st.st_size, &target_st->st_size);
10884                 __put_user(st.st_blksize, &target_st->st_blksize);
10885                 __put_user(st.st_blocks, &target_st->st_blocks);
10886                 __put_user(st.st_atime, &target_st->target_st_atime);
10887                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10888                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10889 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10890                 __put_user(st.st_atim.tv_nsec,
10891                            &target_st->target_st_atime_nsec);
10892                 __put_user(st.st_mtim.tv_nsec,
10893                            &target_st->target_st_mtime_nsec);
10894                 __put_user(st.st_ctim.tv_nsec,
10895                            &target_st->target_st_ctime_nsec);
10896 #endif
10897                 unlock_user_struct(target_st, arg2, 1);
10898             }
10899         }
10900         return ret;
10901 #endif
10902     case TARGET_NR_vhangup:
10903         return get_errno(vhangup());
10904 #ifdef TARGET_NR_syscall
10905     case TARGET_NR_syscall:
10906         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10907                           arg6, arg7, arg8, 0);
10908 #endif
10909 #if defined(TARGET_NR_wait4)
10910     case TARGET_NR_wait4:
10911         {
10912             int status;
10913             abi_long status_ptr = arg2;
10914             struct rusage rusage, *rusage_ptr;
10915             abi_ulong target_rusage = arg4;
10916             abi_long rusage_err;
10917             if (target_rusage)
10918                 rusage_ptr = &rusage;
10919             else
10920                 rusage_ptr = NULL;
10921             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10922             if (!is_error(ret)) {
10923                 if (status_ptr && ret) {
10924                     status = host_to_target_waitstatus(status);
10925                     if (put_user_s32(status, status_ptr))
10926                         return -TARGET_EFAULT;
10927                 }
10928                 if (target_rusage) {
10929                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10930                     if (rusage_err) {
10931                         ret = rusage_err;
10932                     }
10933                 }
10934             }
10935         }
10936         return ret;
10937 #endif
10938 #ifdef TARGET_NR_swapoff
10939     case TARGET_NR_swapoff:
10940         if (!(p = lock_user_string(arg1)))
10941             return -TARGET_EFAULT;
10942         ret = get_errno(swapoff(p));
10943         unlock_user(p, arg1, 0);
10944         return ret;
10945 #endif
10946     case TARGET_NR_sysinfo:
10947         {
10948             struct target_sysinfo *target_value;
10949             struct sysinfo value;
10950             ret = get_errno(sysinfo(&value));
10951             if (!is_error(ret) && arg1)
10952             {
10953                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10954                     return -TARGET_EFAULT;
10955                 __put_user(value.uptime, &target_value->uptime);
10956                 __put_user(value.loads[0], &target_value->loads[0]);
10957                 __put_user(value.loads[1], &target_value->loads[1]);
10958                 __put_user(value.loads[2], &target_value->loads[2]);
10959                 __put_user(value.totalram, &target_value->totalram);
10960                 __put_user(value.freeram, &target_value->freeram);
10961                 __put_user(value.sharedram, &target_value->sharedram);
10962                 __put_user(value.bufferram, &target_value->bufferram);
10963                 __put_user(value.totalswap, &target_value->totalswap);
10964                 __put_user(value.freeswap, &target_value->freeswap);
10965                 __put_user(value.procs, &target_value->procs);
10966                 __put_user(value.totalhigh, &target_value->totalhigh);
10967                 __put_user(value.freehigh, &target_value->freehigh);
10968                 __put_user(value.mem_unit, &target_value->mem_unit);
10969                 unlock_user_struct(target_value, arg1, 1);
10970             }
10971         }
10972         return ret;
10973 #ifdef TARGET_NR_ipc
10974     case TARGET_NR_ipc:
10975         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10976 #endif
10977 #ifdef TARGET_NR_semget
10978     case TARGET_NR_semget:
10979         return get_errno(semget(arg1, arg2, arg3));
10980 #endif
10981 #ifdef TARGET_NR_semop
10982     case TARGET_NR_semop:
10983         return do_semtimedop(arg1, arg2, arg3, 0, false);
10984 #endif
10985 #ifdef TARGET_NR_semtimedop
10986     case TARGET_NR_semtimedop:
10987         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10988 #endif
10989 #ifdef TARGET_NR_semtimedop_time64
10990     case TARGET_NR_semtimedop_time64:
10991         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10992 #endif
10993 #ifdef TARGET_NR_semctl
10994     case TARGET_NR_semctl:
10995         return do_semctl(arg1, arg2, arg3, arg4);
10996 #endif
10997 #ifdef TARGET_NR_msgctl
10998     case TARGET_NR_msgctl:
10999         return do_msgctl(arg1, arg2, arg3);
11000 #endif
11001 #ifdef TARGET_NR_msgget
11002     case TARGET_NR_msgget:
11003         return get_errno(msgget(arg1, arg2));
11004 #endif
11005 #ifdef TARGET_NR_msgrcv
11006     case TARGET_NR_msgrcv:
11007         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11008 #endif
11009 #ifdef TARGET_NR_msgsnd
11010     case TARGET_NR_msgsnd:
11011         return do_msgsnd(arg1, arg2, arg3, arg4);
11012 #endif
11013 #ifdef TARGET_NR_shmget
11014     case TARGET_NR_shmget:
11015         return get_errno(shmget(arg1, arg2, arg3));
11016 #endif
11017 #ifdef TARGET_NR_shmctl
11018     case TARGET_NR_shmctl:
11019         return do_shmctl(arg1, arg2, arg3);
11020 #endif
11021 #ifdef TARGET_NR_shmat
11022     case TARGET_NR_shmat:
11023         return target_shmat(cpu_env, arg1, arg2, arg3);
11024 #endif
11025 #ifdef TARGET_NR_shmdt
11026     case TARGET_NR_shmdt:
11027         return target_shmdt(arg1);
11028 #endif
11029     case TARGET_NR_fsync:
11030         return get_errno(fsync(arg1));
11031     case TARGET_NR_clone:
11032         /* Linux manages to have three different orderings for its
11033          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11034          * match the kernel's CONFIG_CLONE_* settings.
11035          * Microblaze is further special in that it uses a sixth
11036          * implicit argument to clone for the TLS pointer.
11037          */
11038 #if defined(TARGET_MICROBLAZE)
11039         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11040 #elif defined(TARGET_CLONE_BACKWARDS)
11041         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11042 #elif defined(TARGET_CLONE_BACKWARDS2)
11043         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11044 #else
11045         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11046 #endif
11047         return ret;
11048 #ifdef __NR_exit_group
11049         /* new thread calls */
11050     case TARGET_NR_exit_group:
11051         preexit_cleanup(cpu_env, arg1);
11052         return get_errno(exit_group(arg1));
11053 #endif
11054     case TARGET_NR_setdomainname:
11055         if (!(p = lock_user_string(arg1)))
11056             return -TARGET_EFAULT;
11057         ret = get_errno(setdomainname(p, arg2));
11058         unlock_user(p, arg1, 0);
11059         return ret;
11060     case TARGET_NR_uname:
11061         /* no need to convert the result: we invoke the Linux syscall directly */
11062         {
11063             struct new_utsname * buf;
11064 
11065             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11066                 return -TARGET_EFAULT;
11067             ret = get_errno(sys_uname(buf));
11068             if (!is_error(ret)) {
11069                 /* Overwrite the native machine name with whatever is being
11070                    emulated. */
11071                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11072                           sizeof(buf->machine));
11073                 /* Allow the user to override the reported release.  */
11074                 if (qemu_uname_release && *qemu_uname_release) {
11075                     g_strlcpy(buf->release, qemu_uname_release,
11076                               sizeof(buf->release));
11077                 }
11078             }
11079             unlock_user_struct(buf, arg1, 1);
11080         }
11081         return ret;
11082 #ifdef TARGET_I386
11083     case TARGET_NR_modify_ldt:
11084         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11085 #if !defined(TARGET_X86_64)
11086     case TARGET_NR_vm86:
11087         return do_vm86(cpu_env, arg1, arg2);
11088 #endif
11089 #endif
11090 #if defined(TARGET_NR_adjtimex)
11091     case TARGET_NR_adjtimex:
11092         {
11093             struct timex host_buf;
11094 
11095             if (target_to_host_timex(&host_buf, arg1) != 0) {
11096                 return -TARGET_EFAULT;
11097             }
11098             ret = get_errno(adjtimex(&host_buf));
11099             if (!is_error(ret)) {
11100                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11101                     return -TARGET_EFAULT;
11102                 }
11103             }
11104         }
11105         return ret;
11106 #endif
11107 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11108     case TARGET_NR_clock_adjtime:
11109         {
11110             struct timex htx;
11111 
11112             if (target_to_host_timex(&htx, arg2) != 0) {
11113                 return -TARGET_EFAULT;
11114             }
11115             ret = get_errno(clock_adjtime(arg1, &htx));
11116             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11117                 return -TARGET_EFAULT;
11118             }
11119         }
11120         return ret;
11121 #endif
11122 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11123     case TARGET_NR_clock_adjtime64:
11124         {
11125             struct timex htx;
11126 
11127             if (target_to_host_timex64(&htx, arg2) != 0) {
11128                 return -TARGET_EFAULT;
11129             }
11130             ret = get_errno(clock_adjtime(arg1, &htx));
11131             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11132                 return -TARGET_EFAULT;
11133             }
11134         }
11135         return ret;
11136 #endif
11137     case TARGET_NR_getpgid:
11138         return get_errno(getpgid(arg1));
11139     case TARGET_NR_fchdir:
11140         return get_errno(fchdir(arg1));
11141     case TARGET_NR_personality:
11142         return get_errno(personality(arg1));
11143 #ifdef TARGET_NR__llseek /* Not on alpha */
11144     case TARGET_NR__llseek:
11145         {
11146             int64_t res;
11147 #if !defined(__NR_llseek)
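            /*
             * Hosts without __NR_llseek (64-bit hosts) have a full 64-bit
             * lseek(), so combine the two halves and call it directly.
             */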
11148             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11149             if (res == -1) {
11150                 ret = get_errno(res);
11151             } else {
11152                 ret = 0;
11153             }
11154 #else
11155             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11156 #endif
11157             if ((ret == 0) && put_user_s64(res, arg4)) {
11158                 return -TARGET_EFAULT;
11159             }
11160         }
11161         return ret;
11162 #endif
11163 #ifdef TARGET_NR_getdents
11164     case TARGET_NR_getdents:
11165         return do_getdents(arg1, arg2, arg3);
11166 #endif /* TARGET_NR_getdents */
11167 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11168     case TARGET_NR_getdents64:
11169         return do_getdents64(arg1, arg2, arg3);
11170 #endif /* TARGET_NR_getdents64 */
11171 #if defined(TARGET_NR__newselect)
11172     case TARGET_NR__newselect:
11173         return do_select(arg1, arg2, arg3, arg4, arg5);
11174 #endif
11175 #ifdef TARGET_NR_poll
11176     case TARGET_NR_poll:
11177         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11178 #endif
11179 #ifdef TARGET_NR_ppoll
11180     case TARGET_NR_ppoll:
11181         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11182 #endif
11183 #ifdef TARGET_NR_ppoll_time64
11184     case TARGET_NR_ppoll_time64:
11185         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11186 #endif
11187     case TARGET_NR_flock:
11188         /* NOTE: the flock operation constants appear to be the same on
11189            every Linux platform, so arg2 can be passed through unchanged */
11190         return get_errno(safe_flock(arg1, arg2));
11191     case TARGET_NR_readv:
11192         {
11193             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11194             if (vec != NULL) {
11195                 ret = get_errno(safe_readv(arg1, vec, arg3));
11196                 unlock_iovec(vec, arg2, arg3, 1);
11197             } else {
11198                 ret = -host_to_target_errno(errno);
11199             }
11200         }
11201         return ret;
11202     case TARGET_NR_writev:
11203         {
11204             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11205             if (vec != NULL) {
11206                 ret = get_errno(safe_writev(arg1, vec, arg3));
11207                 unlock_iovec(vec, arg2, arg3, 0);
11208             } else {
11209                 ret = -host_to_target_errno(errno);
11210             }
11211         }
11212         return ret;
11213 #if defined(TARGET_NR_preadv)
11214     case TARGET_NR_preadv:
11215         {
11216             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11217             if (vec != NULL) {
11218                 unsigned long low, high;
11219 
11220                 target_to_host_low_high(arg4, arg5, &low, &high);
11221                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11222                 unlock_iovec(vec, arg2, arg3, 1);
11223             } else {
11224                 ret = -host_to_target_errno(errno);
11225             }
11226         }
11227         return ret;
11228 #endif
11229 #if defined(TARGET_NR_pwritev)
11230     case TARGET_NR_pwritev:
11231         {
11232             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11233             if (vec != NULL) {
11234                 unsigned long low, high;
11235 
11236                 target_to_host_low_high(arg4, arg5, &low, &high);
11237                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11238                 unlock_iovec(vec, arg2, arg3, 0);
11239             } else {
11240                 ret = -host_to_target_errno(errno);
11241             }
11242         }
11243         return ret;
11244 #endif
11245     case TARGET_NR_getsid:
11246         return get_errno(getsid(arg1));
11247 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11248     case TARGET_NR_fdatasync:
11249         return get_errno(fdatasync(arg1));
11250 #endif
11251     case TARGET_NR_sched_getaffinity:
11252         {
11253             unsigned int mask_size;
11254             unsigned long *mask;
11255 
11256             /*
11257              * sched_getaffinity needs multiples of ulong, so need to take
11258              * care of mismatches between target ulong and host ulong sizes.
11259              */
11260             if (arg2 & (sizeof(abi_ulong) - 1)) {
11261                 return -TARGET_EINVAL;
11262             }
11263             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
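            /* i.e. arg2 rounded up to a whole number of host unsigned longs */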
11264 
11265             mask = alloca(mask_size);
11266             memset(mask, 0, mask_size);
11267             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11268 
11269             if (!is_error(ret)) {
11270                 if (ret > arg2) {
11271                     /* More data was returned than will fit in the caller's buffer.
11272                      * This only happens if sizeof(abi_long) < sizeof(long)
11273                      * and the caller passed us a buffer holding an odd number
11274                      * of abi_longs. If the host kernel is actually using the
11275                      * extra 4 bytes then fail EINVAL; otherwise we can just
11276                      * ignore them and only copy the interesting part.
11277                      */
11278                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11279                     if (numcpus > arg2 * 8) {
11280                         return -TARGET_EINVAL;
11281                     }
11282                     ret = arg2;
11283                 }
11284 
11285                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11286                     return -TARGET_EFAULT;
11287                 }
11288             }
11289         }
11290         return ret;
11291     case TARGET_NR_sched_setaffinity:
11292         {
11293             unsigned int mask_size;
11294             unsigned long *mask;
11295 
11296             /*
11297              * sched_setaffinity needs multiples of ulong, so need to take
11298              * care of mismatches between target ulong and host ulong sizes.
11299              */
11300             if (arg2 & (sizeof(abi_ulong) - 1)) {
11301                 return -TARGET_EINVAL;
11302             }
11303             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11304             mask = alloca(mask_size);
11305 
11306             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11307             if (ret) {
11308                 return ret;
11309             }
11310 
11311             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11312         }
11313     case TARGET_NR_getcpu:
11314         {
11315             unsigned cpuid, node;
11316             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11317                                        arg2 ? &node : NULL,
11318                                        NULL));
11319             if (is_error(ret)) {
11320                 return ret;
11321             }
11322             if (arg1 && put_user_u32(cpuid, arg1)) {
11323                 return -TARGET_EFAULT;
11324             }
11325             if (arg2 && put_user_u32(node, arg2)) {
11326                 return -TARGET_EFAULT;
11327             }
11328         }
11329         return ret;
11330     case TARGET_NR_sched_setparam:
11331         {
11332             struct target_sched_param *target_schp;
11333             struct sched_param schp;
11334 
11335             if (arg2 == 0) {
11336                 return -TARGET_EINVAL;
11337             }
11338             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11339                 return -TARGET_EFAULT;
11340             }
11341             schp.sched_priority = tswap32(target_schp->sched_priority);
11342             unlock_user_struct(target_schp, arg2, 0);
11343             return get_errno(sys_sched_setparam(arg1, &schp));
11344         }
11345     case TARGET_NR_sched_getparam:
11346         {
11347             struct target_sched_param *target_schp;
11348             struct sched_param schp;
11349 
11350             if (arg2 == 0) {
11351                 return -TARGET_EINVAL;
11352             }
11353             ret = get_errno(sys_sched_getparam(arg1, &schp));
11354             if (!is_error(ret)) {
11355                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11356                     return -TARGET_EFAULT;
11357                 }
11358                 target_schp->sched_priority = tswap32(schp.sched_priority);
11359                 unlock_user_struct(target_schp, arg2, 1);
11360             }
11361         }
11362         return ret;
11363     case TARGET_NR_sched_setscheduler:
11364         {
11365             struct target_sched_param *target_schp;
11366             struct sched_param schp;
11367             if (arg3 == 0) {
11368                 return -TARGET_EINVAL;
11369             }
11370             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11371                 return -TARGET_EFAULT;
11372             }
11373             schp.sched_priority = tswap32(target_schp->sched_priority);
11374             unlock_user_struct(target_schp, arg3, 0);
11375             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11376         }
11377     case TARGET_NR_sched_getscheduler:
11378         return get_errno(sys_sched_getscheduler(arg1));
11379     case TARGET_NR_sched_getattr:
11380         {
11381             struct target_sched_attr *target_scha;
11382             struct sched_attr scha;
11383             if (arg2 == 0) {
11384                 return -TARGET_EINVAL;
11385             }
11386             if (arg3 > sizeof(scha)) {
11387                 arg3 = sizeof(scha);
11388             }
11389             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11390             if (!is_error(ret)) {
11391                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11392                 if (!target_scha) {
11393                     return -TARGET_EFAULT;
11394                 }
11395                 target_scha->size = tswap32(scha.size);
11396                 target_scha->sched_policy = tswap32(scha.sched_policy);
11397                 target_scha->sched_flags = tswap64(scha.sched_flags);
11398                 target_scha->sched_nice = tswap32(scha.sched_nice);
11399                 target_scha->sched_priority = tswap32(scha.sched_priority);
11400                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11401                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11402                 target_scha->sched_period = tswap64(scha.sched_period);
11403                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11404                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11405                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11406                 }
11407                 unlock_user(target_scha, arg2, arg3);
11408             }
11409             return ret;
11410         }
11411     case TARGET_NR_sched_setattr:
11412         {
11413             struct target_sched_attr *target_scha;
11414             struct sched_attr scha;
11415             uint32_t size;
11416             int zeroed;
11417             if (arg2 == 0) {
11418                 return -TARGET_EINVAL;
11419             }
11420             if (get_user_u32(size, arg2)) {
11421                 return -TARGET_EFAULT;
11422             }
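            /*
             * Size negotiation: a zero size means the original struct
             * sched_attr layout, a smaller size is rejected with E2BIG,
             * and any bytes beyond the fields we understand must be zero
             * (again E2BIG if they are not).
             */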
11423             if (!size) {
11424                 size = offsetof(struct target_sched_attr, sched_util_min);
11425             }
11426             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11427                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11428                     return -TARGET_EFAULT;
11429                 }
11430                 return -TARGET_E2BIG;
11431             }
11432 
11433             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11434             if (zeroed < 0) {
11435                 return zeroed;
11436             } else if (zeroed == 0) {
11437                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11438                     return -TARGET_EFAULT;
11439                 }
11440                 return -TARGET_E2BIG;
11441             }
11442             if (size > sizeof(struct target_sched_attr)) {
11443                 size = sizeof(struct target_sched_attr);
11444             }
11445 
11446             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11447             if (!target_scha) {
11448                 return -TARGET_EFAULT;
11449             }
11450             scha.size = size;
11451             scha.sched_policy = tswap32(target_scha->sched_policy);
11452             scha.sched_flags = tswap64(target_scha->sched_flags);
11453             scha.sched_nice = tswap32(target_scha->sched_nice);
11454             scha.sched_priority = tswap32(target_scha->sched_priority);
11455             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11456             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11457             scha.sched_period = tswap64(target_scha->sched_period);
11458             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11459                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11460                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11461             }
11462             unlock_user(target_scha, arg2, 0);
11463             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11464         }
11465     case TARGET_NR_sched_yield:
11466         return get_errno(sched_yield());
11467     case TARGET_NR_sched_get_priority_max:
11468         return get_errno(sched_get_priority_max(arg1));
11469     case TARGET_NR_sched_get_priority_min:
11470         return get_errno(sched_get_priority_min(arg1));
11471 #ifdef TARGET_NR_sched_rr_get_interval
11472     case TARGET_NR_sched_rr_get_interval:
11473         {
11474             struct timespec ts;
11475             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11476             if (!is_error(ret)) {
11477                 ret = host_to_target_timespec(arg2, &ts);
11478             }
11479         }
11480         return ret;
11481 #endif
11482 #ifdef TARGET_NR_sched_rr_get_interval_time64
11483     case TARGET_NR_sched_rr_get_interval_time64:
11484         {
11485             struct timespec ts;
11486             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11487             if (!is_error(ret)) {
11488                 ret = host_to_target_timespec64(arg2, &ts);
11489             }
11490         }
11491         return ret;
11492 #endif
11493 #if defined(TARGET_NR_nanosleep)
11494     case TARGET_NR_nanosleep:
11495         {
11496             struct timespec req, rem;
11497             target_to_host_timespec(&req, arg1);
11498             ret = get_errno(safe_nanosleep(&req, &rem));
11499             if (is_error(ret) && arg2) {
11500                 host_to_target_timespec(arg2, &rem);
11501             }
11502         }
11503         return ret;
11504 #endif
11505     case TARGET_NR_prctl:
11506         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11508 #ifdef TARGET_NR_arch_prctl
11509     case TARGET_NR_arch_prctl:
11510         return do_arch_prctl(cpu_env, arg1, arg2);
11511 #endif
11512 #ifdef TARGET_NR_pread64
11513     case TARGET_NR_pread64:
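        /*
         * regpairs_aligned() reports ABIs that pass 64-bit values in
         * aligned register pairs; there the offset halves arrive one
         * argument slot later, so shift them down first.
         */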
11514         if (regpairs_aligned(cpu_env, num)) {
11515             arg4 = arg5;
11516             arg5 = arg6;
11517         }
11518         if (arg2 == 0 && arg3 == 0) {
11519             /* Special-case NULL buffer and zero length, which should succeed */
11520             p = 0;
11521         } else {
11522             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11523             if (!p) {
11524                 return -TARGET_EFAULT;
11525             }
11526         }
11527         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11528         unlock_user(p, arg2, ret);
11529         return ret;
11530     case TARGET_NR_pwrite64:
11531         if (regpairs_aligned(cpu_env, num)) {
11532             arg4 = arg5;
11533             arg5 = arg6;
11534         }
11535         if (arg2 == 0 && arg3 == 0) {
11536             /* Special-case NULL buffer and zero length, which should succeed */
11537             p = 0;
11538         } else {
11539             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11540             if (!p) {
11541                 return -TARGET_EFAULT;
11542             }
11543         }
11544         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11545         unlock_user(p, arg2, 0);
11546         return ret;
11547 #endif
11548     case TARGET_NR_getcwd:
11549         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11550             return -TARGET_EFAULT;
11551         ret = get_errno(sys_getcwd1(p, arg2));
11552         unlock_user(p, arg1, ret);
11553         return ret;
11554     case TARGET_NR_capget:
11555     case TARGET_NR_capset:
11556     {
11557         struct target_user_cap_header *target_header;
11558         struct target_user_cap_data *target_data = NULL;
11559         struct __user_cap_header_struct header;
11560         struct __user_cap_data_struct data[2];
11561         struct __user_cap_data_struct *dataptr = NULL;
11562         int i, target_datalen;
11563         int data_items = 1;
11564 
11565         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11566             return -TARGET_EFAULT;
11567         }
11568         header.version = tswap32(target_header->version);
11569         header.pid = tswap32(target_header->pid);
11570 
11571         if (header.version != _LINUX_CAPABILITY_VERSION) {
11572             /* Versions 2 and up take a pointer to two user_cap_data structs */
11573             data_items = 2;
11574         }
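        /*
         * With version 2 and later the 64-bit capability sets are split
         * across two 32-bit user_cap_data entries, hence two data items.
         */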
11575 
11576         target_datalen = sizeof(*target_data) * data_items;
11577 
11578         if (arg2) {
11579             if (num == TARGET_NR_capget) {
11580                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11581             } else {
11582                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11583             }
11584             if (!target_data) {
11585                 unlock_user_struct(target_header, arg1, 0);
11586                 return -TARGET_EFAULT;
11587             }
11588 
11589             if (num == TARGET_NR_capset) {
11590                 for (i = 0; i < data_items; i++) {
11591                     data[i].effective = tswap32(target_data[i].effective);
11592                     data[i].permitted = tswap32(target_data[i].permitted);
11593                     data[i].inheritable = tswap32(target_data[i].inheritable);
11594                 }
11595             }
11596 
11597             dataptr = data;
11598         }
11599 
11600         if (num == TARGET_NR_capget) {
11601             ret = get_errno(capget(&header, dataptr));
11602         } else {
11603             ret = get_errno(capset(&header, dataptr));
11604         }
11605 
11606         /* The kernel always updates version for both capget and capset */
11607         target_header->version = tswap32(header.version);
11608         unlock_user_struct(target_header, arg1, 1);
11609 
11610         if (arg2) {
11611             if (num == TARGET_NR_capget) {
11612                 for (i = 0; i < data_items; i++) {
11613                     target_data[i].effective = tswap32(data[i].effective);
11614                     target_data[i].permitted = tswap32(data[i].permitted);
11615                     target_data[i].inheritable = tswap32(data[i].inheritable);
11616                 }
11617                 unlock_user(target_data, arg2, target_datalen);
11618             } else {
11619                 unlock_user(target_data, arg2, 0);
11620             }
11621         }
11622         return ret;
11623     }
11624     case TARGET_NR_sigaltstack:
11625         return do_sigaltstack(arg1, arg2, cpu_env);
11626 
11627 #ifdef CONFIG_SENDFILE
11628 #ifdef TARGET_NR_sendfile
11629     case TARGET_NR_sendfile:
11630     {
11631         off_t *offp = NULL;
11632         off_t off;
11633         if (arg3) {
11634             ret = get_user_sal(off, arg3);
11635             if (is_error(ret)) {
11636                 return ret;
11637             }
11638             offp = &off;
11639         }
11640         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11641         if (!is_error(ret) && arg3) {
11642             abi_long ret2 = put_user_sal(off, arg3);
11643             if (is_error(ret2)) {
11644                 ret = ret2;
11645             }
11646         }
11647         return ret;
11648     }
11649 #endif
11650 #ifdef TARGET_NR_sendfile64
11651     case TARGET_NR_sendfile64:
11652     {
11653         off_t *offp = NULL;
11654         off_t off;
11655         if (arg3) {
11656             ret = get_user_s64(off, arg3);
11657             if (is_error(ret)) {
11658                 return ret;
11659             }
11660             offp = &off;
11661         }
11662         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11663         if (!is_error(ret) && arg3) {
11664             abi_long ret2 = put_user_s64(off, arg3);
11665             if (is_error(ret2)) {
11666                 ret = ret2;
11667             }
11668         }
11669         return ret;
11670     }
11671 #endif
11672 #endif
11673 #ifdef TARGET_NR_vfork
11674     case TARGET_NR_vfork:
11675         return get_errno(do_fork(cpu_env,
11676                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11677                          0, 0, 0, 0));
11678 #endif
11679 #ifdef TARGET_NR_ugetrlimit
11680     case TARGET_NR_ugetrlimit:
11681     {
11682         struct rlimit rlim;
11683         int resource = target_to_host_resource(arg1);
11684         ret = get_errno(getrlimit(resource, &rlim));
11685         if (!is_error(ret)) {
11686             struct target_rlimit *target_rlim;
11687             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11688                 return -TARGET_EFAULT;
11689             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11690             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11691             unlock_user_struct(target_rlim, arg2, 1);
11692         }
11693         return ret;
11694     }
11695 #endif
11696 #ifdef TARGET_NR_truncate64
11697     case TARGET_NR_truncate64:
11698         if (!(p = lock_user_string(arg1)))
11699             return -TARGET_EFAULT;
11700         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11701         unlock_user(p, arg1, 0);
11702         return ret;
11703 #endif
11704 #ifdef TARGET_NR_ftruncate64
11705     case TARGET_NR_ftruncate64:
11706         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11707 #endif
11708 #ifdef TARGET_NR_stat64
11709     case TARGET_NR_stat64:
11710         if (!(p = lock_user_string(arg1))) {
11711             return -TARGET_EFAULT;
11712         }
11713         ret = get_errno(stat(path(p), &st));
11714         unlock_user(p, arg1, 0);
11715         if (!is_error(ret))
11716             ret = host_to_target_stat64(cpu_env, arg2, &st);
11717         return ret;
11718 #endif
11719 #ifdef TARGET_NR_lstat64
11720     case TARGET_NR_lstat64:
11721         if (!(p = lock_user_string(arg1))) {
11722             return -TARGET_EFAULT;
11723         }
11724         ret = get_errno(lstat(path(p), &st));
11725         unlock_user(p, arg1, 0);
11726         if (!is_error(ret))
11727             ret = host_to_target_stat64(cpu_env, arg2, &st);
11728         return ret;
11729 #endif
11730 #ifdef TARGET_NR_fstat64
11731     case TARGET_NR_fstat64:
11732         ret = get_errno(fstat(arg1, &st));
11733         if (!is_error(ret))
11734             ret = host_to_target_stat64(cpu_env, arg2, &st);
11735         return ret;
11736 #endif
11737 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11738 #ifdef TARGET_NR_fstatat64
11739     case TARGET_NR_fstatat64:
11740 #endif
11741 #ifdef TARGET_NR_newfstatat
11742     case TARGET_NR_newfstatat:
11743 #endif
11744         if (!(p = lock_user_string(arg2))) {
11745             return -TARGET_EFAULT;
11746         }
11747         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11748         unlock_user(p, arg2, 0);
11749         if (!is_error(ret))
11750             ret = host_to_target_stat64(cpu_env, arg3, &st);
11751         return ret;
11752 #endif
11753 #if defined(TARGET_NR_statx)
11754     case TARGET_NR_statx:
11755         {
11756             struct target_statx *target_stx;
11757             int dirfd = arg1;
11758             int flags = arg3;
11759 
11760             p = lock_user_string(arg2);
11761             if (p == NULL) {
11762                 return -TARGET_EFAULT;
11763             }
11764 #if defined(__NR_statx)
11765             {
11766                 /*
11767                  * It is assumed that struct statx is architecture independent.
11768                  */
11769                 struct target_statx host_stx;
11770                 int mask = arg4;
11771 
11772                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11773                 if (!is_error(ret)) {
11774                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11775                         unlock_user(p, arg2, 0);
11776                         return -TARGET_EFAULT;
11777                     }
11778                 }
11779 
11780                 if (ret != -TARGET_ENOSYS) {
11781                     unlock_user(p, arg2, 0);
11782                     return ret;
11783                 }
11784             }
11785 #endif
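            /*
             * Either the host has no statx() or it returned ENOSYS: fall
             * back to fstatat() and fill in those statx fields that can be
             * recovered from struct stat.
             */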
11786             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11787             unlock_user(p, arg2, 0);
11788 
11789             if (!is_error(ret)) {
11790                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11791                     return -TARGET_EFAULT;
11792                 }
11793                 memset(target_stx, 0, sizeof(*target_stx));
11794                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11795                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11796                 __put_user(st.st_ino, &target_stx->stx_ino);
11797                 __put_user(st.st_mode, &target_stx->stx_mode);
11798                 __put_user(st.st_uid, &target_stx->stx_uid);
11799                 __put_user(st.st_gid, &target_stx->stx_gid);
11800                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11801                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11802                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11803                 __put_user(st.st_size, &target_stx->stx_size);
11804                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11805                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11806                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11807                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11808                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11809                 unlock_user_struct(target_stx, arg5, 1);
11810             }
11811         }
11812         return ret;
11813 #endif
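    /*
     * The uid/gid cases below use the target's original ID width.  On
     * targets with 16-bit IDs, low2highuid()/low2highgid() widen guest
     * values (keeping 0xffff as the "unchanged" marker -1) and
     * high2lowuid()/high2lowgid() squeeze host IDs back into 16 bits;
     * elsewhere these helpers are no-ops.  The *32 variants further down
     * always use full-width IDs.
     */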
11814 #ifdef TARGET_NR_lchown
11815     case TARGET_NR_lchown:
11816         if (!(p = lock_user_string(arg1)))
11817             return -TARGET_EFAULT;
11818         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11819         unlock_user(p, arg1, 0);
11820         return ret;
11821 #endif
11822 #ifdef TARGET_NR_getuid
11823     case TARGET_NR_getuid:
11824         return get_errno(high2lowuid(getuid()));
11825 #endif
11826 #ifdef TARGET_NR_getgid
11827     case TARGET_NR_getgid:
11828         return get_errno(high2lowgid(getgid()));
11829 #endif
11830 #ifdef TARGET_NR_geteuid
11831     case TARGET_NR_geteuid:
11832         return get_errno(high2lowuid(geteuid()));
11833 #endif
11834 #ifdef TARGET_NR_getegid
11835     case TARGET_NR_getegid:
11836         return get_errno(high2lowgid(getegid()));
11837 #endif
11838     case TARGET_NR_setreuid:
11839         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11840     case TARGET_NR_setregid:
11841         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11842     case TARGET_NR_getgroups:
11843         { /* the same code as for TARGET_NR_getgroups32 */
11844             int gidsetsize = arg1;
11845             target_id *target_grouplist;
11846             g_autofree gid_t *grouplist = NULL;
11847             int i;
11848 
11849             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11850                 return -TARGET_EINVAL;
11851             }
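            /*
             * A gidsetsize of 0 just queries the number of supplementary
             * groups, so no buffer is allocated or written back in that
             * case.
             */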
11852             if (gidsetsize > 0) {
11853                 grouplist = g_try_new(gid_t, gidsetsize);
11854                 if (!grouplist) {
11855                     return -TARGET_ENOMEM;
11856                 }
11857             }
11858             ret = get_errno(getgroups(gidsetsize, grouplist));
11859             if (!is_error(ret) && gidsetsize > 0) {
11860                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11861                                              gidsetsize * sizeof(target_id), 0);
11862                 if (!target_grouplist) {
11863                     return -TARGET_EFAULT;
11864                 }
11865                 for (i = 0; i < ret; i++) {
11866                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11867                 }
11868                 unlock_user(target_grouplist, arg2,
11869                             gidsetsize * sizeof(target_id));
11870             }
11871             return ret;
11872         }
11873     case TARGET_NR_setgroups:
11874         { /* the same code as for TARGET_NR_setgroups32 */
11875             int gidsetsize = arg1;
11876             target_id *target_grouplist;
11877             g_autofree gid_t *grouplist = NULL;
11878             int i;
11879 
11880             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11881                 return -TARGET_EINVAL;
11882             }
11883             if (gidsetsize > 0) {
11884                 grouplist = g_try_new(gid_t, gidsetsize);
11885                 if (!grouplist) {
11886                     return -TARGET_ENOMEM;
11887                 }
11888                 target_grouplist = lock_user(VERIFY_READ, arg2,
11889                                              gidsetsize * sizeof(target_id), 1);
11890                 if (!target_grouplist) {
11891                     return -TARGET_EFAULT;
11892                 }
11893                 for (i = 0; i < gidsetsize; i++) {
11894                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11895                 }
11896                 unlock_user(target_grouplist, arg2,
11897                             gidsetsize * sizeof(target_id));
11898             }
11899             return get_errno(sys_setgroups(gidsetsize, grouplist));
11900         }
11901     case TARGET_NR_fchown:
11902         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11903 #if defined(TARGET_NR_fchownat)
11904     case TARGET_NR_fchownat:
11905         if (!(p = lock_user_string(arg2)))
11906             return -TARGET_EFAULT;
11907         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11908                                  low2highgid(arg4), arg5));
11909         unlock_user(p, arg2, 0);
11910         return ret;
11911 #endif
11912 #ifdef TARGET_NR_setresuid
11913     case TARGET_NR_setresuid:
11914         return get_errno(sys_setresuid(low2highuid(arg1),
11915                                        low2highuid(arg2),
11916                                        low2highuid(arg3)));
11917 #endif
11918 #ifdef TARGET_NR_getresuid
11919     case TARGET_NR_getresuid:
11920         {
11921             uid_t ruid, euid, suid;
11922             ret = get_errno(getresuid(&ruid, &euid, &suid));
11923             if (!is_error(ret)) {
11924                 if (put_user_id(high2lowuid(ruid), arg1)
11925                     || put_user_id(high2lowuid(euid), arg2)
11926                     || put_user_id(high2lowuid(suid), arg3))
11927                     return -TARGET_EFAULT;
11928             }
11929         }
11930         return ret;
11931 #endif
11932 #ifdef TARGET_NR_getresgid
11933     case TARGET_NR_setresgid:
11934         return get_errno(sys_setresgid(low2highgid(arg1),
11935                                        low2highgid(arg2),
11936                                        low2highgid(arg3)));
11937 #endif
11938 #ifdef TARGET_NR_getresgid
11939     case TARGET_NR_getresgid:
11940         {
11941             gid_t rgid, egid, sgid;
11942             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11943             if (!is_error(ret)) {
11944                 if (put_user_id(high2lowgid(rgid), arg1)
11945                     || put_user_id(high2lowgid(egid), arg2)
11946                     || put_user_id(high2lowgid(sgid), arg3))
11947                     return -TARGET_EFAULT;
11948             }
11949         }
11950         return ret;
11951 #endif
11952 #ifdef TARGET_NR_chown
11953     case TARGET_NR_chown:
11954         if (!(p = lock_user_string(arg1)))
11955             return -TARGET_EFAULT;
11956         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11957         unlock_user(p, arg1, 0);
11958         return ret;
11959 #endif
11960     case TARGET_NR_setuid:
11961         return get_errno(sys_setuid(low2highuid(arg1)));
11962     case TARGET_NR_setgid:
11963         return get_errno(sys_setgid(low2highgid(arg1)));
11964     case TARGET_NR_setfsuid:
11965         return get_errno(setfsuid(arg1));
11966     case TARGET_NR_setfsgid:
11967         return get_errno(setfsgid(arg1));
11968 
11969 #ifdef TARGET_NR_lchown32
11970     case TARGET_NR_lchown32:
11971         if (!(p = lock_user_string(arg1)))
11972             return -TARGET_EFAULT;
11973         ret = get_errno(lchown(p, arg2, arg3));
11974         unlock_user(p, arg1, 0);
11975         return ret;
11976 #endif
11977 #ifdef TARGET_NR_getuid32
11978     case TARGET_NR_getuid32:
11979         return get_errno(getuid());
11980 #endif
11981 
11982 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11983     /* Alpha specific */
11984     case TARGET_NR_getxuid:
11985         {
11986             uid_t euid;
11987             euid = geteuid();
11988             cpu_env->ir[IR_A4] = euid;
11989         }
11990         return get_errno(getuid());
11991 #endif
11992 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11993     /* Alpha specific */
11994     case TARGET_NR_getxgid:
11995         {
11996             gid_t egid;
11997             egid = getegid();
11998             cpu_env->ir[IR_A4] = egid;
11999         }
12000         return get_errno(getgid());
12001 #endif
12002 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
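    /*
     * osf_getsysinfo and osf_setsysinfo (below) expose Alpha's IEEE
     * floating-point control word: the trap-enable and mapping bits are
     * kept in cpu_env->swcr, while the accrued status bits live in the
     * hardware FPCR and are merged in or written back on each request.
     */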
12003     /* Alpha specific */
12004     case TARGET_NR_osf_getsysinfo:
12005         ret = -TARGET_EOPNOTSUPP;
12006         switch (arg1) {
12007           case TARGET_GSI_IEEE_FP_CONTROL:
12008             {
12009                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12010                 uint64_t swcr = cpu_env->swcr;
12011 
12012                 swcr &= ~SWCR_STATUS_MASK;
12013                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12014 
12015                 if (put_user_u64(swcr, arg2))
12016                     return -TARGET_EFAULT;
12017                 ret = 0;
12018             }
12019             break;
12020 
12021           /* case GSI_IEEE_STATE_AT_SIGNAL:
12022              -- Not implemented in linux kernel.
12023              case GSI_UACPROC:
12024              -- Retrieves current unaligned access state; not much used.
12025              case GSI_PROC_TYPE:
12026              -- Retrieves implver information; surely not used.
12027              case GSI_GET_HWRPB:
12028              -- Grabs a copy of the HWRPB; surely not used.
12029           */
12030         }
12031         return ret;
12032 #endif
12033 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12034     /* Alpha specific */
12035     case TARGET_NR_osf_setsysinfo:
12036         ret = -TARGET_EOPNOTSUPP;
12037         switch (arg1) {
12038           case TARGET_SSI_IEEE_FP_CONTROL:
12039             {
12040                 uint64_t swcr, fpcr;
12041 
12042                 if (get_user_u64(swcr, arg2)) {
12043                     return -TARGET_EFAULT;
12044                 }
12045 
12046                 /*
12047                  * The kernel calls swcr_update_status to update the
12048                  * status bits from the fpcr at every point that it
12049                  * could be queried.  Therefore, we store the status
12050                  * bits only in FPCR.
12051                  */
12052                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12053 
12054                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12055                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12056                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12057                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12058                 ret = 0;
12059             }
12060             break;
12061 
12062           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12063             {
12064                 uint64_t exc, fpcr, fex;
12065 
12066                 if (get_user_u64(exc, arg2)) {
12067                     return -TARGET_EFAULT;
12068                 }
12069                 exc &= SWCR_STATUS_MASK;
12070                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12071 
12072                 /* Old exceptions are not signaled.  */
12073                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12074                 fex = exc & ~fex;
12075                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12076                 fex &= (cpu_env)->swcr;
12077 
12078                 /* Update the hardware fpcr.  */
12079                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12080                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12081 
12082                 if (fex) {
12083                     int si_code = TARGET_FPE_FLTUNK;
12084                     target_siginfo_t info;
12085 
12086                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12087                         si_code = TARGET_FPE_FLTUND;
12088                     }
12089                     if (fex & SWCR_TRAP_ENABLE_INE) {
12090                         si_code = TARGET_FPE_FLTRES;
12091                     }
12092                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12093                         si_code = TARGET_FPE_FLTUND;
12094                     }
12095                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12096                         si_code = TARGET_FPE_FLTOVF;
12097                     }
12098                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12099                         si_code = TARGET_FPE_FLTDIV;
12100                     }
12101                     if (fex & SWCR_TRAP_ENABLE_INV) {
12102                         si_code = TARGET_FPE_FLTINV;
12103                     }
12104 
12105                     info.si_signo = SIGFPE;
12106                     info.si_errno = 0;
12107                     info.si_code = si_code;
12108                     info._sifields._sigfault._addr = (cpu_env)->pc;
12109                     queue_signal(cpu_env, info.si_signo,
12110                                  QEMU_SI_FAULT, &info);
12111                 }
12112                 ret = 0;
12113             }
12114             break;
12115 
12116           /* case SSI_NVPAIRS:
12117              -- Used with SSIN_UACPROC to enable unaligned accesses.
12118              case SSI_IEEE_STATE_AT_SIGNAL:
12119              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12120              -- Not implemented in linux kernel
12121           */
12122         }
12123         return ret;
12124 #endif
12125 #ifdef TARGET_NR_osf_sigprocmask
12126     /* Alpha specific.  */
12127     case TARGET_NR_osf_sigprocmask:
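        /*
         * Unlike the generic sigprocmask, the OSF/1 variant takes the new
         * mask by value and returns the previous mask as the syscall
         * result rather than through a pointer.
         */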
12128         {
12129             abi_ulong mask;
12130             int how;
12131             sigset_t set, oldset;
12132 
12133             switch (arg1) {
12134             case TARGET_SIG_BLOCK:
12135                 how = SIG_BLOCK;
12136                 break;
12137             case TARGET_SIG_UNBLOCK:
12138                 how = SIG_UNBLOCK;
12139                 break;
12140             case TARGET_SIG_SETMASK:
12141                 how = SIG_SETMASK;
12142                 break;
12143             default:
12144                 return -TARGET_EINVAL;
12145             }
12146             mask = arg2;
12147             target_to_host_old_sigset(&set, &mask);
12148             ret = do_sigprocmask(how, &set, &oldset);
12149             if (!ret) {
12150                 host_to_target_old_sigset(&mask, &oldset);
12151                 ret = mask;
12152             }
12153         }
12154         return ret;
12155 #endif
12156 
12157 #ifdef TARGET_NR_getgid32
12158     case TARGET_NR_getgid32:
12159         return get_errno(getgid());
12160 #endif
12161 #ifdef TARGET_NR_geteuid32
12162     case TARGET_NR_geteuid32:
12163         return get_errno(geteuid());
12164 #endif
12165 #ifdef TARGET_NR_getegid32
12166     case TARGET_NR_getegid32:
12167         return get_errno(getegid());
12168 #endif
12169 #ifdef TARGET_NR_setreuid32
12170     case TARGET_NR_setreuid32:
12171         return get_errno(setreuid(arg1, arg2));
12172 #endif
12173 #ifdef TARGET_NR_setregid32
12174     case TARGET_NR_setregid32:
12175         return get_errno(setregid(arg1, arg2));
12176 #endif
12177 #ifdef TARGET_NR_getgroups32
12178     case TARGET_NR_getgroups32:
12179         { /* the same code as for TARGET_NR_getgroups */
12180             int gidsetsize = arg1;
12181             uint32_t *target_grouplist;
12182             g_autofree gid_t *grouplist = NULL;
12183             int i;
12184 
12185             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12186                 return -TARGET_EINVAL;
12187             }
12188             if (gidsetsize > 0) {
12189                 grouplist = g_try_new(gid_t, gidsetsize);
12190                 if (!grouplist) {
12191                     return -TARGET_ENOMEM;
12192                 }
12193             }
12194             ret = get_errno(getgroups(gidsetsize, grouplist));
12195             if (!is_error(ret) && gidsetsize > 0) {
12196                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12197                                              gidsetsize * 4, 0);
12198                 if (!target_grouplist) {
12199                     return -TARGET_EFAULT;
12200                 }
12201                 for (i = 0; i < ret; i++) {
12202                     target_grouplist[i] = tswap32(grouplist[i]);
12203                 }
12204                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12205             }
12206             return ret;
12207         }
12208 #endif
12209 #ifdef TARGET_NR_setgroups32
12210     case TARGET_NR_setgroups32:
12211         { /* the same code as for TARGET_NR_setgroups */
12212             int gidsetsize = arg1;
12213             uint32_t *target_grouplist;
12214             g_autofree gid_t *grouplist = NULL;
12215             int i;
12216 
12217             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12218                 return -TARGET_EINVAL;
12219             }
12220             if (gidsetsize > 0) {
12221                 grouplist = g_try_new(gid_t, gidsetsize);
12222                 if (!grouplist) {
12223                     return -TARGET_ENOMEM;
12224                 }
12225                 target_grouplist = lock_user(VERIFY_READ, arg2,
12226                                              gidsetsize * 4, 1);
12227                 if (!target_grouplist) {
12228                     return -TARGET_EFAULT;
12229                 }
12230                 for (i = 0; i < gidsetsize; i++) {
12231                     grouplist[i] = tswap32(target_grouplist[i]);
12232                 }
12233                 unlock_user(target_grouplist, arg2, 0);
12234             }
12235             return get_errno(sys_setgroups(gidsetsize, grouplist));
12236         }
12237 #endif
12238 #ifdef TARGET_NR_fchown32
12239     case TARGET_NR_fchown32:
12240         return get_errno(fchown(arg1, arg2, arg3));
12241 #endif
12242 #ifdef TARGET_NR_setresuid32
12243     case TARGET_NR_setresuid32:
12244         return get_errno(sys_setresuid(arg1, arg2, arg3));
12245 #endif
12246 #ifdef TARGET_NR_getresuid32
12247     case TARGET_NR_getresuid32:
12248         {
12249             uid_t ruid, euid, suid;
12250             ret = get_errno(getresuid(&ruid, &euid, &suid));
12251             if (!is_error(ret)) {
12252                 if (put_user_u32(ruid, arg1)
12253                     || put_user_u32(euid, arg2)
12254                     || put_user_u32(suid, arg3))
12255                     return -TARGET_EFAULT;
12256             }
12257         }
12258         return ret;
12259 #endif
12260 #ifdef TARGET_NR_setresgid32
12261     case TARGET_NR_setresgid32:
12262         return get_errno(sys_setresgid(arg1, arg2, arg3));
12263 #endif
12264 #ifdef TARGET_NR_getresgid32
12265     case TARGET_NR_getresgid32:
12266         {
12267             gid_t rgid, egid, sgid;
12268             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12269             if (!is_error(ret)) {
12270                 if (put_user_u32(rgid, arg1)
12271                     || put_user_u32(egid, arg2)
12272                     || put_user_u32(sgid, arg3))
12273                     return -TARGET_EFAULT;
12274             }
12275         }
12276         return ret;
12277 #endif
12278 #ifdef TARGET_NR_chown32
12279     case TARGET_NR_chown32:
12280         if (!(p = lock_user_string(arg1)))
12281             return -TARGET_EFAULT;
12282         ret = get_errno(chown(p, arg2, arg3));
12283         unlock_user(p, arg1, 0);
12284         return ret;
12285 #endif
12286 #ifdef TARGET_NR_setuid32
12287     case TARGET_NR_setuid32:
12288         return get_errno(sys_setuid(arg1));
12289 #endif
12290 #ifdef TARGET_NR_setgid32
12291     case TARGET_NR_setgid32:
12292         return get_errno(sys_setgid(arg1));
12293 #endif
12294 #ifdef TARGET_NR_setfsuid32
12295     case TARGET_NR_setfsuid32:
12296         return get_errno(setfsuid(arg1));
12297 #endif
12298 #ifdef TARGET_NR_setfsgid32
12299     case TARGET_NR_setfsgid32:
12300         return get_errno(setfsgid(arg1));
12301 #endif
12302 #ifdef TARGET_NR_mincore
12303     case TARGET_NR_mincore:
12304         {
12305             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12306             if (!a) {
12307                 return -TARGET_ENOMEM;
12308             }
12309             p = lock_user_string(arg3);
12310             if (!p) {
12311                 ret = -TARGET_EFAULT;
12312             } else {
12313                 ret = get_errno(mincore(a, arg2, p));
12314                 unlock_user(p, arg3, ret);
12315             }
12316             unlock_user(a, arg1, 0);
12317         }
12318         return ret;
12319 #endif
12320 #ifdef TARGET_NR_arm_fadvise64_64
12321     case TARGET_NR_arm_fadvise64_64:
12322         /* arm_fadvise64_64 looks like fadvise64_64 but
12323          * with different argument order: fd, advice, offset, len
12324          * rather than the usual fd, offset, len, advice.
12325          * Note that offset and len are both 64-bit so appear as
12326          * pairs of 32-bit registers.
12327          */
12328         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12329                             target_offset64(arg5, arg6), arg2);
12330         return -host_to_target_errno(ret);
12331 #endif
12332 
12333 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12334 
12335 #ifdef TARGET_NR_fadvise64_64
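    /*
     * On 32-bit ABIs each 64-bit argument arrives as a pair of registers;
     * for ABIs where regpairs_aligned() is true the pair must start in an
     * even register, which inserts a padding slot and shifts the remaining
     * arguments down by one.
     */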
12336     case TARGET_NR_fadvise64_64:
12337 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12338         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12339         ret = arg2;
12340         arg2 = arg3;
12341         arg3 = arg4;
12342         arg4 = arg5;
12343         arg5 = arg6;
12344         arg6 = ret;
12345 #else
12346         /* 6 args: fd, offset (high, low), len (high, low), advice */
12347         if (regpairs_aligned(cpu_env, num)) {
12348             /* offset is in (3,4), len in (5,6) and advice in 7 */
12349             arg2 = arg3;
12350             arg3 = arg4;
12351             arg4 = arg5;
12352             arg5 = arg6;
12353             arg6 = arg7;
12354         }
12355 #endif
12356         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12357                             target_offset64(arg4, arg5), arg6);
12358         return -host_to_target_errno(ret);
12359 #endif
12360 
12361 #ifdef TARGET_NR_fadvise64
12362     case TARGET_NR_fadvise64:
12363         /* 5 args: fd, offset (high, low), len, advice */
12364         if (regpairs_aligned(cpu_env, num)) {
12365             /* offset is in (3,4), len in 5 and advice in 6 */
12366             arg2 = arg3;
12367             arg3 = arg4;
12368             arg4 = arg5;
12369             arg5 = arg6;
12370         }
12371         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12372         return -host_to_target_errno(ret);
12373 #endif
12374 
12375 #else /* not a 32-bit ABI */
12376 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12377 #ifdef TARGET_NR_fadvise64_64
12378     case TARGET_NR_fadvise64_64:
12379 #endif
12380 #ifdef TARGET_NR_fadvise64
12381     case TARGET_NR_fadvise64:
12382 #endif
12383 #ifdef TARGET_S390X
12384         switch (arg4) {
12385         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12386         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12387         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12388         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12389         default: break;
12390         }
12391 #endif
12392         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12393 #endif
12394 #endif /* end of 64-bit ABI fadvise handling */
12395 
12396 #ifdef TARGET_NR_madvise
12397     case TARGET_NR_madvise:
12398         return target_madvise(arg1, arg2, arg3);
12399 #endif
12400 #ifdef TARGET_NR_fcntl64
12401     case TARGET_NR_fcntl64:
12402     {
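        /*
         * fcntl64 lets 32-bit guests operate on a 64-bit struct flock.
         * The guest layout is converted to the host's struct flock for the
         * F_GETLK64/F_SETLK64/F_SETLKW64 commands; ARM old-ABI guests use
         * a differently padded flock64, hence the alternative copy
         * routines selected below.
         */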
12403         int cmd;
12404         struct flock fl;
12405         from_flock64_fn *copyfrom = copy_from_user_flock64;
12406         to_flock64_fn *copyto = copy_to_user_flock64;
12407 
12408 #ifdef TARGET_ARM
12409         if (!cpu_env->eabi) {
12410             copyfrom = copy_from_user_oabi_flock64;
12411             copyto = copy_to_user_oabi_flock64;
12412         }
12413 #endif
12414 
12415         cmd = target_to_host_fcntl_cmd(arg2);
12416         if (cmd == -TARGET_EINVAL) {
12417             return cmd;
12418         }
12419 
12420         switch (arg2) {
12421         case TARGET_F_GETLK64:
12422             ret = copyfrom(&fl, arg3);
12423             if (ret) {
12424                 break;
12425             }
12426             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12427             if (ret == 0) {
12428                 ret = copyto(arg3, &fl);
12429             }
12430             break;
12431 
12432         case TARGET_F_SETLK64:
12433         case TARGET_F_SETLKW64:
12434             ret = copyfrom(&fl, arg3);
12435             if (ret) {
12436                 break;
12437             }
12438             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12439             break;
12440         default:
12441             ret = do_fcntl(arg1, arg2, arg3);
12442             break;
12443         }
12444         return ret;
12445     }
12446 #endif
12447 #ifdef TARGET_NR_cacheflush
12448     case TARGET_NR_cacheflush:
12449         /* self-modifying code is handled automatically, so nothing needed */
12450         return 0;
12451 #endif
12452 #ifdef TARGET_NR_getpagesize
12453     case TARGET_NR_getpagesize:
12454         return TARGET_PAGE_SIZE;
12455 #endif
12456     case TARGET_NR_gettid:
12457         return get_errno(sys_gettid());
12458 #ifdef TARGET_NR_readahead
12459     case TARGET_NR_readahead:
12460 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12461         if (regpairs_aligned(cpu_env, num)) {
12462             arg2 = arg3;
12463             arg3 = arg4;
12464             arg4 = arg5;
12465         }
12466         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12467 #else
12468         ret = get_errno(readahead(arg1, arg2, arg3));
12469 #endif
12470         return ret;
12471 #endif
12472 #ifdef CONFIG_ATTR
12473 #ifdef TARGET_NR_setxattr
12474     case TARGET_NR_listxattr:
12475     case TARGET_NR_llistxattr:
12476     {
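        /*
         * A null list buffer means the guest is only querying the size it
         * needs, so the buffer is mapped only when arg2 is non-zero.
         */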
12477         void *b = 0;
12478         if (arg2) {
12479             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12480             if (!b) {
12481                 return -TARGET_EFAULT;
12482             }
12483         }
12484         p = lock_user_string(arg1);
12485         if (p) {
12486             if (num == TARGET_NR_listxattr) {
12487                 ret = get_errno(listxattr(p, b, arg3));
12488             } else {
12489                 ret = get_errno(llistxattr(p, b, arg3));
12490             }
12491         } else {
12492             ret = -TARGET_EFAULT;
12493         }
12494         unlock_user(p, arg1, 0);
12495         unlock_user(b, arg2, arg3);
12496         return ret;
12497     }
12498     case TARGET_NR_flistxattr:
12499     {
12500         void *b = 0;
12501         if (arg2) {
12502             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12503             if (!b) {
12504                 return -TARGET_EFAULT;
12505             }
12506         }
12507         ret = get_errno(flistxattr(arg1, b, arg3));
12508         unlock_user(b, arg2, arg3);
12509         return ret;
12510     }
12511     case TARGET_NR_setxattr:
12512     case TARGET_NR_lsetxattr:
12513         {
12514             void *n, *v = 0;
12515             if (arg3) {
12516                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12517                 if (!v) {
12518                     return -TARGET_EFAULT;
12519                 }
12520             }
12521             p = lock_user_string(arg1);
12522             n = lock_user_string(arg2);
12523             if (p && n) {
12524                 if (num == TARGET_NR_setxattr) {
12525                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12526                 } else {
12527                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12528                 }
12529             } else {
12530                 ret = -TARGET_EFAULT;
12531             }
12532             unlock_user(p, arg1, 0);
12533             unlock_user(n, arg2, 0);
12534             unlock_user(v, arg3, 0);
12535         }
12536         return ret;
12537     case TARGET_NR_fsetxattr:
12538         {
12539             void *n, *v = 0;
12540             if (arg3) {
12541                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12542                 if (!v) {
12543                     return -TARGET_EFAULT;
12544                 }
12545             }
12546             n = lock_user_string(arg2);
12547             if (n) {
12548                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12549             } else {
12550                 ret = -TARGET_EFAULT;
12551             }
12552             unlock_user(n, arg2, 0);
12553             unlock_user(v, arg3, 0);
12554         }
12555         return ret;
12556     case TARGET_NR_getxattr:
12557     case TARGET_NR_lgetxattr:
12558         {
12559             void *n, *v = 0;
12560             if (arg3) {
12561                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12562                 if (!v) {
12563                     return -TARGET_EFAULT;
12564                 }
12565             }
12566             p = lock_user_string(arg1);
12567             n = lock_user_string(arg2);
12568             if (p && n) {
12569                 if (num == TARGET_NR_getxattr) {
12570                     ret = get_errno(getxattr(p, n, v, arg4));
12571                 } else {
12572                     ret = get_errno(lgetxattr(p, n, v, arg4));
12573                 }
12574             } else {
12575                 ret = -TARGET_EFAULT;
12576             }
12577             unlock_user(p, arg1, 0);
12578             unlock_user(n, arg2, 0);
12579             unlock_user(v, arg3, arg4);
12580         }
12581         return ret;
12582     case TARGET_NR_fgetxattr:
12583         {
12584             void *n, *v = 0;
12585             if (arg3) {
12586                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12587                 if (!v) {
12588                     return -TARGET_EFAULT;
12589                 }
12590             }
12591             n = lock_user_string(arg2);
12592             if (n) {
12593                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12594             } else {
12595                 ret = -TARGET_EFAULT;
12596             }
12597             unlock_user(n, arg2, 0);
12598             unlock_user(v, arg3, arg4);
12599         }
12600         return ret;
12601     case TARGET_NR_removexattr:
12602     case TARGET_NR_lremovexattr:
12603         {
12604             void *n;
12605             p = lock_user_string(arg1);
12606             n = lock_user_string(arg2);
12607             if (p && n) {
12608                 if (num == TARGET_NR_removexattr) {
12609                     ret = get_errno(removexattr(p, n));
12610                 } else {
12611                     ret = get_errno(lremovexattr(p, n));
12612                 }
12613             } else {
12614                 ret = -TARGET_EFAULT;
12615             }
12616             unlock_user(p, arg1, 0);
12617             unlock_user(n, arg2, 0);
12618         }
12619         return ret;
12620     case TARGET_NR_fremovexattr:
12621         {
12622             void *n;
12623             n = lock_user_string(arg2);
12624             if (n) {
12625                 ret = get_errno(fremovexattr(arg1, n));
12626             } else {
12627                 ret = -TARGET_EFAULT;
12628             }
12629             unlock_user(n, arg2, 0);
12630         }
12631         return ret;
12632 #endif
12633 #endif /* CONFIG_ATTR */
12634 #ifdef TARGET_NR_set_thread_area
12635     case TARGET_NR_set_thread_area:
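        /*
         * There is no generic implementation: each target stores the
         * thread pointer wherever its ABI expects it (CP0 UserLocal on
         * MIPS, a TLS segment on 32-bit x86, the TaskState for m68k).
         */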
12636 #if defined(TARGET_MIPS)
12637       cpu_env->active_tc.CP0_UserLocal = arg1;
12638       return 0;
12639 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12640       return do_set_thread_area(cpu_env, arg1);
12641 #elif defined(TARGET_M68K)
12642       {
12643           TaskState *ts = get_task_state(cpu);
12644           ts->tp_value = arg1;
12645           return 0;
12646       }
12647 #else
12648       return -TARGET_ENOSYS;
12649 #endif
12650 #endif
12651 #ifdef TARGET_NR_get_thread_area
12652     case TARGET_NR_get_thread_area:
12653 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12654         return do_get_thread_area(cpu_env, arg1);
12655 #elif defined(TARGET_M68K)
12656         {
12657             TaskState *ts = get_task_state(cpu);
12658             return ts->tp_value;
12659         }
12660 #else
12661         return -TARGET_ENOSYS;
12662 #endif
12663 #endif
12664 #ifdef TARGET_NR_getdomainname
12665     case TARGET_NR_getdomainname:
12666         return -TARGET_ENOSYS;
12667 #endif
12668 
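    /*
     * The clock_* syscalls come in pairs: the plain variant uses the
     * target's native struct timespec, while the *64/_time64 variant uses
     * a layout with a 64-bit tv_sec for 32-bit guests.  Both map onto the
     * same host clock_settime()/clock_gettime()/clock_getres() calls.
     */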
12669 #ifdef TARGET_NR_clock_settime
12670     case TARGET_NR_clock_settime:
12671     {
12672         struct timespec ts;
12673 
12674         ret = target_to_host_timespec(&ts, arg2);
12675         if (!is_error(ret)) {
12676             ret = get_errno(clock_settime(arg1, &ts));
12677         }
12678         return ret;
12679     }
12680 #endif
12681 #ifdef TARGET_NR_clock_settime64
12682     case TARGET_NR_clock_settime64:
12683     {
12684         struct timespec ts;
12685 
12686         ret = target_to_host_timespec64(&ts, arg2);
12687         if (!is_error(ret)) {
12688             ret = get_errno(clock_settime(arg1, &ts));
12689         }
12690         return ret;
12691     }
12692 #endif
12693 #ifdef TARGET_NR_clock_gettime
12694     case TARGET_NR_clock_gettime:
12695     {
12696         struct timespec ts;
12697         ret = get_errno(clock_gettime(arg1, &ts));
12698         if (!is_error(ret)) {
12699             ret = host_to_target_timespec(arg2, &ts);
12700         }
12701         return ret;
12702     }
12703 #endif
12704 #ifdef TARGET_NR_clock_gettime64
12705     case TARGET_NR_clock_gettime64:
12706     {
12707         struct timespec ts;
12708         ret = get_errno(clock_gettime(arg1, &ts));
12709         if (!is_error(ret)) {
12710             ret = host_to_target_timespec64(arg2, &ts);
12711         }
12712         return ret;
12713     }
12714 #endif
12715 #ifdef TARGET_NR_clock_getres
12716     case TARGET_NR_clock_getres:
12717     {
12718         struct timespec ts;
12719         ret = get_errno(clock_getres(arg1, &ts));
12720         if (!is_error(ret)) {
12721             host_to_target_timespec(arg2, &ts);
12722         }
12723         return ret;
12724     }
12725 #endif
12726 #ifdef TARGET_NR_clock_getres_time64
12727     case TARGET_NR_clock_getres_time64:
12728     {
12729         struct timespec ts;
12730         ret = get_errno(clock_getres(arg1, &ts));
12731         if (!is_error(ret)) {
12732             host_to_target_timespec64(arg2, &ts);
12733         }
12734         return ret;
12735     }
12736 #endif
12737 #ifdef TARGET_NR_clock_nanosleep
12738     case TARGET_NR_clock_nanosleep:
12739     {
12740         struct timespec ts;
12741         if (target_to_host_timespec(&ts, arg3)) {
12742             return -TARGET_EFAULT;
12743         }
12744         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12745                                              &ts, arg4 ? &ts : NULL));
12746         /*
12747          * If the call is interrupted by a signal handler it fails with
12748          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12749          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12750          */
12751         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12752             host_to_target_timespec(arg4, &ts)) {
12753               return -TARGET_EFAULT;
12754         }
12755 
12756         return ret;
12757     }
12758 #endif
12759 #ifdef TARGET_NR_clock_nanosleep_time64
12760     case TARGET_NR_clock_nanosleep_time64:
12761     {
12762         struct timespec ts;
12763 
12764         if (target_to_host_timespec64(&ts, arg3)) {
12765             return -TARGET_EFAULT;
12766         }
12767 
12768         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12769                                              &ts, arg4 ? &ts : NULL));
12770 
12771         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12772             host_to_target_timespec64(arg4, &ts)) {
12773             return -TARGET_EFAULT;
12774         }
12775         return ret;
12776     }
12777 #endif
12778 
12779 #if defined(TARGET_NR_set_tid_address)
12780     case TARGET_NR_set_tid_address:
12781     {
12782         TaskState *ts = get_task_state(cpu);
12783         ts->child_tidptr = arg1;
12784         /* do not call host set_tid_address(); record the pointer and return the tid */
12785         return get_errno(sys_gettid());
12786     }
12787 #endif
12788 
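    /*
     * Guest threads are backed by host threads, so the thread IDs can be
     * passed to the host unchanged; only the signal number needs
     * translating.
     */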
12789     case TARGET_NR_tkill:
12790         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12791 
12792     case TARGET_NR_tgkill:
12793         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12794                          target_to_host_signal(arg3)));
12795 
12796 #ifdef TARGET_NR_set_robust_list
12797     case TARGET_NR_set_robust_list:
12798     case TARGET_NR_get_robust_list:
12799         /* The ABI for supporting robust futexes has userspace pass
12800          * the kernel a pointer to a linked list which is updated by
12801          * userspace after the syscall; the list is walked by the kernel
12802          * when the thread exits. Since the linked list in QEMU guest
12803          * memory isn't a valid linked list for the host and we have
12804          * no way to reliably intercept the thread-death event, we can't
12805          * support these. Silently return ENOSYS so that guest userspace
12806          * falls back to a non-robust futex implementation (which should
12807          * be OK except in the corner case of the guest crashing while
12808          * holding a mutex that is shared with another process via
12809          * shared memory).
12810          */
12811         return -TARGET_ENOSYS;
12812 #endif
12813 
12814 #if defined(TARGET_NR_utimensat)
12815     case TARGET_NR_utimensat:
12816         {
12817             struct timespec *tsp, ts[2];
12818             if (!arg3) {
12819                 tsp = NULL;
12820             } else {
12821                 if (target_to_host_timespec(ts, arg3)) {
12822                     return -TARGET_EFAULT;
12823                 }
12824                 if (target_to_host_timespec(ts + 1, arg3 +
12825                                             sizeof(struct target_timespec))) {
12826                     return -TARGET_EFAULT;
12827                 }
12828                 tsp = ts;
12829             }
12830             if (!arg2)
12831                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12832             else {
12833                 if (!(p = lock_user_string(arg2))) {
12834                     return -TARGET_EFAULT;
12835                 }
12836                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12837                 unlock_user(p, arg2, 0);
12838             }
12839         }
12840         return ret;
12841 #endif
12842 #ifdef TARGET_NR_utimensat_time64
12843     case TARGET_NR_utimensat_time64:
12844         {
12845             struct timespec *tsp, ts[2];
12846             if (!arg3) {
12847                 tsp = NULL;
12848             } else {
12849                 if (target_to_host_timespec64(ts, arg3)) {
12850                     return -TARGET_EFAULT;
12851                 }
12852                 if (target_to_host_timespec64(ts + 1, arg3 +
12853                                      sizeof(struct target__kernel_timespec))) {
12854                     return -TARGET_EFAULT;
12855                 }
12856                 tsp = ts;
12857             }
12858             if (!arg2)
12859                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12860             else {
12861                 p = lock_user_string(arg2);
12862                 if (!p) {
12863                     return -TARGET_EFAULT;
12864                 }
12865                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12866                 unlock_user(p, arg2, 0);
12867             }
12868         }
12869         return ret;
12870 #endif
12871 #ifdef TARGET_NR_futex
12872     case TARGET_NR_futex:
12873         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12874 #endif
12875 #ifdef TARGET_NR_futex_time64
12876     case TARGET_NR_futex_time64:
12877         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12878 #endif
12879 #ifdef CONFIG_INOTIFY
12880 #if defined(TARGET_NR_inotify_init)
12881     case TARGET_NR_inotify_init:
12882         ret = get_errno(inotify_init());
12883         if (ret >= 0) {
12884             fd_trans_register(ret, &target_inotify_trans);
12885         }
12886         return ret;
12887 #endif
12888 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12889     case TARGET_NR_inotify_init1:
12890         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12891                                           fcntl_flags_tbl)));
12892         if (ret >= 0) {
12893             fd_trans_register(ret, &target_inotify_trans);
12894         }
12895         return ret;
12896 #endif
12897 #if defined(TARGET_NR_inotify_add_watch)
12898     case TARGET_NR_inotify_add_watch:
12899         p = lock_user_string(arg2);
12900         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12901         unlock_user(p, arg2, 0);
12902         return ret;
12903 #endif
12904 #if defined(TARGET_NR_inotify_rm_watch)
12905     case TARGET_NR_inotify_rm_watch:
12906         return get_errno(inotify_rm_watch(arg1, arg2));
12907 #endif
12908 #endif
12909 
12910 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12911     case TARGET_NR_mq_open:
12912         {
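            /*
             * Only the open flags and the optional mq_attr need
             * converting; the attr structure is copied field by field
             * because the target version is built from target longs.
             */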
12913             struct mq_attr posix_mq_attr;
12914             struct mq_attr *pposix_mq_attr;
12915             int host_flags;
12916 
12917             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12918             pposix_mq_attr = NULL;
12919             if (arg4) {
12920                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12921                     return -TARGET_EFAULT;
12922                 }
12923                 pposix_mq_attr = &posix_mq_attr;
12924             }
12925             p = lock_user_string(arg1 - 1);
12926             if (!p) {
12927                 return -TARGET_EFAULT;
12928             }
12929             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12930             unlock_user(p, arg1, 0);
12931         }
12932         return ret;
12933 
12934     case TARGET_NR_mq_unlink:
12935         p = lock_user_string(arg1 - 1);
12936         if (!p) {
12937             return -TARGET_EFAULT;
12938         }
12939         ret = get_errno(mq_unlink(p));
12940         unlock_user(p, arg1, 0);
12941         return ret;
12942 
12943 #ifdef TARGET_NR_mq_timedsend
12944     case TARGET_NR_mq_timedsend:
12945         {
12946             struct timespec ts;
12947 
12948             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12949             if (arg5 != 0) {
12950                 if (target_to_host_timespec(&ts, arg5)) {
12951                     return -TARGET_EFAULT;
12952                 }
12953                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12954                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12955                     return -TARGET_EFAULT;
12956                 }
12957             } else {
12958                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12959             }
12960             unlock_user(p, arg2, arg3);
12961         }
12962         return ret;
12963 #endif
12964 #ifdef TARGET_NR_mq_timedsend_time64
12965     case TARGET_NR_mq_timedsend_time64:
12966         {
12967             struct timespec ts;
12968 
12969             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12970             if (arg5 != 0) {
12971                 if (target_to_host_timespec64(&ts, arg5)) {
12972                     return -TARGET_EFAULT;
12973                 }
12974                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12975                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12976                     return -TARGET_EFAULT;
12977                 }
12978             } else {
12979                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12980             }
12981             unlock_user(p, arg2, arg3);
12982         }
12983         return ret;
12984 #endif
12985 
12986 #ifdef TARGET_NR_mq_timedreceive
12987     case TARGET_NR_mq_timedreceive:
12988         {
12989             struct timespec ts;
12990             unsigned int prio;
12991 
12992             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12993             if (arg5 != 0) {
12994                 if (target_to_host_timespec(&ts, arg5)) {
12995                     return -TARGET_EFAULT;
12996                 }
12997                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12998                                                      &prio, &ts));
12999                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13000                     return -TARGET_EFAULT;
13001                 }
13002             } else {
13003                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13004                                                      &prio, NULL));
13005             }
13006             unlock_user(p, arg2, arg3);
13007             if (arg4 != 0)
13008                 put_user_u32(prio, arg4);
13009         }
13010         return ret;
13011 #endif
13012 #ifdef TARGET_NR_mq_timedreceive_time64
13013     case TARGET_NR_mq_timedreceive_time64:
13014         {
13015             struct timespec ts;
13016             unsigned int prio;
13017 
13018             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13019             if (arg5 != 0) {
13020                 if (target_to_host_timespec64(&ts, arg5)) {
13021                     return -TARGET_EFAULT;
13022                 }
13023                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13024                                                      &prio, &ts));
13025                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13026                     return -TARGET_EFAULT;
13027                 }
13028             } else {
13029                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13030                                                      &prio, NULL));
13031             }
13032             unlock_user(p, arg2, arg3);
13033             if (arg4 != 0) {
13034                 put_user_u32(prio, arg4);
13035             }
13036         }
13037         return ret;
13038 #endif
13039 
13040     /* Not implemented for now... */
13041 /*     case TARGET_NR_mq_notify: */
13042 /*         break; */
13043 
13044     case TARGET_NR_mq_getsetattr:
13045         {
13046             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13047             ret = 0;
13048             if (arg2 != 0) {
13049                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13050                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13051                                            &posix_mq_attr_out));
13052             } else if (arg3 != 0) {
13053                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13054             }
13055             if (ret == 0 && arg3 != 0) {
13056                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13057             }
13058         }
13059         return ret;
13060 #endif
13061 
13062 #ifdef CONFIG_SPLICE
13063 #ifdef TARGET_NR_tee
13064     case TARGET_NR_tee:
13065         {
13066             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13067         }
13068         return ret;
13069 #endif
13070 #ifdef TARGET_NR_splice
13071     case TARGET_NR_splice:
13072         {
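            /*
             * The in/out offset pointers are optional: when the guest
             * supplied one, read the 64-bit offset, hand a host copy to
             * splice(), and write the updated value back afterwards.
             */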
13073             loff_t loff_in, loff_out;
13074             loff_t *ploff_in = NULL, *ploff_out = NULL;
13075             if (arg2) {
13076                 if (get_user_u64(loff_in, arg2)) {
13077                     return -TARGET_EFAULT;
13078                 }
13079                 ploff_in = &loff_in;
13080             }
13081             if (arg4) {
13082                 if (get_user_u64(loff_out, arg4)) {
13083                     return -TARGET_EFAULT;
13084                 }
13085                 ploff_out = &loff_out;
13086             }
13087             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13088             if (arg2) {
13089                 if (put_user_u64(loff_in, arg2)) {
13090                     return -TARGET_EFAULT;
13091                 }
13092             }
13093             if (arg4) {
13094                 if (put_user_u64(loff_out, arg4)) {
13095                     return -TARGET_EFAULT;
13096                 }
13097             }
13098         }
13099         return ret;
13100 #endif
13101 #ifdef TARGET_NR_vmsplice
13102     case TARGET_NR_vmsplice:
13103         {
13104             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13105             if (vec != NULL) {
13106                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13107                 unlock_iovec(vec, arg2, arg3, 0);
13108             } else {
13109                 ret = -host_to_target_errno(errno);
13110             }
13111         }
13112         return ret;
13113 #endif
13114 #endif /* CONFIG_SPLICE */
13115 #ifdef CONFIG_EVENTFD
13116 #if defined(TARGET_NR_eventfd)
13117     case TARGET_NR_eventfd:
13118         ret = get_errno(eventfd(arg1, 0));
13119         if (ret >= 0) {
13120             fd_trans_register(ret, &target_eventfd_trans);
13121         }
13122         return ret;
13123 #endif
13124 #if defined(TARGET_NR_eventfd2)
13125     case TARGET_NR_eventfd2:
13126     {
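        /*
         * The eventfd flags reuse the open() flag encodings, which differ
         * between targets, so O_NONBLOCK and O_CLOEXEC are translated by
         * hand; any remaining bits (e.g. EFD_SEMAPHORE) are passed through
         * unchanged.
         */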
13127         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13128         if (arg2 & TARGET_O_NONBLOCK) {
13129             host_flags |= O_NONBLOCK;
13130         }
13131         if (arg2 & TARGET_O_CLOEXEC) {
13132             host_flags |= O_CLOEXEC;
13133         }
13134         ret = get_errno(eventfd(arg1, host_flags));
13135         if (ret >= 0) {
13136             fd_trans_register(ret, &target_eventfd_trans);
13137         }
13138         return ret;
13139     }
13140 #endif
13141 #endif /* CONFIG_EVENTFD  */
13142 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13143     case TARGET_NR_fallocate:
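        /*
         * On 32-bit ABIs the offset and length each arrive as a register
         * pair and are reassembled with target_offset64(); 64-bit ABIs
         * pass them directly.
         */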
13144 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13145         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13146                                   target_offset64(arg5, arg6)));
13147 #else
13148         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13149 #endif
13150         return ret;
13151 #endif
13152 #if defined(CONFIG_SYNC_FILE_RANGE)
13153 #if defined(TARGET_NR_sync_file_range)
13154     case TARGET_NR_sync_file_range:
13155 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13156 #if defined(TARGET_MIPS)
13157         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13158                                         target_offset64(arg5, arg6), arg7));
13159 #else
13160         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13161                                         target_offset64(arg4, arg5), arg6));
13162 #endif /* !TARGET_MIPS */
13163 #else
13164         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13165 #endif
13166         return ret;
13167 #endif
13168 #if defined(TARGET_NR_sync_file_range2) || \
13169     defined(TARGET_NR_arm_sync_file_range)
13170 #if defined(TARGET_NR_sync_file_range2)
13171     case TARGET_NR_sync_file_range2:
13172 #endif
13173 #if defined(TARGET_NR_arm_sync_file_range)
13174     case TARGET_NR_arm_sync_file_range:
13175 #endif
13176         /* This is like sync_file_range but the arguments are reordered */
13177 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13178         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13179                                         target_offset64(arg5, arg6), arg2));
13180 #else
13181         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13182 #endif
13183         return ret;
13184 #endif
13185 #endif
13186 #if defined(TARGET_NR_signalfd4)
13187     case TARGET_NR_signalfd4:
13188         return do_signalfd4(arg1, arg2, arg4);
13189 #endif
13190 #if defined(TARGET_NR_signalfd)
13191     case TARGET_NR_signalfd:
13192         return do_signalfd4(arg1, arg2, 0);
13193 #endif
13194 #if defined(CONFIG_EPOLL)
13195 #if defined(TARGET_NR_epoll_create)
13196     case TARGET_NR_epoll_create:
13197         return get_errno(epoll_create(arg1));
13198 #endif
13199 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13200     case TARGET_NR_epoll_create1:
13201         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13202 #endif
13203 #if defined(TARGET_NR_epoll_ctl)
13204     case TARGET_NR_epoll_ctl:
13205     {
13206         struct epoll_event ep;
13207         struct epoll_event *epp = 0;
13208         if (arg4) {
13209             if (arg2 != EPOLL_CTL_DEL) {
13210                 struct target_epoll_event *target_ep;
13211                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13212                     return -TARGET_EFAULT;
13213                 }
13214                 ep.events = tswap32(target_ep->events);
13215                 /*
13216                  * The epoll_data_t union is just opaque data to the kernel,
13217                  * so we transfer all 64 bits across and need not worry what
13218                  * actual data type it is.
13219                  */
13220                 ep.data.u64 = tswap64(target_ep->data.u64);
13221                 unlock_user_struct(target_ep, arg4, 0);
13222             }
13223             /*
13224              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13225              * non-null pointer even though the argument is ignored, so
13226              * always pass &ep when the guest supplied one.
13227              */
13228             epp = &ep;
13229         }
13230         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13231     }
13232 #endif
13233 
13234 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13235 #if defined(TARGET_NR_epoll_wait)
13236     case TARGET_NR_epoll_wait:
13237 #endif
13238 #if defined(TARGET_NR_epoll_pwait)
13239     case TARGET_NR_epoll_pwait:
13240 #endif
13241     {
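        /*
         * epoll_wait and epoll_pwait share one path: events are collected
         * into a host array and only the entries actually returned are
         * byte-swapped back into the guest buffer.  epoll_pwait also
         * installs the guest's temporary signal mask around the wait.
         */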
13242         struct target_epoll_event *target_ep;
13243         struct epoll_event *ep;
13244         int epfd = arg1;
13245         int maxevents = arg3;
13246         int timeout = arg4;
13247 
13248         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13249             return -TARGET_EINVAL;
13250         }
13251 
13252         target_ep = lock_user(VERIFY_WRITE, arg2,
13253                               maxevents * sizeof(struct target_epoll_event), 1);
13254         if (!target_ep) {
13255             return -TARGET_EFAULT;
13256         }
13257 
13258         ep = g_try_new(struct epoll_event, maxevents);
13259         if (!ep) {
13260             unlock_user(target_ep, arg2, 0);
13261             return -TARGET_ENOMEM;
13262         }
13263 
13264         switch (num) {
13265 #if defined(TARGET_NR_epoll_pwait)
13266         case TARGET_NR_epoll_pwait:
13267         {
13268             sigset_t *set = NULL;
13269 
13270             if (arg5) {
13271                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13272                 if (ret != 0) {
13273                     break;
13274                 }
13275             }
13276 
13277             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13278                                              set, SIGSET_T_SIZE));
13279 
13280             if (set) {
13281                 finish_sigsuspend_mask(ret);
13282             }
13283             break;
13284         }
13285 #endif
13286 #if defined(TARGET_NR_epoll_wait)
13287         case TARGET_NR_epoll_wait:
13288             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13289                                              NULL, 0));
13290             break;
13291 #endif
13292         default:
13293             ret = -TARGET_ENOSYS;
13294         }
13295         if (!is_error(ret)) {
13296             int i;
13297             for (i = 0; i < ret; i++) {
13298                 target_ep[i].events = tswap32(ep[i].events);
13299                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13300             }
13301             unlock_user(target_ep, arg2,
13302                         ret * sizeof(struct target_epoll_event));
13303         } else {
13304             unlock_user(target_ep, arg2, 0);
13305         }
13306         g_free(ep);
13307         return ret;
13308     }
13309 #endif
13310 #endif
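    /*
     * epoll_wait() and epoll_pwait() share the conversion above: events
     * are collected in a host-side bounce buffer and then byte-swapped
     * into the guest array, since struct epoll_event may be packed and
     * laid out differently on the target.  epoll_pwait() additionally
     * swaps in the guest signal mask around the wait, using the same
     * sigsuspend helpers as the other p-variant syscalls.
     */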
13311 #ifdef TARGET_NR_prlimit64
13312     case TARGET_NR_prlimit64:
13313     {
13314         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13315         struct target_rlimit64 *target_rnew, *target_rold;
13316         struct host_rlimit64 rnew, rold, *rnewp = 0;
13317         int resource = target_to_host_resource(arg2);
13318 
13319         if (arg3 && (resource != RLIMIT_AS &&
13320                      resource != RLIMIT_DATA &&
13321                      resource != RLIMIT_STACK)) {
13322             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13323                 return -TARGET_EFAULT;
13324             }
13325             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13326             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13327             unlock_user_struct(target_rnew, arg3, 0);
13328             rnewp = &rnew;
13329         }
13330 
13331         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13332         if (!is_error(ret) && arg4) {
13333             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13334                 return -TARGET_EFAULT;
13335             }
13336             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13337             __put_user(rold.rlim_max, &target_rold->rlim_max);
13338             unlock_user_struct(target_rold, arg4, 1);
13339         }
13340         return ret;
13341     }
13342 #endif
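    /*
     * A new limit for RLIMIT_AS, RLIMIT_DATA or RLIMIT_STACK is
     * deliberately not forwarded to the host, since it would also
     * constrain the QEMU process itself rather than just the guest;
     * only reading the old values is honoured for those resources,
     * matching the setrlimit handling elsewhere in this file.
     */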
13343 #ifdef TARGET_NR_gethostname
13344     case TARGET_NR_gethostname:
13345     {
13346         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13347         if (name) {
13348             ret = get_errno(gethostname(name, arg2));
13349             unlock_user(name, arg1, arg2);
13350         } else {
13351             ret = -TARGET_EFAULT;
13352         }
13353         return ret;
13354     }
13355 #endif
13356 #ifdef TARGET_NR_atomic_cmpxchg_32
13357     case TARGET_NR_atomic_cmpxchg_32:
13358     {
13359         /* should use start_exclusive from main.c */
13360         abi_ulong mem_value;
13361         if (get_user_u32(mem_value, arg6)) {
13362             target_siginfo_t info;
13363             info.si_signo = SIGSEGV;
13364             info.si_errno = 0;
13365             info.si_code = TARGET_SEGV_MAPERR;
13366             info._sifields._sigfault._addr = arg6;
13367             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13368             /* The load faulted; don't use the uninitialized mem_value below. */
13369             return 0xdeadbeef;
13370         }
13371         if (mem_value == arg2)
13372             put_user_u32(arg1, arg6);
13373         return mem_value;
13374     }
13375 #endif
13376 #ifdef TARGET_NR_atomic_barrier
13377     case TARGET_NR_atomic_barrier:
13378         /* Like the kernel implementation and the QEMU Arm barrier,
13379          * treat this as a no-op. */
13380         return 0;
13381 #endif
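    /*
     * atomic_cmpxchg_32 and atomic_barrier are m68k-specific syscalls
     * that the kernel provides for CPUs lacking the corresponding
     * atomic instructions.  The emulation above is not truly atomic
     * (hence the start_exclusive note) and can race against other
     * guest threads.
     */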
13382 
13383 #ifdef TARGET_NR_timer_create
13384     case TARGET_NR_timer_create:
13385     {
13386         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13387 
13388         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13389 
13390         int clkid = arg1;
13391         int timer_index = next_free_host_timer();
13392 
13393         if (timer_index < 0) {
13394             ret = -TARGET_EAGAIN;
13395         } else {
13396             timer_t *phtimer = g_posix_timers + timer_index;
13397 
13398             if (arg2) {
13399                 phost_sevp = &host_sevp;
13400                 ret = target_to_host_sigevent(phost_sevp, arg2);
13401                 if (ret != 0) {
13402                     free_host_timer_slot(timer_index);
13403                     return ret;
13404                 }
13405             }
13406 
13407             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13408             if (ret) {
13409                 free_host_timer_slot(timer_index);
13410             } else {
13411                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13412                     timer_delete(*phtimer);
13413                     free_host_timer_slot(timer_index);
13414                     return -TARGET_EFAULT;
13415                 }
13416             }
13417         }
13418         return ret;
13419     }
13420 #endif
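    /*
     * Guest timer ids are not host timer_t values: the host timer is
     * kept in a slot of g_posix_timers[] and the guest sees
     * TIMER_MAGIC | slot_index.  get_timer_id(), used by the cases
     * below, checks the magic and recovers the slot, so a stale or
     * forged guest id fails with an error instead of indexing a
     * random slot.
     */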
13421 
13422 #ifdef TARGET_NR_timer_settime
13423     case TARGET_NR_timer_settime:
13424     {
13425         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13426          * struct itimerspec *old_value */
13427         target_timer_t timerid = get_timer_id(arg1);
13428 
13429         if (timerid < 0) {
13430             ret = timerid;
13431         } else if (arg3 == 0) {
13432             ret = -TARGET_EINVAL;
13433         } else {
13434             timer_t htimer = g_posix_timers[timerid];
13435             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13436 
13437             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13438                 return -TARGET_EFAULT;
13439             }
13440             ret = get_errno(
13441                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13442             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13443                 return -TARGET_EFAULT;
13444             }
13445         }
13446         return ret;
13447     }
13448 #endif
13449 
13450 #ifdef TARGET_NR_timer_settime64
13451     case TARGET_NR_timer_settime64:
13452     {
13453         target_timer_t timerid = get_timer_id(arg1);
13454 
13455         if (timerid < 0) {
13456             ret = timerid;
13457         } else if (arg3 == 0) {
13458             ret = -TARGET_EINVAL;
13459         } else {
13460             timer_t htimer = g_posix_timers[timerid];
13461             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13462 
13463             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13464                 return -TARGET_EFAULT;
13465             }
13466             ret = get_errno(
13467                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13468             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13469                 return -TARGET_EFAULT;
13470             }
13471         }
13472         return ret;
13473     }
13474 #endif
13475 
13476 #ifdef TARGET_NR_timer_gettime
13477     case TARGET_NR_timer_gettime:
13478     {
13479         /* args: timer_t timerid, struct itimerspec *curr_value */
13480         target_timer_t timerid = get_timer_id(arg1);
13481 
13482         if (timerid < 0) {
13483             ret = timerid;
13484         } else if (!arg2) {
13485             ret = -TARGET_EFAULT;
13486         } else {
13487             timer_t htimer = g_posix_timers[timerid];
13488             struct itimerspec hspec;
13489             ret = get_errno(timer_gettime(htimer, &hspec));
13490 
13491             if (host_to_target_itimerspec(arg2, &hspec)) {
13492                 ret = -TARGET_EFAULT;
13493             }
13494         }
13495         return ret;
13496     }
13497 #endif
13498 
13499 #ifdef TARGET_NR_timer_gettime64
13500     case TARGET_NR_timer_gettime64:
13501     {
13502         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13503         target_timer_t timerid = get_timer_id(arg1);
13504 
13505         if (timerid < 0) {
13506             ret = timerid;
13507         } else if (!arg2) {
13508             ret = -TARGET_EFAULT;
13509         } else {
13510             timer_t htimer = g_posix_timers[timerid];
13511             struct itimerspec hspec;
13512             ret = get_errno(timer_gettime(htimer, &hspec));
13513 
13514             if (host_to_target_itimerspec64(arg2, &hspec)) {
13515                 ret = -TARGET_EFAULT;
13516             }
13517         }
13518         return ret;
13519     }
13520 #endif
13521 
13522 #ifdef TARGET_NR_timer_getoverrun
13523     case TARGET_NR_timer_getoverrun:
13524     {
13525         /* args: timer_t timerid */
13526         target_timer_t timerid = get_timer_id(arg1);
13527 
13528         if (timerid < 0) {
13529             ret = timerid;
13530         } else {
13531             timer_t htimer = g_posix_timers[timerid];
13532             ret = get_errno(timer_getoverrun(htimer));
13533         }
13534         return ret;
13535     }
13536 #endif
13537 
13538 #ifdef TARGET_NR_timer_delete
13539     case TARGET_NR_timer_delete:
13540     {
13541         /* args: timer_t timerid */
13542         target_timer_t timerid = get_timer_id(arg1);
13543 
13544         if (timerid < 0) {
13545             ret = timerid;
13546         } else {
13547             timer_t htimer = g_posix_timers[timerid];
13548             ret = get_errno(timer_delete(htimer));
13549             free_host_timer_slot(timerid);
13550         }
13551         return ret;
13552     }
13553 #endif
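    /*
     * The timer_settime/gettime/getoverrun/delete cases above all
     * follow the same pattern: map the guest timer id back to the host
     * timer_t stored in g_posix_timers[], call the host API, and
     * convert any itimerspec results back to the guest layout.  The
     * *64 variants differ only in using the 64-bit itimerspec
     * conversion helpers for targets whose native timespec is 32-bit.
     */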
13554 
13555 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13556     case TARGET_NR_timerfd_create:
13557         ret = get_errno(timerfd_create(arg1,
13558                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13559         if (ret >= 0) {
13560             fd_trans_register(ret, &target_timerfd_trans);
13561         }
13562         return ret;
13563 #endif
13564 
13565 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13566     case TARGET_NR_timerfd_gettime:
13567         {
13568             struct itimerspec its_curr;
13569 
13570             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13571 
13572             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13573                 return -TARGET_EFAULT;
13574             }
13575         }
13576         return ret;
13577 #endif
13578 
13579 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13580     case TARGET_NR_timerfd_gettime64:
13581         {
13582             struct itimerspec its_curr;
13583 
13584             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13585 
13586             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13587                 return -TARGET_EFAULT;
13588             }
13589         }
13590         return ret;
13591 #endif
13592 
13593 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13594     case TARGET_NR_timerfd_settime:
13595         {
13596             struct itimerspec its_new, its_old, *p_new;
13597 
13598             if (arg3) {
13599                 if (target_to_host_itimerspec(&its_new, arg3)) {
13600                     return -TARGET_EFAULT;
13601                 }
13602                 p_new = &its_new;
13603             } else {
13604                 p_new = NULL;
13605             }
13606 
13607             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13608 
13609             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13610                 return -TARGET_EFAULT;
13611             }
13612         }
13613         return ret;
13614 #endif
13615 
13616 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13617     case TARGET_NR_timerfd_settime64:
13618         {
13619             struct itimerspec its_new, its_old, *p_new;
13620 
13621             if (arg3) {
13622                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13623                     return -TARGET_EFAULT;
13624                 }
13625                 p_new = &its_new;
13626             } else {
13627                 p_new = NULL;
13628             }
13629 
13630             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13631 
13632             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13633                 return -TARGET_EFAULT;
13634             }
13635         }
13636         return ret;
13637 #endif
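    /*
     * Beyond the flag translation at creation time, timerfd needs no
     * per-call conversion here: target_timerfd_trans, registered in
     * the timerfd_create case above, byte-swaps the 8-byte expiration
     * counter that the guest read()s from the descriptor.
     */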
13638 
13639 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13640     case TARGET_NR_ioprio_get:
13641         return get_errno(ioprio_get(arg1, arg2));
13642 #endif
13643 
13644 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13645     case TARGET_NR_ioprio_set:
13646         return get_errno(ioprio_set(arg1, arg2, arg3));
13647 #endif
13648 
13649 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13650     case TARGET_NR_setns:
13651         return get_errno(setns(arg1, arg2));
13652 #endif
13653 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13654     case TARGET_NR_unshare:
13655         return get_errno(unshare(arg1));
13656 #endif
13657 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13658     case TARGET_NR_kcmp:
13659         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13660 #endif
13661 #ifdef TARGET_NR_swapcontext
13662     case TARGET_NR_swapcontext:
13663         /* PowerPC specific.  */
13664         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13665 #endif
13666 #ifdef TARGET_NR_memfd_create
13667     case TARGET_NR_memfd_create:
13668         p = lock_user_string(arg1);
13669         if (!p) {
13670             return -TARGET_EFAULT;
13671         }
13672         ret = get_errno(memfd_create(p, arg2));
13673         fd_trans_unregister(ret);
13674         unlock_user(p, arg1, 0);
13675         return ret;
13676 #endif
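    /*
     * memfd_create() hands back a brand-new descriptor, so any fd
     * translator left behind by a previous user of that descriptor
     * number is dropped with fd_trans_unregister() before the fd is
     * returned to the guest.
     */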
13677 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13678     case TARGET_NR_membarrier:
13679         return get_errno(membarrier(arg1, arg2));
13680 #endif
13681 
13682 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13683     case TARGET_NR_copy_file_range:
13684         {
13685             loff_t inoff, outoff;
13686             loff_t *pinoff = NULL, *poutoff = NULL;
13687 
13688             if (arg2) {
13689                 if (get_user_u64(inoff, arg2)) {
13690                     return -TARGET_EFAULT;
13691                 }
13692                 pinoff = &inoff;
13693             }
13694             if (arg4) {
13695                 if (get_user_u64(outoff, arg4)) {
13696                     return -TARGET_EFAULT;
13697                 }
13698                 poutoff = &outoff;
13699             }
13700             /* Do not sign-extend the count parameter. */
13701             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13702                                                  (abi_ulong)arg5, arg6));
13703             if (!is_error(ret) && ret > 0) {
13704                 if (arg2) {
13705                     if (put_user_u64(inoff, arg2)) {
13706                         return -TARGET_EFAULT;
13707                     }
13708                 }
13709                 if (arg4) {
13710                     if (put_user_u64(outoff, arg4)) {
13711                         return -TARGET_EFAULT;
13712                     }
13713                 }
13714             }
13715         }
13716         return ret;
13717 #endif
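    /*
     * The optional in/out offsets of copy_file_range() are bounced
     * through local loff_t copies and written back only when the host
     * call actually copied data.  Casting the count to abi_ulong keeps
     * a large length from a 32-bit guest from being sign-extended into
     * a negative value.
     */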
13718 
13719 #if defined(TARGET_NR_pivot_root)
13720     case TARGET_NR_pivot_root:
13721         {
13722             void *p2;
13723             p = lock_user_string(arg1); /* new_root */
13724             p2 = lock_user_string(arg2); /* put_old */
13725             if (!p || !p2) {
13726                 ret = -TARGET_EFAULT;
13727             } else {
13728                 ret = get_errno(pivot_root(p, p2));
13729             }
13730             unlock_user(p2, arg2, 0);
13731             unlock_user(p, arg1, 0);
13732         }
13733         return ret;
13734 #endif
13735 
13736 #if defined(TARGET_NR_riscv_hwprobe)
13737     case TARGET_NR_riscv_hwprobe:
13738         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13739 #endif
13740 
13741     default:
13742         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13743         return -TARGET_ENOSYS;
13744     }
13745     return ret;
13746 }
13747 
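/*
 * do_syscall() is the entry point called from the per-target cpu_loop():
 * it wraps do_syscall1() above with the plugin syscall hooks and the
 * -strace logging, so the individual syscall cases never deal with
 * tracing themselves.
 */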
13748 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13749                     abi_long arg2, abi_long arg3, abi_long arg4,
13750                     abi_long arg5, abi_long arg6, abi_long arg7,
13751                     abi_long arg8)
13752 {
13753     CPUState *cpu = env_cpu(cpu_env);
13754     abi_long ret;
13755 
13756 #ifdef DEBUG_ERESTARTSYS
13757     /* Debug-only code for exercising the syscall-restart code paths
13758      * in the per-architecture cpu main loops: restart every syscall
13759      * the guest makes once before letting it through.
13760      */
13761     {
13762         static bool flag;
13763         flag = !flag;
13764         if (flag) {
13765             return -QEMU_ERESTARTSYS;
13766         }
13767     }
13768 #endif
13769 
13770     record_syscall_start(cpu, num, arg1,
13771                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13772 
13773     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13774         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13775     }
13776 
13777     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13778                       arg5, arg6, arg7, arg8);
13779 
13780     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13781         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13782                           arg3, arg4, arg5, arg6);
13783     }
13784 
13785     record_syscall_return(cpu, num, ret);
13786     return ret;
13787 }
13788