xref: /qemu/linux-user/syscall.c (revision 310df7a9)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include <elf.h>
30 #include <endian.h>
31 #include <grp.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/mount.h>
36 #include <sys/file.h>
37 #include <sys/fsuid.h>
38 #include <sys/personality.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
41 #include <sys/swap.h>
42 #include <linux/capability.h>
43 #include <sched.h>
44 #include <sys/timex.h>
45 #include <sys/socket.h>
46 #include <linux/sockios.h>
47 #include <sys/un.h>
48 #include <sys/uio.h>
49 #include <poll.h>
50 #include <sys/times.h>
51 #include <sys/shm.h>
52 #include <sys/sem.h>
53 #include <sys/statfs.h>
54 #include <utime.h>
55 #include <sys/sysinfo.h>
56 #include <sys/signalfd.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61 #include <linux/wireless.h>
62 #include <linux/icmp.h>
63 #include <linux/icmpv6.h>
64 #include <linux/if_tun.h>
65 #include <linux/in6.h>
66 #include <linux/errqueue.h>
67 #include <linux/random.h>
68 #ifdef CONFIG_TIMERFD
69 #include <sys/timerfd.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
83 #ifdef HAVE_SYS_KCOV_H
84 #include <sys/kcov.h>
85 #endif
86 
87 #define termios host_termios
88 #define winsize host_winsize
89 #define termio host_termio
90 #define sgttyb host_sgttyb /* same as target */
91 #define tchars host_tchars /* same as target */
92 #define ltchars host_ltchars /* same as target */
93 
94 #include <linux/termios.h>
95 #include <linux/unistd.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #include <linux/fd.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #if defined(CONFIG_USBFS)
108 #include <linux/usbdevice_fs.h>
109 #include <linux/usb/ch9.h>
110 #endif
111 #include <linux/vt.h>
112 #include <linux/dm-ioctl.h>
113 #include <linux/reboot.h>
114 #include <linux/route.h>
115 #include <linux/filter.h>
116 #include <linux/blkpg.h>
117 #include <netpacket/packet.h>
118 #include <linux/netlink.h>
119 #include <linux/if_alg.h>
120 #include <linux/rtc.h>
121 #include <sound/asound.h>
122 #ifdef HAVE_BTRFS_H
123 #include <linux/btrfs.h>
124 #endif
125 #ifdef HAVE_DRM_H
126 #include <libdrm/drm.h>
127 #include <libdrm/i915_drm.h>
128 #endif
129 #include "linux_loop.h"
130 #include "uname.h"
131 
132 #include "qemu.h"
133 #include "user-internals.h"
134 #include "strace.h"
135 #include "signal-common.h"
136 #include "loader.h"
137 #include "user-mmap.h"
138 #include "user/safe-syscall.h"
139 #include "qemu/guest-random.h"
140 #include "qemu/selfmap.h"
141 #include "user/syscall-trace.h"
142 #include "special-errno.h"
143 #include "qapi/error.h"
144 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
/*
 * _syscallN(type, name, ...): define a static wrapper function "name"
 * that invokes the raw host syscall __NR_name with 0..6 arguments via
 * syscall(2), deliberately bypassing any libc wrapper of the same name
 * (the glibc versions were #undef'd above).
 */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
                  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
/*
 * sched_attr is not defined in glibc, so mirror the layout used by the
 * sched_getattr/sched_setattr syscalls (kernel uapi linux/sched/types.h).
 */
struct sched_attr {
    uint32_t size;          /* size of this structure, for versioning */
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
/*
 * Translation table between the target's and the host's encodings of
 * the open(2)/fcntl(2) file status flags.  Each entry maps the bits
 * selected by a target mask to the corresponding host bits.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
};
459 
460 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
461 
462 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
463 #if defined(__NR_utimensat)
464 #define __NR_sys_utimensat __NR_utimensat
465 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
466           const struct timespec *,tsp,int,flags)
467 #else
468 static int sys_utimensat(int dirfd, const char *pathname,
469                          const struct timespec times[2], int flags)
470 {
471     errno = ENOSYS;
472     return -1;
473 }
474 #endif
475 #endif /* TARGET_NR_utimensat */
476 
477 #ifdef TARGET_NR_renameat2
478 #if defined(__NR_renameat2)
479 #define __NR_sys_renameat2 __NR_renameat2
480 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
481           const char *, new, unsigned int, flags)
482 #else
483 static int sys_renameat2(int oldfd, const char *old,
484                          int newfd, const char *new, int flags)
485 {
486     if (flags == 0) {
487         return renameat(oldfd, old, newfd, new);
488     }
489     errno = ENOSYS;
490     return -1;
491 }
492 #endif
493 #endif /* TARGET_NR_renameat2 */
494 
495 #ifdef CONFIG_INOTIFY
496 #include <sys/inotify.h>
497 #else
498 /* Userspace can usually survive runtime without inotify */
499 #undef TARGET_NR_inotify_init
500 #undef TARGET_NR_inotify_init1
501 #undef TARGET_NR_inotify_add_watch
502 #undef TARGET_NR_inotify_rm_watch
503 #endif /* CONFIG_INOTIFY  */
504 
505 #if defined(TARGET_NR_prlimit64)
506 #ifndef __NR_prlimit64
507 # define __NR_prlimit64 -1
508 #endif
509 #define __NR_sys_prlimit64 __NR_prlimit64
510 /* The glibc rlimit structure may not be that used by the underlying syscall */
511 struct host_rlimit64 {
512     uint64_t rlim_cur;
513     uint64_t rlim_max;
514 };
515 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
516           const struct host_rlimit64 *, new_limit,
517           struct host_rlimit64 *, old_limit)
518 #endif
519 
520 
521 #if defined(TARGET_NR_timer_create)
522 /* Maximum of 32 active POSIX timers allowed at any one time. */
523 #define GUEST_TIMER_MAX 32
524 static timer_t g_posix_timers[GUEST_TIMER_MAX];
525 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
526 
next_free_host_timer(void)527 static inline int next_free_host_timer(void)
528 {
529     int k;
530     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
531         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
532             return k;
533         }
534     }
535     return -1;
536 }
537 
/*
 * Return a slot claimed by next_free_host_timer() to the free pool.
 * The release store ensures writes made while the slot was owned are
 * visible before the slot can be claimed again.
 */
static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
542 #endif
543 
/*
 * Translate a host errno value to the target's numbering.
 * errnos.c.inc expands E(X) into one "case X: return TARGET_X;" per
 * errno; any value without a mapping is passed through unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
554 
/*
 * Translate a target errno value to the host's numbering (the inverse
 * of host_to_target_errno()); unmapped values pass through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
565 
/*
 * Convert a host syscall result into a target-visible value: a
 * successful result is returned as-is, while -1 is turned into the
 * negated target errno taken from the host's errno.
 */
abi_long get_errno(abi_long ret)
{
    if (ret != -1) {
        return ret;
    }
    return -host_to_target_errno(errno);
}
573 
target_strerror(int err)574 const char *target_strerror(int err)
575 {
576     if (err == QEMU_ERESTARTSYS) {
577         return "To be restarted";
578     }
579     if (err == QEMU_ESIGRETURN) {
580         return "Successful exit from sigreturn";
581     }
582 
583     return strerror(target_to_host_errno(err));
584 }
585 
/*
 * Check that guest memory in [addr + ksize, addr + usize) is all zero.
 *
 * Used when the guest passes a struct larger than the size QEMU knows
 * about: the extension is acceptable only if the extra tail bytes are
 * zero (mirroring the kernel's check_zeroed_user()).
 *
 * Returns 1 if the tail is zero (or there is no tail, usize <= ksize),
 * 0 if a non-zero byte is found, -TARGET_EFAULT if the guest memory
 * cannot be read.
 */
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    /*
     * Use size_t for the index: the original int index is compared
     * against the size_t bound, and would overflow (UB) for a tail
     * extending past INT_MAX bytes.
     */
    size_t i;
    uint8_t b;

    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}
603 
604 /*
605  * Copies a target struct to a host struct, in a way that guarantees
606  * backwards-compatibility for struct syscall arguments.
607  *
608  * Similar to kernels uaccess.h:copy_struct_from_user()
609  */
copy_struct_from_user(void * dst,size_t ksize,abi_ptr src,size_t usize)610 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
611 {
612     size_t size = MIN(ksize, usize);
613     size_t rest = MAX(ksize, usize) - size;
614 
615     /* Deal with trailing bytes. */
616     if (usize < ksize) {
617         memset(dst + size, 0, rest);
618     } else if (usize > ksize) {
619         int ret = check_zeroed_user(src, ksize, usize);
620         if (ret <= 0) {
621             return ret ?: -TARGET_E2BIG;
622         }
623     }
624     /* Copy the interoperable parts of the struct. */
625     if (copy_from_user(dst, src, size)) {
626         return -TARGET_EFAULT;
627     }
628     return 0;
629 }
630 
/*
 * safe_syscallN(type, name, ...): define a static wrapper safe_NAME
 * that invokes the host syscall through safe_syscall() (see
 * user/safe-syscall.h) with 0..6 arguments, instead of the raw
 * syscall() used by the _syscallN macros above.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
677 
safe_syscall3(ssize_t,read,int,fd,void *,buff,size_t,count)678 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
679 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
680 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
681               int, flags, mode_t, mode)
682 
683 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
684               const struct open_how_ver0 *, how, size_t, size)
685 
686 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
687 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
688               struct rusage *, rusage)
689 #endif
690 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
691               int, options, struct rusage *, rusage)
692 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
693 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
694               char **, argv, char **, envp, int, flags)
695 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
696     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
697 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
698               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
699 #endif
700 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
701 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
702               struct timespec *, tsp, const sigset_t *, sigmask,
703               size_t, sigsetsize)
704 #endif
705 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
706               int, maxevents, int, timeout, const sigset_t *, sigmask,
707               size_t, sigsetsize)
708 #if defined(__NR_futex)
709 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710               const struct timespec *,timeout,int *,uaddr2,int,val3)
711 #endif
712 #if defined(__NR_futex_time64)
713 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
714               const struct timespec *,timeout,int *,uaddr2,int,val3)
715 #endif
716 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
717 safe_syscall2(int, kill, pid_t, pid, int, sig)
718 safe_syscall2(int, tkill, int, tid, int, sig)
719 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
720 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
721 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
722 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
723               unsigned long, pos_l, unsigned long, pos_h)
724 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
725               unsigned long, pos_l, unsigned long, pos_h)
726 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
727               socklen_t, addrlen)
728 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
729               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
730 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
731               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
732 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
733 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
734 safe_syscall2(int, flock, int, fd, int, operation)
735 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
736 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
737               const struct timespec *, uts, size_t, sigsetsize)
738 #endif
739 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
740               int, flags)
741 #if defined(TARGET_NR_nanosleep)
742 safe_syscall2(int, nanosleep, const struct timespec *, req,
743               struct timespec *, rem)
744 #endif
745 #if defined(TARGET_NR_clock_nanosleep) || \
746     defined(TARGET_NR_clock_nanosleep_time64)
747 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
748               const struct timespec *, req, struct timespec *, rem)
749 #endif
750 #ifdef __NR_ipc
751 #ifdef __s390x__
752 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
753               void *, ptr)
754 #else
755 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
756               void *, ptr, long, fifth)
757 #endif
758 #endif
759 #ifdef __NR_msgsnd
760 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
761               int, flags)
762 #endif
763 #ifdef __NR_msgrcv
764 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
765               long, msgtype, int, flags)
766 #endif
767 #ifdef __NR_semtimedop
768 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
769               unsigned, nsops, const struct timespec *, timeout)
770 #endif
771 #if defined(TARGET_NR_mq_timedsend) || \
772     defined(TARGET_NR_mq_timedsend_time64)
773 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
774               size_t, len, unsigned, prio, const struct timespec *, timeout)
775 #endif
776 #if defined(TARGET_NR_mq_timedreceive) || \
777     defined(TARGET_NR_mq_timedreceive_time64)
778 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
779               size_t, len, unsigned *, prio, const struct timespec *, timeout)
780 #endif
781 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
782 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
783               int, outfd, loff_t *, poutoff, size_t, length,
784               unsigned int, flags)
785 #endif
786 
787 /* We do ioctl like this rather than via safe_syscall3 to preserve the
788  * "third argument might be integer or pointer or not present" behaviour of
789  * the libc function.
790  */
791 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
792 /* Similarly for fcntl. Since we always build with LFS enabled,
793  * we should be using the 64-bit structures automatically.
794  */
795 #ifdef __NR_fcntl64
796 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
797 #else
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
799 #endif
800 
801 static inline int host_to_target_sock_type(int host_type)
802 {
803     int target_type;
804 
805     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
806     case SOCK_DGRAM:
807         target_type = TARGET_SOCK_DGRAM;
808         break;
809     case SOCK_STREAM:
810         target_type = TARGET_SOCK_STREAM;
811         break;
812     default:
813         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
814         break;
815     }
816 
817 #if defined(SOCK_CLOEXEC)
818     if (host_type & SOCK_CLOEXEC) {
819         target_type |= TARGET_SOCK_CLOEXEC;
820     }
821 #endif
822 
823 #if defined(SOCK_NONBLOCK)
824     if (host_type & SOCK_NONBLOCK) {
825         target_type |= TARGET_SOCK_NONBLOCK;
826     }
827 #endif
828 
829     return target_type;
830 }
831 
832 static abi_ulong target_brk, initial_target_brk;
833 
/*
 * Record the initial program break (page-aligned); do_brk() never
 * allows the break to shrink below this value.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    initial_target_brk = target_brk;
}
839 
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow to shrink below initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

    /*
     * Grow the heap.  MAP_FIXED_NOREPLACE means the mapping fails
     * rather than clobbering anything already mapped at old_brk, so
     * success is indicated by the returned address equalling old_brk.
     */
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
889 
890 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
copy_from_user_fdset(fd_set * fds,abi_ulong target_fds_addr,int n)892 static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                             abi_ulong target_fds_addr,
894                                             int n)
895 {
896     int i, nw, j, k;
897     abi_ulong b, *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_READ,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  1)))
904         return -TARGET_EFAULT;
905 
906     FD_ZERO(fds);
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         /* grab the abi_ulong */
910         __get_user(b, &target_fds[i]);
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             /* check the bit inside the abi_ulong */
913             if ((b >> j) & 1)
914                 FD_SET(k, fds);
915             k++;
916         }
917     }
918 
919     unlock_user(target_fds, target_fds_addr, 0);
920 
921     return 0;
922 }
923 
/*
 * As copy_from_user_fdset(), but tolerate a NULL guest pointer:
 * *fds_ptr is set to fds on success, or to NULL when target_fds_addr
 * is 0 (select(2)'s "no set supplied").  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n)) {
            return -TARGET_EFAULT;
        }
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
937 
copy_to_user_fdset(abi_ulong target_fds_addr,const fd_set * fds,int n)938 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                           const fd_set *fds,
940                                           int n)
941 {
942     int i, nw, j, k;
943     abi_long v;
944     abi_ulong *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_WRITE,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  0)))
951         return -TARGET_EFAULT;
952 
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         v = 0;
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958             k++;
959         }
960         __put_user(v, &target_fds[i]);
961     }
962 
963     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964 
965     return 0;
966 }
967 #endif
968 
/* Host clock tick rate assumed for clock_t conversions: Alpha hosts
 * use 1024 Hz, everything else 100 Hz.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
974 
host_to_target_clock_t(long ticks)975 static inline abi_long host_to_target_clock_t(long ticks)
976 {
977 #if HOST_HZ == TARGET_HZ
978     return ticks;
979 #else
980     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981 #endif
982 }
983 
/*
 * Copy a host struct rusage out to a struct target_rusage in guest
 * memory, byte-swapping every field.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1013 
1014 #ifdef TARGET_NR_setrlimit
target_to_host_rlim(abi_ulong target_rlim)1015 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016 {
1017     abi_ulong target_rlim_swap;
1018     rlim_t result;
1019 
1020     target_rlim_swap = tswapal(target_rlim);
1021     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022         return RLIM_INFINITY;
1023 
1024     result = target_rlim_swap;
1025     if (target_rlim_swap != (rlim_t)result)
1026         return RLIM_INFINITY;
1027 
1028     return result;
1029 }
1030 #endif
1031 
1032 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
host_to_target_rlim(rlim_t rlim)1033 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     abi_ulong result;
1037 
1038     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039         target_rlim_swap = TARGET_RLIM_INFINITY;
1040     else
1041         target_rlim_swap = rlim;
1042     result = tswapal(target_rlim_swap);
1043 
1044     return result;
1045 }
1046 #endif
1047 
/*
 * Map a TARGET_RLIMIT_* resource code onto the host RLIMIT_* value.
 * Unknown codes are passed through unchanged and left for the host
 * syscall to accept or reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    /* RLIMIT_RTTIME is not available on every host libc. */
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1089 
copy_from_user_timeval(struct timeval * tv,abi_ulong target_tv_addr)1090 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                               abi_ulong target_tv_addr)
1092 {
1093     struct target_timeval *target_tv;
1094 
1095     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096         return -TARGET_EFAULT;
1097     }
1098 
1099     __get_user(tv->tv_sec, &target_tv->tv_sec);
1100     __get_user(tv->tv_usec, &target_tv->tv_usec);
1101 
1102     unlock_user_struct(target_tv, target_tv_addr, 0);
1103 
1104     return 0;
1105 }
1106 
copy_to_user_timeval(abi_ulong target_tv_addr,const struct timeval * tv)1107 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                             const struct timeval *tv)
1109 {
1110     struct target_timeval *target_tv;
1111 
1112     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113         return -TARGET_EFAULT;
1114     }
1115 
1116     __put_user(tv->tv_sec, &target_tv->tv_sec);
1117     __put_user(tv->tv_usec, &target_tv->tv_usec);
1118 
1119     unlock_user_struct(target_tv, target_tv_addr, 1);
1120 
1121     return 0;
1122 }
1123 
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a 64-bit guest timeval (__kernel_sock_timeval layout) into
 * *tv.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *ttv;

    if (!lock_user_struct(VERIFY_READ, ttv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &ttv->tv_sec);
    __get_user(tv->tv_usec, &ttv->tv_usec);

    unlock_user_struct(ttv, target_tv_addr, 0);

    return 0;
}
#endif
1142 
copy_to_user_timeval64(abi_ulong target_tv_addr,const struct timeval * tv)1143 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                               const struct timeval *tv)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a struct target_timespec from guest memory into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 *
 * Guard fix: the original condition tested TARGET_NR_pselect6 twice;
 * the second term must be TARGET_NR_pselect6_time64, because
 * do_pselect6() (compiled when either syscall is defined) references
 * this helper unconditionally at compile time.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1183 
1184 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185     defined(TARGET_NR_timer_settime64) || \
1186     defined(TARGET_NR_mq_timedsend_time64) || \
1187     defined(TARGET_NR_mq_timedreceive_time64) || \
1188     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189     defined(TARGET_NR_clock_nanosleep_time64) || \
1190     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191     defined(TARGET_NR_utimensat) || \
1192     defined(TARGET_NR_utimensat_time64) || \
1193     defined(TARGET_NR_semtimedop_time64) || \
1194     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
target_to_host_timespec64(struct timespec * host_ts,abi_ulong target_addr)1195 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                  abi_ulong target_addr)
1197 {
1198     struct target__kernel_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     /* in 32bit mode, this drops the padding */
1206     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207     unlock_user_struct(target_ts, target_addr, 0);
1208     return 0;
1209 }
1210 #endif
1211 
host_to_target_timespec(abi_ulong target_addr,struct timespec * host_ts)1212 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                struct timespec *host_ts)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 1);
1223     return 0;
1224 }
1225 
host_to_target_timespec64(abi_ulong target_addr,struct timespec * host_ts)1226 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                  struct timespec *host_ts)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 1);
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_gettimeofday)
copy_to_user_timezone(abi_ulong target_tz_addr,struct timezone * tz)1241 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                              struct timezone *tz)
1243 {
1244     struct target_timezone *target_tz;
1245 
1246     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247         return -TARGET_EFAULT;
1248     }
1249 
1250     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252 
1253     unlock_user_struct(target_tz, target_tz_addr, 1);
1254 
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_settimeofday)
copy_from_user_timezone(struct timezone * tz,abi_ulong target_tz_addr)1260 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                abi_ulong target_tz_addr)
1262 {
1263     struct target_timezone *target_tz;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266         return -TARGET_EFAULT;
1267     }
1268 
1269     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271 
1272     unlock_user_struct(target_tz, target_tz_addr, 0);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279 #include <mqueue.h>
1280 
copy_from_user_mq_attr(struct mq_attr * attr,abi_ulong target_mq_attr_addr)1281 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                               abi_ulong target_mq_attr_addr)
1283 {
1284     struct target_mq_attr *target_mq_attr;
1285 
1286     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                           target_mq_attr_addr, 1))
1288         return -TARGET_EFAULT;
1289 
1290     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294 
1295     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296 
1297     return 0;
1298 }
1299 
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,const struct mq_attr * attr)1300 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                             const struct mq_attr *attr)
1302 {
1303     struct target_mq_attr *target_mq_attr;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                           target_mq_attr_addr, 0))
1307         return -TARGET_EFAULT;
1308 
1309     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313 
1314     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull the three guest fd_sets in; a 0 address yields a NULL set. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() supplies a timeval, but pselect6 wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the modified fd_sets and remaining timeout back out. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1377 
1378 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Old-style select(): all five arguments arrive packed in a
 * struct target_sel_arg_struct pointed to by arg1.
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    /* Unpack (and byte-swap) the five select() arguments. */
    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
1399 #endif
1400 #endif
1401 
1402 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6/pselect6_time64 (time64 selects the guest
 * timespec layout).  arg1..arg5 carry nfds, the three fd_set
 * addresses and the timeout address; arg6 points to a two-word
 * guest structure packing the sigset address and its size.
 * Returns the number of ready descriptors or a target errno.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* Pull the three guest fd_sets in; a 0 address yields a NULL set. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* Install the guest's signal mask for the duration of the call. */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        /* Copy fd_sets and the remaining timeout back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1512 #endif
1513 
1514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515     defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll(2) (ppoll == false) and ppoll/ppoll_time64
 * (ppoll == true, time64 selecting the guest timespec layout).
 * arg1: guest pollfd array, arg2: nfds, arg3: timeout (milliseconds
 * for poll, guest timespec address for ppoll), arg4/arg5: guest
 * sigset address and size.  Returns the number of ready descriptors
 * or a target errno.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Install the guest's signal mask for the duration of the call. */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            /* Write the remaining timeout back to the guest. */
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    /* Release the pollfd lock on this error path too
                     * (previously leaked here).
                     */
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* Copy revents back to the guest pollfd array. */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
1611 #endif
1612 
/*
 * Implement pipe()/pipe2().  flags are passed through to the host
 * pipe2().  is_pipe2 distinguishes the original pipe syscall, which
 * on several targets returns the two descriptors in registers rather
 * than through the pipedes buffer.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes in a register; the return value is the first fd. */
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Normal convention: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1646 
/*
 * Copy a guest sockaddr (len bytes at target_addr) into *addr,
 * byte-swapping the family field plus the family-specific fields
 * that need it (netlink, packet, IPv6).  AF_UNIX addresses get the
 * same sun_path length fixups the kernel applies.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types register their own address translator; use it. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Count the terminating NUL if it is present but uncounted. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1708 
host_to_target_sockaddr(abi_ulong target_addr,struct sockaddr * addr,socklen_t len)1709 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1710                                                struct sockaddr *addr,
1711                                                socklen_t len)
1712 {
1713     struct target_sockaddr *target_saddr;
1714 
1715     if (len == 0) {
1716         return 0;
1717     }
1718     assert(addr);
1719 
1720     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1721     if (!target_saddr)
1722         return -TARGET_EFAULT;
1723     memcpy(target_saddr, addr, len);
1724     if (len >= offsetof(struct target_sockaddr, sa_family) +
1725         sizeof(target_saddr->sa_family)) {
1726         target_saddr->sa_family = tswap16(addr->sa_family);
1727     }
1728     if (addr->sa_family == AF_NETLINK &&
1729         len >= sizeof(struct target_sockaddr_nl)) {
1730         struct target_sockaddr_nl *target_nl =
1731                (struct target_sockaddr_nl *)target_saddr;
1732         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1733         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1734     } else if (addr->sa_family == AF_PACKET) {
1735         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1736         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1737         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1738     } else if (addr->sa_family == AF_INET6 &&
1739                len >= sizeof(struct target_sockaddr_in6)) {
1740         struct target_sockaddr_in6 *target_in6 =
1741                (struct target_sockaddr_in6 *)target_saddr;
1742         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1743     }
1744     unlock_user(target_saddr, target_addr, len);
1745 
1746     return 0;
1747 }
1748 
/*
 * Convert the ancillary data (control messages) of a guest msghdr into
 * the host-format cmsg buffer that the caller has already attached to
 * @msgh.  The guest and host buffers are walked in lockstep; on return
 * msgh->msg_controllen is set to the number of host bytes actually
 * filled in.  Returns 0 on success or -TARGET_EFAULT if the guest
 * control buffer cannot be locked for reading.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;   /* host control bytes consumed so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Nothing to convert if there isn't room for even one header.  */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest control message (header
         * excluded).
         */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* Translate the socket-level constant; other levels are
         * assumed to share numeric values between guest and host.
         */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: byte-swap each int in the
             * payload.
             */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Process credentials: convert field by field.  */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            /* Unknown payload type: log and pass the bytes through
             * unconverted.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1842 
/*
 * Convert host-format ancillary data in @msgh back into the guest's
 * control buffer described by @target_msgh.  Payloads may need to
 * change size between host and target (e.g. struct timeval), and the
 * guest's buffer may be too small; in the latter case truncation is
 * reported to the guest via MSG_CTRUNC in msg_flags, mirroring the
 * kernel's put_cmsg() behaviour.  Returns 0 on success or
 * -TARGET_EFAULT if the guest control buffer cannot be locked for
 * writing.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;   /* guest control bytes written so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length (header excluded).  */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* Translate the socket-level constant; other levels are
         * assumed to share numeric values between guest and host.
         */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* If the guest buffer can't hold the whole payload, flag the
         * truncation and clamp tgt_len to what fits.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Passed file descriptors: swap each int that fits.  */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                /* Process credentials: convert field by field.  */
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err followed by the
                 * offending peer's address, as the kernel lays it out.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 variant of the extended-error layout above.  */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: log, copy what fits, zero-fill any
             * excess destination space.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the space consumed in the guest buffer; the last
         * message may use less than a full TARGET_CMSG_SPACE.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2074 
2075 /* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These take an int, but a single byte is also accepted
             * when the guest passed a shorter buffer.
             */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
        {
            /* Accept any optlen between the short struct ip_mreq and
             * the longer struct ip_mreqn (which adds imr_ifindex).
             */
            struct ip_mreqn ip_mreq;
            struct target_ip_mreqn *target_smreqn;

            QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
                              sizeof(struct target_ip_mreq));

            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn)) {
                return -TARGET_EINVAL;
            }

            target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!target_smreqn) {
                return -TARGET_EFAULT;
            }
            /* The IP addresses stay in network byte order; only the
             * interface index (if present) needs byte-swapping.
             */
            ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
            ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
            if (optlen == sizeof(struct target_ip_mreqn)) {
                ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
                optlen = sizeof(struct ip_mreqn);
            }
            unlock_user(target_smreqn, optval_addr, 0);

            ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
            break;
        }
        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
        {
            /* struct ip_mreq_source contains only network-order
             * addresses, so the guest copy is passed through as-is.
             */
            struct ip_mreq_source *ip_mreq_source;

            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* int-valued IPv6 options */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* only the interface index needs byte-swapping */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* only the interface index needs byte-swapping */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* byte-swap each 32-bit word of the filter bitmap */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* raw key bytes; passed through without conversion */
            char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!alg_key) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            unlock_user(alg_key, optval_addr, optlen);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* the auth size is carried in optlen itself; optval is
             * unused for this option
             */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        {
                struct timeval tv;

                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                optname == TARGET_SO_RCVTIMEO ?
                                    SO_RCVTIMEO : SO_SNDTIMEO,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Convert the guest's classic-BPF program entry by
                 * entry into a host struct sock_fprog.
                 */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
	case TARGET_SO_BINDTODEVICE:
	{
		char *dev_ifname, *addr_ifname;

		if (optlen > IFNAMSIZ - 1) {
		    optlen = IFNAMSIZ - 1;
		}
		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
		if (!dev_ifname) {
		    return -TARGET_EFAULT;
		}
		optname = SO_BINDTODEVICE;
		/* bounce through a stack buffer to guarantee the
		 * interface name is NUL-terminated
		 */
		addr_ifname = alloca(IFNAMSIZ);
		memcpy(addr_ifname, dev_ifname, optlen);
		addr_ifname[optlen] = 0;
		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
		unlock_user (dev_ifname, optval_addr, 0);
		return ret;
	}
        case TARGET_SO_LINGER:
        {
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	/* common tail for the int-valued SOL_SOCKET options above */
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* all supported netlink options take an int */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2548 
/*
 * do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2) for the guest: translates the target-level
 * option constants to host ones, performs the host getsockopt(), and
 * converts the result (value and length) back into guest memory at
 * @optval_addr / @optlen.  Options whose value is not a plain int
 * (timeouts, ucred, linger, security labels) get dedicated marshalling.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

/* Shared marshalling for SO_RCVTIMEO/SO_SNDTIMEO: fetch a host
 * struct timeval and copy it out as a target_timeval, clamping the
 * reported length to the target structure size. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            /* struct ucred: field-by-field byte swap into the target
             * layout, truncated to the caller-supplied length. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* Security label is an opaque string; copy it through
             * verbatim and report the kernel-updated length. */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            /* struct linger: two-int structure, swapped per field. */
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            /* Unknown option: pass through unchanged and let the host
             * kernel reject it if it is truly unsupported. */
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): this is sizeof(lv), not sizeof(val); the two are
         * the same (4 bytes) on all supported hosts, so behavior is
         * unaffected, but sizeof(val) looks like the intent. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        /* A couple of int-valued results carry encoded constants that
         * differ between host and target ABIs. */
        switch (optname) {
        case SO_TYPE:
            val = host_to_target_sock_type(val);
            break;
        case SO_ERROR:
            val = host_to_target_errno(val);
            break;
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            /* Guest asked for fewer than 4 bytes: store a single byte,
             * matching the kernel's behavior for short optlen. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* The kernel returns these as one byte when the caller's
             * buffer is short and the value fits; mirror that here. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same short-buffer byte semantics as the SOL_IP cases. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            /* Netlink int options insist on an exact 4-byte length. */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            /* Variable-length array of u32 group ids written directly
             * into the guest buffer, then byte-swapped in place. */
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2971 
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 *
 * Note the shifts are deliberately done in two half-width steps: a
 * single shift by TARGET_LONG_BITS (or HOST_LONG_BITS) would be a
 * shift by the full width of the type when longs are 64-bit, which is
 * undefined behavior in C.  Two shifts of width/2 each are always
 * well-defined and produce 0 for the high part on 64-bit longs.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
2988 
/*
 * Build a host struct iovec array from a guest iovec array at
 * @target_addr, locking each guest buffer into host memory.
 *
 * @type:  VERIFY_READ or VERIFY_WRITE (direction of the I/O)
 * @count: number of iovec entries
 * @copy:  passed to lock_user(); nonzero to copy guest data in
 *
 * Returns the host vector, or NULL with errno set on failure
 * (EINVAL for bad count/length, EFAULT for a bad first buffer,
 * ENOMEM on allocation failure).  On success the caller must release
 * the result with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* Zero-entry vector is valid; signal it with NULL + errno 0. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so the whole request stays below
             * the maximum transfer size computed above. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (entries forced to
     * zero length above hold NULL, which unlock_user tolerates). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3076 
/*
 * Release a host iovec array previously built by lock_iovec().
 * When @copy is nonzero (a receive-style operation) the locked data
 * is copied back to the guest buffers; otherwise it is just dropped.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *guest_vec;

    guest_vec = lock_user(VERIFY_READ, target_addr,
                          count * sizeof(struct target_iovec), 1);
    if (guest_vec != NULL) {
        int i;

        for (i = 0; i < count; i++) {
            abi_ulong guest_base = tswapal(guest_vec[i].iov_base);
            abi_long guest_len = tswapal(guest_vec[i].iov_len);

            /* A negative length marks the point where lock_iovec()
             * stopped; nothing beyond it was locked. */
            if (guest_len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, guest_base,
                        copy ? vec[i].iov_len : 0);
        }
        unlock_user(guest_vec, target_addr, 0);
    }

    g_free(vec);
}
3099 
target_to_host_sock_type(int * type)3100 static inline int target_to_host_sock_type(int *type)
3101 {
3102     int host_type = 0;
3103     int target_type = *type;
3104 
3105     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3106     case TARGET_SOCK_DGRAM:
3107         host_type = SOCK_DGRAM;
3108         break;
3109     case TARGET_SOCK_STREAM:
3110         host_type = SOCK_STREAM;
3111         break;
3112     default:
3113         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3114         break;
3115     }
3116     if (target_type & TARGET_SOCK_CLOEXEC) {
3117 #if defined(SOCK_CLOEXEC)
3118         host_type |= SOCK_CLOEXEC;
3119 #else
3120         return -TARGET_EINVAL;
3121 #endif
3122     }
3123     if (target_type & TARGET_SOCK_NONBLOCK) {
3124 #if defined(SOCK_NONBLOCK)
3125         host_type |= SOCK_NONBLOCK;
3126 #elif !defined(O_NONBLOCK)
3127         return -TARGET_EINVAL;
3128 #endif
3129     }
3130     *type = host_type;
3131     return 0;
3132 }
3133 
/*
 * Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK the flag cannot be passed to
 * socket(2); emulate it afterwards with fcntl(F_SETFL, O_NONBLOCK).
 * Returns @fd on success; on failure the descriptor is closed and
 * -TARGET_EINVAL is returned so the caller propagates the error.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Check F_GETFL too: on failure it returns -1, and OR-ing
         * O_NONBLOCK into -1 would pass a garbage flag set to F_SETFL. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3148 
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): converts the guest type flags to host form,
 * restricts PF_NETLINK to the protocols we know how to translate,
 * creates the host socket, and registers a data translator for fds
 * whose traffic needs byte-swapping between guest and host.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a translator (or none needed) are
     * allowed; anything else would leak raw host-endian messages. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* Packet sockets take the protocol in network byte order. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* May emulate SOCK_NONBLOCK via fcntl on old hosts; on failure
         * it closes the fd and returns a target errno. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the protocol check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3202 
3203 /* do_bind() Must return target values and target errnos. */
do_bind(int sockfd,abi_ulong target_addr,socklen_t addrlen)3204 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3205                         socklen_t addrlen)
3206 {
3207     void *addr;
3208     abi_long ret;
3209 
3210     if ((int)addrlen < 0) {
3211         return -TARGET_EINVAL;
3212     }
3213 
3214     addr = alloca(addrlen+1);
3215 
3216     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3217     if (ret)
3218         return ret;
3219 
3220     return get_errno(bind(sockfd, addr, addrlen));
3221 }
3222 
3223 /* do_connect() Must return target values and target errnos. */
do_connect(int sockfd,abi_ulong target_addr,socklen_t addrlen)3224 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3225                            socklen_t addrlen)
3226 {
3227     void *addr;
3228     abi_long ret;
3229 
3230     if ((int)addrlen < 0) {
3231         return -TARGET_EINVAL;
3232     }
3233 
3234     addr = alloca(addrlen+1);
3235 
3236     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3237     if (ret)
3238         return ret;
3239 
3240     return get_errno(safe_connect(sockfd, addr, addrlen));
3241 }
3242 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * guest msghdr @msgp.  Builds a host struct msghdr (name, control
 * data, iovec), performs the host syscall, and for receives converts
 * the results back into the guest structure.  @send selects sendmsg
 * (nonzero) vs recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest control buffer size: host cmsg headers and
     * alignment can be larger than the target's, so translated control
     * data may not fit in a same-sized buffer. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* This fd's payload needs translating (e.g. netlink):
             * convert a copy of the first iov buffer before sending. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* Skip the (void *)-1 sentinel set for a bad name above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On full success return the byte count from recvmsg. */
                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
3355 
do_sendrecvmsg(int fd,abi_ulong target_msg,int flags,int send)3356 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3357                                int flags, int send)
3358 {
3359     abi_long ret;
3360     struct target_msghdr *msgp;
3361 
3362     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3363                           msgp,
3364                           target_msg,
3365                           send ? 1 : 0)) {
3366         return -TARGET_EFAULT;
3367     }
3368     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3369     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3370     return ret;
3371 }
3372 
3373 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3374  * so it might not have this *mmsg-specific flag either.
3375  */
3376 #ifndef MSG_WAITFORONE
3377 #define MSG_WAITFORONE 0x10000
3378 #endif
3379 
do_sendrecvmmsg(int fd,abi_ulong target_msgvec,unsigned int vlen,unsigned int flags,int send)3380 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3381                                 unsigned int vlen, unsigned int flags,
3382                                 int send)
3383 {
3384     struct target_mmsghdr *mmsgp;
3385     abi_long ret = 0;
3386     int i;
3387 
3388     if (vlen > UIO_MAXIOV) {
3389         vlen = UIO_MAXIOV;
3390     }
3391 
3392     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3393     if (!mmsgp) {
3394         return -TARGET_EFAULT;
3395     }
3396 
3397     for (i = 0; i < vlen; i++) {
3398         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3399         if (is_error(ret)) {
3400             break;
3401         }
3402         mmsgp[i].msg_len = tswap32(ret);
3403         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3404         if (flags & MSG_WAITFORONE) {
3405             flags |= MSG_DONTWAIT;
3406         }
3407     }
3408 
3409     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3410 
3411     /* Return number of datagrams sent if we sent any at all;
3412      * otherwise return the error.
3413      */
3414     if (i) {
3415         return i;
3416     }
3417     return ret;
3418 }
3419 
3420 /* do_accept4() Must return target values and target errnos. */
do_accept4(int fd,abi_ulong target_addr,abi_ulong target_addrlen_addr,int flags)3421 static abi_long do_accept4(int fd, abi_ulong target_addr,
3422                            abi_ulong target_addrlen_addr, int flags)
3423 {
3424     socklen_t addrlen, ret_addrlen;
3425     void *addr;
3426     abi_long ret;
3427     int host_flags;
3428 
3429     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3430         return -TARGET_EINVAL;
3431     }
3432 
3433     host_flags = 0;
3434     if (flags & TARGET_SOCK_NONBLOCK) {
3435         host_flags |= SOCK_NONBLOCK;
3436     }
3437     if (flags & TARGET_SOCK_CLOEXEC) {
3438         host_flags |= SOCK_CLOEXEC;
3439     }
3440 
3441     if (target_addr == 0) {
3442         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3443     }
3444 
3445     /* linux returns EFAULT if addrlen pointer is invalid */
3446     if (get_user_u32(addrlen, target_addrlen_addr))
3447         return -TARGET_EFAULT;
3448 
3449     if ((int)addrlen < 0) {
3450         return -TARGET_EINVAL;
3451     }
3452 
3453     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3454         return -TARGET_EFAULT;
3455     }
3456 
3457     addr = alloca(addrlen);
3458 
3459     ret_addrlen = addrlen;
3460     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3461     if (!is_error(ret)) {
3462         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3463         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3464             ret = -TARGET_EFAULT;
3465         }
3466     }
3467     return ret;
3468 }
3469 
3470 /* do_getpeername() Must return target values and target errnos. */
do_getpeername(int fd,abi_ulong target_addr,abi_ulong target_addrlen_addr)3471 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3472                                abi_ulong target_addrlen_addr)
3473 {
3474     socklen_t addrlen, ret_addrlen;
3475     void *addr;
3476     abi_long ret;
3477 
3478     if (get_user_u32(addrlen, target_addrlen_addr))
3479         return -TARGET_EFAULT;
3480 
3481     if ((int)addrlen < 0) {
3482         return -TARGET_EINVAL;
3483     }
3484 
3485     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3486         return -TARGET_EFAULT;
3487     }
3488 
3489     addr = alloca(addrlen);
3490 
3491     ret_addrlen = addrlen;
3492     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3493     if (!is_error(ret)) {
3494         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3495         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3496             ret = -TARGET_EFAULT;
3497         }
3498     }
3499     return ret;
3500 }
3501 
3502 /* do_getsockname() Must return target values and target errnos. */
do_getsockname(int fd,abi_ulong target_addr,abi_ulong target_addrlen_addr)3503 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3504                                abi_ulong target_addrlen_addr)
3505 {
3506     socklen_t addrlen, ret_addrlen;
3507     void *addr;
3508     abi_long ret;
3509 
3510     if (get_user_u32(addrlen, target_addrlen_addr))
3511         return -TARGET_EFAULT;
3512 
3513     if ((int)addrlen < 0) {
3514         return -TARGET_EINVAL;
3515     }
3516 
3517     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3518         return -TARGET_EFAULT;
3519     }
3520 
3521     addr = alloca(addrlen);
3522 
3523     ret_addrlen = addrlen;
3524     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3525     if (!is_error(ret)) {
3526         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3527         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3528             ret = -TARGET_EFAULT;
3529         }
3530     }
3531     return ret;
3532 }
3533 
3534 /* do_socketpair() Must return target values and target errnos. */
do_socketpair(int domain,int type,int protocol,abi_ulong target_tab_addr)3535 static abi_long do_socketpair(int domain, int type, int protocol,
3536                               abi_ulong target_tab_addr)
3537 {
3538     int tab[2];
3539     abi_long ret;
3540 
3541     target_to_host_sock_type(&type);
3542 
3543     ret = get_errno(socketpair(domain, type, protocol, tab));
3544     if (!is_error(ret)) {
3545         if (put_user_s32(tab[0], target_tab_addr)
3546             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3547             ret = -TARGET_EFAULT;
3548     }
3549     return ret;
3550 }
3551 
/* do_sendto() Must return target values and target errnos.
 * Used for both sendto() (target_addr != 0) and send() (target_addr == 0).
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* non-NULL iff host_msg was replaced by a copy */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* This fd has a data translator: run it on a private heap copy
         * so the translation does not scribble on guest memory.
         */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* NOTE(review): the +1 presumably leaves room for
         * target_to_host_sockaddr() to NUL-terminate AF_UNIX paths --
         * confirm against that helper.
         */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we substituted a heap copy, free it and restore the original
     * locked pointer before unlocking; order matters here.
     */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3595 
/* do_recvfrom() Must return target values and target errnos.
 * Used for both recvfrom() (target_addr != 0) and recv() (target_addr == 0).
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A zero guest address is forwarded as a NULL host buffer rather
     * than being rejected with EFAULT here.
     */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate the received bytes in place for fds that need it;
             * only the bytes actually received (<= len) are translated.
             */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back no more address bytes than the guest buffered. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
        /* Error path: the fail label lives inside this else so that the
         * gotos above land on an unlock that copies nothing back.
         */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3656 
3657 #ifdef TARGET_NR_socketcall
3658 /* do_socketcall() must return target values and target errnos. */
do_socketcall(int num,abi_ulong vptr)3659 static abi_long do_socketcall(int num, abi_ulong vptr)
3660 {
3661     static const unsigned nargs[] = { /* number of arguments per operation */
3662         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3663         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3664         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3665         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3666         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3667         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3668         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3669         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3670         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3671         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3672         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3673         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3674         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3675         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3676         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3677         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3678         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3679         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3680         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3681         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3682     };
3683     abi_long a[6]; /* max 6 args */
3684     unsigned i;
3685 
3686     /* check the range of the first argument num */
3687     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3688     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3689         return -TARGET_EINVAL;
3690     }
3691     /* ensure we have space for args */
3692     if (nargs[num] > ARRAY_SIZE(a)) {
3693         return -TARGET_EINVAL;
3694     }
3695     /* collect the arguments in a[] according to nargs[] */
3696     for (i = 0; i < nargs[num]; ++i) {
3697         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3698             return -TARGET_EFAULT;
3699         }
3700     }
3701     /* now when we have the args, invoke the appropriate underlying function */
3702     switch (num) {
3703     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3704         return do_socket(a[0], a[1], a[2]);
3705     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3706         return do_bind(a[0], a[1], a[2]);
3707     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3708         return do_connect(a[0], a[1], a[2]);
3709     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3710         return get_errno(listen(a[0], a[1]));
3711     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3712         return do_accept4(a[0], a[1], a[2], 0);
3713     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3714         return do_getsockname(a[0], a[1], a[2]);
3715     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3716         return do_getpeername(a[0], a[1], a[2]);
3717     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3718         return do_socketpair(a[0], a[1], a[2], a[3]);
3719     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3720         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3721     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3722         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3723     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3724         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3725     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3726         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3727     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3728         return get_errno(shutdown(a[0], a[1]));
3729     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3730         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3731     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3732         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3733     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3734         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3735     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3736         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3737     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3738         return do_accept4(a[0], a[1], a[2], a[3]);
3739     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3740         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3741     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3742         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3743     default:
3744         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3745         return -TARGET_EINVAL;
3746     }
3747 }
3748 #endif
3749 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct; used when the target does not
 * provide its own layout.  The __unusedN fields are layout padding.
 */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;  /* ownership and permissions */
  abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;              /* pads the time field on 32-bit ABIs */
#endif
  abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;              /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3768 
/*
 * Copy the ipc_perm embedded at the head of a guest target_semid64_ds
 * into host format.  Returns 0 on success or -TARGET_EFAULT if the
 * guest address cannot be locked.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    /* These targets use a 32-bit mode field; all others use 16 bits. */
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    /* PPC likewise widens the sequence number to 32 bits. */
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3796 
/*
 * Mirror of target_to_host_ipc_perm(): write a host ipc_perm into the
 * head of a guest target_semid64_ds.  Returns 0 on success or
 * -TARGET_EFAULT if the guest address cannot be locked.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    /* These targets use a 32-bit mode field; all others use 16 bits. */
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3824 
/*
 * Read a guest semid64_ds into a host semid_ds.  The permission block
 * is converted by target_to_host_ipc_perm(), which takes its own lock
 * on the same guest address.
 * NOTE(review): if the ipc_perm conversion fails we return while
 * target_sd is still locked -- confirm whether that is acceptable for
 * lock_user_struct here.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3840 
/*
 * Write a host semid_ds back out as a guest semid64_ds; mirror of
 * target_to_host_semid_ds().
 * NOTE(review): as in the mirror function, an ipc_perm failure returns
 * with target_sd still locked -- confirm this is intended.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3856 
/* Target layout of struct seminfo, returned for IPC_INFO/SEM_INFO;
 * all fields are plain 32-bit ints in both host and target layouts.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3869 
host_to_target_seminfo(abi_ulong target_addr,struct seminfo * host_seminfo)3870 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3871                                               struct seminfo *host_seminfo)
3872 {
3873     struct target_seminfo *target_seminfo;
3874     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3875         return -TARGET_EFAULT;
3876     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3877     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3878     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3879     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3880     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3881     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3882     __put_user(host_seminfo->semume, &target_seminfo->semume);
3883     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3884     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3885     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3886     unlock_user_struct(target_seminfo, target_addr, 1);
3887     return 0;
3888 }
3889 
/* Host-side argument union for semctl(); modern libcs no longer define
 * this, so each caller must.
 */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};
3896 
/* Guest-side view of union semun: the pointer members are guest
 * addresses (abi_ulong), not host pointers.
 */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3903 
target_to_host_semarray(int semid,unsigned short ** host_array,abi_ulong target_addr)3904 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3905                                                abi_ulong target_addr)
3906 {
3907     int nsems;
3908     unsigned short *array;
3909     union semun semun;
3910     struct semid_ds semid_ds;
3911     int i, ret;
3912 
3913     semun.buf = &semid_ds;
3914 
3915     ret = semctl(semid, 0, IPC_STAT, semun);
3916     if (ret == -1)
3917         return get_errno(ret);
3918 
3919     nsems = semid_ds.sem_nsems;
3920 
3921     *host_array = g_try_new(unsigned short, nsems);
3922     if (!*host_array) {
3923         return -TARGET_ENOMEM;
3924     }
3925     array = lock_user(VERIFY_READ, target_addr,
3926                       nsems*sizeof(unsigned short), 1);
3927     if (!array) {
3928         g_free(*host_array);
3929         return -TARGET_EFAULT;
3930     }
3931 
3932     for(i=0; i<nsems; i++) {
3933         __get_user((*host_array)[i], &array[i]);
3934     }
3935     unlock_user(array, target_addr, 0);
3936 
3937     return 0;
3938 }
3939 
host_to_target_semarray(int semid,abi_ulong target_addr,unsigned short ** host_array)3940 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3941                                                unsigned short **host_array)
3942 {
3943     int nsems;
3944     unsigned short *array;
3945     union semun semun;
3946     struct semid_ds semid_ds;
3947     int i, ret;
3948 
3949     semun.buf = &semid_ds;
3950 
3951     ret = semctl(semid, 0, IPC_STAT, semun);
3952     if (ret == -1)
3953         return get_errno(ret);
3954 
3955     nsems = semid_ds.sem_nsems;
3956 
3957     array = lock_user(VERIFY_WRITE, target_addr,
3958                       nsems*sizeof(unsigned short), 0);
3959     if (!array)
3960         return -TARGET_EFAULT;
3961 
3962     for(i=0; i<nsems; i++) {
3963         __put_user((*host_array)[i], &array[i]);
3964     }
3965     g_free(*host_array);
3966     unlock_user(array, target_addr, 1);
3967 
3968     return 0;
3969 }
3970 
do_semctl(int semid,int semnum,int cmd,abi_ulong target_arg)3971 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3972                                  abi_ulong target_arg)
3973 {
3974     union target_semun target_su = { .buf = target_arg };
3975     union semun arg;
3976     struct semid_ds dsarg;
3977     unsigned short *array = NULL;
3978     struct seminfo seminfo;
3979     abi_long ret = -TARGET_EINVAL;
3980     abi_long err;
3981     cmd &= 0xff;
3982 
3983     switch( cmd ) {
3984 	case GETVAL:
3985 	case SETVAL:
3986             /* In 64 bit cross-endian situations, we will erroneously pick up
3987              * the wrong half of the union for the "val" element.  To rectify
3988              * this, the entire 8-byte structure is byteswapped, followed by
3989 	     * a swap of the 4 byte val field. In other cases, the data is
3990 	     * already in proper host byte order. */
3991 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3992 		target_su.buf = tswapal(target_su.buf);
3993 		arg.val = tswap32(target_su.val);
3994 	    } else {
3995 		arg.val = target_su.val;
3996 	    }
3997             ret = get_errno(semctl(semid, semnum, cmd, arg));
3998             break;
3999 	case GETALL:
4000 	case SETALL:
4001             err = target_to_host_semarray(semid, &array, target_su.array);
4002             if (err)
4003                 return err;
4004             arg.array = array;
4005             ret = get_errno(semctl(semid, semnum, cmd, arg));
4006             err = host_to_target_semarray(semid, target_su.array, &array);
4007             if (err)
4008                 return err;
4009             break;
4010 	case IPC_STAT:
4011 	case IPC_SET:
4012 	case SEM_STAT:
4013             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4014             if (err)
4015                 return err;
4016             arg.buf = &dsarg;
4017             ret = get_errno(semctl(semid, semnum, cmd, arg));
4018             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4019             if (err)
4020                 return err;
4021             break;
4022 	case IPC_INFO:
4023 	case SEM_INFO:
4024             arg.__buf = &seminfo;
4025             ret = get_errno(semctl(semid, semnum, cmd, arg));
4026             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4027             if (err)
4028                 return err;
4029             break;
4030 	case IPC_RMID:
4031 	case GETPID:
4032 	case GETNCNT:
4033 	case GETZCNT:
4034             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4035             break;
4036     }
4037 
4038     return ret;
4039 }
4040 
/* Target layout of struct sembuf, the per-operation element passed to
 * semop()/semtimedop().
 */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* operation value */
    short sem_flg;           /* operation flags (e.g. IPC_NOWAIT, SEM_UNDO) */
};
4046 
target_to_host_sembuf(struct sembuf * host_sembuf,abi_ulong target_addr,unsigned nsops)4047 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4048                                              abi_ulong target_addr,
4049                                              unsigned nsops)
4050 {
4051     struct target_sembuf *target_sembuf;
4052     int i;
4053 
4054     target_sembuf = lock_user(VERIFY_READ, target_addr,
4055                               nsops*sizeof(struct target_sembuf), 1);
4056     if (!target_sembuf)
4057         return -TARGET_EFAULT;
4058 
4059     for(i=0; i<nsops; i++) {
4060         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4061         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4062         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4063     }
4064 
4065     unlock_user(target_sembuf, target_addr, 0);
4066 
4067     return 0;
4068 }
4069 
4070 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4071     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4072 
4073 /*
4074  * This macro is required to handle the s390 variants, which passes the
4075  * arguments in a different order than default.
4076  */
4077 #ifdef __s390x__
4078 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4079   (__nsops), (__timeout), (__sops)
4080 #else
4081 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4082   (__nsops), 0, (__sops), (__timeout)
4083 #endif
4084 
/* Common implementation of semop()/semtimedop().  'timeout' is a guest
 * pointer (0 means no timeout); 'time64' selects the 64-bit timespec
 * layout.  Returns target errnos.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64 ? target_to_host_timespec64(pts, timeout)
                   : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }

    /* The kernel caps the number of operations per call. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    /* Prefer the direct syscall; fall back to the multiplexed ipc()
     * entry point on hosts that only provide that.
     */
    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
4131 #endif
4132 
/* Target layout of struct msqid64_ds for msgctl(); the __unusedN
 * fields are layout padding.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* pads the time field on 32-bit ABIs */
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;           /* current bytes on queue */
    abi_ulong msg_qnum;               /* messages currently queued */
    abi_ulong msg_qbytes;             /* max bytes allowed on queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd */
    abi_ulong msg_lrpid;              /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4156 
/*
 * Read a guest msqid_ds into host format; the permission block is
 * converted by target_to_host_ipc_perm(), which locks the same guest
 * address again.
 * NOTE(review): an ipc_perm failure returns with target_md still
 * locked -- confirm this is acceptable for lock_user_struct.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
4177 
/*
 * Write a host msqid_ds back to guest memory; mirror of
 * target_to_host_msqid_ds().
 * NOTE(review): as in the mirror function, an ipc_perm failure returns
 * with target_md still locked -- confirm this is intended.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
4198 
/* Target layout of struct msginfo, returned for IPC_INFO/MSG_INFO. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4209 
host_to_target_msginfo(abi_ulong target_addr,struct msginfo * host_msginfo)4210 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4211                                               struct msginfo *host_msginfo)
4212 {
4213     struct target_msginfo *target_msginfo;
4214     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4215         return -TARGET_EFAULT;
4216     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4217     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4218     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4219     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4220     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4221     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4222     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4223     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4224     unlock_user_struct(target_msginfo, target_addr, 1);
4225     return 0;
4226 }
4227 
/* Implement msgctl(); 'ptr' is a guest pointer whose meaning depends
 * on the command.  Returns target errnos; unhandled commands yield
 * -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off flag bits carried in the high byte of the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg)) {
            return -TARGET_EFAULT;
        }
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel writes a msginfo through the msqid_ds pointer for
         * these commands, hence the cast.
         */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo)) {
            return -TARGET_EFAULT;
        }
        break;
    }

    return ret;
}
4259 
/*
 * Guest-layout msgbuf: an abi_long message type followed by the message
 * text.  mtext is declared [1] but is really variable length (old-style
 * flexible array member); callers size accesses by the msgsz argument.
 */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
4264 
/*
 * Emulate msgsnd(2): copy the guest message at @msgp into a host
 * struct msgbuf and submit it, preferring the direct msgsnd syscall and
 * falling back to the multiplexed sys_ipc where only that exists.
 *
 * Returns 0/positive on success or a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host msgbuf is a long mtype followed by msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* No direct syscall on this host: go through multiplexed sys_ipc. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* s390x sys_ipc takes only five parameters. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4305 
#ifdef __NR_ipc
/*
 * Trailing-argument shape for the multiplexed sys_ipc msgrcv call; it
 * varies per host architecture.
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic form: pack msgp/msgtyp into an on-stack array (the historical
 * "ipc kludge") plus a trailing 0 for the unused fifth argument. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4319 
/*
 * Emulate msgrcv(2): receive into a host bounce buffer, then copy the
 * message text and mtype back out to the guest msgbuf at @msgp.
 * Prefers the direct msgrcv syscall, falling back to sys_ipc.
 *
 * Returns the number of bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /*
         * NOTE(review): mtext offset assumes sizeof(abi_ulong) ==
         * sizeof(abi_long) (true for the supported ABIs, as mtype is
         * abi_long).
         */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /*
     * NOTE(review): on syscall failure this still copies mtype from the
     * never-filled host_mb (g_try_malloc does not zero) back to the
     * guest — confirm whether guarding on success is intended upstream.
     */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4371 
target_to_host_shmid_ds(struct shmid_ds * host_sd,abi_ulong target_addr)4372 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4373                                                abi_ulong target_addr)
4374 {
4375     struct target_shmid_ds *target_sd;
4376 
4377     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4378         return -TARGET_EFAULT;
4379     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4380         return -TARGET_EFAULT;
4381     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4382     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4383     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4384     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4385     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4386     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4387     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4388     unlock_user_struct(target_sd, target_addr, 0);
4389     return 0;
4390 }
4391 
host_to_target_shmid_ds(abi_ulong target_addr,struct shmid_ds * host_sd)4392 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4393                                                struct shmid_ds *host_sd)
4394 {
4395     struct target_shmid_ds *target_sd;
4396 
4397     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4398         return -TARGET_EFAULT;
4399     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4400         return -TARGET_EFAULT;
4401     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4402     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4403     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4404     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4405     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4406     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4407     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4408     unlock_user_struct(target_sd, target_addr, 1);
4409     return 0;
4410 }
4411 
/*
 * Target-ABI view of struct shminfo, as returned by shmctl(IPC_INFO).
 * All limits are target-long sized.
 */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4419 
host_to_target_shminfo(abi_ulong target_addr,struct shminfo * host_shminfo)4420 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4421                                               struct shminfo *host_shminfo)
4422 {
4423     struct target_shminfo *target_shminfo;
4424     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4425         return -TARGET_EFAULT;
4426     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4427     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4428     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4429     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4430     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4431     unlock_user_struct(target_shminfo, target_addr, 1);
4432     return 0;
4433 }
4434 
/*
 * Target-ABI view of struct shm_info, as returned by shmctl(SHM_INFO).
 * used_ids stays a plain int; the counters are target-long sized.
 */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4443 
host_to_target_shm_info(abi_ulong target_addr,struct shm_info * host_shm_info)4444 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4445                                                struct shm_info *host_shm_info)
4446 {
4447     struct target_shm_info *target_shm_info;
4448     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4451     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4452     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4453     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4454     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4455     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4456     unlock_user_struct(target_shm_info, target_addr, 1);
4457     return 0;
4458 }
4459 
/*
 * Emulate shmctl(2) for the guest.
 *
 * @shmid: host shared-memory id
 * @cmd:   target command word (may carry version bits in the high byte)
 * @buf:   guest address of the shmid_ds / shminfo / shm_info buffer
 *
 * Returns the host shmctl() result as a target errno, or
 * -TARGET_EFAULT when guest memory cannot be accessed.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip the target's version/IPC_64 bits; pass plain commands to host. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Convert guest buffer in (needed for IPC_SET), then back out. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Host fills a struct shminfo despite the shmid_ds prototype. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, SHM_INFO fills a struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* Commands without a buffer argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4498 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Dispatcher for the multiplexed sys_ipc syscall used by some targets.
 * The low 16 bits of @call select the operation (IPCOP_*); the upper
 * bits carry a "version" that selects legacy argument layouts for
 * msgrcv and shmat.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        /* Plain semop is semtimedop with no timeout. */
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /*
                 * Legacy (version 0) layout: ptr points at a pair of
                 * {msgp, msgtyp} values (the historical "ipc kludge").
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = target_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* Legacy iBCS2 shmat variant is not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = target_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4619 
/* kernel structure types definitions */

/*
 * X-macro expansion of syscall_types.h.  The first pass generates an
 * enum of STRUCT_<name> tags (including "special" hand-converted
 * structs); the second pass generates a thunk type-description array
 * struct_<name>_def[] for each regular struct, skipping the special
 * ones.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed on-stack staging buffer (buf_temp) for ioctl thunks. */
#define MAX_STRUCT_SIZE 4096
4638 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handler for FS_IOC_FIEMAP: convert the guest's struct fiemap in,
 * issue the host ioctl, and convert the header plus the kernel-filled
 * extent array back out.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;  /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts whose byte size would overflow a 32-bit size. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4727 
/*
 * Handler for SIOCGIFCONF: convert the guest's struct ifconf in, let the
 * host kernel fill an ifreq array, then convert length, struct and array
 * back out to the guest.  A NULL ifc_buf means the caller only wants the
 * required length.
 *
 * Fixes vs. original:
 *  - the lock_user() of target_ifc_buf was unchecked, so an unmapped
 *    guest buffer led to a NULL dereference in the thunk loop;
 *  - the -TARGET_EFAULT returns leaked the heap-allocated host_ifconf
 *    when free_buf was set.  Both paths now route through a cleanup
 *    label.
 */
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    /* ifmap is the largest union arm, so it bounds the target ifreq size. */
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;   /* set when host_ifconf was heap-allocated below */
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        /* Scale the guest length to a host ifreq array of the same count. */
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        /* The host ifreq array lives directly after the struct ifconf. */
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        /* NULL buffer: kernel just reports the needed length. */
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */
        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */
        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                ret = -TARGET_EFAULT;
                goto out;
            }
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

out:
    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}
4828 
4829 #if defined(CONFIG_USBFS)
4830 #if HOST_LONG_BITS > 64
4831 #error USBDEVFS thunks do not support >64 bit hosts yet.
4832 #endif
/*
 * Bookkeeping wrapper around a host usbdevfs_urb submitted on behalf of
 * the guest.  target_urb_adr doubles as the hashtable key; host_urb must
 * stay embedded so the wrapper can be recovered from the kernel-returned
 * urb pointer via offsetof() in the reapurb handler.
 */
struct live_urb {
    uint64_t target_urb_adr;   /* guest address of the urb (hash key) */
    uint64_t target_buf_adr;   /* guest address of the data buffer */
    char *target_buf_ptr;      /* host view of the locked guest buffer */
    struct usbdevfs_urb host_urb;
};
4839 
usbdevfs_urb_hashtable(void)4840 static GHashTable *usbdevfs_urb_hashtable(void)
4841 {
4842     static GHashTable *urb_hashtable;
4843 
4844     if (!urb_hashtable) {
4845         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4846     }
4847     return urb_hashtable;
4848 }
4849 
/* Track an in-flight URB; its leading target_urb_adr field is the key. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4855 
/* Find the in-flight URB for a guest urb address; NULL if not tracked. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    return g_hash_table_lookup(usbdevfs_urb_hashtable(), &target_urb_adr);
}
4861 
/* Stop tracking a URB once it has been reaped or discarded. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4867 
/*
 * Handler for USBDEVFS_REAPURB(NDELAY): the kernel hands back a host
 * pointer to the completed usbdevfs_urb.  Recover the enclosing
 * struct live_urb, copy results back to the guest's urb and data buffer,
 * and write the guest urb address through *arg as the "reaped" handle.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* The kernel returned &lurb->host_urb; step back to the wrapper. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer, flushing any bytes the device wrote. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
4927 
4928 static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)4929 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4930                              uint8_t *buf_temp __attribute__((unused)),
4931                              int fd, int cmd, abi_long arg)
4932 {
4933     struct live_urb *lurb;
4934 
4935     /* map target address back to host URB with metadata. */
4936     lurb = urb_hashtable_lookup(arg);
4937     if (!lurb) {
4938         return -TARGET_EFAULT;
4939     }
4940     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4941 }
4942 
/*
 * Handler for USBDEVFS_SUBMITURB: build a host usbdevfs_urb wrapped in a
 * struct live_urb, lock the guest data buffer for the transfer's
 * lifetime, submit it, and track it so REAPURB/DISCARDURB can translate
 * the kernel's host pointer back to the guest urb address.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest urb and buffer addresses for reap/discard. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: release the buffer lock and drop the wrapper. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Success: the URB stays live until reaped or discarded. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5003 #endif /* CONFIG_USBFS */
5004 
do_ioctl_dm(const IOCTLEntry * ie,uint8_t * buf_temp,int fd,int cmd,abi_long arg)5005 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5006                             int cmd, abi_long arg)
5007 {
5008     void *argptr;
5009     struct dm_ioctl *host_dm;
5010     abi_long guest_data;
5011     uint32_t guest_data_size;
5012     int target_size;
5013     const argtype *arg_type = ie->arg_type;
5014     abi_long ret;
5015     void *big_buf = NULL;
5016     char *host_data;
5017 
5018     arg_type++;
5019     target_size = thunk_type_size(arg_type, 0);
5020     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5021     if (!argptr) {
5022         ret = -TARGET_EFAULT;
5023         goto out;
5024     }
5025     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5026     unlock_user(argptr, arg, 0);
5027 
5028     /* buf_temp is too small, so fetch things into a bigger buffer */
5029     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5030     memcpy(big_buf, buf_temp, target_size);
5031     buf_temp = big_buf;
5032     host_dm = big_buf;
5033 
5034     guest_data = arg + host_dm->data_start;
5035     if ((guest_data - arg) < 0) {
5036         ret = -TARGET_EINVAL;
5037         goto out;
5038     }
5039     guest_data_size = host_dm->data_size - host_dm->data_start;
5040     host_data = (char*)host_dm + host_dm->data_start;
5041 
5042     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5043     if (!argptr) {
5044         ret = -TARGET_EFAULT;
5045         goto out;
5046     }
5047 
5048     switch (ie->host_cmd) {
5049     case DM_REMOVE_ALL:
5050     case DM_LIST_DEVICES:
5051     case DM_DEV_CREATE:
5052     case DM_DEV_REMOVE:
5053     case DM_DEV_SUSPEND:
5054     case DM_DEV_STATUS:
5055     case DM_DEV_WAIT:
5056     case DM_TABLE_STATUS:
5057     case DM_TABLE_CLEAR:
5058     case DM_TABLE_DEPS:
5059     case DM_LIST_VERSIONS:
5060         /* no input data */
5061         break;
5062     case DM_DEV_RENAME:
5063     case DM_DEV_SET_GEOMETRY:
5064         /* data contains only strings */
5065         memcpy(host_data, argptr, guest_data_size);
5066         break;
5067     case DM_TARGET_MSG:
5068         memcpy(host_data, argptr, guest_data_size);
5069         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5070         break;
5071     case DM_TABLE_LOAD:
5072     {
5073         void *gspec = argptr;
5074         void *cur_data = host_data;
5075         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5076         int spec_size = thunk_type_size(dm_arg_type, 0);
5077         int i;
5078 
5079         for (i = 0; i < host_dm->target_count; i++) {
5080             struct dm_target_spec *spec = cur_data;
5081             uint32_t next;
5082             int slen;
5083 
5084             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5085             slen = strlen((char*)gspec + spec_size) + 1;
5086             next = spec->next;
5087             spec->next = sizeof(*spec) + slen;
5088             strcpy((char*)&spec[1], gspec + spec_size);
5089             gspec += next;
5090             cur_data += spec->next;
5091         }
5092         break;
5093     }
5094     default:
5095         ret = -TARGET_EINVAL;
5096         unlock_user(argptr, guest_data, 0);
5097         goto out;
5098     }
5099     unlock_user(argptr, guest_data, 0);
5100 
5101     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5102     if (!is_error(ret)) {
5103         guest_data = arg + host_dm->data_start;
5104         guest_data_size = host_dm->data_size - host_dm->data_start;
5105         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5106         switch (ie->host_cmd) {
5107         case DM_REMOVE_ALL:
5108         case DM_DEV_CREATE:
5109         case DM_DEV_REMOVE:
5110         case DM_DEV_RENAME:
5111         case DM_DEV_SUSPEND:
5112         case DM_DEV_STATUS:
5113         case DM_TABLE_LOAD:
5114         case DM_TABLE_CLEAR:
5115         case DM_TARGET_MSG:
5116         case DM_DEV_SET_GEOMETRY:
5117             /* no return data */
5118             break;
5119         case DM_LIST_DEVICES:
5120         {
5121             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5122             uint32_t remaining_data = guest_data_size;
5123             void *cur_data = argptr;
5124             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5125             int nl_size = 12; /* can't use thunk_size due to alignment */
5126 
5127             while (1) {
5128                 uint32_t next = nl->next;
5129                 if (next) {
5130                     nl->next = nl_size + (strlen(nl->name) + 1);
5131                 }
5132                 if (remaining_data < nl->next) {
5133                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5134                     break;
5135                 }
5136                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5137                 strcpy(cur_data + nl_size, nl->name);
5138                 cur_data += nl->next;
5139                 remaining_data -= nl->next;
5140                 if (!next) {
5141                     break;
5142                 }
5143                 nl = (void*)nl + next;
5144             }
5145             break;
5146         }
5147         case DM_DEV_WAIT:
5148         case DM_TABLE_STATUS:
5149         {
5150             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5151             void *cur_data = argptr;
5152             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5153             int spec_size = thunk_type_size(dm_arg_type, 0);
5154             int i;
5155 
5156             for (i = 0; i < host_dm->target_count; i++) {
5157                 uint32_t next = spec->next;
5158                 int slen = strlen((char*)&spec[1]) + 1;
5159                 spec->next = (cur_data - argptr) + spec_size + slen;
5160                 if (guest_data_size < spec->next) {
5161                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5162                     break;
5163                 }
5164                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5165                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5166                 cur_data = argptr + spec->next;
5167                 spec = (void*)host_dm + host_dm->data_start + next;
5168             }
5169             break;
5170         }
5171         case DM_TABLE_DEPS:
5172         {
5173             void *hdata = (void*)host_dm + host_dm->data_start;
5174             int count = *(uint32_t*)hdata;
5175             uint64_t *hdev = hdata + 8;
5176             uint64_t *gdev = argptr + 8;
5177             int i;
5178 
5179             *(uint32_t*)argptr = tswap32(count);
5180             for (i = 0; i < count; i++) {
5181                 *gdev = tswap64(*hdev);
5182                 gdev++;
5183                 hdev++;
5184             }
5185             break;
5186         }
5187         case DM_LIST_VERSIONS:
5188         {
5189             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5190             uint32_t remaining_data = guest_data_size;
5191             void *cur_data = argptr;
5192             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5193             int vers_size = thunk_type_size(dm_arg_type, 0);
5194 
5195             while (1) {
5196                 uint32_t next = vers->next;
5197                 if (next) {
5198                     vers->next = vers_size + (strlen(vers->name) + 1);
5199                 }
5200                 if (remaining_data < vers->next) {
5201                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5202                     break;
5203                 }
5204                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5205                 strcpy(cur_data + vers_size, vers->name);
5206                 cur_data += vers->next;
5207                 remaining_data -= vers->next;
5208                 if (!next) {
5209                     break;
5210                 }
5211                 vers = (void*)vers + next;
5212             }
5213             break;
5214         }
5215         default:
5216             unlock_user(argptr, guest_data, 0);
5217             ret = -TARGET_EINVAL;
5218             goto out;
5219         }
5220         unlock_user(argptr, guest_data, guest_data_size);
5221 
5222         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5223         if (!argptr) {
5224             ret = -TARGET_EFAULT;
5225             goto out;
5226         }
5227         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5228         unlock_user(argptr, arg, target_size);
5229     }
5230 out:
5231     g_free(big_buf);
5232     return ret;
5233 }
5234 
/*
 * BLKPG: the argument is a struct blkpg_ioctl_arg whose 'data' member
 * is a guest pointer to a struct blkpg_partition.  Convert both levels
 * by hand and swizzle 'data' to a local host copy before the ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;
    void *argptr;
    int target_size;

    /* Read and convert the outer struct blkpg_ioctl_arg. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Only the partition add/remove opcodes carry a blkpg_partition. */
    if (host_blkpg->op != BLKPG_ADD_PARTITION &&
        host_blkpg->op != BLKPG_DEL_PARTITION) {
        /* Unknown opcode */
        return -TARGET_EINVAL;
    }

    /* Read and convert the inner struct blkpg_partition. */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    return get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
}
5287 
/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains rt_dev, a pointer to a
 * device-name string, which the generic thunk machinery cannot convert.
 * Convert the structure field by field here so that rt_dev can be
 * replaced with a locked host copy of the guest string before issuing
 * the host ioctl, and unlocked again afterwards.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Non-NULL rt_dev: lock the guest string and store the
                 * host address of the locked copy in the host struct. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        /* Every other field goes through the normal thunk conversion. */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* struct rtentry always has an rt_dev field, so the loop above must
     * have set both pointers before we get here. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5353 
/* KDSIGACCEPT: the argument is a signal number, which must be
 * translated from guest to host numbering before the host ioctl. */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_signal(arg)));
}
5360 
/*
 * SIOCGSTAMP: fetch the last-packet timestamp from the host, then copy
 * it out in whichever timeval layout (old 32-bit or new 64-bit) the
 * guest's command variant expects.
 */
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    abi_long err;

    if (is_error(ret)) {
        return ret;
    }

    /* The _OLD command uses the legacy 32-bit timeval layout. */
    err = (cmd == (int)TARGET_SIOCGSTAMP_OLD)
              ? copy_to_user_timeval(arg, &tv)
              : copy_to_user_timeval64(arg, &tv);
    return err ? -TARGET_EFAULT : ret;
}
5384 
/*
 * SIOCGSTAMPNS: like SIOCGSTAMP but with nanosecond resolution; copy
 * the host timespec out in the layout matching the command variant.
 */
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    abi_long err;

    if (is_error(ret)) {
        return ret;
    }

    /* The _OLD command uses the legacy 32-bit timespec layout. */
    err = (cmd == (int)TARGET_SIOCGSTAMPNS_OLD)
              ? host_to_target_timespec(arg, &ts)
              : host_to_target_timespec64(arg, &ts);
    return err ? -TARGET_EFAULT : ret;
}
5408 
5409 #ifdef TIOCGPTPEER
/* TIOCGPTPEER: the argument is a set of open(2) flags; translate the
 * guest's flag bits to the host's encoding before the ioctl. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
5416 #endif
5417 
5418 #ifdef HAVE_DRM_H
5419 
unlock_drm_version(struct drm_version * host_ver,struct target_drm_version * target_ver,bool copy)5420 static void unlock_drm_version(struct drm_version *host_ver,
5421                                struct target_drm_version *target_ver,
5422                                bool copy)
5423 {
5424     unlock_user(host_ver->name, target_ver->name,
5425                                 copy ? host_ver->name_len : 0);
5426     unlock_user(host_ver->date, target_ver->date,
5427                                 copy ? host_ver->date_len : 0);
5428     unlock_user(host_ver->desc, target_ver->desc,
5429                                 copy ? host_ver->desc_len : 0);
5430 }
5431 
/*
 * Prepare a host struct drm_version for DRM_IOCTL_VERSION: read the
 * three buffer lengths from the guest struct and lock the corresponding
 * guest buffers for writing, hanging the host pointers off host_ver.
 * Returns 0 on success.  On failure, any buffers locked so far are
 * released without copy-back and -EFAULT is returned; the memset below
 * guarantees unlocked pointers are NULL for unlock_drm_version.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            /* Nothing locked yet, so no cleanup needed. */
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Release whatever was locked before the failure, without copying. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5469 
/*
 * Copy the DRM_IOCTL_VERSION results back to the guest: the version
 * numbers and actual string lengths, then release the string buffers
 * with copy-back so the guest sees their contents.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5482 
/*
 * DRM core ioctls.  Only DRM_IOCTL_VERSION needs special handling: its
 * struct carries guest pointers to string buffers which must be locked,
 * passed to the host, and copied back on success.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct target_drm_version *target_ver;
    struct drm_version *ver;
    abi_long ret;

    if (ie->host_cmd != DRM_IOCTL_VERSION) {
        return -TARGET_ENOSYS;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
        return -TARGET_EFAULT;
    }
    ver = (struct drm_version *)buf_temp;
    ret = target_to_host_drmversion(ver, target_ver);
    if (!is_error(ret)) {
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
        if (is_error(ret)) {
            /* Discard the locked buffers without copying back. */
            unlock_drm_version(ver, target_ver, false);
        } else {
            host_to_target_drmversion(target_ver, ver);
        }
    }
    unlock_user_struct(target_ver, arg, 0);
    return ret;
}
5510 
/*
 * DRM_IOCTL_I915_GETPARAM: the struct contains a pointer to an int
 * result, so redirect it to a local and copy the value back by hand.
 *
 * Fixes two defects in the previous version: 'value' was uninitialized
 * and written to the guest even when the host ioctl failed (leaking
 * host stack contents), and the put_user_s32() result was ignored, so
 * an unwritable guest pointer went unreported.
 */
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value = 0;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    /* Let the host kernel write the result into our local. */
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    /* Only propagate the result on success, and report a bad pointer. */
    if (!is_error(ret) && put_user_s32(value, target_gparam->value)) {
        ret = -TARGET_EFAULT;
    }

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
5531 
/* Dispatch i915-specific DRM ioctls; only GETPARAM is supported. */
static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    if (ie->host_cmd == DRM_IOCTL_I915_GETPARAM) {
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    }
    return -TARGET_ENOSYS;
}
5544 
5545 #endif
5546 
/*
 * TUNSETTXFILTER: struct tun_filter ends in a variable-length array of
 * MAC addresses, so convert the fixed header and then copy the address
 * block separately, bounded by our scratch buffer size.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;

    assert(ie->access == IOC_W);

    /* Byte-swap the fixed part of the filter. */
    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        size_t addr_ofs = offsetof(struct tun_filter, addr);
        size_t addr_len = filter->count * ETH_ALEN;
        char *target_addr;

        /* The whole filter must fit in the scratch buffer. */
        if (addr_ofs + addr_len > MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ, arg + addr_ofs, addr_len, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, addr_len);
        unlock_user(target_addr, arg + addr_ofs, 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5582 
/*
 * Table of all ioctls we know how to emulate, generated from ioctls.h.
 * Each entry records the target and host command numbers, a printable
 * name, the access mode, an optional special-case handler (0 for the
 * generic thunk path), and the argument type description.  IGNORE'd
 * commands get host_cmd == 0 and are rejected at dispatch time.  The
 * table is terminated by an all-zero sentinel entry.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5593 
5594 /* ??? Implement proper locking for ioctls.  */
5595 /* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look the command up in ioctl_entries, hand
 * off to a special-case handler if one exists, otherwise marshal the
 * argument through the generic thunk machinery according to its type
 * description and access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    const IOCTLEntry *ie = ioctl_entries;
    const argtype *arg_type;
    int target_size;
    void *argptr;
    abi_long ret;

    /* Linear search; the zero sentinel means the command is unknown. */
    while (ie->target_cmd != cmd) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        ie++;
    }

    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Commands with fully custom marshalling. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }
    if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOTTY;
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: passed through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            /* Kernel writes only: run first, then copy the result out. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr) {
                    return -TARGET_EFAULT;
                }
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads only: copy the argument in, then run. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr) {
                return -TARGET_EFAULT;
            }
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Copy in, run, and copy the result back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr) {
                return -TARGET_EFAULT;
            }
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr) {
                    return -TARGET_EFAULT;
                }
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
5685 
5686 static const bitmask_transtbl iflag_tbl[] = {
5687         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5688         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5689         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5690         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5691         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5692         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5693         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5694         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5695         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5696         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5697         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5698         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5699         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5700         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5701         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5702 };
5703 
5704 static const bitmask_transtbl oflag_tbl[] = {
5705 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5706 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5707 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5708 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5709 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5710 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5711 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5712 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5713 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5714 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5715 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5716 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5717 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5718 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5719 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5720 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5721 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5722 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5723 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5724 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5725 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5726 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5727 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5728 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5729 };
5730 
5731 static const bitmask_transtbl cflag_tbl[] = {
5732 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5733 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5734 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5735 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5736 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5737 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5738 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5739 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5740 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5741 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5742 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5743 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5744 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5745 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5746 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5747 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5748 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5749 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5750 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5751 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5752 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5753 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5754 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5755 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5756 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5757 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5758 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5759 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5760 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5761 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5762 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5763 };
5764 
5765 static const bitmask_transtbl lflag_tbl[] = {
5766   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5767   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5768   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5769   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5770   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5771   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5772   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5773   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5774   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5775   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5776   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5777   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5778   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5779   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5780   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5781   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5782 };
5783 
/*
 * Convert a guest struct target_termios to the host layout: map each
 * flag word through the bitmask tables and remap the control-character
 * array, since the c_cc indices differ between target and host.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Clear host entries the target has no equivalent for. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5818 
/*
 * Convert a host struct host_termios to the guest layout: the inverse
 * of target_to_host_termios, translating flag words through the same
 * bitmask tables and remapping the c_cc control-character indices.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Clear target entries the host has no equivalent for. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5853 
/* Conversion descriptor for struct termios: converter functions, sizes
   and alignments for both representations, plus a strace printer. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5860 
5861 /* If the host does not provide these bits, they may be safely discarded. */
5862 #ifndef MAP_SYNC
5863 #define MAP_SYNC 0
5864 #endif
5865 #ifndef MAP_UNINITIALIZED
5866 #define MAP_UNINITIALIZED 0
5867 #endif
5868 
/* Target<->host translation table for mmap flags.  Each entry gives the
   target mask/bits and the corresponding host mask/bits; flags with a
   zero host side are recognized for the target but dropped. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
};
5894 
5895 /*
5896  * Arrange for legacy / undefined architecture specific flags to be
5897  * ignored by mmap handling code.
5898  */
5899 #ifndef TARGET_MAP_32BIT
5900 #define TARGET_MAP_32BIT 0
5901 #endif
5902 #ifndef TARGET_MAP_HUGE_2MB
5903 #define TARGET_MAP_HUGE_2MB 0
5904 #endif
5905 #ifndef TARGET_MAP_HUGE_1GB
5906 #define TARGET_MAP_HUGE_1GB 0
5907 #endif
5908 
do_mmap(abi_ulong addr,abi_ulong len,int prot,int target_flags,int fd,off_t offset)5909 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5910                         int target_flags, int fd, off_t offset)
5911 {
5912     /*
5913      * The historical set of flags that all mmap types implicitly support.
5914      */
5915     enum {
5916         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5917                                | TARGET_MAP_PRIVATE
5918                                | TARGET_MAP_FIXED
5919                                | TARGET_MAP_ANONYMOUS
5920                                | TARGET_MAP_DENYWRITE
5921                                | TARGET_MAP_EXECUTABLE
5922                                | TARGET_MAP_UNINITIALIZED
5923                                | TARGET_MAP_GROWSDOWN
5924                                | TARGET_MAP_LOCKED
5925                                | TARGET_MAP_NORESERVE
5926                                | TARGET_MAP_POPULATE
5927                                | TARGET_MAP_NONBLOCK
5928                                | TARGET_MAP_STACK
5929                                | TARGET_MAP_HUGETLB
5930                                | TARGET_MAP_32BIT
5931                                | TARGET_MAP_HUGE_2MB
5932                                | TARGET_MAP_HUGE_1GB
5933     };
5934     int host_flags;
5935 
5936     switch (target_flags & TARGET_MAP_TYPE) {
5937     case TARGET_MAP_PRIVATE:
5938         host_flags = MAP_PRIVATE;
5939         break;
5940     case TARGET_MAP_SHARED:
5941         host_flags = MAP_SHARED;
5942         break;
5943     case TARGET_MAP_SHARED_VALIDATE:
5944         /*
5945          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5946          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5947          */
5948         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5949             return -TARGET_EOPNOTSUPP;
5950         }
5951         host_flags = MAP_SHARED_VALIDATE;
5952         if (target_flags & TARGET_MAP_SYNC) {
5953             host_flags |= MAP_SYNC;
5954         }
5955         break;
5956     default:
5957         return -TARGET_EINVAL;
5958     }
5959     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5960 
5961     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5962 }
5963 
5964 /*
5965  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5966  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5967  */
5968 #if defined(TARGET_I386)
5969 
5970 /* NOTE: there is really one LDT for all the threads */
5971 static uint8_t *ldt_table;
5972 
read_ldt(abi_ulong ptr,unsigned long bytecount)5973 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5974 {
5975     int size;
5976     void *p;
5977 
5978     if (!ldt_table)
5979         return 0;
5980     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5981     if (size > bytecount)
5982         size = bytecount;
5983     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5984     if (!p)
5985         return -TARGET_EFAULT;
5986     /* ??? Should this by byteswapped?  */
5987     memcpy(p, ldt_table, size);
5988     unlock_user(p, ptr, size);
5989     return size;
5990 }
5991 
/* XXX: add locking support */
/*
 * Install or clear one entry in the emulated LDT.
 * @ptr points to a guest struct modify_ldt_ldt_s; @bytecount must match
 * its size.  @oldmode selects the legacy modify_ldt(1) semantics (no
 * "useable" bit, no empty descriptors with contents==3).
 * Returns 0 on success or a target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* Copy the descriptor request in from guest memory, byteswapping. */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as the Linux ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 is only accepted in new mode, and only for a
       not-present segment. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit words of an x86 segment
       descriptor (base/limit split across both words, attribute bits
       in the high word; 0x7000 sets DPL=3 | S=1 | P pending below). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each LDT entry is 8 bytes; write both descriptor words in guest
       byte order. */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6083 
6084 /* specific and weird i386 syscalls */
do_modify_ldt(CPUX86State * env,int func,abi_ulong ptr,unsigned long bytecount)6085 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6086                               unsigned long bytecount)
6087 {
6088     abi_long ret;
6089 
6090     switch (func) {
6091     case 0:
6092         ret = read_ldt(ptr, bytecount);
6093         break;
6094     case 1:
6095         ret = write_ldt(env, ptr, bytecount, 1);
6096         break;
6097     case 0x11:
6098         ret = write_ldt(env, ptr, bytecount, 0);
6099         break;
6100     default:
6101         ret = -TARGET_ENOSYS;
6102         break;
6103     }
6104     return ret;
6105 }
6106 
6107 #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area: install a TLS descriptor into the guest GDT.
 * @ptr points to a guest struct modify_ldt_ldt_s.  An entry_number of -1
 * asks the kernel to pick a free TLS slot; the chosen index is written
 * back to the guest structure.  Returns 0 or a target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot for me" — find the first empty
       GDT entry in the TLS range and report it back to the guest. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as the Linux ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* contents == 3 is only valid for a not-present segment. */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit words of an x86 segment
       descriptor; 0x7000 sets DPL=3 and the S (code/data) bit. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6192 
/*
 * Emulate get_thread_area: read a TLS descriptor from the guest GDT
 * and decode it back into the guest struct modify_ldt_ldt_s at @ptr
 * (whose entry_number selects the slot).  Returns 0 or a target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Read the two descriptor words for the requested slot. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the attribute bits — the inverse of the packing done in
       do_set_thread_area (note read_exec_only and seg_not_present are
       stored inverted in the descriptor). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack into the flags word layout of struct modify_ldt_ldt_s. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6239 
/* arch_prctl is not provided for 32-bit x86 guests; TLS is managed via
   set_thread_area/get_thread_area instead. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6244 #else
do_arch_prctl(CPUX86State * env,int code,abi_ulong addr)6245 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6246 {
6247     abi_long ret = 0;
6248     abi_ulong val;
6249     int idx;
6250 
6251     switch(code) {
6252     case TARGET_ARCH_SET_GS:
6253     case TARGET_ARCH_SET_FS:
6254         if (code == TARGET_ARCH_SET_GS)
6255             idx = R_GS;
6256         else
6257             idx = R_FS;
6258         cpu_x86_load_seg(env, idx, 0);
6259         env->segs[idx].base = addr;
6260         break;
6261     case TARGET_ARCH_GET_GS:
6262     case TARGET_ARCH_GET_FS:
6263         if (code == TARGET_ARCH_GET_GS)
6264             idx = R_GS;
6265         else
6266             idx = R_FS;
6267         val = env->segs[idx].base;
6268         if (put_user(val, addr, abi_ulong))
6269             ret = -TARGET_EFAULT;
6270         break;
6271     default:
6272         ret = -TARGET_EINVAL;
6273         break;
6274     }
6275     return ret;
6276 }
#endif /* defined(TARGET_ABI32) */
6278 #endif /* defined(TARGET_I386) */
6279 
6280 /*
6281  * These constants are generic.  Supply any that are missing from the host.
6282  */
6283 #ifndef PR_SET_NAME
6284 # define PR_SET_NAME    15
6285 # define PR_GET_NAME    16
6286 #endif
6287 #ifndef PR_SET_FP_MODE
6288 # define PR_SET_FP_MODE 45
6289 # define PR_GET_FP_MODE 46
6290 # define PR_FP_MODE_FR   (1 << 0)
6291 # define PR_FP_MODE_FRE  (1 << 1)
6292 #endif
6293 #ifndef PR_SVE_SET_VL
6294 # define PR_SVE_SET_VL  50
6295 # define PR_SVE_GET_VL  51
6296 # define PR_SVE_VL_LEN_MASK  0xffff
6297 # define PR_SVE_VL_INHERIT   (1 << 17)
6298 #endif
6299 #ifndef PR_PAC_RESET_KEYS
6300 # define PR_PAC_RESET_KEYS  54
6301 # define PR_PAC_APIAKEY   (1 << 0)
6302 # define PR_PAC_APIBKEY   (1 << 1)
6303 # define PR_PAC_APDAKEY   (1 << 2)
6304 # define PR_PAC_APDBKEY   (1 << 3)
6305 # define PR_PAC_APGAKEY   (1 << 4)
6306 #endif
6307 #ifndef PR_SET_TAGGED_ADDR_CTRL
6308 # define PR_SET_TAGGED_ADDR_CTRL 55
6309 # define PR_GET_TAGGED_ADDR_CTRL 56
6310 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6311 #endif
6312 #ifndef PR_SET_IO_FLUSHER
6313 # define PR_SET_IO_FLUSHER 57
6314 # define PR_GET_IO_FLUSHER 58
6315 #endif
6316 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6317 # define PR_SET_SYSCALL_USER_DISPATCH 59
6318 #endif
6319 #ifndef PR_SME_SET_VL
6320 # define PR_SME_SET_VL  63
6321 # define PR_SME_GET_VL  64
6322 # define PR_SME_VL_LEN_MASK  0xffff
6323 # define PR_SME_VL_INHERIT   (1 << 17)
6324 #endif
6325 
6326 #include "target_prctl.h"
6327 
/* Default stub for no-argument arch-specific prctl hooks: the option is
   not implemented for this target, so reject it. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6332 
/* Default stub for one-argument arch-specific prctl hooks: the option is
   not implemented for this target, so reject it. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6337 
6338 #ifndef do_prctl_get_fp_mode
6339 #define do_prctl_get_fp_mode do_prctl_inval0
6340 #endif
6341 #ifndef do_prctl_set_fp_mode
6342 #define do_prctl_set_fp_mode do_prctl_inval1
6343 #endif
6344 #ifndef do_prctl_sve_get_vl
6345 #define do_prctl_sve_get_vl do_prctl_inval0
6346 #endif
6347 #ifndef do_prctl_sve_set_vl
6348 #define do_prctl_sve_set_vl do_prctl_inval1
6349 #endif
6350 #ifndef do_prctl_reset_keys
6351 #define do_prctl_reset_keys do_prctl_inval1
6352 #endif
6353 #ifndef do_prctl_set_tagged_addr_ctrl
6354 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6355 #endif
6356 #ifndef do_prctl_get_tagged_addr_ctrl
6357 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6358 #endif
6359 #ifndef do_prctl_get_unalign
6360 #define do_prctl_get_unalign do_prctl_inval1
6361 #endif
6362 #ifndef do_prctl_set_unalign
6363 #define do_prctl_set_unalign do_prctl_inval1
6364 #endif
6365 #ifndef do_prctl_sme_get_vl
6366 #define do_prctl_sme_get_vl do_prctl_inval0
6367 #endif
6368 #ifndef do_prctl_sme_set_vl
6369 #define do_prctl_sme_set_vl do_prctl_inval1
6370 #endif
6371 
/*
 * Emulate the prctl syscall.  Options with pointer arguments are
 * translated explicitly; arch-specific options go through the
 * do_prctl_* hooks from target_prctl.h; options taking only integer
 * arguments are passed straight through to the host; options that
 * could let the guest disable facilities QEMU itself relies on are
 * rejected with EINVAL.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            /* Host writes the signal number; translate it to the
               target numbering before storing at arg2. */
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        /* Signal number must be translated target -> host. */
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* Thread names are a fixed 16 bytes (incl. NUL). */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    /* Arch-specific options, dispatched to the per-target hooks. */
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
        {
            int val;
            ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
                                  arg3, arg4, arg5));
            if (!is_error(ret) && put_user_s32(val, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }

    case PR_GET_TID_ADDRESS:
        {
            /* Answer from our own bookkeeping, not the host. */
            TaskState *ts = get_task_state(env_cpu(env));
            return put_user_ual(ts->child_tidptr, arg2);
        }

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6515 
6516 #define NEW_STACK_SIZE 0x40000
6517 
6518 
/* Serializes thread creation: the parent holds this while setting up a
   new thread so the child (see clone_func) cannot run ahead of it. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to clone_func() on the new thread. */
typedef struct {
    CPUArchState *env;       /* CPU state for the child thread */
    pthread_mutex_t mutex;   /* protects the readiness handshake */
    pthread_cond_t cond;     /* signalled by the child once it is up */
    pthread_t thread;
    uint32_t tid;            /* filled in by the child */
    abi_ulong child_tidptr;  /* guest addresses to store the tid at, */
    abi_ulong parent_tidptr; /* if non-zero (CLONE_*_SETTID) */
    sigset_t sigmask;        /* signal mask the child restores */
} new_thread_info;
6530 
/*
 * Start routine for threads created by do_fork() with CLONE_VM.
 * Registers the new host thread with RCU and TCG, publishes its tid to
 * the guest, signals readiness to the parent, waits for the parent to
 * finish setup (via clone_lock), then enters the CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = get_task_state(cpu);
    info->tid = sys_gettid();
    task_settid(ts);
    /* Store the tid wherever the guest asked (CLONE_*_SETTID). */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6564 
6565 /* do_fork() Must return host values and target errnos (unlike most
6566    do_*() functions). */
do_fork(CPUArchState * env,unsigned int flags,abi_ulong newsp,abi_ulong parent_tidptr,target_ulong newtls,abi_ulong child_tidptr)6567 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6568                    abi_ulong parent_tidptr, target_ulong newtls,
6569                    abi_ulong child_tidptr)
6570 {
6571     CPUState *cpu = env_cpu(env);
6572     int ret;
6573     TaskState *ts;
6574     CPUState *new_cpu;
6575     CPUArchState *new_env;
6576     sigset_t sigmask;
6577 
6578     flags &= ~CLONE_IGNORED_FLAGS;
6579 
6580     /* Emulate vfork() with fork() */
6581     if (flags & CLONE_VFORK)
6582         flags &= ~(CLONE_VFORK | CLONE_VM);
6583 
6584     if (flags & CLONE_VM) {
6585         TaskState *parent_ts = get_task_state(cpu);
6586         new_thread_info info;
6587         pthread_attr_t attr;
6588 
6589         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6590             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6591             return -TARGET_EINVAL;
6592         }
6593 
6594         ts = g_new0(TaskState, 1);
6595         init_task_state(ts);
6596 
6597         /* Grab a mutex so that thread setup appears atomic.  */
6598         pthread_mutex_lock(&clone_lock);
6599 
6600         /*
6601          * If this is our first additional thread, we need to ensure we
6602          * generate code for parallel execution and flush old translations.
6603          * Do this now so that the copy gets CF_PARALLEL too.
6604          */
6605         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6606             tcg_cflags_set(cpu, CF_PARALLEL);
6607             tb_flush(cpu);
6608         }
6609 
6610         /* we create a new CPU instance. */
6611         new_env = cpu_copy(env);
6612         /* Init regs that differ from the parent.  */
6613         cpu_clone_regs_child(new_env, newsp, flags);
6614         cpu_clone_regs_parent(env, flags);
6615         new_cpu = env_cpu(new_env);
6616         new_cpu->opaque = ts;
6617         ts->bprm = parent_ts->bprm;
6618         ts->info = parent_ts->info;
6619         ts->signal_mask = parent_ts->signal_mask;
6620 
6621         if (flags & CLONE_CHILD_CLEARTID) {
6622             ts->child_tidptr = child_tidptr;
6623         }
6624 
6625         if (flags & CLONE_SETTLS) {
6626             cpu_set_tls (new_env, newtls);
6627         }
6628 
6629         memset(&info, 0, sizeof(info));
6630         pthread_mutex_init(&info.mutex, NULL);
6631         pthread_mutex_lock(&info.mutex);
6632         pthread_cond_init(&info.cond, NULL);
6633         info.env = new_env;
6634         if (flags & CLONE_CHILD_SETTID) {
6635             info.child_tidptr = child_tidptr;
6636         }
6637         if (flags & CLONE_PARENT_SETTID) {
6638             info.parent_tidptr = parent_tidptr;
6639         }
6640 
6641         ret = pthread_attr_init(&attr);
6642         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6643         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6644         /* It is not safe to deliver signals until the child has finished
6645            initializing, so temporarily block all signals.  */
6646         sigfillset(&sigmask);
6647         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6648         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6649 
6650         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6651         /* TODO: Free new CPU state if thread creation failed.  */
6652 
6653         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6654         pthread_attr_destroy(&attr);
6655         if (ret == 0) {
6656             /* Wait for the child to initialize.  */
6657             pthread_cond_wait(&info.cond, &info.mutex);
6658             ret = info.tid;
6659         } else {
6660             ret = -1;
6661         }
6662         pthread_mutex_unlock(&info.mutex);
6663         pthread_cond_destroy(&info.cond);
6664         pthread_mutex_destroy(&info.mutex);
6665         pthread_mutex_unlock(&clone_lock);
6666     } else {
6667         /* if no CLONE_VM, we consider it is a fork */
6668         if (flags & CLONE_INVALID_FORK_FLAGS) {
6669             return -TARGET_EINVAL;
6670         }
6671 
6672         /* We can't support custom termination signals */
6673         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6674             return -TARGET_EINVAL;
6675         }
6676 
6677 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6678         if (flags & CLONE_PIDFD) {
6679             return -TARGET_EINVAL;
6680         }
6681 #endif
6682 
6683         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6684         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6685             return -TARGET_EINVAL;
6686         }
6687 
6688         if (block_signals()) {
6689             return -QEMU_ERESTARTSYS;
6690         }
6691 
6692         fork_start();
6693         ret = fork();
6694         if (ret == 0) {
6695             /* Child Process.  */
6696             cpu_clone_regs_child(env, newsp, flags);
6697             fork_end(ret);
6698             /* There is a race condition here.  The parent process could
6699                theoretically read the TID in the child process before the child
6700                tid is set.  This would require using either ptrace
6701                (not implemented) or having *_tidptr to point at a shared memory
6702                mapping.  We can't repeat the spinlock hack used above because
6703                the child process gets its own copy of the lock.  */
6704             if (flags & CLONE_CHILD_SETTID)
6705                 put_user_u32(sys_gettid(), child_tidptr);
6706             if (flags & CLONE_PARENT_SETTID)
6707                 put_user_u32(sys_gettid(), parent_tidptr);
6708             ts = get_task_state(cpu);
6709             if (flags & CLONE_SETTLS)
6710                 cpu_set_tls (env, newtls);
6711             if (flags & CLONE_CHILD_CLEARTID)
6712                 ts->child_tidptr = child_tidptr;
6713         } else {
6714             cpu_clone_regs_parent(env, flags);
6715             if (flags & CLONE_PIDFD) {
6716                 int pid_fd = 0;
6717 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6718                 int pid_child = ret;
6719                 pid_fd = pidfd_open(pid_child, 0);
6720                 if (pid_fd >= 0) {
6721                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6722                                                | FD_CLOEXEC);
6723                 } else {
6724                         pid_fd = 0;
6725                 }
6726 #endif
6727                 put_user_u32(pid_fd, parent_tidptr);
6728             }
6729             fork_end(ret);
6730         }
6731         g_assert(!cpu_in_exclusive_context(cpu));
6732     }
6733     return ret;
6734 }
6735 
6736 /* warning : doesn't handle linux specific flags... */
/*
 * Translate a guest fcntl() command number into the host's number.
 * Returns -TARGET_EINVAL for commands we do not know how to translate.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands are passed through unchanged (ret = cmd). */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* The *64 lock commands only exist for 32-bit guest ABIs; they map
     * onto the host's native (already 64-bit capable) lock commands. */
    case TARGET_F_GETLK64:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK && ret <= F_SETLKW) {
        ret -= F_GETLK - 5;
    }
#endif

    return ret;
}
6842 
/*
 * Shared case list used to generate the two flock-type translation
 * helpers below; each helper defines TRANSTBL_CONVERT for its own
 * direction before expanding this macro.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6849 
target_to_host_flock(int type)6850 static int target_to_host_flock(int type)
6851 {
6852 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6853     FLOCK_TRANSTBL
6854 #undef  TRANSTBL_CONVERT
6855     return -TARGET_EINVAL;
6856 }
6857 
host_to_target_flock(int type)6858 static int host_to_target_flock(int type)
6859 {
6860 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6861     FLOCK_TRANSTBL
6862 #undef  TRANSTBL_CONVERT
6863     /* if we don't know how to convert the value coming
6864      * from the host we copy to the target field as-is
6865      */
6866     return type;
6867 }
6868 
copy_from_user_flock(struct flock * fl,abi_ulong target_flock_addr)6869 static inline abi_long copy_from_user_flock(struct flock *fl,
6870                                             abi_ulong target_flock_addr)
6871 {
6872     struct target_flock *target_fl;
6873     int l_type;
6874 
6875     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6876         return -TARGET_EFAULT;
6877     }
6878 
6879     __get_user(l_type, &target_fl->l_type);
6880     l_type = target_to_host_flock(l_type);
6881     if (l_type < 0) {
6882         return l_type;
6883     }
6884     fl->l_type = l_type;
6885     __get_user(fl->l_whence, &target_fl->l_whence);
6886     __get_user(fl->l_start, &target_fl->l_start);
6887     __get_user(fl->l_len, &target_fl->l_len);
6888     __get_user(fl->l_pid, &target_fl->l_pid);
6889     unlock_user_struct(target_fl, target_flock_addr, 0);
6890     return 0;
6891 }
6892 
copy_to_user_flock(abi_ulong target_flock_addr,const struct flock * fl)6893 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6894                                           const struct flock *fl)
6895 {
6896     struct target_flock *target_fl;
6897     short l_type;
6898 
6899     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6900         return -TARGET_EFAULT;
6901     }
6902 
6903     l_type = host_to_target_flock(fl->l_type);
6904     __put_user(l_type, &target_fl->l_type);
6905     __put_user(fl->l_whence, &target_fl->l_whence);
6906     __put_user(fl->l_start, &target_fl->l_start);
6907     __put_user(fl->l_len, &target_fl->l_len);
6908     __put_user(fl->l_pid, &target_fl->l_pid);
6909     unlock_user_struct(target_fl, target_flock_addr, 1);
6910     return 0;
6911 }
6912 
/* Function types for the flock64 copy helpers; on 32-bit ARM an
 * alternative (OABI) pair with a different guest layout exists below. */
typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6915 
6916 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * Old-ABI ARM layout of struct flock64: QEMU_PACKED, so l_start follows
 * l_whence with no alignment padding (unlike the EABI layout).
 */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
6924 
copy_from_user_oabi_flock64(struct flock * fl,abi_ulong target_flock_addr)6925 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6926                                                    abi_ulong target_flock_addr)
6927 {
6928     struct target_oabi_flock64 *target_fl;
6929     int l_type;
6930 
6931     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6932         return -TARGET_EFAULT;
6933     }
6934 
6935     __get_user(l_type, &target_fl->l_type);
6936     l_type = target_to_host_flock(l_type);
6937     if (l_type < 0) {
6938         return l_type;
6939     }
6940     fl->l_type = l_type;
6941     __get_user(fl->l_whence, &target_fl->l_whence);
6942     __get_user(fl->l_start, &target_fl->l_start);
6943     __get_user(fl->l_len, &target_fl->l_len);
6944     __get_user(fl->l_pid, &target_fl->l_pid);
6945     unlock_user_struct(target_fl, target_flock_addr, 0);
6946     return 0;
6947 }
6948 
copy_to_user_oabi_flock64(abi_ulong target_flock_addr,const struct flock * fl)6949 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6950                                                  const struct flock *fl)
6951 {
6952     struct target_oabi_flock64 *target_fl;
6953     short l_type;
6954 
6955     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6956         return -TARGET_EFAULT;
6957     }
6958 
6959     l_type = host_to_target_flock(fl->l_type);
6960     __put_user(l_type, &target_fl->l_type);
6961     __put_user(fl->l_whence, &target_fl->l_whence);
6962     __put_user(fl->l_start, &target_fl->l_start);
6963     __put_user(fl->l_len, &target_fl->l_len);
6964     __put_user(fl->l_pid, &target_fl->l_pid);
6965     unlock_user_struct(target_fl, target_flock_addr, 1);
6966     return 0;
6967 }
6968 #endif
6969 
copy_from_user_flock64(struct flock * fl,abi_ulong target_flock_addr)6970 static inline abi_long copy_from_user_flock64(struct flock *fl,
6971                                               abi_ulong target_flock_addr)
6972 {
6973     struct target_flock64 *target_fl;
6974     int l_type;
6975 
6976     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6977         return -TARGET_EFAULT;
6978     }
6979 
6980     __get_user(l_type, &target_fl->l_type);
6981     l_type = target_to_host_flock(l_type);
6982     if (l_type < 0) {
6983         return l_type;
6984     }
6985     fl->l_type = l_type;
6986     __get_user(fl->l_whence, &target_fl->l_whence);
6987     __get_user(fl->l_start, &target_fl->l_start);
6988     __get_user(fl->l_len, &target_fl->l_len);
6989     __get_user(fl->l_pid, &target_fl->l_pid);
6990     unlock_user_struct(target_fl, target_flock_addr, 0);
6991     return 0;
6992 }
6993 
copy_to_user_flock64(abi_ulong target_flock_addr,const struct flock * fl)6994 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6995                                             const struct flock *fl)
6996 {
6997     struct target_flock64 *target_fl;
6998     short l_type;
6999 
7000     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7001         return -TARGET_EFAULT;
7002     }
7003 
7004     l_type = host_to_target_flock(fl->l_type);
7005     __put_user(l_type, &target_fl->l_type);
7006     __put_user(fl->l_whence, &target_fl->l_whence);
7007     __put_user(fl->l_start, &target_fl->l_start);
7008     __put_user(fl->l_len, &target_fl->l_len);
7009     __put_user(fl->l_pid, &target_fl->l_pid);
7010     unlock_user_struct(target_fl, target_flock_addr, 1);
7011     return 0;
7012 }
7013 
/*
 * Emulate fcntl(2): translate the command number and any pointed-to
 * structures from guest to host, invoke the host fcntl, and translate
 * results back.  Returns the host result or a -TARGET_* error code.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    /* Record locks: copy struct flock in, and back out for GETLK. */
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        break;

    /* 64-bit and open-file-description locks use the flock64 layout. */
    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        break;

    /* File status flags need bitmask translation in both directions. */
    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    /* Owner-ex commands pass a small struct; swap its two fields. */
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Signal numbers are translated between guest and host. */
    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    /* Plain integer argument: no translation needed. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
7136 
7137 #ifdef USE_UID16
7138 
/* Narrow a UID to 16 bits, clamping out-of-range ids to 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
7146 
/* Narrow a GID to 16 bits, clamping out-of-range ids to 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
7154 
/* Widen a 16-bit UID; the 16-bit -1 sentinel becomes a full-width -1. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
7162 
/* Widen a 16-bit GID; the 16-bit -1 sentinel becomes a full-width -1. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Convert a 16-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* With 16-bit ids, writing an id to guest memory uses a u16 store. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7176 
7177 #else /* !USE_UID16 */
/* Without USE_UID16 the guest already uses full-width ids, so no
 * narrowing, widening, or -1 sentinel handling is needed: these are
 * identity functions kept so callers stay uniform. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Convert a 32-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* With 32-bit ids, writing an id to guest memory uses a u32 store. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7200 
7201 #endif /* USE_UID16 */
7202 
7203 /* We must do direct syscalls for setting UID/GID, because we want to
7204  * implement the Linux system call semantics of "change only for this thread",
7205  * not the libc/POSIX semantics of "change for all threads in process".
7206  * (See http://ewontfix.com/17/ for more details.)
7207  * We use the 32-bit version of the syscalls if present; if it is not
7208  * then either the host architecture supports 32-bit UIDs natively with
7209  * the standard syscall, or the 16-bit UID is the best we can do.
7210  */
7211 #ifdef __NR_setuid32
7212 #define __NR_sys_setuid __NR_setuid32
7213 #else
7214 #define __NR_sys_setuid __NR_setuid
7215 #endif
7216 #ifdef __NR_setgid32
7217 #define __NR_sys_setgid __NR_setgid32
7218 #else
7219 #define __NR_sys_setgid __NR_setgid
7220 #endif
7221 #ifdef __NR_setresuid32
7222 #define __NR_sys_setresuid __NR_setresuid32
7223 #else
7224 #define __NR_sys_setresuid __NR_setresuid
7225 #endif
7226 #ifdef __NR_setresgid32
7227 #define __NR_sys_setresgid __NR_setresgid32
7228 #else
7229 #define __NR_sys_setresgid __NR_setresgid
7230 #endif
7231 #ifdef __NR_setgroups32
7232 #define __NR_sys_setgroups __NR_setgroups32
7233 #else
7234 #define __NR_sys_setgroups __NR_setgroups
7235 #endif
7236 
/* Raw syscall stubs for the id-setting calls; see the comment above on
 * why these bypass libc (per-thread rather than process-wide effect). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
_syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7242 
7243 void syscall_init(void)
7244 {
7245     IOCTLEntry *ie;
7246     const argtype *arg_type;
7247     int size;
7248 
7249     thunk_init(STRUCT_MAX);
7250 
7251 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7252 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7253 #include "syscall_types.h"
7254 #undef STRUCT
7255 #undef STRUCT_SPECIAL
7256 
7257     /* we patch the ioctl size if necessary. We rely on the fact that
7258        no ioctl has all the bits at '1' in the size field */
7259     ie = ioctl_entries;
7260     while (ie->target_cmd != 0) {
7261         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7262             TARGET_IOC_SIZEMASK) {
7263             arg_type = ie->arg_type;
7264             if (arg_type[0] != TYPE_PTR) {
7265                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7266                         ie->target_cmd);
7267                 exit(1);
7268             }
7269             arg_type++;
7270             size = thunk_type_size(arg_type, 0);
7271             ie->target_cmd = (ie->target_cmd &
7272                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7273                 (size << TARGET_IOC_SIZESHIFT);
7274         }
7275 
7276         /* automatic consistency check if same arch */
7277 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7278     (defined(__x86_64__) && defined(TARGET_X86_64))
7279         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7280             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7281                     ie->name, ie->target_cmd, ie->host_cmd);
7282         }
7283 #endif
7284         ie++;
7285     }
7286 }
7287 
7288 #ifdef TARGET_NR_truncate64
/*
 * Emulate truncate64: reassemble the 64-bit length from two argument
 * words and call the host truncate().  On ABIs that pass 64-bit values
 * in aligned register pairs one padding argument is skipped first.
 */
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    bool pair_aligned = regpairs_aligned(cpu_env, TARGET_NR_truncate64);
    abi_long word0 = pair_aligned ? arg3 : arg2;
    abi_long word1 = pair_aligned ? arg4 : arg3;

    return get_errno(truncate(arg1, target_offset64(word0, word1)));
}
7300 #endif
7301 
7302 #ifdef TARGET_NR_ftruncate64
/*
 * Emulate ftruncate64: reassemble the 64-bit length from two argument
 * words and call the host ftruncate().  On ABIs that pass 64-bit values
 * in aligned register pairs one padding argument is skipped first.
 */
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    bool pair_aligned = regpairs_aligned(cpu_env, TARGET_NR_ftruncate64);
    abi_long word0 = pair_aligned ? arg3 : arg2;
    abi_long word1 = pair_aligned ? arg4 : arg3;

    return get_errno(ftruncate(arg1, target_offset64(word0, word1)));
}
7314 #endif
7315 
7316 #if defined(TARGET_NR_timer_settime) || \
7317     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
target_to_host_itimerspec(struct itimerspec * host_its,abi_ulong target_addr)7318 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7319                                                  abi_ulong target_addr)
7320 {
7321     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7322                                 offsetof(struct target_itimerspec,
7323                                          it_interval)) ||
7324         target_to_host_timespec(&host_its->it_value, target_addr +
7325                                 offsetof(struct target_itimerspec,
7326                                          it_value))) {
7327         return -TARGET_EFAULT;
7328     }
7329 
7330     return 0;
7331 }
7332 #endif
7333 
7334 #if defined(TARGET_NR_timer_settime64) || \
7335     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
target_to_host_itimerspec64(struct itimerspec * host_its,abi_ulong target_addr)7336 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7337                                                    abi_ulong target_addr)
7338 {
7339     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7340                                   offsetof(struct target__kernel_itimerspec,
7341                                            it_interval)) ||
7342         target_to_host_timespec64(&host_its->it_value, target_addr +
7343                                   offsetof(struct target__kernel_itimerspec,
7344                                            it_value))) {
7345         return -TARGET_EFAULT;
7346     }
7347 
7348     return 0;
7349 }
7350 #endif
7351 
7352 #if ((defined(TARGET_NR_timerfd_gettime) || \
7353       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7354       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
host_to_target_itimerspec(abi_ulong target_addr,struct itimerspec * host_its)7355 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7356                                                  struct itimerspec *host_its)
7357 {
7358     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7359                                                        it_interval),
7360                                 &host_its->it_interval) ||
7361         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7362                                                        it_value),
7363                                 &host_its->it_value)) {
7364         return -TARGET_EFAULT;
7365     }
7366     return 0;
7367 }
7368 #endif
7369 
7370 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7371       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7372       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
host_to_target_itimerspec64(abi_ulong target_addr,struct itimerspec * host_its)7373 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7374                                                    struct itimerspec *host_its)
7375 {
7376     if (host_to_target_timespec64(target_addr +
7377                                   offsetof(struct target__kernel_itimerspec,
7378                                            it_interval),
7379                                   &host_its->it_interval) ||
7380         host_to_target_timespec64(target_addr +
7381                                   offsetof(struct target__kernel_itimerspec,
7382                                            it_value),
7383                                   &host_its->it_value)) {
7384         return -TARGET_EFAULT;
7385     }
7386     return 0;
7387 }
7388 #endif
7389 
7390 #if defined(TARGET_NR_adjtimex) || \
7391     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest struct target_timex at @target_addr into the host
 * struct timex (for adjtimex()/clock_adjtime()).
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy with byte-swapping via __get_user. */
    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7426 
/*
 * Copy the host struct timex back out to a guest struct target_timex at
 * @target_addr.  Returns 0 on success or -TARGET_EFAULT on a bad guest
 * address.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Field-by-field copy with byte-swapping via __put_user. */
    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7461 #endif
7462 
7463 
7464 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a guest struct target__kernel_timex (64-bit time layout) at
 * @target_addr into the host struct timex.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    /* The time field has a 64-bit layout and is converted separately
     * by its own copy helper. */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7503 
/*
 * Copy the host struct timex back out to a guest struct
 * target__kernel_timex (64-bit time layout) at @target_addr.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

   /* The time field has a 64-bit layout and is converted separately
    * by its own copy helper. */
   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7542 #endif
7543 
/*
 * If the host libc does not expose sigev_notify_thread_id, alias it
 * to the internal union member so the code below can use one name.
 */
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif
7547 
/*
 * Convert a guest struct sigevent at @target_addr into the host
 * representation in *host_sevp, translating the signal number to the
 * host numbering.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    /* Guest signal numbers may differ from the host's. */
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7573 
7574 #if defined(TARGET_NR_mlockall)
target_to_host_mlockall_arg(int arg)7575 static inline int target_to_host_mlockall_arg(int arg)
7576 {
7577     int result = 0;
7578 
7579     if (arg & TARGET_MCL_CURRENT) {
7580         result |= MCL_CURRENT;
7581     }
7582     if (arg & TARGET_MCL_FUTURE) {
7583         result |= MCL_FUTURE;
7584     }
7585 #ifdef MCL_ONFAULT
7586     if (arg & TARGET_MCL_ONFAULT) {
7587         result |= MCL_ONFAULT;
7588     }
7589 #endif
7590 
7591     return result;
7592 }
7593 #endif
7594 
/*
 * Translate guest msync() flags to host flags.  Bits outside the three
 * known flags are passed through unchanged so the host syscall can
 * reject invalid values itself.
 */
static inline int target_to_host_msync_arg(abi_long arg)
{
    int host_flags = arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE |
                             TARGET_MS_SYNC);

    if (arg & TARGET_MS_ASYNC) {
        host_flags |= MS_ASYNC;
    }
    if (arg & TARGET_MS_INVALIDATE) {
        host_flags |= MS_INVALIDATE;
    }
    if (arg & TARGET_MS_SYNC) {
        host_flags |= MS_SYNC;
    }
    return host_flags;
}
7602 
7603 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7604      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7605      defined(TARGET_NR_newfstatat))
/*
 * Convert a host struct stat into the guest's 64-bit stat layout at
 * @target_addr.  On 32-bit Arm the EABI variant of the structure is
 * selected when the emulated CPU is running an EABI binary.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unconverted fields read as 0. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry a second, 32-bit copy of the inode. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps, when the host libc provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unconverted fields read as 0. */
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry a second, 32-bit copy of the inode. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps, when the host libc provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7678 #endif
7679 
7680 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a struct statx (already in host byte order, but using the
 * target_statx layout) out to guest memory at @target_addr, swapping
 * each field to guest byte order.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so reserved/spare fields read as 0. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7719 #endif
7720 
/*
 * Issue a raw futex syscall on the host, choosing between the classic
 * entry point and the _time64 variant so the timespec layout we pass
 * matches what the host kernel expects for its time_t width.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only if the host defines none of the futex syscalls. */
    g_assert_not_reached();
}
7745 
/*
 * Like do_sys_futex(), but issues the syscall through the safe_*()
 * helpers and converts the host result with get_errno().  Unlike
 * do_sys_futex(), a host without any futex syscall yields
 * -TARGET_ENOSYS instead of an assertion.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7770 
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  In any case they are probably useless, because guest atomic
   operations won't work either.  */
7776 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Implement the futex/futex_time64 syscalls for the guest: adjust the
 * per-operation arguments (byte order of compared values, the meaning
 * of the 4th argument, second futex address), convert the timeout if
 * one is used, then hand off to do_safe_futex().
 *
 * @time64 selects the 64-bit timespec layout for the timeout.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* VAL is compared against guest memory: swap to guest order. */
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        /* These ops take no timeout; ignore whatever the guest passed. */
        timeout = 0;
        break;
    case FUTEX_FD:
        /* VAL is a signal number here, which must be translated. */
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        /* VAL3 is compared against guest memory: swap to guest order. */
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
          */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    /* Convert a real timeout, in the layout selected by @time64. */
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
7842 #endif
7843 
7844 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2): read the guest's file_handle buffer
 * size, call the host syscall into a temporary handle, then copy the
 * result back, byte-swapping only the two documented header fields.
 * Returns the host syscall result or -TARGET_EFAULT.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled and total_size can wrap
     * for absurd values; oversized handle_bytes is rejected by the
     * kernel, but confirm no extra validation is wanted here. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    /* The mount ID is returned through a separate out-parameter. */
    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7896 #endif
7897 
7898 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2): copy the guest's file_handle into a
 * host-order copy (only the two header fields need swapping), convert
 * the open flags, and call the host syscall.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* The opaque payload is copied as-is; fix up the header fields. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7930 #endif
7931 
7932 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7933 
/*
 * Common implementation of signalfd() and signalfd4(): validate and
 * convert the flags, convert the guest signal mask, create the host
 * signalfd, and attach the signalfd fd translator to the new fd.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only SFD_NONBLOCK and SFD_CLOEXEC are valid flags. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need conversion to the guest layout. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7961 #endif
7962 
7963 /* Map host to target signal numbers for the wait family of syscalls.
7964    Assume all other status bits are the same.  */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* The terminating signal occupies the low 7 bits. */
        return (status & ~0x7f) | host_to_target_signal(WTERMSIG(status));
    }
    if (WIFSTOPPED(status)) {
        /* The stop signal occupies bits 8-15. */
        return (status & 0xff)
               | (host_to_target_signal(WSTOPSIG(status)) << 8);
    }
    return status;
}
7976 
/* Emulate /proc/self/cmdline: write the saved argv strings to @fd,
 * each including its terminating NUL.  Returns 0 or -1 on I/O error. */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = get_task_state(cpu)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 so the NUL separator between arguments is written too. */
        size_t len = strlen(bprm->argv[i]) + 1;

        /* NOTE(review): a short write is treated as failure rather than
         * retried — confirm best-effort is acceptable here. */
        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
7993 
/* State shared by the open_self_maps_* helpers below. */
struct open_self_maps_data {
    TaskState *ts;               /* task whose memory map is printed */
    IntervalTreeRoot *host_maps; /* parsed host /proc/self/maps, or NULL */
    int fd;                      /* output file descriptor */
    bool smaps;                  /* emit smaps detail blocks as well */
};
8000 
8001 /*
8002  * Subroutine to output one line of /proc/self/maps,
8003  * or one region of /proc/self/smaps.
8004  */
8005 
/*
 * True if [S, E) is the stack region, judged against the recorded
 * stack limit L.  HPPA compares the region end instead of the start
 * (its stack grows toward higher addresses — the limit sits at the
 * opposite edge from other targets).
 */
#ifdef TARGET_HPPA
# define test_stack(S, E, L)  (E == L)
#else
# define test_stack(S, E, L)  (S == L)
#endif
8011 
/*
 * Emit one record for guest range [start, end) with protection @flags,
 * taking device/inode/offset/path details from the host mapping @mi.
 * When d->smaps is set, an smaps-style detail block follows the line.
 */
static void open_self_maps_4(const struct open_self_maps_data *d,
                             const MapInfo *mi, abi_ptr start,
                             abi_ptr end, unsigned flags)
{
    const struct image_info *info = d->ts->info;
    const char *path = mi->path;
    uint64_t offset;
    int fd = d->fd;
    int count;

    /* Substitute pseudo-path names for the special guest regions. */
    if (test_stack(start, end, info->stack_limit)) {
        path = "[stack]";
    } else if (start == info->brk) {
        path = "[heap]";
    } else if (start == info->vdso) {
        path = "[vdso]";
#ifdef TARGET_X86_64
    } else if (start == TARGET_VSYSCALL_PAGE) {
        path = "[vsyscall]";
#endif
    }

    /* Except null device (MAP_ANON), adjust offset for this fragment. */
    offset = mi->offset;
    if (mi->dev) {
        uintptr_t hstart = (uintptr_t)g2h_untagged(start);
        offset += hstart - mi->itree.start;
    }

    count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
                    start, end,
                    (flags & PAGE_READ) ? 'r' : '-',
                    (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                    (flags & PAGE_EXEC) ? 'x' : '-',
                    mi->is_priv ? 'p' : 's',
                    offset, major(mi->dev), minor(mi->dev),
                    (uint64_t)mi->inode);
    if (path) {
        /* Pad so the path starts at a fixed column (73). */
        dprintf(fd, "%*s%s\n", 73 - count, "", path);
    } else {
        dprintf(fd, "\n");
    }

    if (d->smaps) {
        unsigned long size = end - start;
        unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
        unsigned long size_kb = size >> 10;

        /*
         * Most smaps counters are not tracked in user space, so they
         * are reported as zero; only the sizes, the Anonymous figure
         * and the protection-derived VmFlags carry real data.
         */
        dprintf(fd, "Size:                  %lu kB\n"
                "KernelPageSize:        %lu kB\n"
                "MMUPageSize:           %lu kB\n"
                "Rss:                   0 kB\n"
                "Pss:                   0 kB\n"
                "Pss_Dirty:             0 kB\n"
                "Shared_Clean:          0 kB\n"
                "Shared_Dirty:          0 kB\n"
                "Private_Clean:         0 kB\n"
                "Private_Dirty:         0 kB\n"
                "Referenced:            0 kB\n"
                "Anonymous:             %lu kB\n"
                "LazyFree:              0 kB\n"
                "AnonHugePages:         0 kB\n"
                "ShmemPmdMapped:        0 kB\n"
                "FilePmdMapped:         0 kB\n"
                "Shared_Hugetlb:        0 kB\n"
                "Private_Hugetlb:       0 kB\n"
                "Swap:                  0 kB\n"
                "SwapPss:               0 kB\n"
                "Locked:                0 kB\n"
                "THPeligible:    0\n"
                "VmFlags:%s%s%s%s%s%s%s%s\n",
                size_kb, page_size_kb, page_size_kb,
                (flags & PAGE_ANON ? size_kb : 0),
                (flags & PAGE_READ) ? " rd" : "",
                (flags & PAGE_WRITE_ORG) ? " wr" : "",
                (flags & PAGE_EXEC) ? " ex" : "",
                mi->is_priv ? "" : " sh",
                (flags & PAGE_READ) ? " mr" : "",
                (flags & PAGE_WRITE_ORG) ? " mw" : "",
                (flags & PAGE_EXEC) ? " me" : "",
                mi->is_priv ? "" : " ms");
    }
}
8096 
/*
 * Callback for walk_memory_regions, when read_self_maps() fails.
 * Proceed without the benefit of host /proc/self/maps cross-check:
 * every region is emitted with an empty, private MapInfo.
 */
static int open_self_maps_3(void *opaque, target_ulong guest_start,
                            target_ulong guest_end, unsigned long flags)
{
    static const MapInfo mi = { .is_priv = true };

    open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
    return 0;
}
8109 
/*
 * Callback for walk_memory_regions, when read_self_maps() succeeds.
 * Splits the guest region wherever it crosses a boundary between host
 * mappings, so each output record carries the right host file info.
 */
static int open_self_maps_2(void *opaque, target_ulong guest_start,
                            target_ulong guest_end, unsigned long flags)
{
    const struct open_self_maps_data *d = opaque;
    uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
    uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);

#ifdef TARGET_X86_64
    /*
     * Because of the extremely high position of the page within the guest
     * virtual address space, this is not backed by host memory at all.
     * Therefore the loop below would fail.  This is the only instance
     * of not having host backing memory.
     */
    if (guest_start == TARGET_VSYSCALL_PAGE) {
        return open_self_maps_3(opaque, guest_start, guest_end, flags);
    }
#endif

    while (1) {
        IntervalTreeNode *n =
            interval_tree_iter_first(d->host_maps, host_start, host_start);
        /* NOTE(review): n is assumed non-NULL, i.e. every mapped guest
         * page is expected to appear in the host maps — confirm. */
        MapInfo *mi = container_of(n, MapInfo, itree);
        uintptr_t this_hlast = MIN(host_last, n->last);
        target_ulong this_gend = h2g(this_hlast) + 1;

        open_self_maps_4(d, mi, guest_start, this_gend, flags);

        if (this_hlast == host_last) {
            return 0;
        }
        /* Continue with the next host mapping that backs this region. */
        host_start = this_hlast + 1;
        guest_start = h2g(host_start);
    }
}
8148 
/* Produce /proc/self/maps (or smaps, if @smaps) content into @fd. */
static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
{
    struct open_self_maps_data d = {
        .ts = get_task_state(env_cpu(env)),
        .fd = fd,
        .smaps = smaps
    };

    /* Hold mmap_lock so the guest map cannot change during the walk. */
    mmap_lock();
    d.host_maps = read_self_maps();
    if (d.host_maps) {
        walk_memory_regions(&d, open_self_maps_2);
        free_self_maps(d.host_maps);
    } else {
        /* No host map info available: emit without cross-checking. */
        walk_memory_regions(&d, open_self_maps_3);
    }
    mmap_unlock();
    return 0;
}
8168 
/* Fill handler for the faked /proc/self/maps. */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
8173 
/* Fill handler for the faked /proc/self/smaps. */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
8178 
/*
 * Emulate /proc/self/stat: emit the 44 space-separated fields, filling
 * in only those we can know (pid, name, state, ppid, thread count,
 * start time, stack base) and writing 0 for the rest.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = get_task_state(cpu);
    g_autoptr(GString) buf = g_string_new(NULL);

    for (int field = 0; field < 44; field++) {
        switch (field) {
        case 0: /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
            break;
        case 1: { /* app name: basename of argv[0], truncated to 15 */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
            break;
        }
        case 2: /* task state */
            g_string_assign(buf, "R "); /* we are running right now */
            break;
        case 3: /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
            break;
        case 19: { /* num_threads: one per emulated CPU */
            int cpus = 0;
            WITH_RCU_READ_LOCK_GUARD() {
                CPUState *cpu_iter;
                CPU_FOREACH(cpu_iter) {
                    cpus++;
                }
            }
            g_string_printf(buf, "%d ", cpus);
            break;
        }
        case 21: /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
            break;
        case 27: /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
            break;
        default: /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", field == 43 ? '\n' : ' ');
            break;
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
8229 
/* Emulate /proc/self/auxv: copy the auxiliary vector saved at exec
 * time from the guest stack into @fd. */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = get_task_state(cpu);
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop above, so
         * unlock_user receives the adjusted values rather than the
         * originals from lock_user — confirm this is intended. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8259 
/*
 * Return 1 if @filename refers to this process's own /proc @entry,
 * i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>"; 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* A numeric directory only matches if it is our own pid. */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
8283 
/*
 * Write an exception report to @logfile (no-op when it is NULL):
 * the formatted message, the executable path, a CPU register dump,
 * and the guest memory map.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                      const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}
8296 
/*
 * Report a fatal guest exception to stderr and, when a separate QEMU
 * log file is configured, to that log as well.
 */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        /* May return NULL; excp_dump_file handles that case. */
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8310 
8311 #include "target_proc.h"
8312 
8313 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8314     defined(HAVE_ARCH_PROC_CPUINFO) || \
8315     defined(HAVE_ARCH_PROC_HARDWARE)
/* Return nonzero when @filename is exactly the given /proc @entry. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8320 #endif
8321 
8322 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Emulate /proc/net/route for a cross-endian guest: copy the host file
 * through to @fd, byte-swapping the destination, gateway and mask
 * words that the kernel prints in raw host byte order.
 * Returns 0 on success, -1 if the host file cannot be opened.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    if (read == -1) {
        /* Empty or unreadable file: line may be NULL, emit nothing. */
        free(line);
        fclose(fp);
        return 0;
    }
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        /* %15s bounds the interface name to the size of iface[]. */
        fields = sscanf(line,
                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            /* Skip lines that do not look like route entries. */
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
8365 #endif
8366 
/*
 * Intercept opens of pseudo-files whose contents QEMU must synthesize
 * for the guest (/proc/self entries and selected /proc files).
 *
 * Returns an open host fd on success, -1 (errno set) on failure, or -2
 * if @fname is not handled here and the caller should do a real open.
 */
static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
                              const char *fname, int flags, mode_t mode,
                              int openat2_resolve, bool safe)
{
    g_autofree char *proc_name = NULL;
    const char *pathname;
    struct fake_open {
        const char *filename;                        /* entry name or full path */
        int (*fill)(CPUArchState *cpu_env, int fd);  /* content generator */
        int (*cmp)(const char *s1, const char *s2);  /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_CPUINFO)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_HARDWARE)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* if this is a file from /proc/ filesystem, expand full name */
    proc_name = realpath(fname, NULL);
    if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
        pathname = proc_name;
    } else {
        pathname = fname;
    }

    /* /proc/self/exe is redirected to the binary being emulated. */
    if (is_proc_myself(pathname, "exe")) {
        /* Honor openat2 resolve flags */
        if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
            (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
            errno = ELOOP;
            return -1;
        }
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    /* Linear scan of the table; the NULL sentinel terminates. */
    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* Prefer an anonymous memfd to hold the synthesized contents. */
        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            /* The fd keeps the file alive; hide the name immediately. */
            unlink(filename);
        }

        /* Generate the fake contents; propagate the filler's error. */
        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated data from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return -2;
}
8460 
/*
 * openat() on behalf of the guest: first give the /proc faking layer a
 * chance to satisfy the request; -2 means "not faked", in which case we
 * fall through to a real host openat() on the (possibly remapped) path.
 */
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    int ret = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0,
                                 safe);

    if (ret != -2) {
        return ret;
    }

    return safe ? safe_openat(dirfd, path(pathname), flags, mode)
                : openat(dirfd, path(pathname), flags, mode);
}
8475 
8476 
/*
 * Implement openat2(2) for the guest: copy the open_how struct from
 * guest memory (rejecting sizes smaller than v0 and logging sizes newer
 * than we know about), convert its fields from guest byte order, and
 * route the request through the same /proc faking layer as plain open().
 * Returns a guest fd or a -TARGET_* error.
 */
static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
                      abi_ptr guest_pathname, abi_ptr guest_open_how,
                      abi_ulong guest_size)
{
    struct open_how_ver0 how = {0};
    char *pathname;
    int ret;

    if (guest_size < sizeof(struct target_open_how_ver0)) {
        return -TARGET_EINVAL;
    }
    /* Tolerates larger-but-zero-padded guest structs, like the kernel. */
    ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
    if (ret) {
        if (ret == -TARGET_E2BIG) {
            qemu_log_mask(LOG_UNIMP,
                          "Unimplemented openat2 open_how size: "
                          TARGET_ABI_FMT_lu "\n", guest_size);
        }
        return ret;
    }
    pathname = lock_user_string(guest_pathname);
    if (!pathname) {
        return -TARGET_EFAULT;
    }

    /* Convert fields to host byte order and host open-flag encoding. */
    how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
    how.mode = tswap64(how.mode);
    how.resolve = tswap64(how.resolve);
    int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
                                how.resolve, true);
    if (fd > -2) {
        /* Handled (or failed) by the faking layer. */
        ret = get_errno(fd);
    } else {
        ret = get_errno(safe_openat2(dirfd, pathname, &how,
                                     sizeof(struct open_how_ver0)));
    }

    fd_trans_unregister(ret);
    unlock_user(pathname, guest_pathname, 0);
    return ret;
}
8518 
do_guest_readlink(const char * pathname,char * buf,size_t bufsiz)8519 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8520 {
8521     ssize_t ret;
8522 
8523     if (!pathname || !buf) {
8524         errno = EFAULT;
8525         return -1;
8526     }
8527 
8528     if (!bufsiz) {
8529         /* Short circuit this for the magic exe check. */
8530         errno = EINVAL;
8531         return -1;
8532     }
8533 
8534     if (is_proc_myself((const char *)pathname, "exe")) {
8535         /*
8536          * Don't worry about sign mismatch as earlier mapping
8537          * logic would have thrown a bad address error.
8538          */
8539         ret = MIN(strlen(exec_path), bufsiz);
8540         /* We cannot NUL terminate the string. */
8541         memcpy(buf, exec_path, ret);
8542     } else {
8543         ret = readlink(path(pathname), buf, bufsiz);
8544     }
8545 
8546     return ret;
8547 }
8548 
/*
 * Common implementation of execve(2) and execveat(2) for the guest.
 * Counts and locks the guest argv/envp pointer arrays, builds host
 * string arrays, rewrites /proc/self/exe to the real executable path,
 * and performs the exec through the safe_syscall wrappers.  On success
 * the call does not return; otherwise a -TARGET_* error is returned.
 */
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    /* First pass: count argv entries up to the NULL terminator. */
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    /* Same for envp. */
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    /* +1 slot each for the terminating NULL pointer. */
    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    /* Second pass: lock each guest string into host memory. */
    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    /* Redirect /proc/self/exe to the binary actually being emulated. */
    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /*
     * Re-walk the guest arrays to recover each string's guest address so
     * the locked host copies can be released (no write-back, len 0).
     */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
8664 
8665 #define TIMER_MAGIC 0x0caf0000
8666 #define TIMER_MAGIC_MASK 0xffff0000
8667 
8668 /* Convert QEMU provided timer ID back to internal 16bit index format */
get_timer_id(abi_long arg)8669 static target_timer_t get_timer_id(abi_long arg)
8670 {
8671     target_timer_t timerid = arg;
8672 
8673     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8674         return -TARGET_EINVAL;
8675     }
8676 
8677     timerid &= 0xffff;
8678 
8679     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8680         return -TARGET_EINVAL;
8681     }
8682 
8683     return timerid;
8684 }
8685 
/*
 * Copy a guest CPU affinity mask (an array of abi_ulong words at
 * @target_addr, @target_size bytes) into a host unsigned long bit mask,
 * remapping bit positions across possibly different word sizes.
 * @host_size must be >= @target_size; host bits beyond the guest mask
 * are cleared.  Returns 0 or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    /* For each guest word, scatter its set bits into the host words. */
    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;   /* global bit number */
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
8719 
/*
 * Convert a host cpu_set_t bit mask into guest abi_ulong words at
 * @target_addr (@target_size bytes), remapping bit positions across
 * possibly different word sizes.  @host_size must be >= @target_size.
 * Returns 0, or -TARGET_EFAULT if the guest buffer cannot be mapped.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    const unsigned target_bits = sizeof(abi_ulong) * 8;
    const unsigned host_bits = sizeof(*host_mask) * 8;
    size_t nwords = target_size / sizeof(abi_ulong);
    abi_ulong *target_mask;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    /* For each guest word, gather the corresponding host bits. */
    for (size_t word = 0; word < nwords; word++) {
        abi_ulong out = 0;

        for (unsigned j = 0; j < target_bits; j++) {
            unsigned bit = word * target_bits + j;  /* global bit number */

            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                out |= 1UL << j;
            }
        }
        __put_user(out, &target_mask[word]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8752 
8753 #ifdef TARGET_NR_getdents
/*
 * Implement getdents(2): read host directory entries (via getdents or
 * getdents64, depending on what the host offers) into a scratch buffer,
 * then convert each record to the guest's struct target_dirent layout in
 * the guest buffer at @arg2 (at most @count bytes).  Returns the number
 * of bytes stored in the guest buffer, or a -TARGET_* error.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off_t prev_diroff = 0;

    /* Scratch buffer for the host-format records. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records (hoff) and emit guest records (toff) in step. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: NUL terminator plus the trailing d_type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8838 #endif /* TARGET_NR_getdents */
8839 
8840 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement getdents64(2): read host linux_dirent64 records into a
 * scratch buffer and convert each one to the guest's
 * struct target_dirent64 layout in the guest buffer at @arg2 (at most
 * @count bytes).  Returns the number of bytes stored, or a -TARGET_*
 * error.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off_t prev_diroff = 0;

    /* Scratch buffer for the host-format records. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records (hoff) and emit guest records (toff) in step. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen here includes the NUL terminator. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8904 #endif /* TARGET_NR_getdents64 */
8905 
8906 #if defined(TARGET_NR_riscv_hwprobe)
8907 
8908 #define RISCV_HWPROBE_KEY_MVENDORID     0
8909 #define RISCV_HWPROBE_KEY_MARCHID       1
8910 #define RISCV_HWPROBE_KEY_MIMPID        2
8911 
8912 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8913 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8914 
8915 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8916 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8917 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8918 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8919 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8920 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8921 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8922 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8923 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8924 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8925 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8926 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8927 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8928 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8929 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8930 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8931 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8932 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8933 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8934 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8935 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8936 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8937 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8938 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8939 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8940 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8941 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8942 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8943 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8944 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8945 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8946 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8947 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
8948 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8949 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8950 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8951 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8952 
8953 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8954 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8955 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8956 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8957 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8958 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8959 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8960 
8961 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8962 
/* Guest-layout key/value pair exchanged by the riscv_hwprobe syscall. */
struct riscv_hwprobe {
    abi_llong  key;    /* probe key; rewritten to -1 if unrecognized */
    abi_ullong value;  /* probe result; 0 unless the key is known */
};
8967 
/*
 * Answer an array of riscv_hwprobe key/value pairs in place: for each
 * recognized key the value is filled from the vCPU configuration; an
 * unrecognized key has its key field rewritten to -1 (value left 0),
 * matching kernel behavior.
 */
static void risc_hwprobe_fill_pairs(CPURISCVState *env,
                                    struct riscv_hwprobe *pair,
                                    size_t pair_count)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);

    for (; pair_count > 0; pair_count--, pair++) {
        abi_llong key;
        abi_ullong value;
        /* Default every answer to 0 before inspecting the key. */
        __put_user(0, &pair->value);
        __get_user(key, &pair->key);
        switch (key) {
        case RISCV_HWPROBE_KEY_MVENDORID:
            __put_user(cfg->mvendorid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MARCHID:
            __put_user(cfg->marchid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MIMPID:
            __put_user(cfg->mimpid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
            /* IMA base behavior requires all of RVI, RVM and RVA. */
            value = riscv_has_ext(env, RVI) &&
                    riscv_has_ext(env, RVM) &&
                    riscv_has_ext(env, RVA) ?
                    RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_IMA_EXT_0:
            /* Accumulate one flag bit per supported ISA extension. */
            value = riscv_has_ext(env, RVF) &&
                    riscv_has_ext(env, RVD) ?
                    RISCV_HWPROBE_IMA_FD : 0;
            value |= riscv_has_ext(env, RVC) ?
                     RISCV_HWPROBE_IMA_C : 0;
            value |= riscv_has_ext(env, RVV) ?
                     RISCV_HWPROBE_IMA_V : 0;
            value |= cfg->ext_zba ?
                     RISCV_HWPROBE_EXT_ZBA : 0;
            value |= cfg->ext_zbb ?
                     RISCV_HWPROBE_EXT_ZBB : 0;
            value |= cfg->ext_zbs ?
                     RISCV_HWPROBE_EXT_ZBS : 0;
            value |= cfg->ext_zicboz ?
                     RISCV_HWPROBE_EXT_ZICBOZ : 0;
            value |= cfg->ext_zbc ?
                     RISCV_HWPROBE_EXT_ZBC : 0;
            value |= cfg->ext_zbkb ?
                     RISCV_HWPROBE_EXT_ZBKB : 0;
            value |= cfg->ext_zbkc ?
                     RISCV_HWPROBE_EXT_ZBKC : 0;
            value |= cfg->ext_zbkx ?
                     RISCV_HWPROBE_EXT_ZBKX : 0;
            value |= cfg->ext_zknd ?
                     RISCV_HWPROBE_EXT_ZKND : 0;
            value |= cfg->ext_zkne ?
                     RISCV_HWPROBE_EXT_ZKNE : 0;
            value |= cfg->ext_zknh ?
                     RISCV_HWPROBE_EXT_ZKNH : 0;
            value |= cfg->ext_zksed ?
                     RISCV_HWPROBE_EXT_ZKSED : 0;
            value |= cfg->ext_zksh ?
                     RISCV_HWPROBE_EXT_ZKSH : 0;
            value |= cfg->ext_zkt ?
                     RISCV_HWPROBE_EXT_ZKT : 0;
            value |= cfg->ext_zvbb ?
                     RISCV_HWPROBE_EXT_ZVBB : 0;
            value |= cfg->ext_zvbc ?
                     RISCV_HWPROBE_EXT_ZVBC : 0;
            value |= cfg->ext_zvkb ?
                     RISCV_HWPROBE_EXT_ZVKB : 0;
            value |= cfg->ext_zvkg ?
                     RISCV_HWPROBE_EXT_ZVKG : 0;
            value |= cfg->ext_zvkned ?
                     RISCV_HWPROBE_EXT_ZVKNED : 0;
            value |= cfg->ext_zvknha ?
                     RISCV_HWPROBE_EXT_ZVKNHA : 0;
            value |= cfg->ext_zvknhb ?
                     RISCV_HWPROBE_EXT_ZVKNHB : 0;
            value |= cfg->ext_zvksed ?
                     RISCV_HWPROBE_EXT_ZVKSED : 0;
            value |= cfg->ext_zvksh ?
                     RISCV_HWPROBE_EXT_ZVKSH : 0;
            value |= cfg->ext_zvkt ?
                     RISCV_HWPROBE_EXT_ZVKT : 0;
            value |= cfg->ext_zfh ?
                     RISCV_HWPROBE_EXT_ZFH : 0;
            value |= cfg->ext_zfhmin ?
                     RISCV_HWPROBE_EXT_ZFHMIN : 0;
            value |= cfg->ext_zihintntl ?
                     RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
            value |= cfg->ext_zvfh ?
                     RISCV_HWPROBE_EXT_ZVFH : 0;
            value |= cfg->ext_zvfhmin ?
                     RISCV_HWPROBE_EXT_ZVFHMIN : 0;
            value |= cfg->ext_zfa ?
                     RISCV_HWPROBE_EXT_ZFA : 0;
            value |= cfg->ext_ztso ?
                     RISCV_HWPROBE_EXT_ZTSO : 0;
            value |= cfg->ext_zacas ?
                     RISCV_HWPROBE_EXT_ZACAS : 0;
            value |= cfg->ext_zicond ?
                     RISCV_HWPROBE_EXT_ZICOND : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_CPUPERF_0:
            /* TCG emulates misaligned accesses without a penalty. */
            __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
            value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
            __put_user(value, &pair->value);
            break;
        default:
            /* Unknown key: mark it so the guest can tell. */
            __put_user(-1, &pair->key);
            break;
        }
    }
}
9085 
/*
 * Validate the (cpusetsize, mask pointer) pair of a riscv_hwprobe call:
 * copy the guest CPU mask into a host-format mask and check that at
 * least one CPU bit is set.  Returns 0 if the set is usable, or a
 * -TARGET_* error code.
 */
static int cpu_set_valid(abi_long arg3, abi_long arg4)
{
    int ret, i, tmp;
    size_t host_mask_size, target_mask_size;
    g_autofree unsigned long *host_mask = NULL;

    /*
     * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
     * arg3 contains the cpu count.
     */
    tmp = (8 * sizeof(abi_ulong));
    target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
    host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
                     ~(sizeof(*host_mask) - 1);

    /*
     * The size is guest-controlled: allocate on the heap rather than
     * with alloca() so a huge cpu count cannot overflow the host stack.
     */
    host_mask = g_malloc0(host_mask_size);

    ret = target_to_host_cpu_mask(host_mask, host_mask_size,
                                  arg4, target_mask_size);
    if (ret != 0) {
        return ret;
    }

    /* The set is valid if any CPU bit is set. */
    for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
        if (host_mask[i] != 0) {
            return 0;
        }
    }
    return -TARGET_EINVAL;
}
9116 
/*
 * Implement the riscv_hwprobe syscall.  Arguments follow the kernel ABI:
 * arg1 = pairs, arg2 = pair_count, arg3 = cpusetsize, arg4 = cpus,
 * arg5 = flags.  Returns 0 or a -TARGET_* error code.
 */
static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
                                 abi_long arg2, abi_long arg3,
                                 abi_long arg4, abi_long arg5)
{
    struct riscv_hwprobe *host_pairs;
    size_t bytes;

    /* No flags are defined; anything else is invalid. */
    if (arg5) {
        return -TARGET_EINVAL;
    }

    /* A CPU mask is only meaningful with a non-zero set size. */
    if (arg3) {
        int ret = cpu_set_valid(arg3, arg4);
        if (ret) {
            return ret;
        }
    } else if (arg4) {
        return -TARGET_EINVAL;
    }

    /* Zero pairs is a successful no-op. */
    if (!arg2) {
        return 0;
    }

    bytes = sizeof(*host_pairs) * (size_t)arg2;
    host_pairs = lock_user(VERIFY_WRITE, arg1, bytes, 0);
    if (!host_pairs) {
        return -TARGET_EFAULT;
    }
    risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
    unlock_user(host_pairs, arg1, bytes);
    return 0;
}
9153 #endif /* TARGET_NR_riscv_hwprobe */
9154 
9155 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int,pivot_root,const char *,new_root,const char *,put_old)9156 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9157 #endif
9158 
9159 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9160 #define __NR_sys_open_tree __NR_open_tree
9161 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9162           unsigned int, __flags)
9163 #endif
9164 
9165 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9166 #define __NR_sys_move_mount __NR_move_mount
9167 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9168            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9169 #endif
9170 
9171 /* This is an internal helper for do_syscall so that it is easier
9172  * to have a single return point, so that actions, such as logging
9173  * of syscall results, can be performed.
9174  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9175  */
9176 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9177                             abi_long arg2, abi_long arg3, abi_long arg4,
9178                             abi_long arg5, abi_long arg6, abi_long arg7,
9179                             abi_long arg8)
9180 {
9181     CPUState *cpu = env_cpu(cpu_env);
9182     abi_long ret;
9183 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9184     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9185     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9186     || defined(TARGET_NR_statx)
9187     struct stat st;
9188 #endif
9189 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9190     || defined(TARGET_NR_fstatfs)
9191     struct statfs stfs;
9192 #endif
9193     void *p;
9194 
9195     switch(num) {
9196     case TARGET_NR_exit:
9197         /* In old applications this may be used to implement _exit(2).
9198            However in threaded applications it is used for thread termination,
9199            and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
9201 
9202         if (block_signals()) {
9203             return -QEMU_ERESTARTSYS;
9204         }
9205 
9206         pthread_mutex_lock(&clone_lock);
9207 
9208         if (CPU_NEXT(first_cpu)) {
9209             TaskState *ts = get_task_state(cpu);
9210 
9211             if (ts->child_tidptr) {
9212                 put_user_u32(0, ts->child_tidptr);
9213                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9214                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9215             }
9216 
9217             object_unparent(OBJECT(cpu));
9218             object_unref(OBJECT(cpu));
9219             /*
9220              * At this point the CPU should be unrealized and removed
9221              * from cpu lists. We can clean-up the rest of the thread
9222              * data without the lock held.
9223              */
9224 
9225             pthread_mutex_unlock(&clone_lock);
9226 
9227             thread_cpu = NULL;
9228             g_free(ts);
9229             rcu_unregister_thread();
9230             pthread_exit(NULL);
9231         }
9232 
9233         pthread_mutex_unlock(&clone_lock);
9234         preexit_cleanup(cpu_env, arg1);
9235         _exit(arg1);
9236         return 0; /* avoid warning */
9237     case TARGET_NR_read:
9238         if (arg2 == 0 && arg3 == 0) {
9239             return get_errno(safe_read(arg1, 0, 0));
9240         } else {
9241             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9242                 return -TARGET_EFAULT;
9243             ret = get_errno(safe_read(arg1, p, arg3));
9244             if (ret >= 0 &&
9245                 fd_trans_host_to_target_data(arg1)) {
9246                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9247             }
9248             unlock_user(p, arg2, ret);
9249         }
9250         return ret;
9251     case TARGET_NR_write:
9252         if (arg2 == 0 && arg3 == 0) {
9253             return get_errno(safe_write(arg1, 0, 0));
9254         }
9255         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9256             return -TARGET_EFAULT;
9257         if (fd_trans_target_to_host_data(arg1)) {
9258             void *copy = g_malloc(arg3);
9259             memcpy(copy, p, arg3);
9260             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9261             if (ret >= 0) {
9262                 ret = get_errno(safe_write(arg1, copy, ret));
9263             }
9264             g_free(copy);
9265         } else {
9266             ret = get_errno(safe_write(arg1, p, arg3));
9267         }
9268         unlock_user(p, arg2, 0);
9269         return ret;
9270 
9271 #ifdef TARGET_NR_open
9272     case TARGET_NR_open:
9273         if (!(p = lock_user_string(arg1)))
9274             return -TARGET_EFAULT;
9275         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9276                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9277                                   arg3, true));
9278         fd_trans_unregister(ret);
9279         unlock_user(p, arg1, 0);
9280         return ret;
9281 #endif
9282     case TARGET_NR_openat:
9283         if (!(p = lock_user_string(arg2)))
9284             return -TARGET_EFAULT;
9285         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9286                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9287                                   arg4, true));
9288         fd_trans_unregister(ret);
9289         unlock_user(p, arg2, 0);
9290         return ret;
9291     case TARGET_NR_openat2:
9292         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9293         return ret;
9294 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9295     case TARGET_NR_name_to_handle_at:
9296         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9297         return ret;
9298 #endif
9299 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9300     case TARGET_NR_open_by_handle_at:
9301         ret = do_open_by_handle_at(arg1, arg2, arg3);
9302         fd_trans_unregister(ret);
9303         return ret;
9304 #endif
9305 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9306     case TARGET_NR_pidfd_open:
9307         return get_errno(pidfd_open(arg1, arg2));
9308 #endif
9309 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9310     case TARGET_NR_pidfd_send_signal:
9311         {
9312             siginfo_t uinfo, *puinfo;
9313 
9314             if (arg3) {
9315                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9316                 if (!p) {
9317                     return -TARGET_EFAULT;
9318                  }
9319                  target_to_host_siginfo(&uinfo, p);
9320                  unlock_user(p, arg3, 0);
9321                  puinfo = &uinfo;
9322             } else {
9323                  puinfo = NULL;
9324             }
9325             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9326                                               puinfo, arg4));
9327         }
9328         return ret;
9329 #endif
9330 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9331     case TARGET_NR_pidfd_getfd:
9332         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9333 #endif
9334     case TARGET_NR_close:
9335         fd_trans_unregister(arg1);
9336         return get_errno(close(arg1));
9337 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9338     case TARGET_NR_close_range:
9339         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9340         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9341             abi_long fd, maxfd;
9342             maxfd = MIN(arg2, target_fd_max);
9343             for (fd = arg1; fd < maxfd; fd++) {
9344                 fd_trans_unregister(fd);
9345             }
9346         }
9347         return ret;
9348 #endif
9349 
9350     case TARGET_NR_brk:
9351         return do_brk(arg1);
9352 #ifdef TARGET_NR_fork
9353     case TARGET_NR_fork:
9354         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9355 #endif
9356 #ifdef TARGET_NR_waitpid
9357     case TARGET_NR_waitpid:
9358         {
9359             int status;
9360             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9361             if (!is_error(ret) && arg2 && ret
9362                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9363                 return -TARGET_EFAULT;
9364         }
9365         return ret;
9366 #endif
9367 #ifdef TARGET_NR_waitid
9368     case TARGET_NR_waitid:
9369         {
9370             struct rusage ru;
9371             siginfo_t info;
9372 
9373             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9374                                         arg4, (arg5 ? &ru : NULL)));
9375             if (!is_error(ret)) {
9376                 if (arg3) {
9377                     p = lock_user(VERIFY_WRITE, arg3,
9378                                   sizeof(target_siginfo_t), 0);
9379                     if (!p) {
9380                         return -TARGET_EFAULT;
9381                     }
9382                     host_to_target_siginfo(p, &info);
9383                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9384                 }
9385                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9386                     return -TARGET_EFAULT;
9387                 }
9388             }
9389         }
9390         return ret;
9391 #endif
9392 #ifdef TARGET_NR_creat /* not on alpha */
9393     case TARGET_NR_creat:
9394         if (!(p = lock_user_string(arg1)))
9395             return -TARGET_EFAULT;
9396         ret = get_errno(creat(p, arg2));
9397         fd_trans_unregister(ret);
9398         unlock_user(p, arg1, 0);
9399         return ret;
9400 #endif
9401 #ifdef TARGET_NR_link
9402     case TARGET_NR_link:
9403         {
9404             void * p2;
9405             p = lock_user_string(arg1);
9406             p2 = lock_user_string(arg2);
9407             if (!p || !p2)
9408                 ret = -TARGET_EFAULT;
9409             else
9410                 ret = get_errno(link(p, p2));
9411             unlock_user(p2, arg2, 0);
9412             unlock_user(p, arg1, 0);
9413         }
9414         return ret;
9415 #endif
9416 #if defined(TARGET_NR_linkat)
9417     case TARGET_NR_linkat:
9418         {
9419             void * p2 = NULL;
9420             if (!arg2 || !arg4)
9421                 return -TARGET_EFAULT;
9422             p  = lock_user_string(arg2);
9423             p2 = lock_user_string(arg4);
9424             if (!p || !p2)
9425                 ret = -TARGET_EFAULT;
9426             else
9427                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9428             unlock_user(p, arg2, 0);
9429             unlock_user(p2, arg4, 0);
9430         }
9431         return ret;
9432 #endif
9433 #ifdef TARGET_NR_unlink
9434     case TARGET_NR_unlink:
9435         if (!(p = lock_user_string(arg1)))
9436             return -TARGET_EFAULT;
9437         ret = get_errno(unlink(p));
9438         unlock_user(p, arg1, 0);
9439         return ret;
9440 #endif
9441 #if defined(TARGET_NR_unlinkat)
9442     case TARGET_NR_unlinkat:
9443         if (!(p = lock_user_string(arg2)))
9444             return -TARGET_EFAULT;
9445         ret = get_errno(unlinkat(arg1, p, arg3));
9446         unlock_user(p, arg2, 0);
9447         return ret;
9448 #endif
9449     case TARGET_NR_execveat:
9450         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9451     case TARGET_NR_execve:
9452         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9453     case TARGET_NR_chdir:
9454         if (!(p = lock_user_string(arg1)))
9455             return -TARGET_EFAULT;
9456         ret = get_errno(chdir(p));
9457         unlock_user(p, arg1, 0);
9458         return ret;
9459 #ifdef TARGET_NR_time
9460     case TARGET_NR_time:
9461         {
9462             time_t host_time;
9463             ret = get_errno(time(&host_time));
9464             if (!is_error(ret)
9465                 && arg1
9466                 && put_user_sal(host_time, arg1))
9467                 return -TARGET_EFAULT;
9468         }
9469         return ret;
9470 #endif
9471 #ifdef TARGET_NR_mknod
9472     case TARGET_NR_mknod:
9473         if (!(p = lock_user_string(arg1)))
9474             return -TARGET_EFAULT;
9475         ret = get_errno(mknod(p, arg2, arg3));
9476         unlock_user(p, arg1, 0);
9477         return ret;
9478 #endif
9479 #if defined(TARGET_NR_mknodat)
9480     case TARGET_NR_mknodat:
9481         if (!(p = lock_user_string(arg2)))
9482             return -TARGET_EFAULT;
9483         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9484         unlock_user(p, arg2, 0);
9485         return ret;
9486 #endif
9487 #ifdef TARGET_NR_chmod
9488     case TARGET_NR_chmod:
9489         if (!(p = lock_user_string(arg1)))
9490             return -TARGET_EFAULT;
9491         ret = get_errno(chmod(p, arg2));
9492         unlock_user(p, arg1, 0);
9493         return ret;
9494 #endif
9495 #ifdef TARGET_NR_lseek
9496     case TARGET_NR_lseek:
9497         return get_errno(lseek(arg1, arg2, arg3));
9498 #endif
9499 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9500     /* Alpha specific */
9501     case TARGET_NR_getxpid:
9502         cpu_env->ir[IR_A4] = getppid();
9503         return get_errno(getpid());
9504 #endif
9505 #ifdef TARGET_NR_getpid
9506     case TARGET_NR_getpid:
9507         return get_errno(getpid());
9508 #endif
9509     case TARGET_NR_mount:
9510         {
9511             /* need to look at the data field */
9512             void *p2, *p3;
9513 
9514             if (arg1) {
9515                 p = lock_user_string(arg1);
9516                 if (!p) {
9517                     return -TARGET_EFAULT;
9518                 }
9519             } else {
9520                 p = NULL;
9521             }
9522 
9523             p2 = lock_user_string(arg2);
9524             if (!p2) {
9525                 if (arg1) {
9526                     unlock_user(p, arg1, 0);
9527                 }
9528                 return -TARGET_EFAULT;
9529             }
9530 
9531             if (arg3) {
9532                 p3 = lock_user_string(arg3);
9533                 if (!p3) {
9534                     if (arg1) {
9535                         unlock_user(p, arg1, 0);
9536                     }
9537                     unlock_user(p2, arg2, 0);
9538                     return -TARGET_EFAULT;
9539                 }
9540             } else {
9541                 p3 = NULL;
9542             }
9543 
9544             /* FIXME - arg5 should be locked, but it isn't clear how to
9545              * do that since it's not guaranteed to be a NULL-terminated
9546              * string.
9547              */
9548             if (!arg5) {
9549                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9550             } else {
9551                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9552             }
9553             ret = get_errno(ret);
9554 
9555             if (arg1) {
9556                 unlock_user(p, arg1, 0);
9557             }
9558             unlock_user(p2, arg2, 0);
9559             if (arg3) {
9560                 unlock_user(p3, arg3, 0);
9561             }
9562         }
9563         return ret;
9564 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9565 #if defined(TARGET_NR_umount)
9566     case TARGET_NR_umount:
9567 #endif
9568 #if defined(TARGET_NR_oldumount)
9569     case TARGET_NR_oldumount:
9570 #endif
9571         if (!(p = lock_user_string(arg1)))
9572             return -TARGET_EFAULT;
9573         ret = get_errno(umount(p));
9574         unlock_user(p, arg1, 0);
9575         return ret;
9576 #endif
9577 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9578     case TARGET_NR_move_mount:
9579         {
9580             void *p2, *p4;
9581 
9582             if (!arg2 || !arg4) {
9583                 return -TARGET_EFAULT;
9584             }
9585 
9586             p2 = lock_user_string(arg2);
9587             if (!p2) {
9588                 return -TARGET_EFAULT;
9589             }
9590 
9591             p4 = lock_user_string(arg4);
9592             if (!p4) {
9593                 unlock_user(p2, arg2, 0);
9594                 return -TARGET_EFAULT;
9595             }
9596             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9597 
9598             unlock_user(p2, arg2, 0);
9599             unlock_user(p4, arg4, 0);
9600 
9601             return ret;
9602         }
9603 #endif
9604 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9605     case TARGET_NR_open_tree:
9606         {
9607             void *p2;
9608             int host_flags;
9609 
9610             if (!arg2) {
9611                 return -TARGET_EFAULT;
9612             }
9613 
9614             p2 = lock_user_string(arg2);
9615             if (!p2) {
9616                 return -TARGET_EFAULT;
9617             }
9618 
9619             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9620             if (arg3 & TARGET_O_CLOEXEC) {
9621                 host_flags |= O_CLOEXEC;
9622             }
9623 
9624             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9625 
9626             unlock_user(p2, arg2, 0);
9627 
9628             return ret;
9629         }
9630 #endif
9631 #ifdef TARGET_NR_stime /* not on alpha */
9632     case TARGET_NR_stime:
9633         {
9634             struct timespec ts;
9635             ts.tv_nsec = 0;
9636             if (get_user_sal(ts.tv_sec, arg1)) {
9637                 return -TARGET_EFAULT;
9638             }
9639             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9640         }
9641 #endif
9642 #ifdef TARGET_NR_alarm /* not on alpha */
9643     case TARGET_NR_alarm:
9644         return alarm(arg1);
9645 #endif
9646 #ifdef TARGET_NR_pause /* not on alpha */
9647     case TARGET_NR_pause:
9648         if (!block_signals()) {
9649             sigsuspend(&get_task_state(cpu)->signal_mask);
9650         }
9651         return -TARGET_EINTR;
9652 #endif
9653 #ifdef TARGET_NR_utime
9654     case TARGET_NR_utime:
9655         {
9656             struct utimbuf tbuf, *host_tbuf;
9657             struct target_utimbuf *target_tbuf;
9658             if (arg2) {
9659                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9660                     return -TARGET_EFAULT;
9661                 tbuf.actime = tswapal(target_tbuf->actime);
9662                 tbuf.modtime = tswapal(target_tbuf->modtime);
9663                 unlock_user_struct(target_tbuf, arg2, 0);
9664                 host_tbuf = &tbuf;
9665             } else {
9666                 host_tbuf = NULL;
9667             }
9668             if (!(p = lock_user_string(arg1)))
9669                 return -TARGET_EFAULT;
9670             ret = get_errno(utime(p, host_tbuf));
9671             unlock_user(p, arg1, 0);
9672         }
9673         return ret;
9674 #endif
9675 #ifdef TARGET_NR_utimes
9676     case TARGET_NR_utimes:
9677         {
9678             struct timeval *tvp, tv[2];
9679             if (arg2) {
9680                 if (copy_from_user_timeval(&tv[0], arg2)
9681                     || copy_from_user_timeval(&tv[1],
9682                                               arg2 + sizeof(struct target_timeval)))
9683                     return -TARGET_EFAULT;
9684                 tvp = tv;
9685             } else {
9686                 tvp = NULL;
9687             }
9688             if (!(p = lock_user_string(arg1)))
9689                 return -TARGET_EFAULT;
9690             ret = get_errno(utimes(p, tvp));
9691             unlock_user(p, arg1, 0);
9692         }
9693         return ret;
9694 #endif
9695 #if defined(TARGET_NR_futimesat)
9696     case TARGET_NR_futimesat:
9697         {
9698             struct timeval *tvp, tv[2];
9699             if (arg3) {
9700                 if (copy_from_user_timeval(&tv[0], arg3)
9701                     || copy_from_user_timeval(&tv[1],
9702                                               arg3 + sizeof(struct target_timeval)))
9703                     return -TARGET_EFAULT;
9704                 tvp = tv;
9705             } else {
9706                 tvp = NULL;
9707             }
9708             if (!(p = lock_user_string(arg2))) {
9709                 return -TARGET_EFAULT;
9710             }
9711             ret = get_errno(futimesat(arg1, path(p), tvp));
9712             unlock_user(p, arg2, 0);
9713         }
9714         return ret;
9715 #endif
9716 #ifdef TARGET_NR_access
9717     case TARGET_NR_access:
9718         if (!(p = lock_user_string(arg1))) {
9719             return -TARGET_EFAULT;
9720         }
9721         ret = get_errno(access(path(p), arg2));
9722         unlock_user(p, arg1, 0);
9723         return ret;
9724 #endif
9725 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9726     case TARGET_NR_faccessat:
9727         if (!(p = lock_user_string(arg2))) {
9728             return -TARGET_EFAULT;
9729         }
9730         ret = get_errno(faccessat(arg1, p, arg3, 0));
9731         unlock_user(p, arg2, 0);
9732         return ret;
9733 #endif
9734 #if defined(TARGET_NR_faccessat2)
9735     case TARGET_NR_faccessat2:
9736         if (!(p = lock_user_string(arg2))) {
9737             return -TARGET_EFAULT;
9738         }
9739         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9740         unlock_user(p, arg2, 0);
9741         return ret;
9742 #endif
9743 #ifdef TARGET_NR_nice /* not on alpha */
9744     case TARGET_NR_nice:
9745         return get_errno(nice(arg1));
9746 #endif
9747     case TARGET_NR_sync:
9748         sync();
9749         return 0;
9750 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9751     case TARGET_NR_syncfs:
9752         return get_errno(syncfs(arg1));
9753 #endif
9754     case TARGET_NR_kill:
9755         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9756 #ifdef TARGET_NR_rename
9757     case TARGET_NR_rename:
9758         {
9759             void *p2;
9760             p = lock_user_string(arg1);
9761             p2 = lock_user_string(arg2);
9762             if (!p || !p2)
9763                 ret = -TARGET_EFAULT;
9764             else
9765                 ret = get_errno(rename(p, p2));
9766             unlock_user(p2, arg2, 0);
9767             unlock_user(p, arg1, 0);
9768         }
9769         return ret;
9770 #endif
9771 #if defined(TARGET_NR_renameat)
9772     case TARGET_NR_renameat:
9773         {
9774             void *p2;
9775             p  = lock_user_string(arg2);
9776             p2 = lock_user_string(arg4);
9777             if (!p || !p2)
9778                 ret = -TARGET_EFAULT;
9779             else
9780                 ret = get_errno(renameat(arg1, p, arg3, p2));
9781             unlock_user(p2, arg4, 0);
9782             unlock_user(p, arg2, 0);
9783         }
9784         return ret;
9785 #endif
9786 #if defined(TARGET_NR_renameat2)
9787     case TARGET_NR_renameat2:
9788         {
9789             void *p2;
9790             p  = lock_user_string(arg2);
9791             p2 = lock_user_string(arg4);
9792             if (!p || !p2) {
9793                 ret = -TARGET_EFAULT;
9794             } else {
9795                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9796             }
9797             unlock_user(p2, arg4, 0);
9798             unlock_user(p, arg2, 0);
9799         }
9800         return ret;
9801 #endif
9802 #ifdef TARGET_NR_mkdir
9803     case TARGET_NR_mkdir:
9804         if (!(p = lock_user_string(arg1)))
9805             return -TARGET_EFAULT;
9806         ret = get_errno(mkdir(p, arg2));
9807         unlock_user(p, arg1, 0);
9808         return ret;
9809 #endif
9810 #if defined(TARGET_NR_mkdirat)
9811     case TARGET_NR_mkdirat:
9812         if (!(p = lock_user_string(arg2)))
9813             return -TARGET_EFAULT;
9814         ret = get_errno(mkdirat(arg1, p, arg3));
9815         unlock_user(p, arg2, 0);
9816         return ret;
9817 #endif
9818 #ifdef TARGET_NR_rmdir
9819     case TARGET_NR_rmdir:
9820         if (!(p = lock_user_string(arg1)))
9821             return -TARGET_EFAULT;
9822         ret = get_errno(rmdir(p));
9823         unlock_user(p, arg1, 0);
9824         return ret;
9825 #endif
9826     case TARGET_NR_dup:
9827         ret = get_errno(dup(arg1));
9828         if (ret >= 0) {
9829             fd_trans_dup(arg1, ret);
9830         }
9831         return ret;
9832 #ifdef TARGET_NR_pipe
9833     case TARGET_NR_pipe:
9834         return do_pipe(cpu_env, arg1, 0, 0);
9835 #endif
9836 #ifdef TARGET_NR_pipe2
9837     case TARGET_NR_pipe2:
9838         return do_pipe(cpu_env, arg1,
9839                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9840 #endif
9841     case TARGET_NR_times:
9842         {
9843             struct target_tms *tmsp;
9844             struct tms tms;
9845             ret = get_errno(times(&tms));
9846             if (arg1) {
9847                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9848                 if (!tmsp)
9849                     return -TARGET_EFAULT;
9850                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9851                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9852                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9853                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9854             }
9855             if (!is_error(ret))
9856                 ret = host_to_target_clock_t(ret);
9857         }
9858         return ret;
9859     case TARGET_NR_acct:
9860         if (arg1 == 0) {
9861             ret = get_errno(acct(NULL));
9862         } else {
9863             if (!(p = lock_user_string(arg1))) {
9864                 return -TARGET_EFAULT;
9865             }
9866             ret = get_errno(acct(path(p)));
9867             unlock_user(p, arg1, 0);
9868         }
9869         return ret;
9870 #ifdef TARGET_NR_umount2
9871     case TARGET_NR_umount2:
9872         if (!(p = lock_user_string(arg1)))
9873             return -TARGET_EFAULT;
9874         ret = get_errno(umount2(p, arg2));
9875         unlock_user(p, arg1, 0);
9876         return ret;
9877 #endif
9878     case TARGET_NR_ioctl:
9879         return do_ioctl(arg1, arg2, arg3);
9880 #ifdef TARGET_NR_fcntl
9881     case TARGET_NR_fcntl:
9882         return do_fcntl(arg1, arg2, arg3);
9883 #endif
9884     case TARGET_NR_setpgid:
9885         return get_errno(setpgid(arg1, arg2));
9886     case TARGET_NR_umask:
9887         return get_errno(umask(arg1));
9888     case TARGET_NR_chroot:
9889         if (!(p = lock_user_string(arg1)))
9890             return -TARGET_EFAULT;
9891         ret = get_errno(chroot(p));
9892         unlock_user(p, arg1, 0);
9893         return ret;
9894 #ifdef TARGET_NR_dup2
9895     case TARGET_NR_dup2:
9896         ret = get_errno(dup2(arg1, arg2));
9897         if (ret >= 0) {
9898             fd_trans_dup(arg1, arg2);
9899         }
9900         return ret;
9901 #endif
9902 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9903     case TARGET_NR_dup3:
9904     {
9905         int host_flags;
9906 
9907         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9908             return -EINVAL;
9909         }
9910         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9911         ret = get_errno(dup3(arg1, arg2, host_flags));
9912         if (ret >= 0) {
9913             fd_trans_dup(arg1, arg2);
9914         }
9915         return ret;
9916     }
9917 #endif
9918 #ifdef TARGET_NR_getppid /* not on alpha */
9919     case TARGET_NR_getppid:
9920         return get_errno(getppid());
9921 #endif
9922 #ifdef TARGET_NR_getpgrp
9923     case TARGET_NR_getpgrp:
9924         return get_errno(getpgrp());
9925 #endif
9926     case TARGET_NR_setsid:
9927         return get_errno(setsid());
9928 #ifdef TARGET_NR_sigaction
9929     case TARGET_NR_sigaction:
9930         {
9931 #if defined(TARGET_MIPS)
9932 	    struct target_sigaction act, oact, *pact, *old_act;
9933 
9934 	    if (arg2) {
9935                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9936                     return -TARGET_EFAULT;
9937 		act._sa_handler = old_act->_sa_handler;
9938 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9939 		act.sa_flags = old_act->sa_flags;
9940 		unlock_user_struct(old_act, arg2, 0);
9941 		pact = &act;
9942 	    } else {
9943 		pact = NULL;
9944 	    }
9945 
9946         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9947 
9948 	    if (!is_error(ret) && arg3) {
9949                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9950                     return -TARGET_EFAULT;
9951 		old_act->_sa_handler = oact._sa_handler;
9952 		old_act->sa_flags = oact.sa_flags;
9953 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9954 		old_act->sa_mask.sig[1] = 0;
9955 		old_act->sa_mask.sig[2] = 0;
9956 		old_act->sa_mask.sig[3] = 0;
9957 		unlock_user_struct(old_act, arg3, 1);
9958 	    }
9959 #else
9960             struct target_old_sigaction *old_act;
9961             struct target_sigaction act, oact, *pact;
9962             if (arg2) {
9963                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9964                     return -TARGET_EFAULT;
9965                 act._sa_handler = old_act->_sa_handler;
9966                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9967                 act.sa_flags = old_act->sa_flags;
9968 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9969                 act.sa_restorer = old_act->sa_restorer;
9970 #endif
9971                 unlock_user_struct(old_act, arg2, 0);
9972                 pact = &act;
9973             } else {
9974                 pact = NULL;
9975             }
9976             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9977             if (!is_error(ret) && arg3) {
9978                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9979                     return -TARGET_EFAULT;
9980                 old_act->_sa_handler = oact._sa_handler;
9981                 old_act->sa_mask = oact.sa_mask.sig[0];
9982                 old_act->sa_flags = oact.sa_flags;
9983 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9984                 old_act->sa_restorer = oact.sa_restorer;
9985 #endif
9986                 unlock_user_struct(old_act, arg3, 1);
9987             }
9988 #endif
9989         }
9990         return ret;
9991 #endif
9992     case TARGET_NR_rt_sigaction:
9993         {
9994             /*
9995              * For Alpha and SPARC this is a 5 argument syscall, with
9996              * a 'restorer' parameter which must be copied into the
9997              * sa_restorer field of the sigaction struct.
9998              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9999              * and arg5 is the sigsetsize.
10000              */
10001 #if defined(TARGET_ALPHA)
10002             target_ulong sigsetsize = arg4;
10003             target_ulong restorer = arg5;
10004 #elif defined(TARGET_SPARC)
10005             target_ulong restorer = arg4;
10006             target_ulong sigsetsize = arg5;
10007 #else
10008             target_ulong sigsetsize = arg4;
10009             target_ulong restorer = 0;
10010 #endif
10011             struct target_sigaction *act = NULL;
10012             struct target_sigaction *oact = NULL;
10013 
10014             if (sigsetsize != sizeof(target_sigset_t)) {
10015                 return -TARGET_EINVAL;
10016             }
10017             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10018                 return -TARGET_EFAULT;
10019             }
10020             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10021                 ret = -TARGET_EFAULT;
10022             } else {
10023                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10024                 if (oact) {
10025                     unlock_user_struct(oact, arg3, 1);
10026                 }
10027             }
10028             if (act) {
10029                 unlock_user_struct(act, arg2, 0);
10030             }
10031         }
10032         return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        /* Old-style sgetmask(): return the blocked-signal mask packed
         * into a single abi_ulong (old sigset format).
         */
        {
            sigset_t cur_set;
            abi_ulong target_set;
            /* how==0 with a NULL new-set pointer just queries the mask. */
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        /* Old-style ssetmask(): install arg1 as the new blocked mask and
         * return the previous mask, both in old (single-word) format.
         */
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha variant: the mask is passed by value in arg2 (not via a
             * pointer) and the old mask is returned as the syscall result,
             * with v0 cleared to signal success.
             */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic variant: arg2/arg3 are guest pointers to the new and
             * old sigsets in old (pre-rt) format.
             */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                /* 'how' only matters when a new set is supplied. */
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        /* rt_sigprocmask(how, set, oldset, sigsetsize): like sigprocmask
         * but with full-size sigsets; arg4 (sigsetsize) must match exactly.
         */
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                /* 'how' is only validated/translated when a new set is given. */
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        /* Old-style sigpending(): write the pending set to arg1 in old
         * sigset format.
         */
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        /* rt_sigpending(set, sigsetsize): full-size sigset variant. */
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        /* sigsuspend(): atomically install a temporary mask and wait for a
         * signal; the mask is restored by finish_sigsuspend_mask().
         */
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            /* Alpha passes the old-format mask by value in arg1. */
            TaskState *ts = get_task_state(cpu);
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            /* Other targets pass a guest pointer to an old-format sigset. */
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        /* rt_sigsuspend(set, sigsetsize): full-size sigset variant; the
         * size check is done inside process_sigsuspend_mask().
         */
        {
            sigset_t *set;

            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        /* rt_sigtimedwait(set, info, timeout, sigsetsize): wait for a signal
         * in 'set' with an optional timeout; writes the siginfo to arg2 and
         * returns the (target) signal number on success.
         */
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout: wait indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* Success value is a host signal number; translate it. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        /* Same as rt_sigtimedwait but the timeout is a 64-bit timespec. */
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        /* rt_sigqueueinfo(pid, sig, info): convert the guest siginfo and the
         * signal number to host format before queueing.
         */
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        /* rt_tgsigqueueinfo(tgid, tid, sig, info): thread-directed variant;
         * the siginfo is at arg4 and the signal number at arg3.
         */
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Restart the syscall if signals could not be blocked; the signal
         * frame teardown in do_sigreturn() must not race with delivery.
         */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        /* As above, for the rt signal frame. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        /* sethostname(name, len): pass the guest string through to the host. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        /* setrlimit(resource, rlim): convert guest limits to host format;
         * memory-related limits are deliberately not applied (see below).
         */
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Pretend success for memory limits (see comment above). */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        /* getrlimit(resource, rlim): fetch host limits and convert the
         * result back into the guest's rlimit layout at arg2.
         */
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        /* getrusage(who, usage): host call, then convert the rusage struct
         * into the guest layout at arg2.
         */
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        /* gettimeofday(tv, tz): either output pointer may be NULL; only
         * non-NULL ones are written back to the guest.
         */
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        /* settimeofday(tv, tz): both inputs are optional; NULL guest
         * pointers are forwarded to the host as NULL.
         */
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
        /* select(): which flavour depends on the target's historical ABI. */
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Single-argument form: arg1 points to a block of parameters. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        /* time64 variant: timeout is a 64-bit timespec. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        /* symlink(target, linkpath): lock both guest strings; EFAULT if
         * either pointer is bad.
         */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            /* unlock_user() is a no-op on a NULL host pointer. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        /* symlinkat(target, newdirfd, linkpath): as above, with the link
         * path relative to the directory fd in arg2.
         */
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        /* readlink(pathname, buf, bufsiz): resolve a symlink into the guest
         * buffer at arg2.  The actual work (including the /proc/self/exe
         * special case) is done by do_guest_readlink().
         */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                /*
                 * Fail with EFAULT on a bad guest pointer rather than
                 * handing a NULL pointer to do_guest_readlink(), matching
                 * the TARGET_NR_readlinkat handling.
                 */
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_guest_readlink(p, p2, arg3));
            }
            /* unlock_user() is a no-op on a NULL host pointer. */
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /* readlinkat(dirfd, pathname, buf, bufsiz): like readlink with the
         * path relative to dirfd; "/proc/self/exe" is answered from
         * exec_path instead of the host filesystem.
         */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* swapon(path, swapflags): pass the guest path through to the host. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* reboot(magic, magic2, cmd, arg): the string argument is only
         * defined for the RESTART2 command.
         */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        /* Old single-argument mmap: arg1 points to a 6-word parameter
         * block in guest memory (addr, len, prot, flags, fd, offset).
         */
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            return do_mmap(v1, v2, v3, v4, v5, v6);
        }
#else
        /* mmap pointers are always untagged */
        return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2's offset argument is in MMAP_SHIFT-sized (page) units;
         * widen before shifting to avoid truncation.
         */
        return do_mmap(arg1, arg2, arg3, arg4, arg5,
                       (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
#endif
    case TARGET_NR_munmap:
        /* Memory-management pointers are untagged before use. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = get_task_state(cpu);
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Extend the region down to the stack limit ourselves and
                 * drop PROT_GROWSDOWN, which qemu does not implement.
                 */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* Operate directly on the host address for the guest range. */
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* Flags are translated from the guest's MCL_* values. */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        /* truncate(path, length): pass the guest path through to the host. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* fchmodat(dirfd, path, mode): the flags argument to the host
         * wrapper is always 0 here.
         */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        /* statfs(path, buf): the conversion code below the convert_statfs
         * label is shared with TARGET_NR_fstatfs via goto.
         */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Copy the host statfs into the guest's target_statfs at arg2. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        /* fstatfs(fd, buf): reuse the statfs conversion above. */
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        /* statfs64(path, sz, buf): output buffer is at arg3 here; the
         * conversion is shared with TARGET_NR_fstatfs64 via goto.
         */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point; do_socketcall() demultiplexes
         * into the same do_* helpers used by the direct syscalls below.
         */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* No argument translation needed; call the host directly. */
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* getrandom(buf, buflen, flags): fill the guest buffer directly. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(type, bufp, len): arg1 selects the action, arg2 is the
             * guest buffer address and arg3 the buffer length, mirroring the
             * kernel's do_syslog(type, buf, len).  The length must therefore
             * come from arg3 — taking it from arg2 would validate the buffer
             * pointer instead of the length.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions do not use the buffer argument. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        /* Zero-length read: nothing to do, as the kernel. */
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, len, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, len));
                    unlock_user(p, arg2, len);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2: new timer value — a guest itimerval laid out as two
               consecutive target_timevals (it_interval, it_value); may be 0,
               in which case NULL is passed through to the host. */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* arg3: optional out pointer; the previous timer value is
               copied back only when the host call succeeded. */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            /* arg2 is the guest out pointer (two target_timevals back to
               back); skip the copy-out entirely when it is NULL. */
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        /* stat(path, statbuf): arg1 = guest path, arg2 = guest statbuf.
           The host result lands in 'st' (declared outside this view) and
           is converted by the shared do_stat tail below. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        /* Same as stat above, but does not follow a final symlink. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Shared tail: marshal the host struct stat into the guest's
               target_stat layout, field by field with byte swapping. */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding/unset target fields are defined. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                /* Nanosecond timestamps only when both host and target
                   declare the fields. */
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
10998     case TARGET_NR_vhangup:
10999         return get_errno(vhangup());
11000 #ifdef TARGET_NR_syscall
11001     case TARGET_NR_syscall:
11002         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11003                           arg6, arg7, arg8, 0);
11004 #endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            /* wait4(pid, status, options, rusage):
               arg1 = pid, arg2 = guest status out pointer,
               arg3 = options, arg4 = guest rusage out pointer. */
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status when a child was actually reaped
                   (ret != 0 — e.g. WNOHANG can legitimately return 0). */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    /* A failed rusage copy-out overrides the pid result. */
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
11034 #ifdef TARGET_NR_swapoff
11035     case TARGET_NR_swapoff:
11036         if (!(p = lock_user_string(arg1)))
11037             return -TARGET_EFAULT;
11038         ret = get_errno(swapoff(p));
11039         unlock_user(p, arg1, 0);
11040         return ret;
11041 #endif
    case TARGET_NR_sysinfo:
        {
            /* Fetch host sysinfo, then marshal it field by field into
               the guest's target_sysinfo layout at arg1 (if non-NULL). */
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
11069 #ifdef TARGET_NR_ipc
11070     case TARGET_NR_ipc:
11071         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11072 #endif
11073 #ifdef TARGET_NR_semget
11074     case TARGET_NR_semget:
11075         return get_errno(semget(arg1, arg2, arg3));
11076 #endif
11077 #ifdef TARGET_NR_semop
11078     case TARGET_NR_semop:
11079         return do_semtimedop(arg1, arg2, arg3, 0, false);
11080 #endif
11081 #ifdef TARGET_NR_semtimedop
11082     case TARGET_NR_semtimedop:
11083         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11084 #endif
11085 #ifdef TARGET_NR_semtimedop_time64
11086     case TARGET_NR_semtimedop_time64:
11087         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11088 #endif
11089 #ifdef TARGET_NR_semctl
11090     case TARGET_NR_semctl:
11091         return do_semctl(arg1, arg2, arg3, arg4);
11092 #endif
11093 #ifdef TARGET_NR_msgctl
11094     case TARGET_NR_msgctl:
11095         return do_msgctl(arg1, arg2, arg3);
11096 #endif
11097 #ifdef TARGET_NR_msgget
11098     case TARGET_NR_msgget:
11099         return get_errno(msgget(arg1, arg2));
11100 #endif
11101 #ifdef TARGET_NR_msgrcv
11102     case TARGET_NR_msgrcv:
11103         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11104 #endif
11105 #ifdef TARGET_NR_msgsnd
11106     case TARGET_NR_msgsnd:
11107         return do_msgsnd(arg1, arg2, arg3, arg4);
11108 #endif
11109 #ifdef TARGET_NR_shmget
11110     case TARGET_NR_shmget:
11111         return get_errno(shmget(arg1, arg2, arg3));
11112 #endif
11113 #ifdef TARGET_NR_shmctl
11114     case TARGET_NR_shmctl:
11115         return do_shmctl(arg1, arg2, arg3);
11116 #endif
11117 #ifdef TARGET_NR_shmat
11118     case TARGET_NR_shmat:
11119         return target_shmat(cpu_env, arg1, arg2, arg3);
11120 #endif
11121 #ifdef TARGET_NR_shmdt
11122     case TARGET_NR_shmdt:
11123         return target_shmdt(arg1);
11124 #endif
11125     case TARGET_NR_fsync:
11126         return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         * do_fork() expects one canonical ordering; each branch below
         * permutes the target registers into it — presumably
         * (flags, newsp, parent_tidptr, tls, child_tidptr); confirm
         * against do_fork()'s prototype before relying on this.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
11144 #ifdef __NR_exit_group
11145         /* new thread calls */
11146     case TARGET_NR_exit_group:
11147         preexit_cleanup(cpu_env, arg1);
11148         return get_errno(exit_group(arg1));
11149 #endif
11150     case TARGET_NR_setdomainname:
11151         if (!(p = lock_user_string(arg1)))
11152             return -TARGET_EFAULT;
11153         ret = get_errno(setdomainname(p, arg2));
11154         unlock_user(p, arg1, 0);
11155         return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            /* '1' => copy the (possibly edited) struct back to the guest. */
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
11178 #ifdef TARGET_I386
11179     case TARGET_NR_modify_ldt:
11180         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11181 #if !defined(TARGET_X86_64)
11182     case TARGET_NR_vm86:
11183         return do_vm86(cpu_env, arg1, arg2);
11184 #endif
11185 #endif
11186 #if defined(TARGET_NR_adjtimex)
11187     case TARGET_NR_adjtimex:
11188         {
11189             struct timex host_buf;
11190 
11191             if (target_to_host_timex(&host_buf, arg1) != 0) {
11192                 return -TARGET_EFAULT;
11193             }
11194             ret = get_errno(adjtimex(&host_buf));
11195             if (!is_error(ret)) {
11196                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11197                     return -TARGET_EFAULT;
11198                 }
11199             }
11200         }
11201         return ret;
11202 #endif
11203 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11204     case TARGET_NR_clock_adjtime:
11205         {
11206             struct timex htx;
11207 
11208             if (target_to_host_timex(&htx, arg2) != 0) {
11209                 return -TARGET_EFAULT;
11210             }
11211             ret = get_errno(clock_adjtime(arg1, &htx));
11212             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11213                 return -TARGET_EFAULT;
11214             }
11215         }
11216         return ret;
11217 #endif
11218 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11219     case TARGET_NR_clock_adjtime64:
11220         {
11221             struct timex htx;
11222 
11223             if (target_to_host_timex64(&htx, arg2) != 0) {
11224                 return -TARGET_EFAULT;
11225             }
11226             ret = get_errno(clock_adjtime(arg1, &htx));
11227             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11228                     return -TARGET_EFAULT;
11229             }
11230         }
11231         return ret;
11232 #endif
11233     case TARGET_NR_getpgid:
11234         return get_errno(getpgid(arg1));
11235     case TARGET_NR_fchdir:
11236         return get_errno(fchdir(arg1));
11237     case TARGET_NR_personality:
11238         return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            /* _llseek(fd, offset_high, offset_low, result, whence):
               arg1 = fd, arg2/arg3 = hi/lo halves of the 64-bit offset,
               arg4 = guest pointer receiving the resulting offset,
               arg5 = whence. */
            int64_t res;
#if !defined(__NR_llseek)
            /* Host lacks the llseek syscall (64-bit hosts): recombine
               the halves and use plain lseek instead. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, store the new offset at guest address arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
11259 #ifdef TARGET_NR_getdents
11260     case TARGET_NR_getdents:
11261         return do_getdents(arg1, arg2, arg3);
11262 #endif /* TARGET_NR_getdents */
11263 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11264     case TARGET_NR_getdents64:
11265         return do_getdents64(arg1, arg2, arg3);
11266 #endif /* TARGET_NR_getdents64 */
11267 #if defined(TARGET_NR__newselect)
11268     case TARGET_NR__newselect:
11269         return do_select(arg1, arg2, arg3, arg4, arg5);
11270 #endif
11271 #ifdef TARGET_NR_poll
11272     case TARGET_NR_poll:
11273         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11274 #endif
11275 #ifdef TARGET_NR_ppoll
11276     case TARGET_NR_ppoll:
11277         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11278 #endif
11279 #ifdef TARGET_NR_ppoll_time64
11280     case TARGET_NR_ppoll_time64:
11281         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11282 #endif
11283     case TARGET_NR_flock:
11284         /* NOTE: the flock constant seems to be the same for every
11285            Linux platform */
11286         return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* Lock the guest iovec array (arg2, arg3 entries) for writing:
               data flows from the host into guest memory. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                /* Final '1': copy the read data back to the guest. */
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports its failure through host errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            /* Lock the guest iovec array for reading: the buffers are
               copied in ('1') so the host write sees guest data. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                /* Final '0': nothing to copy back after a write. */
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec reports its failure through host errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
11309 #if defined(TARGET_NR_preadv)
11310     case TARGET_NR_preadv:
11311         {
11312             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11313             if (vec != NULL) {
11314                 unsigned long low, high;
11315 
11316                 target_to_host_low_high(arg4, arg5, &low, &high);
11317                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11318                 unlock_iovec(vec, arg2, arg3, 1);
11319             } else {
11320                 ret = -host_to_target_errno(errno);
11321            }
11322         }
11323         return ret;
11324 #endif
11325 #if defined(TARGET_NR_pwritev)
11326     case TARGET_NR_pwritev:
11327         {
11328             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11329             if (vec != NULL) {
11330                 unsigned long low, high;
11331 
11332                 target_to_host_low_high(arg4, arg5, &low, &high);
11333                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11334                 unlock_iovec(vec, arg2, arg3, 0);
11335             } else {
11336                 ret = -host_to_target_errno(errno);
11337            }
11338         }
11339         return ret;
11340 #endif
11341     case TARGET_NR_getsid:
11342         return get_errno(getsid(arg1));
11343 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11344     case TARGET_NR_fdatasync:
11345         return get_errno(fdatasync(arg1));
11346 #endif
    case TARGET_NR_sched_getaffinity:
        {
            /* sched_getaffinity(pid, cpusetsize, mask):
               arg1 = pid, arg2 = guest buffer size in bytes,
               arg3 = guest cpu_set_t pointer. */
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the requested size up to a whole number of host longs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            /* sched_setaffinity(pid, cpusetsize, mask):
               arg1 = pid, arg2 = guest buffer size in bytes,
               arg3 = guest cpu_set_t pointer. */
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the requested size up to a whole number of host longs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Convert the guest mask into the host's layout before the
               real syscall; failures (e.g. EFAULT) propagate directly. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            /* getcpu(cpu, node): arg1/arg2 are guest out pointers, either
               of which may be NULL; pass NULL through to the host so it
               skips the corresponding result. */
            unsigned cpuid, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpuid, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
11426     case TARGET_NR_sched_setparam:
11427         {
11428             struct target_sched_param *target_schp;
11429             struct sched_param schp;
11430 
11431             if (arg2 == 0) {
11432                 return -TARGET_EINVAL;
11433             }
11434             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11435                 return -TARGET_EFAULT;
11436             }
11437             schp.sched_priority = tswap32(target_schp->sched_priority);
11438             unlock_user_struct(target_schp, arg2, 0);
11439             return get_errno(sys_sched_setparam(arg1, &schp));
11440         }
11441     case TARGET_NR_sched_getparam:
11442         {
11443             struct target_sched_param *target_schp;
11444             struct sched_param schp;
11445 
11446             if (arg2 == 0) {
11447                 return -TARGET_EINVAL;
11448             }
11449             ret = get_errno(sys_sched_getparam(arg1, &schp));
11450             if (!is_error(ret)) {
11451                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11452                     return -TARGET_EFAULT;
11453                 }
11454                 target_schp->sched_priority = tswap32(schp.sched_priority);
11455                 unlock_user_struct(target_schp, arg2, 1);
11456             }
11457         }
11458         return ret;
11459     case TARGET_NR_sched_setscheduler:
11460         {
11461             struct target_sched_param *target_schp;
11462             struct sched_param schp;
11463             if (arg3 == 0) {
11464                 return -TARGET_EINVAL;
11465             }
11466             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11467                 return -TARGET_EFAULT;
11468             }
11469             schp.sched_priority = tswap32(target_schp->sched_priority);
11470             unlock_user_struct(target_schp, arg3, 0);
11471             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11472         }
11473     case TARGET_NR_sched_getscheduler:
11474         return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            /* sched_getattr(pid, attr, size, flags):
               arg1 = pid, arg2 = guest sched_attr pointer,
               arg3 = guest-provided struct size, arg4 = flags. */
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Clamp so the host kernel never writes past our local copy. */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* Utilization clamps exist only in newer kernels; copy
                   them only when the kernel says the struct includes them. */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            /* sched_setattr(pid, attr, flags):
               arg1 = pid, arg2 = guest sched_attr pointer, arg3 = flags.
               The struct is self-sizing: its first u32 holds the size the
               guest thinks the struct has. */
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            /* size == 0 means "the original (pre-util-clamp) struct". */
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            /* A size below the original struct is invalid; report the
               size we expect back to the guest, as the kernel does. */
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            /* If the guest claims a larger struct, the bytes beyond what
               we understand must all be zero (forward compatibility). */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            /* Utilization clamps are only present in the extended struct. */
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
11561     case TARGET_NR_sched_yield:
11562         return get_errno(sched_yield());
11563     case TARGET_NR_sched_get_priority_max:
11564         return get_errno(sched_get_priority_max(arg1));
11565     case TARGET_NR_sched_get_priority_min:
11566         return get_errno(sched_get_priority_min(arg1));
11567 #ifdef TARGET_NR_sched_rr_get_interval
11568     case TARGET_NR_sched_rr_get_interval:
11569         {
11570             struct timespec ts;
11571             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11572             if (!is_error(ret)) {
11573                 ret = host_to_target_timespec(arg2, &ts);
11574             }
11575         }
11576         return ret;
11577 #endif
11578 #ifdef TARGET_NR_sched_rr_get_interval_time64
11579     case TARGET_NR_sched_rr_get_interval_time64:
11580         {
11581             struct timespec ts;
11582             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11583             if (!is_error(ret)) {
11584                 ret = host_to_target_timespec64(arg2, &ts);
11585             }
11586         }
11587         return ret;
11588 #endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /*
             * Fail with EFAULT on a bad request pointer instead of
             * sleeping on an uninitialized timespec (the conversion's
             * return value used to be ignored).
             */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                /* Best effort: report the unslept remainder if asked. */
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
11601     case TARGET_NR_prctl:
11602         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11603         break;
11604 #ifdef TARGET_NR_arch_prctl
11605     case TARGET_NR_arch_prctl:
11606         return do_arch_prctl(cpu_env, arg1, arg2);
11607 #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* pread64(fd, buf, count, offset): the 64-bit offset arrives as a
           register pair; on ABIs that align register pairs the pair is
           shifted up one slot, so rebase arg4/arg5 first. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back exactly the number of bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        /* Mirror of pread64 above, with the data flowing guest -> host. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Nothing to copy back after a write. */
        unlock_user(p, arg2, 0);
        return ret;
#endif
11644     case TARGET_NR_getcwd:
11645         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11646             return -TARGET_EFAULT;
11647         ret = get_errno(sys_getcwd1(p, arg2));
11648         unlock_user(p, arg1, ret);
11649         return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    /* Shared handler: the two syscalls differ only in copy direction. */
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* Header is locked VERIFY_WRITE even for capset: the kernel
         * writes the negotiated version back in all cases (see below).
         */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);
11722 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            /* abi_long-sized offset variant. */
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Write the updated offset back; an EFAULT here overrides
             * the byte count returned by sendfile.
             */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            /* Same as sendfile but the guest offset is always 64-bit. */
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via do_fork with the vfork clone flags. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* 64-bit length may occupy a register pair; the per-target helper
         * handles the argument splitting.
         */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() redirects the lookup under the -L sysroot prefix. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat64 and newfstatat share a layout; both funnel here. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                /* First try the host's real statx syscall. */
                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Anything but ENOSYS is the final answer; only fall
                 * through to the fstatat emulation when the host kernel
                 * lacks statx entirely.
                 */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: synthesize a statx result from fstatat().  Fields
             * that struct stat cannot provide are left zeroed by memset.
             */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* 16-bit-uid variant: low2high* widen legacy 16-bit target ids. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* high2low* clamp host 32-bit ids into the legacy 16-bit range. */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            /* gidsetsize == 0 is the "query count" form; grouplist stays
             * NULL and only the return value matters.
             */
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Copy out only the 'ret' entries the kernel filled in. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(sys_setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Three independent copy-outs; any failure is EFAULT. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    /* Guard on the syscall actually being implemented here: the old guard
     * tested TARGET_NR_getresgid, which would mis-compile on any target
     * defining only one of the pair.
     */
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; the kernel returns the old id. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
12064 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit-uid variant: ids pass through without low2high mapping. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            /* Alpha returns the effective id in the secondary return
             * register a4; the primary return value is the real id.
             */
            cpu_env->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid returns the real gid in the normal return
     * register and the effective gid in a4.
     */
    case TARGET_NR_getxgid:
        {
            /* was mistyped as uid_t; this holds a group id */
            gid_t egid;

            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Only the IEEE FP control query is supported; everything else
         * reports EOPNOTSUPP.
         */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /* Refresh the SWCR status bits from the live FPCR. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        /* Supports setting the IEEE FP control word and raising IEEE
         * exceptions; other subcodes report EOPNOTSUPP.
         */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    /* Last matching test wins: checks run in increasing
                     * priority order, ending at INV.
                     */
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            /* The OSF variant passes the mask by value and returns the
             * previous mask in the return value rather than via memory.
             */
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
12252 
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* 32-bit-uid variants: ids pass straight through. */
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Copy out only the 'ret' entries the kernel filled in. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                /* Read-only copy: nothing to write back to the guest. */
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(sys_setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit-uid variants: no low2high/high2low id mapping needed. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* VERIFY_NONE: we only need the host mapping of the guest
             * range; mincore itself does not read or write through it.
             */
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error directly, not via errno. */
        return -host_to_target_errno(ret);
#endif
12428 
12429 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12430 
12431 #ifdef TARGET_NR_fadvise64_64
12432     case TARGET_NR_fadvise64_64:
12433 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12434         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12435         ret = arg2;
12436         arg2 = arg3;
12437         arg3 = arg4;
12438         arg4 = arg5;
12439         arg5 = arg6;
12440         arg6 = ret;
12441 #else
12442         /* 6 args: fd, offset (high, low), len (high, low), advice */
12443         if (regpairs_aligned(cpu_env, num)) {
12444             /* offset is in (3,4), len in (5,6) and advice in 7 */
12445             arg2 = arg3;
12446             arg3 = arg4;
12447             arg4 = arg5;
12448             arg5 = arg6;
12449             arg6 = arg7;
12450         }
12451 #endif
12452         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12453                             target_offset64(arg4, arg5), arg6);
12454         return -host_to_target_errno(ret);
12455 #endif
12456 
12457 #ifdef TARGET_NR_fadvise64
12458     case TARGET_NR_fadvise64:
12459         /* 5 args: fd, offset (high, low), len, advice */
12460         if (regpairs_aligned(cpu_env, num)) {
12461             /* offset is in (3,4), len in 5 and advice in 6 */
12462             arg2 = arg3;
12463             arg3 = arg4;
12464             arg4 = arg5;
12465             arg5 = arg6;
12466         }
12467         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12468         return -host_to_target_errno(ret);
12469 #endif
12470 
12471 #else /* not a 32-bit ABI */
12472 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12473 #ifdef TARGET_NR_fadvise64_64
12474     case TARGET_NR_fadvise64_64:
12475 #endif
12476 #ifdef TARGET_NR_fadvise64
12477     case TARGET_NR_fadvise64:
12478 #endif
12479 #ifdef TARGET_S390X
12480         switch (arg4) {
12481         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12482         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12483         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12484         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12485         default: break;
12486         }
12487 #endif
12488         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12489 #endif
12490 #endif /* end of 64-bit ABI fadvise handling */
12491 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* All guest-address translation and advice filtering is done in
         * the target_madvise() helper. */
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /* 32-bit guests use fcntl64 for 64-bit file locks; translate the
         * guest flock64 layout to the host struct flock and back. */
        int cmd;
        struct flock fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM has a differently padded flock64 layout. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
	    break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
	    break;
        default:
            /* Non-lock commands share the plain fcntl implementation. */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the emulated target page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* On 32-bit ABIs the 64-bit offset arrives as a register pair,
         * possibly shifted by one slot for pair-alignment rules. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        /* List extended attribute names into an optional guest buffer;
         * a NULL buffer (arg2 == 0) asks only for the required size. */
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                /* llistxattr: do not follow a trailing symlink */
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* n = attribute name, v = attribute value (may be absent). */
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user() tolerates NULL, so no per-pointer checks needed */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* arg4 as dirty length: the fetched value is copied back */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* Thread-pointer setup is inherently per-architecture; emulate the
         * few targets that have this syscall and reject the rest. */
#if defined(TARGET_MIPS)
      /* MIPS stores the TLS pointer in the CP0 UserLocal register. */
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k keeps the TLS value in the per-thread TaskState. */
          TaskState *ts = get_task_state(cpu);
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = get_task_state(cpu);
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; guests fall back to uname() for the domain name. */
        return -TARGET_ENOSYS;
#endif
12764 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        /* Convert the guest timespec, then set the host clock. */
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        /* Same as clock_settime but the guest passes a 64-bit timespec
         * (__kernel_timespec) even on 32-bit ABIs. */
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy the result back; propagates -TARGET_EFAULT on failure. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): the copy-out result is ignored here, unlike
             * clock_gettime above — a faulting arg2 is not reported. */
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        /* The single 'ts' buffer doubles as request (in) and remaining
         * time (out, via arg4) for the host call. */
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* 64-bit timespec variant of clock_nanosleep; same EINTR
         * remaining-time semantics as above. */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12874 
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        /* Remember the guest's child-tid pointer in the TaskState so the
         * thread-exit path can clear it, rather than telling the host
         * kernel about a guest address it cannot interpret. */
        TaskState *ts = get_task_state(cpu);
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif

    case TARGET_NR_tkill:
        /* Signal numbers must be remapped from guest to host numbering. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12909 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* arg3 points to a pair of timespecs (atime, mtime); a NULL
             * pointer means "set both to the current time". */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* A NULL path is valid here: it means "operate on the fd
             * given by arg1 itself" (kernel extension used by futimens). */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* Same as utimensat but with 64-bit __kernel_timespec pairs. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* second argument selects 32- vs 64-bit timespec handling */
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            /* Register a translator so inotify events read from this fd
             * are converted to the guest's event layout. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
13005 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* POSIX message queue open: translate open flags and the
             * optional mq_attr before calling the host mq_open(). */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the name is locked at arg1 - 1 — presumably to
             * account for the leading '/' handling; confirm against the
             * callers before touching this offset. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* same arg1 - 1 offset convention as mq_open above */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* NOTE(review): the lock_user() result is not NULL-checked
             * before use, unlike most cases in this file. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* 64-bit timespec variant of mq_timedsend. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* message priority copy-out is optional (arg4 may be NULL) */
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                /* set new attributes, old ones are returned in _out */
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
13157 
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* tee() only takes fds and flags, so no address translation
             * is needed. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* The in/out offsets are optional 64-bit values in guest
             * memory; copy them in, splice, then write the updated
             * offsets back. */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            /* lock_iovec() translates the whole guest iovec array. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reports its failure cause via errno */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Translate the 8-byte counter reads/writes on this fd. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Only O_NONBLOCK and O_CLOEXEC are meaningful eventfd2 flags;
         * convert them individually from target to host values. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* 32-bit ABIs pass offset and len as 32-bit register pairs. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        /* MIPS inserts an alignment padding slot, shifting the 64-bit
         * pairs up by one argument register. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* mask translation and fd registration happen in do_signalfd4() */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* legacy signalfd == signalfd4 with zero flags */
        return do_signalfd4(arg1, arg2, 0);
#endif
13290 #if defined(CONFIG_EPOLL)
13291 #if defined(TARGET_NR_epoll_create)
13292     case TARGET_NR_epoll_create:
13293         return get_errno(epoll_create(arg1));
13294 #endif
13295 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13296     case TARGET_NR_epoll_create1:
13297         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13298 #endif
13299 #if defined(TARGET_NR_epoll_ctl)
13300     case TARGET_NR_epoll_ctl:
13301     {
13302         struct epoll_event ep;
13303         struct epoll_event *epp = 0;
13304         if (arg4) {
13305             if (arg2 != EPOLL_CTL_DEL) {
13306                 struct target_epoll_event *target_ep;
13307                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13308                     return -TARGET_EFAULT;
13309                 }
13310                 ep.events = tswap32(target_ep->events);
13311                 /*
13312                  * The epoll_data_t union is just opaque data to the kernel,
13313                  * so we transfer all 64 bits across and need not worry what
13314                  * actual data type it is.
13315                  */
13316                 ep.data.u64 = tswap64(target_ep->data.u64);
13317                 unlock_user_struct(target_ep, arg4, 0);
13318             }
13319             /*
13320              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13321              * non-null pointer, even though this argument is ignored.
13322              *
13323              */
13324             epp = &ep;
13325         }
13326         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13327     }
13328 #endif
13329 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Reject non-positive or oversized event counts so the guest
         * cannot make us allocate an unbounded host-side buffer. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        /* Map the guest's output array for the duration of the call. */
        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Separate host-side buffer: the host and target epoll_event
         * layouts/endianness may differ, so results are converted below. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            /* Optional guest signal mask, applied atomically around the
             * wait (epoll_pwait semantics). */
            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* Plain epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        /* On success, ret is the number of ready events: byteswap each
         * entry back into the guest's array.  data.u64 is opaque to the
         * kernel, so a plain 64-bit swap transfers whatever the guest
         * stored there. */
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
13406 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* A new limit is only passed through to the host for resources
         * other than RLIMIT_AS/DATA/STACK; for those three, rnewp stays
         * NULL and the guest's new value is silently ignored --
         * NOTE(review): presumably so the guest cannot constrain QEMU's
         * own host memory usage; confirm rationale. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        /* Copy the previous limits back to the guest if it asked for them. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Copy the host's hostname into the guest buffer at arg1 (len arg2). */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!name) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(name, arg2));
        unlock_user(name, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Emulated 32-bit compare-and-exchange at guest address arg6:
         * if the current memory value equals arg2, store arg1; the value
         * previously in memory is returned either way.
         * FIXME: not atomic with respect to other guest threads --
         * should use start_exclusive from main.c. */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Fault: deliver SIGSEGV to the guest and bail out now.
             * The original code fell through here and returned the
             * uninitialized mem_value (undefined behavior); return the
             * sentinel that was being assigned to ret instead. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
13472 #ifdef TARGET_NR_atomic_barrier
13473     case TARGET_NR_atomic_barrier:
13474         /* Like the kernel implementation and the
13475            qemu arm barrier, no-op this? */
13476         return 0;
13477 #endif
13478 
13479 #ifdef TARGET_NR_timer_create
13480     case TARGET_NR_timer_create:
13481     {
13482         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13483 
13484         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13485 
13486         int clkid = arg1;
13487         int timer_index = next_free_host_timer();
13488 
13489         if (timer_index < 0) {
13490             ret = -TARGET_EAGAIN;
13491         } else {
13492             timer_t *phtimer = g_posix_timers  + timer_index;
13493 
13494             if (arg2) {
13495                 phost_sevp = &host_sevp;
13496                 ret = target_to_host_sigevent(phost_sevp, arg2);
13497                 if (ret != 0) {
13498                     free_host_timer_slot(timer_index);
13499                     return ret;
13500                 }
13501             }
13502 
13503             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13504             if (ret) {
13505                 free_host_timer_slot(timer_index);
13506             } else {
13507                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13508                     timer_delete(*phtimer);
13509                     free_host_timer_slot(timer_index);
13510                     return -TARGET_EFAULT;
13511                 }
13512             }
13513         }
13514         return ret;
13515     }
13516 #endif
13517 
13518 #ifdef TARGET_NR_timer_settime
13519     case TARGET_NR_timer_settime:
13520     {
13521         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13522          * struct itimerspec * old_value */
13523         target_timer_t timerid = get_timer_id(arg1);
13524 
13525         if (timerid < 0) {
13526             ret = timerid;
13527         } else if (arg3 == 0) {
13528             ret = -TARGET_EINVAL;
13529         } else {
13530             timer_t htimer = g_posix_timers[timerid];
13531             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13532 
13533             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13534                 return -TARGET_EFAULT;
13535             }
13536             ret = get_errno(
13537                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13538             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13539                 return -TARGET_EFAULT;
13540             }
13541         }
13542         return ret;
13543     }
13544 #endif
13545 
13546 #ifdef TARGET_NR_timer_settime64
13547     case TARGET_NR_timer_settime64:
13548     {
13549         target_timer_t timerid = get_timer_id(arg1);
13550 
13551         if (timerid < 0) {
13552             ret = timerid;
13553         } else if (arg3 == 0) {
13554             ret = -TARGET_EINVAL;
13555         } else {
13556             timer_t htimer = g_posix_timers[timerid];
13557             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13558 
13559             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13560                 return -TARGET_EFAULT;
13561             }
13562             ret = get_errno(
13563                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13564             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13565                 return -TARGET_EFAULT;
13566             }
13567         }
13568         return ret;
13569     }
13570 #endif
13571 
13572 #ifdef TARGET_NR_timer_gettime
13573     case TARGET_NR_timer_gettime:
13574     {
13575         /* args: timer_t timerid, struct itimerspec *curr_value */
13576         target_timer_t timerid = get_timer_id(arg1);
13577 
13578         if (timerid < 0) {
13579             ret = timerid;
13580         } else if (!arg2) {
13581             ret = -TARGET_EFAULT;
13582         } else {
13583             timer_t htimer = g_posix_timers[timerid];
13584             struct itimerspec hspec;
13585             ret = get_errno(timer_gettime(htimer, &hspec));
13586 
13587             if (host_to_target_itimerspec(arg2, &hspec)) {
13588                 ret = -TARGET_EFAULT;
13589             }
13590         }
13591         return ret;
13592     }
13593 #endif
13594 
13595 #ifdef TARGET_NR_timer_gettime64
13596     case TARGET_NR_timer_gettime64:
13597     {
13598         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13599         target_timer_t timerid = get_timer_id(arg1);
13600 
13601         if (timerid < 0) {
13602             ret = timerid;
13603         } else if (!arg2) {
13604             ret = -TARGET_EFAULT;
13605         } else {
13606             timer_t htimer = g_posix_timers[timerid];
13607             struct itimerspec hspec;
13608             ret = get_errno(timer_gettime(htimer, &hspec));
13609 
13610             if (host_to_target_itimerspec64(arg2, &hspec)) {
13611                 ret = -TARGET_EFAULT;
13612             }
13613         }
13614         return ret;
13615     }
13616 #endif
13617 
13618 #ifdef TARGET_NR_timer_getoverrun
13619     case TARGET_NR_timer_getoverrun:
13620     {
13621         /* args: timer_t timerid */
13622         target_timer_t timerid = get_timer_id(arg1);
13623 
13624         if (timerid < 0) {
13625             ret = timerid;
13626         } else {
13627             timer_t htimer = g_posix_timers[timerid];
13628             ret = get_errno(timer_getoverrun(htimer));
13629         }
13630         return ret;
13631     }
13632 #endif
13633 
13634 #ifdef TARGET_NR_timer_delete
13635     case TARGET_NR_timer_delete:
13636     {
13637         /* args: timer_t timerid */
13638         target_timer_t timerid = get_timer_id(arg1);
13639 
13640         if (timerid < 0) {
13641             ret = timerid;
13642         } else {
13643             timer_t htimer = g_posix_timers[timerid];
13644             ret = get_errno(timer_delete(htimer));
13645             free_host_timer_slot(timerid);
13646         }
13647         return ret;
13648     }
13649 #endif
13650 
13651 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13652     case TARGET_NR_timerfd_create:
13653         ret = get_errno(timerfd_create(arg1,
13654                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13655         if (ret >= 0) {
13656             fd_trans_register(ret, &target_timerfd_trans);
13657         }
13658         return ret;
13659 #endif
13660 
13661 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13662     case TARGET_NR_timerfd_gettime:
13663         {
13664             struct itimerspec its_curr;
13665 
13666             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13667 
13668             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13669                 return -TARGET_EFAULT;
13670             }
13671         }
13672         return ret;
13673 #endif
13674 
13675 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13676     case TARGET_NR_timerfd_gettime64:
13677         {
13678             struct itimerspec its_curr;
13679 
13680             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13681 
13682             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13683                 return -TARGET_EFAULT;
13684             }
13685         }
13686         return ret;
13687 #endif
13688 
13689 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13690     case TARGET_NR_timerfd_settime:
13691         {
13692             struct itimerspec its_new, its_old, *p_new;
13693 
13694             if (arg3) {
13695                 if (target_to_host_itimerspec(&its_new, arg3)) {
13696                     return -TARGET_EFAULT;
13697                 }
13698                 p_new = &its_new;
13699             } else {
13700                 p_new = NULL;
13701             }
13702 
13703             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13704 
13705             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13706                 return -TARGET_EFAULT;
13707             }
13708         }
13709         return ret;
13710 #endif
13711 
13712 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13713     case TARGET_NR_timerfd_settime64:
13714         {
13715             struct itimerspec its_new, its_old, *p_new;
13716 
13717             if (arg3) {
13718                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13719                     return -TARGET_EFAULT;
13720                 }
13721                 p_new = &its_new;
13722             } else {
13723                 p_new = NULL;
13724             }
13725 
13726             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13727 
13728             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13729                 return -TARGET_EFAULT;
13730             }
13731         }
13732         return ret;
13733 #endif
13734 
13735 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13736     case TARGET_NR_ioprio_get:
13737         return get_errno(ioprio_get(arg1, arg2));
13738 #endif
13739 
13740 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13741     case TARGET_NR_ioprio_set:
13742         return get_errno(ioprio_set(arg1, arg2, arg3));
13743 #endif
13744 
13745 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13746     case TARGET_NR_setns:
13747         return get_errno(setns(arg1, arg2));
13748 #endif
13749 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13750     case TARGET_NR_unshare:
13751         return get_errno(unshare(arg1));
13752 #endif
13753 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13754     case TARGET_NR_kcmp:
13755         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13756 #endif
13757 #ifdef TARGET_NR_swapcontext
13758     case TARGET_NR_swapcontext:
13759         /* PowerPC specific.  */
13760         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13761 #endif
13762 #ifdef TARGET_NR_memfd_create
13763     case TARGET_NR_memfd_create:
13764         p = lock_user_string(arg1);
13765         if (!p) {
13766             return -TARGET_EFAULT;
13767         }
13768         ret = get_errno(memfd_create(p, arg2));
13769         fd_trans_unregister(ret);
13770         unlock_user(p, arg1, 0);
13771         return ret;
13772 #endif
13773 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13774     case TARGET_NR_membarrier:
13775         return get_errno(membarrier(arg1, arg2));
13776 #endif
13777 
13778 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13779     case TARGET_NR_copy_file_range:
13780         {
13781             loff_t inoff, outoff;
13782             loff_t *pinoff = NULL, *poutoff = NULL;
13783 
13784             if (arg2) {
13785                 if (get_user_u64(inoff, arg2)) {
13786                     return -TARGET_EFAULT;
13787                 }
13788                 pinoff = &inoff;
13789             }
13790             if (arg4) {
13791                 if (get_user_u64(outoff, arg4)) {
13792                     return -TARGET_EFAULT;
13793                 }
13794                 poutoff = &outoff;
13795             }
13796             /* Do not sign-extend the count parameter. */
13797             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13798                                                  (abi_ulong)arg5, arg6));
13799             if (!is_error(ret) && ret > 0) {
13800                 if (arg2) {
13801                     if (put_user_u64(inoff, arg2)) {
13802                         return -TARGET_EFAULT;
13803                     }
13804                 }
13805                 if (arg4) {
13806                     if (put_user_u64(outoff, arg4)) {
13807                         return -TARGET_EFAULT;
13808                     }
13809                 }
13810             }
13811         }
13812         return ret;
13813 #endif
13814 
13815 #if defined(TARGET_NR_pivot_root)
13816     case TARGET_NR_pivot_root:
13817         {
13818             void *p2;
13819             p = lock_user_string(arg1); /* new_root */
13820             p2 = lock_user_string(arg2); /* put_old */
13821             if (!p || !p2) {
13822                 ret = -TARGET_EFAULT;
13823             } else {
13824                 ret = get_errno(pivot_root(p, p2));
13825             }
13826             unlock_user(p2, arg2, 0);
13827             unlock_user(p, arg1, 0);
13828         }
13829         return ret;
13830 #endif
13831 
13832 #if defined(TARGET_NR_riscv_hwprobe)
13833     case TARGET_NR_riscv_hwprobe:
13834         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13835 #endif
13836 
13837     default:
13838         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13839         return -TARGET_ENOSYS;
13840     }
13841     return ret;
13842 }
13843 
/*
 * do_syscall: entry point used by the per-target cpu main loops to run
 * one guest system call.  Dispatches to do_syscall1() and surrounds it
 * with the record_syscall_start/return hooks and optional -strace
 * style logging of arguments and return value.
 */
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    /* Notify syscall-tracking hooks before the call executes. */
    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    /* Log the result with the same strace-style formatting. */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}
13884