xref: /qemu/linux-user/syscall.c (revision 88a722b6)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include <elf.h>
29 #include <endian.h>
30 #include <grp.h>
31 #include <sys/ipc.h>
32 #include <sys/msg.h>
33 #include <sys/wait.h>
34 #include <sys/mount.h>
35 #include <sys/file.h>
36 #include <sys/fsuid.h>
37 #include <sys/personality.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/swap.h>
41 #include <linux/capability.h>
42 #include <sched.h>
43 #include <sys/timex.h>
44 #include <sys/socket.h>
45 #include <linux/sockios.h>
46 #include <sys/un.h>
47 #include <sys/uio.h>
48 #include <poll.h>
49 #include <sys/times.h>
50 #include <sys/shm.h>
51 #include <sys/sem.h>
52 #include <sys/statfs.h>
53 #include <utime.h>
54 #include <sys/sysinfo.h>
55 #include <sys/signalfd.h>
56 //#include <sys/user.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61 #include <linux/wireless.h>
62 #include <linux/icmp.h>
63 #include <linux/icmpv6.h>
64 #include <linux/if_tun.h>
65 #include <linux/in6.h>
66 #include <linux/errqueue.h>
67 #include <linux/random.h>
68 #ifdef CONFIG_TIMERFD
69 #include <sys/timerfd.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
83 #ifdef HAVE_SYS_KCOV_H
84 #include <sys/kcov.h>
85 #endif
86 
87 #define termios host_termios
88 #define winsize host_winsize
89 #define termio host_termio
90 #define sgttyb host_sgttyb /* same as target */
91 #define tchars host_tchars /* same as target */
92 #define ltchars host_ltchars /* same as target */
93 
94 #include <linux/termios.h>
95 #include <linux/unistd.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #include <linux/fd.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #if defined(CONFIG_USBFS)
108 #include <linux/usbdevice_fs.h>
109 #include <linux/usb/ch9.h>
110 #endif
111 #include <linux/vt.h>
112 #include <linux/dm-ioctl.h>
113 #include <linux/reboot.h>
114 #include <linux/route.h>
115 #include <linux/filter.h>
116 #include <linux/blkpg.h>
117 #include <netpacket/packet.h>
118 #include <linux/netlink.h>
119 #include <linux/if_alg.h>
120 #include <linux/rtc.h>
121 #include <sound/asound.h>
122 #ifdef HAVE_BTRFS_H
123 #include <linux/btrfs.h>
124 #endif
125 #ifdef HAVE_DRM_H
126 #include <libdrm/drm.h>
127 #include <libdrm/i915_drm.h>
128 #endif
129 #include "linux_loop.h"
130 #include "uname.h"
131 
132 #include "qemu.h"
133 #include "user-internals.h"
134 #include "strace.h"
135 #include "signal-common.h"
136 #include "loader.h"
137 #include "user-mmap.h"
138 #include "user/safe-syscall.h"
139 #include "qemu/guest-random.h"
140 #include "qemu/selfmap.h"
141 #include "user/syscall-trace.h"
142 #include "special-errno.h"
143 #include "qapi/error.h"
144 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
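/*
 * Illustrative sketch (not part of the original source): one way the
 * masks above can be combined to classify a guest clone() flag set.
 * The helper names here are hypothetical; do_fork() performs the
 * equivalent checks inline.
 */
static inline bool clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* Thread creation requires every CLONE_THREAD_FLAGS bit to be set... */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           /* ...and no bits outside the supported/ignored sets. */
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}

static inline bool clone_flags_look_like_fork(unsigned int flags)
{
    /* fork-like: none of the thread flags, nothing we cannot emulate. */
    return (flags & CLONE_THREAD_FLAGS) == 0 &&
           (flags & CLONE_INVALID_FORK_FLAGS) == 0;
}
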
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
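/*
 * Expansion example (illustrative only, not part of the original file):
 * with the __NR_sys_* aliases defined just below, an invocation such as
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * produces a wrapper equivalent to the one sketched here.
 */
#if 0   /* sketch only; the real instance appears further down */
static int sys_getcwd1(char *buf, size_t size)
{
    return syscall(__NR_sys_getcwd1, buf, size);
}
#endif
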
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363     uint32_t size;
364     uint32_t sched_policy;
365     uint64_t sched_flags;
366     int32_t sched_nice;
367     uint32_t sched_priority;
368     uint64_t sched_runtime;
369     uint64_t sched_deadline;
370     uint64_t sched_period;
371     uint32_t sched_util_min;
372     uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439 #if defined(O_DIRECT)
440   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441 #endif
442 #if defined(O_NOATIME)
443   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444 #endif
445 #if defined(O_CLOEXEC)
446   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447 #endif
448 #if defined(O_PATH)
449   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450 #endif
451 #if defined(O_TMPFILE)
452   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453 #endif
454   /* Don't terminate the list prematurely on 64-bit host+guest.  */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458 };
459 
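/*
 * Usage sketch (illustrative): each table entry maps a (mask, bits) pair in
 * the guest flag space to its host counterpart.  Elsewhere in this file the
 * table is consumed by the generic bitmask translators, roughly as below;
 * the wrapper name here is hypothetical.
 */
#if 0
static int example_translate_open_flags(int target_flags)
{
    return target_to_host_bitmask(target_flags, fcntl_flags_tbl);
}
#endif
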
460 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
461 
462 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
463 #if defined(__NR_utimensat)
464 #define __NR_sys_utimensat __NR_utimensat
465 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
466           const struct timespec *,tsp,int,flags)
467 #else
468 static int sys_utimensat(int dirfd, const char *pathname,
469                          const struct timespec times[2], int flags)
470 {
471     errno = ENOSYS;
472     return -1;
473 }
474 #endif
475 #endif /* TARGET_NR_utimensat */
476 
477 #ifdef TARGET_NR_renameat2
478 #if defined(__NR_renameat2)
479 #define __NR_sys_renameat2 __NR_renameat2
480 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
481           const char *, new, unsigned int, flags)
482 #else
483 static int sys_renameat2(int oldfd, const char *old,
484                          int newfd, const char *new, int flags)
485 {
486     if (flags == 0) {
487         return renameat(oldfd, old, newfd, new);
488     }
489     errno = ENOSYS;
490     return -1;
491 }
492 #endif
493 #endif /* TARGET_NR_renameat2 */
494 
495 #ifdef CONFIG_INOTIFY
496 #include <sys/inotify.h>
497 #else
498 /* Userspace can usually survive runtime without inotify */
499 #undef TARGET_NR_inotify_init
500 #undef TARGET_NR_inotify_init1
501 #undef TARGET_NR_inotify_add_watch
502 #undef TARGET_NR_inotify_rm_watch
503 #endif /* CONFIG_INOTIFY  */
504 
505 #if defined(TARGET_NR_prlimit64)
506 #ifndef __NR_prlimit64
507 # define __NR_prlimit64 -1
508 #endif
509 #define __NR_sys_prlimit64 __NR_prlimit64
510 /* The glibc rlimit structure may not match the one used by the underlying syscall */
511 struct host_rlimit64 {
512     uint64_t rlim_cur;
513     uint64_t rlim_max;
514 };
515 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
516           const struct host_rlimit64 *, new_limit,
517           struct host_rlimit64 *, old_limit)
518 #endif
519 
520 
521 #if defined(TARGET_NR_timer_create)
522 /* Maximum of 32 active POSIX timers allowed at any one time. */
523 #define GUEST_TIMER_MAX 32
524 static timer_t g_posix_timers[GUEST_TIMER_MAX];
525 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
526 
527 static inline int next_free_host_timer(void)
528 {
529     int k;
530     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
531         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
532             return k;
533         }
534     }
535     return -1;
536 }
537 
538 static inline void free_host_timer_slot(int id)
539 {
540     qatomic_store_release(g_posix_timer_allocated + id, 0);
541 }
542 #endif
543 
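/*
 * Allocation sketch (illustrative, not from the original file): how a
 * timer_create handler can claim and release a host timer slot using the
 * helpers above.  The function name and error choices are hypothetical.
 */
#if 0
static int example_create_guest_timer(struct sigevent *sev)
{
    int timerid = next_free_host_timer();

    if (timerid < 0) {
        return -TARGET_EAGAIN;          /* all GUEST_TIMER_MAX slots in use */
    }
    if (timer_create(CLOCK_REALTIME, sev, &g_posix_timers[timerid]) < 0) {
        free_host_timer_slot(timerid);  /* give the slot back on failure */
        return -host_to_target_errno(errno);
    }
    return timerid;                     /* guest timer id == slot index */
}
#endif
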
544 static inline int host_to_target_errno(int host_errno)
545 {
546     switch (host_errno) {
547 #define E(X)  case X: return TARGET_##X;
548 #include "errnos.c.inc"
549 #undef E
550     default:
551         return host_errno;
552     }
553 }
554 
555 static inline int target_to_host_errno(int target_errno)
556 {
557     switch (target_errno) {
558 #define E(X)  case TARGET_##X: return X;
559 #include "errnos.c.inc"
560 #undef E
561     default:
562         return target_errno;
563     }
564 }
565 
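/*
 * Expansion note (illustrative): each E(X) line in errnos.c.inc becomes
 *     case X: return TARGET_X;
 * in host_to_target_errno() and the mirrored
 *     case TARGET_X: return X;
 * in target_to_host_errno(), so the two translation tables stay symmetric.
 */
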
566 abi_long get_errno(abi_long ret)
567 {
568     if (ret == -1)
569         return -host_to_target_errno(errno);
570     else
571         return ret;
572 }
573 
574 const char *target_strerror(int err)
575 {
576     if (err == QEMU_ERESTARTSYS) {
577         return "To be restarted";
578     }
579     if (err == QEMU_ESIGRETURN) {
580         return "Successful exit from sigreturn";
581     }
582 
583     return strerror(target_to_host_errno(err));
584 }
585 
586 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
587 {
588     int i;
589     uint8_t b;
590     if (usize <= ksize) {
591         return 1;
592     }
593     for (i = ksize; i < usize; i++) {
594         if (get_user_u8(b, addr + i)) {
595             return -TARGET_EFAULT;
596         }
597         if (b != 0) {
598             return 0;
599         }
600     }
601     return 1;
602 }
603 
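/*
 * Usage sketch (hypothetical, not from the original file): callers that
 * accept a sized struct from the guest (e.g. a sched_attr style API) can
 * use check_zeroed_user() to accept larger-than-known structs only when
 * the extra tail bytes are all zero.
 */
#if 0
static abi_long example_copy_sized_struct(abi_ulong addr, abi_ulong usize)
{
    abi_long ret = check_zeroed_user(addr, sizeof(struct sched_attr), usize);

    if (ret < 0) {
        return ret;                 /* -TARGET_EFAULT from get_user_u8() */
    }
    if (ret == 0) {
        return -TARGET_E2BIG;       /* non-zero bytes beyond what we know */
    }
    /* safe to copy MIN(usize, sizeof(struct sched_attr)) bytes here */
    return 0;
}
#endif
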
604 #define safe_syscall0(type, name) \
605 static type safe_##name(void) \
606 { \
607     return safe_syscall(__NR_##name); \
608 }
609 
610 #define safe_syscall1(type, name, type1, arg1) \
611 static type safe_##name(type1 arg1) \
612 { \
613     return safe_syscall(__NR_##name, arg1); \
614 }
615 
616 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
617 static type safe_##name(type1 arg1, type2 arg2) \
618 { \
619     return safe_syscall(__NR_##name, arg1, arg2); \
620 }
621 
622 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
623 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
624 { \
625     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
626 }
627 
628 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
629     type4, arg4) \
630 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
631 { \
632     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
633 }
634 
635 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
636     type4, arg4, type5, arg5) \
637 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
638     type5 arg5) \
639 { \
640     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
641 }
642 
643 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
644     type4, arg4, type5, arg5, type6, arg6) \
645 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
646     type5 arg5, type6 arg6) \
647 { \
648     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
649 }
650 
651 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
652 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
653 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
654               int, flags, mode_t, mode)
655 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
656 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
657               struct rusage *, rusage)
658 #endif
659 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
660               int, options, struct rusage *, rusage)
661 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
662 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
663               char **, argv, char **, envp, int, flags)
664 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
665     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
666 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
667               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
668 #endif
669 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
670 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
671               struct timespec *, tsp, const sigset_t *, sigmask,
672               size_t, sigsetsize)
673 #endif
674 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
675               int, maxevents, int, timeout, const sigset_t *, sigmask,
676               size_t, sigsetsize)
677 #if defined(__NR_futex)
678 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
679               const struct timespec *,timeout,int *,uaddr2,int,val3)
680 #endif
681 #if defined(__NR_futex_time64)
682 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
683               const struct timespec *,timeout,int *,uaddr2,int,val3)
684 #endif
685 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
686 safe_syscall2(int, kill, pid_t, pid, int, sig)
687 safe_syscall2(int, tkill, int, tid, int, sig)
688 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
689 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
690 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
692               unsigned long, pos_l, unsigned long, pos_h)
693 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
694               unsigned long, pos_l, unsigned long, pos_h)
695 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
696               socklen_t, addrlen)
697 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
698               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
699 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
700               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
701 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
702 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
703 safe_syscall2(int, flock, int, fd, int, operation)
704 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
705 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
706               const struct timespec *, uts, size_t, sigsetsize)
707 #endif
708 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
709               int, flags)
710 #if defined(TARGET_NR_nanosleep)
711 safe_syscall2(int, nanosleep, const struct timespec *, req,
712               struct timespec *, rem)
713 #endif
714 #if defined(TARGET_NR_clock_nanosleep) || \
715     defined(TARGET_NR_clock_nanosleep_time64)
716 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
717               const struct timespec *, req, struct timespec *, rem)
718 #endif
719 #ifdef __NR_ipc
720 #ifdef __s390x__
721 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
722               void *, ptr)
723 #else
724 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
725               void *, ptr, long, fifth)
726 #endif
727 #endif
728 #ifdef __NR_msgsnd
729 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
730               int, flags)
731 #endif
732 #ifdef __NR_msgrcv
733 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
734               long, msgtype, int, flags)
735 #endif
736 #ifdef __NR_semtimedop
737 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
738               unsigned, nsops, const struct timespec *, timeout)
739 #endif
740 #if defined(TARGET_NR_mq_timedsend) || \
741     defined(TARGET_NR_mq_timedsend_time64)
742 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
743               size_t, len, unsigned, prio, const struct timespec *, timeout)
744 #endif
745 #if defined(TARGET_NR_mq_timedreceive) || \
746     defined(TARGET_NR_mq_timedreceive_time64)
747 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
748               size_t, len, unsigned *, prio, const struct timespec *, timeout)
749 #endif
750 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
751 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
752               int, outfd, loff_t *, poutoff, size_t, length,
753               unsigned int, flags)
754 #endif
755 
756 /* We do ioctl like this rather than via safe_syscall3 to preserve the
757  * "third argument might be integer or pointer or not present" behaviour of
758  * the libc function.
759  */
760 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
761 /* Similarly for fcntl. Note that callers must always:
762  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
763  *  use the flock64 struct rather than unsuffixed flock
764  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
765  */
766 #ifdef __NR_fcntl64
767 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
768 #else
769 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
770 #endif
771 
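/*
 * Usage sketch (hypothetical): per the comment above, callers always pass
 * the 64-bit command constants and struct flock64, so the same code gets
 * 64-bit file offsets on both 32-bit and 64-bit hosts.
 */
#if 0
static int example_query_lock(int fd, struct flock64 *fl)
{
    fl->l_type = F_RDLCK;
    fl->l_whence = SEEK_SET;
    fl->l_start = 0;
    fl->l_len = 0;                  /* 0 == to end of file */
    return safe_fcntl(fd, F_GETLK64, fl);
}
#endif
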
772 static inline int host_to_target_sock_type(int host_type)
773 {
774     int target_type;
775 
776     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
777     case SOCK_DGRAM:
778         target_type = TARGET_SOCK_DGRAM;
779         break;
780     case SOCK_STREAM:
781         target_type = TARGET_SOCK_STREAM;
782         break;
783     default:
784         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
785         break;
786     }
787 
788 #if defined(SOCK_CLOEXEC)
789     if (host_type & SOCK_CLOEXEC) {
790         target_type |= TARGET_SOCK_CLOEXEC;
791     }
792 #endif
793 
794 #if defined(SOCK_NONBLOCK)
795     if (host_type & SOCK_NONBLOCK) {
796         target_type |= TARGET_SOCK_NONBLOCK;
797     }
798 #endif
799 
800     return target_type;
801 }
802 
803 static abi_ulong target_brk, initial_target_brk;
804 
805 void target_set_brk(abi_ulong new_brk)
806 {
807     target_brk = TARGET_PAGE_ALIGN(new_brk);
808     initial_target_brk = target_brk;
809 }
810 
811 /* do_brk() must return target values and target errnos. */
812 abi_long do_brk(abi_ulong brk_val)
813 {
814     abi_long mapped_addr;
815     abi_ulong new_brk;
816     abi_ulong old_brk;
817 
818     /* brk pointers are always untagged */
819 
820     /* do not allow to shrink below initial brk value */
821     if (brk_val < initial_target_brk) {
822         return target_brk;
823     }
824 
825     new_brk = TARGET_PAGE_ALIGN(brk_val);
826     old_brk = TARGET_PAGE_ALIGN(target_brk);
827 
828     /* new and old target_brk might be on the same page */
829     if (new_brk == old_brk) {
830         target_brk = brk_val;
831         return target_brk;
832     }
833 
834     /* Release heap if necessary */
835     if (new_brk < old_brk) {
836         target_munmap(new_brk, old_brk - new_brk);
837 
838         target_brk = brk_val;
839         return target_brk;
840     }
841 
842     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
843                               PROT_READ | PROT_WRITE,
844                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
845                               -1, 0);
846 
847     if (mapped_addr == old_brk) {
848         target_brk = brk_val;
849         return target_brk;
850     }
851 
852 #if defined(TARGET_ALPHA)
853     /* We (partially) emulate OSF/1 on Alpha, which requires we
854        return a proper errno, not an unchanged brk value.  */
855     return -TARGET_ENOMEM;
856 #endif
857     /* For everything else, return the previous break. */
858     return target_brk;
859 }
860 
861 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
862     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
863 static inline abi_long copy_from_user_fdset(fd_set *fds,
864                                             abi_ulong target_fds_addr,
865                                             int n)
866 {
867     int i, nw, j, k;
868     abi_ulong b, *target_fds;
869 
870     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
871     if (!(target_fds = lock_user(VERIFY_READ,
872                                  target_fds_addr,
873                                  sizeof(abi_ulong) * nw,
874                                  1)))
875         return -TARGET_EFAULT;
876 
877     FD_ZERO(fds);
878     k = 0;
879     for (i = 0; i < nw; i++) {
880         /* grab the abi_ulong */
881         __get_user(b, &target_fds[i]);
882         for (j = 0; j < TARGET_ABI_BITS; j++) {
883             /* check the bit inside the abi_ulong */
884             if ((b >> j) & 1)
885                 FD_SET(k, fds);
886             k++;
887         }
888     }
889 
890     unlock_user(target_fds, target_fds_addr, 0);
891 
892     return 0;
893 }
894 
895 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
896                                                  abi_ulong target_fds_addr,
897                                                  int n)
898 {
899     if (target_fds_addr) {
900         if (copy_from_user_fdset(fds, target_fds_addr, n))
901             return -TARGET_EFAULT;
902         *fds_ptr = fds;
903     } else {
904         *fds_ptr = NULL;
905     }
906     return 0;
907 }
908 
909 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
910                                           const fd_set *fds,
911                                           int n)
912 {
913     int i, nw, j, k;
914     abi_long v;
915     abi_ulong *target_fds;
916 
917     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
918     if (!(target_fds = lock_user(VERIFY_WRITE,
919                                  target_fds_addr,
920                                  sizeof(abi_ulong) * nw,
921                                  0)))
922         return -TARGET_EFAULT;
923 
924     k = 0;
925     for (i = 0; i < nw; i++) {
926         v = 0;
927         for (j = 0; j < TARGET_ABI_BITS; j++) {
928             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
929             k++;
930         }
931         __put_user(v, &target_fds[i]);
932     }
933 
934     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
935 
936     return 0;
937 }
938 #endif
939 
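/*
 * Worked example (illustrative): with TARGET_ABI_BITS == 32, guest fd 35
 * lives in abi_ulong word 35 / 32 = 1 at bit 35 % 32 = 3, which is exactly
 * the (i, j) pair the loops above walk when packing and unpacking fd_sets.
 */
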
940 #if defined(__alpha__)
941 #define HOST_HZ 1024
942 #else
943 #define HOST_HZ 100
944 #endif
945 
946 static inline abi_long host_to_target_clock_t(long ticks)
947 {
948 #if HOST_HZ == TARGET_HZ
949     return ticks;
950 #else
951     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
952 #endif
953 }
954 
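/*
 * Worked example (illustrative): on an Alpha host (HOST_HZ == 1024) with a
 * 100 Hz target, 2048 host ticks convert to 2048 * 100 / 1024 = 200 target
 * clock ticks; when HOST_HZ matches TARGET_HZ the value passes through
 * unchanged.
 */
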
955 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
956                                              const struct rusage *rusage)
957 {
958     struct target_rusage *target_rusage;
959 
960     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
961         return -TARGET_EFAULT;
962     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
963     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
964     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
965     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
966     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
967     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
968     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
969     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
970     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
971     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
972     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
973     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
974     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
975     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
976     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
977     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
978     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
979     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
980     unlock_user_struct(target_rusage, target_addr, 1);
981 
982     return 0;
983 }
984 
985 #ifdef TARGET_NR_setrlimit
986 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
987 {
988     abi_ulong target_rlim_swap;
989     rlim_t result;
990 
991     target_rlim_swap = tswapal(target_rlim);
992     if (target_rlim_swap == TARGET_RLIM_INFINITY)
993         return RLIM_INFINITY;
994 
995     result = target_rlim_swap;
996     if (target_rlim_swap != (rlim_t)result)
997         return RLIM_INFINITY;
998 
999     return result;
1000 }
1001 #endif
1002 
1003 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1004 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1005 {
1006     abi_ulong target_rlim_swap;
1007     abi_ulong result;
1008 
1009     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1010         target_rlim_swap = TARGET_RLIM_INFINITY;
1011     else
1012         target_rlim_swap = rlim;
1013     result = tswapal(target_rlim_swap);
1014 
1015     return result;
1016 }
1017 #endif
1018 
1019 static inline int target_to_host_resource(int code)
1020 {
1021     switch (code) {
1022     case TARGET_RLIMIT_AS:
1023         return RLIMIT_AS;
1024     case TARGET_RLIMIT_CORE:
1025         return RLIMIT_CORE;
1026     case TARGET_RLIMIT_CPU:
1027         return RLIMIT_CPU;
1028     case TARGET_RLIMIT_DATA:
1029         return RLIMIT_DATA;
1030     case TARGET_RLIMIT_FSIZE:
1031         return RLIMIT_FSIZE;
1032     case TARGET_RLIMIT_LOCKS:
1033         return RLIMIT_LOCKS;
1034     case TARGET_RLIMIT_MEMLOCK:
1035         return RLIMIT_MEMLOCK;
1036     case TARGET_RLIMIT_MSGQUEUE:
1037         return RLIMIT_MSGQUEUE;
1038     case TARGET_RLIMIT_NICE:
1039         return RLIMIT_NICE;
1040     case TARGET_RLIMIT_NOFILE:
1041         return RLIMIT_NOFILE;
1042     case TARGET_RLIMIT_NPROC:
1043         return RLIMIT_NPROC;
1044     case TARGET_RLIMIT_RSS:
1045         return RLIMIT_RSS;
1046     case TARGET_RLIMIT_RTPRIO:
1047         return RLIMIT_RTPRIO;
1048 #ifdef RLIMIT_RTTIME
1049     case TARGET_RLIMIT_RTTIME:
1050         return RLIMIT_RTTIME;
1051 #endif
1052     case TARGET_RLIMIT_SIGPENDING:
1053         return RLIMIT_SIGPENDING;
1054     case TARGET_RLIMIT_STACK:
1055         return RLIMIT_STACK;
1056     default:
1057         return code;
1058     }
1059 }
1060 
1061 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1062                                               abi_ulong target_tv_addr)
1063 {
1064     struct target_timeval *target_tv;
1065 
1066     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1067         return -TARGET_EFAULT;
1068     }
1069 
1070     __get_user(tv->tv_sec, &target_tv->tv_sec);
1071     __get_user(tv->tv_usec, &target_tv->tv_usec);
1072 
1073     unlock_user_struct(target_tv, target_tv_addr, 0);
1074 
1075     return 0;
1076 }
1077 
1078 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1079                                             const struct timeval *tv)
1080 {
1081     struct target_timeval *target_tv;
1082 
1083     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1084         return -TARGET_EFAULT;
1085     }
1086 
1087     __put_user(tv->tv_sec, &target_tv->tv_sec);
1088     __put_user(tv->tv_usec, &target_tv->tv_usec);
1089 
1090     unlock_user_struct(target_tv, target_tv_addr, 1);
1091 
1092     return 0;
1093 }
1094 
1095 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1096 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1097                                                 abi_ulong target_tv_addr)
1098 {
1099     struct target__kernel_sock_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __get_user(tv->tv_sec, &target_tv->tv_sec);
1106     __get_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 0);
1109 
1110     return 0;
1111 }
1112 #endif
1113 
1114 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1115                                               const struct timeval *tv)
1116 {
1117     struct target__kernel_sock_timeval *target_tv;
1118 
1119     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1120         return -TARGET_EFAULT;
1121     }
1122 
1123     __put_user(tv->tv_sec, &target_tv->tv_sec);
1124     __put_user(tv->tv_usec, &target_tv->tv_usec);
1125 
1126     unlock_user_struct(target_tv, target_tv_addr, 1);
1127 
1128     return 0;
1129 }
1130 
1131 #if defined(TARGET_NR_futex) || \
1132     defined(TARGET_NR_rt_sigtimedwait) || \
1133     defined(TARGET_NR_pselect6) || \
1134     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1135     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1136     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1137     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1138     defined(TARGET_NR_timer_settime) || \
1139     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1140 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1141                                                abi_ulong target_addr)
1142 {
1143     struct target_timespec *target_ts;
1144 
1145     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1146         return -TARGET_EFAULT;
1147     }
1148     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1149     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1150     unlock_user_struct(target_ts, target_addr, 0);
1151     return 0;
1152 }
1153 #endif
1154 
1155 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1156     defined(TARGET_NR_timer_settime64) || \
1157     defined(TARGET_NR_mq_timedsend_time64) || \
1158     defined(TARGET_NR_mq_timedreceive_time64) || \
1159     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1160     defined(TARGET_NR_clock_nanosleep_time64) || \
1161     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1162     defined(TARGET_NR_utimensat) || \
1163     defined(TARGET_NR_utimensat_time64) || \
1164     defined(TARGET_NR_semtimedop_time64) || \
1165     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1166 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1167                                                  abi_ulong target_addr)
1168 {
1169     struct target__kernel_timespec *target_ts;
1170 
1171     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1172         return -TARGET_EFAULT;
1173     }
1174     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1175     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1176     /* in 32bit mode, this drops the padding */
1177     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1178     unlock_user_struct(target_ts, target_addr, 0);
1179     return 0;
1180 }
1181 #endif
1182 
1183 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1184                                                struct timespec *host_ts)
1185 {
1186     struct target_timespec *target_ts;
1187 
1188     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1189         return -TARGET_EFAULT;
1190     }
1191     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1192     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1193     unlock_user_struct(target_ts, target_addr, 1);
1194     return 0;
1195 }
1196 
1197 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1198                                                  struct timespec *host_ts)
1199 {
1200     struct target__kernel_timespec *target_ts;
1201 
1202     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1203         return -TARGET_EFAULT;
1204     }
1205     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1206     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1207     unlock_user_struct(target_ts, target_addr, 1);
1208     return 0;
1209 }
1210 
1211 #if defined(TARGET_NR_gettimeofday)
1212 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1213                                              struct timezone *tz)
1214 {
1215     struct target_timezone *target_tz;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1218         return -TARGET_EFAULT;
1219     }
1220 
1221     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1222     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1223 
1224     unlock_user_struct(target_tz, target_tz_addr, 1);
1225 
1226     return 0;
1227 }
1228 #endif
1229 
1230 #if defined(TARGET_NR_settimeofday)
1231 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1232                                                abi_ulong target_tz_addr)
1233 {
1234     struct target_timezone *target_tz;
1235 
1236     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1237         return -TARGET_EFAULT;
1238     }
1239 
1240     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1241     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1242 
1243     unlock_user_struct(target_tz, target_tz_addr, 0);
1244 
1245     return 0;
1246 }
1247 #endif
1248 
1249 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1250 #include <mqueue.h>
1251 
1252 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1253                                               abi_ulong target_mq_attr_addr)
1254 {
1255     struct target_mq_attr *target_mq_attr;
1256 
1257     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1258                           target_mq_attr_addr, 1))
1259         return -TARGET_EFAULT;
1260 
1261     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1262     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1263     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1264     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1265 
1266     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1267 
1268     return 0;
1269 }
1270 
1271 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1272                                             const struct mq_attr *attr)
1273 {
1274     struct target_mq_attr *target_mq_attr;
1275 
1276     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1277                           target_mq_attr_addr, 0))
1278         return -TARGET_EFAULT;
1279 
1280     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1281     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1282     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1283     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1284 
1285     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1286 
1287     return 0;
1288 }
1289 #endif
1290 
1291 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1292 /* do_select() must return target values and target errnos. */
1293 static abi_long do_select(int n,
1294                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1295                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1296 {
1297     fd_set rfds, wfds, efds;
1298     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1299     struct timeval tv;
1300     struct timespec ts, *ts_ptr;
1301     abi_long ret;
1302 
1303     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1304     if (ret) {
1305         return ret;
1306     }
1307     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1308     if (ret) {
1309         return ret;
1310     }
1311     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1312     if (ret) {
1313         return ret;
1314     }
1315 
1316     if (target_tv_addr) {
1317         if (copy_from_user_timeval(&tv, target_tv_addr))
1318             return -TARGET_EFAULT;
1319         ts.tv_sec = tv.tv_sec;
1320         ts.tv_nsec = tv.tv_usec * 1000;
1321         ts_ptr = &ts;
1322     } else {
1323         ts_ptr = NULL;
1324     }
1325 
1326     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1327                                   ts_ptr, NULL));
1328 
1329     if (!is_error(ret)) {
1330         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1331             return -TARGET_EFAULT;
1332         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1333             return -TARGET_EFAULT;
1334         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1335             return -TARGET_EFAULT;
1336 
1337         if (target_tv_addr) {
1338             tv.tv_sec = ts.tv_sec;
1339             tv.tv_usec = ts.tv_nsec / 1000;
1340             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1341                 return -TARGET_EFAULT;
1342             }
1343         }
1344     }
1345 
1346     return ret;
1347 }
1348 
1349 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1350 static abi_long do_old_select(abi_ulong arg1)
1351 {
1352     struct target_sel_arg_struct *sel;
1353     abi_ulong inp, outp, exp, tvp;
1354     long nsel;
1355 
1356     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1357         return -TARGET_EFAULT;
1358     }
1359 
1360     nsel = tswapal(sel->n);
1361     inp = tswapal(sel->inp);
1362     outp = tswapal(sel->outp);
1363     exp = tswapal(sel->exp);
1364     tvp = tswapal(sel->tvp);
1365 
1366     unlock_user_struct(sel, arg1, 0);
1367 
1368     return do_select(nsel, inp, outp, exp, tvp);
1369 }
1370 #endif
1371 #endif
1372 
1373 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1374 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1375                             abi_long arg4, abi_long arg5, abi_long arg6,
1376                             bool time64)
1377 {
1378     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1379     fd_set rfds, wfds, efds;
1380     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1381     struct timespec ts, *ts_ptr;
1382     abi_long ret;
1383 
1384     /*
1385      * The 6th arg is actually two args smashed together,
1386      * so we cannot use the C library.
1387      */
1388     struct {
1389         sigset_t *set;
1390         size_t size;
1391     } sig, *sig_ptr;
1392 
1393     abi_ulong arg_sigset, arg_sigsize, *arg7;
1394 
1395     n = arg1;
1396     rfd_addr = arg2;
1397     wfd_addr = arg3;
1398     efd_addr = arg4;
1399     ts_addr = arg5;
1400 
1401     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1402     if (ret) {
1403         return ret;
1404     }
1405     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1406     if (ret) {
1407         return ret;
1408     }
1409     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1410     if (ret) {
1411         return ret;
1412     }
1413 
1414     /*
1415      * This takes a timespec, and not a timeval, so we cannot
1416      * use the do_select() helper ...
1417      */
1418     if (ts_addr) {
1419         if (time64) {
1420             if (target_to_host_timespec64(&ts, ts_addr)) {
1421                 return -TARGET_EFAULT;
1422             }
1423         } else {
1424             if (target_to_host_timespec(&ts, ts_addr)) {
1425                 return -TARGET_EFAULT;
1426             }
1427         }
1428         ts_ptr = &ts;
1429     } else {
1430         ts_ptr = NULL;
1431     }
1432 
1433     /* Extract the two packed args for the sigset */
1434     sig_ptr = NULL;
1435     if (arg6) {
1436         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1437         if (!arg7) {
1438             return -TARGET_EFAULT;
1439         }
1440         arg_sigset = tswapal(arg7[0]);
1441         arg_sigsize = tswapal(arg7[1]);
1442         unlock_user(arg7, arg6, 0);
1443 
1444         if (arg_sigset) {
1445             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1446             if (ret != 0) {
1447                 return ret;
1448             }
1449             sig_ptr = &sig;
1450             sig.size = SIGSET_T_SIZE;
1451         }
1452     }
1453 
1454     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1455                                   ts_ptr, sig_ptr));
1456 
1457     if (sig_ptr) {
1458         finish_sigsuspend_mask(ret);
1459     }
1460 
1461     if (!is_error(ret)) {
1462         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1463             return -TARGET_EFAULT;
1464         }
1465         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1466             return -TARGET_EFAULT;
1467         }
1468         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1469             return -TARGET_EFAULT;
1470         }
1471         if (time64) {
1472             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1473                 return -TARGET_EFAULT;
1474             }
1475         } else {
1476             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1477                 return -TARGET_EFAULT;
1478             }
1479         }
1480     }
1481     return ret;
1482 }
1483 #endif
1484 
1485 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1486     defined(TARGET_NR_ppoll_time64)
1487 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1488                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1489 {
1490     struct target_pollfd *target_pfd;
1491     unsigned int nfds = arg2;
1492     struct pollfd *pfd;
1493     unsigned int i;
1494     abi_long ret;
1495 
1496     pfd = NULL;
1497     target_pfd = NULL;
1498     if (nfds) {
1499         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1500             return -TARGET_EINVAL;
1501         }
1502         target_pfd = lock_user(VERIFY_WRITE, arg1,
1503                                sizeof(struct target_pollfd) * nfds, 1);
1504         if (!target_pfd) {
1505             return -TARGET_EFAULT;
1506         }
1507 
1508         pfd = alloca(sizeof(struct pollfd) * nfds);
1509         for (i = 0; i < nfds; i++) {
1510             pfd[i].fd = tswap32(target_pfd[i].fd);
1511             pfd[i].events = tswap16(target_pfd[i].events);
1512         }
1513     }
1514     if (ppoll) {
1515         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1516         sigset_t *set = NULL;
1517 
1518         if (arg3) {
1519             if (time64) {
1520                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1521                     unlock_user(target_pfd, arg1, 0);
1522                     return -TARGET_EFAULT;
1523                 }
1524             } else {
1525                 if (target_to_host_timespec(timeout_ts, arg3)) {
1526                     unlock_user(target_pfd, arg1, 0);
1527                     return -TARGET_EFAULT;
1528                 }
1529             }
1530         } else {
1531             timeout_ts = NULL;
1532         }
1533 
1534         if (arg4) {
1535             ret = process_sigsuspend_mask(&set, arg4, arg5);
1536             if (ret != 0) {
1537                 unlock_user(target_pfd, arg1, 0);
1538                 return ret;
1539             }
1540         }
1541 
1542         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1543                                    set, SIGSET_T_SIZE));
1544 
1545         if (set) {
1546             finish_sigsuspend_mask(ret);
1547         }
1548         if (!is_error(ret) && arg3) {
1549             if (time64) {
1550                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (host_to_target_timespec(arg3, timeout_ts)) {
1555                     return -TARGET_EFAULT;
1556                 }
1557             }
1558         }
1559     } else {
1560           struct timespec ts, *pts;
1561 
1562           if (arg3 >= 0) {
1563               /* Convert ms to secs, ns */
1564               ts.tv_sec = arg3 / 1000;
1565               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1566               pts = &ts;
1567           } else {
1568               /* -ve poll() timeout means "infinite" */
1569               pts = NULL;
1570           }
1571           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1572     }
1573 
1574     if (!is_error(ret)) {
1575         for (i = 0; i < nfds; i++) {
1576             target_pfd[i].revents = tswap16(pfd[i].revents);
1577         }
1578     }
1579     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1580     return ret;
1581 }
1582 #endif
1583 
do_pipe(CPUArchState * cpu_env,abi_ulong pipedes,int flags,int is_pipe2)1584 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1585                         int flags, int is_pipe2)
1586 {
1587     int host_pipe[2];
1588     abi_long ret;
1589     ret = pipe2(host_pipe, flags);
1590 
1591     if (is_error(ret))
1592         return get_errno(ret);
1593 
1594     /* Several targets have special calling conventions for the original
1595        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1596     if (!is_pipe2) {
1597 #if defined(TARGET_ALPHA)
1598         cpu_env->ir[IR_A4] = host_pipe[1];
1599         return host_pipe[0];
1600 #elif defined(TARGET_MIPS)
1601         cpu_env->active_tc.gpr[3] = host_pipe[1];
1602         return host_pipe[0];
1603 #elif defined(TARGET_SH4)
1604         cpu_env->gregs[1] = host_pipe[1];
1605         return host_pipe[0];
1606 #elif defined(TARGET_SPARC)
1607         cpu_env->regwptr[1] = host_pipe[1];
1608         return host_pipe[0];
1609 #endif
1610     }
1611 
1612     if (put_user_s32(host_pipe[0], pipedes)
1613         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1614         return -TARGET_EFAULT;
1615     return get_errno(ret);
1616 }
1617 
target_to_host_sockaddr(int fd,struct sockaddr * addr,abi_ulong target_addr,socklen_t len)1618 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1619                                                abi_ulong target_addr,
1620                                                socklen_t len)
1621 {
1622     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1623     sa_family_t sa_family;
1624     struct target_sockaddr *target_saddr;
1625 
1626     if (fd_trans_target_to_host_addr(fd)) {
1627         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1628     }
1629 
1630     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1631     if (!target_saddr)
1632         return -TARGET_EFAULT;
1633 
1634     sa_family = tswap16(target_saddr->sa_family);
1635 
1636     /* Oops. The caller might send a incomplete sun_path; sun_path
1637      * must be terminated by \0 (see the manual page), but
1638      * unfortunately it is quite common to specify sockaddr_un
1639      * length as "strlen(x->sun_path)" while it should be
1640      * "strlen(...) + 1". We'll fix that here if needed.
1641      * Linux kernel has a similar feature.
1642      */
1643 
1644     if (sa_family == AF_UNIX) {
1645         if (len < unix_maxlen && len > 0) {
1646             char *cp = (char*)target_saddr;
1647 
1648             if ( cp[len-1] && !cp[len] )
1649                 len++;
1650         }
1651         if (len > unix_maxlen)
1652             len = unix_maxlen;
1653     }
1654 
1655     memcpy(addr, target_saddr, len);
1656     addr->sa_family = sa_family;
1657     if (sa_family == AF_NETLINK) {
1658         struct sockaddr_nl *nladdr;
1659 
1660         nladdr = (struct sockaddr_nl *)addr;
1661         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1662         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1663     } else if (sa_family == AF_PACKET) {
1664 	struct target_sockaddr_ll *lladdr;
1665 
1666 	lladdr = (struct target_sockaddr_ll *)addr;
1667 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1668 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1669     } else if (sa_family == AF_INET6) {
1670         struct sockaddr_in6 *in6addr;
1671 
1672         in6addr = (struct sockaddr_in6 *)addr;
1673         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1674     }
1675     unlock_user(target_saddr, target_addr, 0);
1676 
1677     return 0;
1678 }
1679 
host_to_target_sockaddr(abi_ulong target_addr,struct sockaddr * addr,socklen_t len)1680 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1681                                                struct sockaddr *addr,
1682                                                socklen_t len)
1683 {
1684     struct target_sockaddr *target_saddr;
1685 
1686     if (len == 0) {
1687         return 0;
1688     }
1689     assert(addr);
1690 
1691     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1692     if (!target_saddr)
1693         return -TARGET_EFAULT;
1694     memcpy(target_saddr, addr, len);
1695     if (len >= offsetof(struct target_sockaddr, sa_family) +
1696         sizeof(target_saddr->sa_family)) {
1697         target_saddr->sa_family = tswap16(addr->sa_family);
1698     }
1699     if (addr->sa_family == AF_NETLINK &&
1700         len >= sizeof(struct target_sockaddr_nl)) {
1701         struct target_sockaddr_nl *target_nl =
1702                (struct target_sockaddr_nl *)target_saddr;
1703         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1704         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1705     } else if (addr->sa_family == AF_PACKET) {
1706         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1707         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1708         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1709     } else if (addr->sa_family == AF_INET6 &&
1710                len >= sizeof(struct target_sockaddr_in6)) {
1711         struct target_sockaddr_in6 *target_in6 =
1712                (struct target_sockaddr_in6 *)target_saddr;
1713         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1714     }
1715     unlock_user(target_saddr, target_addr, len);
1716 
1717     return 0;
1718 }
1719 
target_to_host_cmsg(struct msghdr * msgh,struct target_msghdr * target_msgh)1720 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1721                                            struct target_msghdr *target_msgh)
1722 {
1723     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1724     abi_long msg_controllen;
1725     abi_ulong target_cmsg_addr;
1726     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1727     socklen_t space = 0;
1728 
1729     msg_controllen = tswapal(target_msgh->msg_controllen);
1730     if (msg_controllen < sizeof (struct target_cmsghdr))
1731         goto the_end;
1732     target_cmsg_addr = tswapal(target_msgh->msg_control);
1733     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1734     target_cmsg_start = target_cmsg;
1735     if (!target_cmsg)
1736         return -TARGET_EFAULT;
1737 
1738     while (cmsg && target_cmsg) {
1739         void *data = CMSG_DATA(cmsg);
1740         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1741 
1742         int len = tswapal(target_cmsg->cmsg_len)
1743             - sizeof(struct target_cmsghdr);
1744 
1745         space += CMSG_SPACE(len);
1746         if (space > msgh->msg_controllen) {
1747             space -= CMSG_SPACE(len);
1748             /* This is a QEMU bug, since we allocated the payload
1749              * area ourselves (unlike overflow in host-to-target
1750              * conversion, which is just the guest giving us a buffer
1751              * that's too small). It can't happen for the payload types
1752              * we currently support; if it becomes an issue in future
1753              * we would need to improve our allocation strategy to
1754              * something more intelligent than "twice the size of the
1755              * target buffer we're reading from".
1756              */
1757             qemu_log_mask(LOG_UNIMP,
1758                           ("Unsupported ancillary data %d/%d: "
1759                            "unhandled msg size\n"),
1760                           tswap32(target_cmsg->cmsg_level),
1761                           tswap32(target_cmsg->cmsg_type));
1762             break;
1763         }
1764 
1765         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1766             cmsg->cmsg_level = SOL_SOCKET;
1767         } else {
1768             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1769         }
1770         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1771         cmsg->cmsg_len = CMSG_LEN(len);
1772 
1773         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1774             int *fd = (int *)data;
1775             int *target_fd = (int *)target_data;
1776             int i, numfds = len / sizeof(int);
1777 
1778             for (i = 0; i < numfds; i++) {
1779                 __get_user(fd[i], target_fd + i);
1780             }
1781         } else if (cmsg->cmsg_level == SOL_SOCKET
1782                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1783             struct ucred *cred = (struct ucred *)data;
1784             struct target_ucred *target_cred =
1785                 (struct target_ucred *)target_data;
1786 
1787             __get_user(cred->pid, &target_cred->pid);
1788             __get_user(cred->uid, &target_cred->uid);
1789             __get_user(cred->gid, &target_cred->gid);
1790         } else if (cmsg->cmsg_level == SOL_ALG) {
1791             uint32_t *dst = (uint32_t *)data;
1792 
1793             memcpy(dst, target_data, len);
1794             /* fix endianness of first 32-bit word */
1795             if (len >= sizeof(uint32_t)) {
1796                 *dst = tswap32(*dst);
1797             }
1798         } else {
1799             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1800                           cmsg->cmsg_level, cmsg->cmsg_type);
1801             memcpy(data, target_data, len);
1802         }
1803 
1804         cmsg = CMSG_NXTHDR(msgh, cmsg);
1805         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1806                                          target_cmsg_start);
1807     }
1808     unlock_user(target_cmsg, target_cmsg_addr, 0);
1809  the_end:
1810     msgh->msg_controllen = space;
1811     return 0;
1812 }
1813 
host_to_target_cmsg(struct target_msghdr * target_msgh,struct msghdr * msgh)1814 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1815                                            struct msghdr *msgh)
1816 {
1817     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1818     abi_long msg_controllen;
1819     abi_ulong target_cmsg_addr;
1820     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1821     socklen_t space = 0;
1822 
1823     msg_controllen = tswapal(target_msgh->msg_controllen);
1824     if (msg_controllen < sizeof (struct target_cmsghdr))
1825         goto the_end;
1826     target_cmsg_addr = tswapal(target_msgh->msg_control);
1827     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1828     target_cmsg_start = target_cmsg;
1829     if (!target_cmsg)
1830         return -TARGET_EFAULT;
1831 
1832     while (cmsg && target_cmsg) {
1833         void *data = CMSG_DATA(cmsg);
1834         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1835 
1836         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1837         int tgt_len, tgt_space;
1838 
1839         /* We never copy a half-header but may copy half-data;
1840          * this is Linux's behaviour in put_cmsg(). Note that
1841          * truncation here is a guest problem (which we report
1842          * to the guest via the CTRUNC bit), unlike truncation
1843          * in target_to_host_cmsg, which is a QEMU bug.
1844          */
1845         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1846             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1847             break;
1848         }
1849 
1850         if (cmsg->cmsg_level == SOL_SOCKET) {
1851             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1852         } else {
1853             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1854         }
1855         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1856 
1857         /* Payload types which need a different size of payload on
1858          * the target must adjust tgt_len here.
1859          */
1860         tgt_len = len;
1861         switch (cmsg->cmsg_level) {
1862         case SOL_SOCKET:
1863             switch (cmsg->cmsg_type) {
1864             case SO_TIMESTAMP:
1865                 tgt_len = sizeof(struct target_timeval);
1866                 break;
1867             default:
1868                 break;
1869             }
1870             break;
1871         default:
1872             break;
1873         }
1874 
1875         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1876             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1877             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1878         }
1879 
1880         /* We must now copy-and-convert len bytes of payload
1881          * into tgt_len bytes of destination space. Bear in mind
1882          * that in both source and destination we may be dealing
1883          * with a truncated value!
1884          */
1885         switch (cmsg->cmsg_level) {
1886         case SOL_SOCKET:
1887             switch (cmsg->cmsg_type) {
1888             case SCM_RIGHTS:
1889             {
1890                 int *fd = (int *)data;
1891                 int *target_fd = (int *)target_data;
1892                 int i, numfds = tgt_len / sizeof(int);
1893 
1894                 for (i = 0; i < numfds; i++) {
1895                     __put_user(fd[i], target_fd + i);
1896                 }
1897                 break;
1898             }
1899             case SO_TIMESTAMP:
1900             {
1901                 struct timeval *tv = (struct timeval *)data;
1902                 struct target_timeval *target_tv =
1903                     (struct target_timeval *)target_data;
1904 
1905                 if (len != sizeof(struct timeval) ||
1906                     tgt_len != sizeof(struct target_timeval)) {
1907                     goto unimplemented;
1908                 }
1909 
1910                 /* copy struct timeval to target */
1911                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1912                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1913                 break;
1914             }
1915             case SCM_CREDENTIALS:
1916             {
1917                 struct ucred *cred = (struct ucred *)data;
1918                 struct target_ucred *target_cred =
1919                     (struct target_ucred *)target_data;
1920 
1921                 __put_user(cred->pid, &target_cred->pid);
1922                 __put_user(cred->uid, &target_cred->uid);
1923                 __put_user(cred->gid, &target_cred->gid);
1924                 break;
1925             }
1926             default:
1927                 goto unimplemented;
1928             }
1929             break;
1930 
1931         case SOL_IP:
1932             switch (cmsg->cmsg_type) {
1933             case IP_TTL:
1934             {
1935                 uint32_t *v = (uint32_t *)data;
1936                 uint32_t *t_int = (uint32_t *)target_data;
1937 
1938                 if (len != sizeof(uint32_t) ||
1939                     tgt_len != sizeof(uint32_t)) {
1940                     goto unimplemented;
1941                 }
1942                 __put_user(*v, t_int);
1943                 break;
1944             }
1945             case IP_RECVERR:
1946             {
1947                 struct errhdr_t {
1948                    struct sock_extended_err ee;
1949                    struct sockaddr_in offender;
1950                 };
1951                 struct errhdr_t *errh = (struct errhdr_t *)data;
1952                 struct errhdr_t *target_errh =
1953                     (struct errhdr_t *)target_data;
1954 
1955                 if (len != sizeof(struct errhdr_t) ||
1956                     tgt_len != sizeof(struct errhdr_t)) {
1957                     goto unimplemented;
1958                 }
1959                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1960                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1961                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1962                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1963                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1964                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1965                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1966                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1967                     (void *) &errh->offender, sizeof(errh->offender));
1968                 break;
1969             }
1970             default:
1971                 goto unimplemented;
1972             }
1973             break;
1974 
1975         case SOL_IPV6:
1976             switch (cmsg->cmsg_type) {
1977             case IPV6_HOPLIMIT:
1978             {
1979                 uint32_t *v = (uint32_t *)data;
1980                 uint32_t *t_int = (uint32_t *)target_data;
1981 
1982                 if (len != sizeof(uint32_t) ||
1983                     tgt_len != sizeof(uint32_t)) {
1984                     goto unimplemented;
1985                 }
1986                 __put_user(*v, t_int);
1987                 break;
1988             }
1989             case IPV6_RECVERR:
1990             {
1991                 struct errhdr6_t {
1992                    struct sock_extended_err ee;
1993                    struct sockaddr_in6 offender;
1994                 };
1995                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1996                 struct errhdr6_t *target_errh =
1997                     (struct errhdr6_t *)target_data;
1998 
1999                 if (len != sizeof(struct errhdr6_t) ||
2000                     tgt_len != sizeof(struct errhdr6_t)) {
2001                     goto unimplemented;
2002                 }
2003                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2004                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2005                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2006                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2007                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2008                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2009                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2010                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2011                     (void *) &errh->offender, sizeof(errh->offender));
2012                 break;
2013             }
2014             default:
2015                 goto unimplemented;
2016             }
2017             break;
2018 
2019         default:
2020         unimplemented:
2021             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2022                           cmsg->cmsg_level, cmsg->cmsg_type);
2023             memcpy(target_data, data, MIN(len, tgt_len));
2024             if (tgt_len > len) {
2025                 memset(target_data + len, 0, tgt_len - len);
2026             }
2027         }
2028 
2029         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2030         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2031         if (msg_controllen < tgt_space) {
2032             tgt_space = msg_controllen;
2033         }
2034         msg_controllen -= tgt_space;
2035         space += tgt_space;
2036         cmsg = CMSG_NXTHDR(msgh, cmsg);
2037         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2038                                          target_cmsg_start);
2039     }
2040     unlock_user(target_cmsg, target_cmsg_addr, space);
2041  the_end:
2042     target_msgh->msg_controllen = tswapal(space);
2043     return 0;
2044 }
2045 
2046 /* do_setsockopt() Must return target values and target errnos. */
do_setsockopt(int sockfd,int level,int optname,abi_ulong optval_addr,socklen_t optlen)2047 static abi_long do_setsockopt(int sockfd, int level, int optname,
2048                               abi_ulong optval_addr, socklen_t optlen)
2049 {
2050     abi_long ret;
2051     int val;
2052 
2053     switch(level) {
2054     case SOL_TCP:
2055     case SOL_UDP:
2056         /* TCP and UDP options all take an 'int' value.  */
2057         if (optlen < sizeof(uint32_t))
2058             return -TARGET_EINVAL;
2059 
2060         if (get_user_u32(val, optval_addr))
2061             return -TARGET_EFAULT;
2062         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2063         break;
2064     case SOL_IP:
2065         switch(optname) {
2066         case IP_TOS:
2067         case IP_TTL:
2068         case IP_HDRINCL:
2069         case IP_ROUTER_ALERT:
2070         case IP_RECVOPTS:
2071         case IP_RETOPTS:
2072         case IP_PKTINFO:
2073         case IP_MTU_DISCOVER:
2074         case IP_RECVERR:
2075         case IP_RECVTTL:
2076         case IP_RECVTOS:
2077 #ifdef IP_FREEBIND
2078         case IP_FREEBIND:
2079 #endif
2080         case IP_MULTICAST_TTL:
2081         case IP_MULTICAST_LOOP:
2082             val = 0;
2083             if (optlen >= sizeof(uint32_t)) {
2084                 if (get_user_u32(val, optval_addr))
2085                     return -TARGET_EFAULT;
2086             } else if (optlen >= 1) {
2087                 if (get_user_u8(val, optval_addr))
2088                     return -TARGET_EFAULT;
2089             }
2090             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2091             break;
2092         case IP_ADD_MEMBERSHIP:
2093         case IP_DROP_MEMBERSHIP:
2094         {
2095             struct ip_mreqn ip_mreq;
2096             struct target_ip_mreqn *target_smreqn;
2097 
2098             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2099                               sizeof(struct target_ip_mreq));
2100 
2101             if (optlen < sizeof (struct target_ip_mreq) ||
2102                 optlen > sizeof (struct target_ip_mreqn)) {
2103                 return -TARGET_EINVAL;
2104             }
2105 
2106             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2107             if (!target_smreqn) {
2108                 return -TARGET_EFAULT;
2109             }
2110             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2111             ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2112             if (optlen == sizeof(struct target_ip_mreqn)) {
2113                 ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2114                 optlen = sizeof(struct ip_mreqn);
2115             }
2116             unlock_user(target_smreqn, optval_addr, 0);
2117 
2118             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2119             break;
2120         }
2121         case IP_BLOCK_SOURCE:
2122         case IP_UNBLOCK_SOURCE:
2123         case IP_ADD_SOURCE_MEMBERSHIP:
2124         case IP_DROP_SOURCE_MEMBERSHIP:
2125         {
2126             struct ip_mreq_source *ip_mreq_source;
2127 
2128             if (optlen != sizeof (struct target_ip_mreq_source))
2129                 return -TARGET_EINVAL;
2130 
2131             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2132             if (!ip_mreq_source) {
2133                 return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2136             unlock_user (ip_mreq_source, optval_addr, 0);
2137             break;
2138         }
2139         default:
2140             goto unimplemented;
2141         }
2142         break;
2143     case SOL_IPV6:
2144         switch (optname) {
2145         case IPV6_MTU_DISCOVER:
2146         case IPV6_MTU:
2147         case IPV6_V6ONLY:
2148         case IPV6_RECVPKTINFO:
2149         case IPV6_UNICAST_HOPS:
2150         case IPV6_MULTICAST_HOPS:
2151         case IPV6_MULTICAST_LOOP:
2152         case IPV6_RECVERR:
2153         case IPV6_RECVHOPLIMIT:
2154         case IPV6_2292HOPLIMIT:
2155         case IPV6_CHECKSUM:
2156         case IPV6_ADDRFORM:
2157         case IPV6_2292PKTINFO:
2158         case IPV6_RECVTCLASS:
2159         case IPV6_RECVRTHDR:
2160         case IPV6_2292RTHDR:
2161         case IPV6_RECVHOPOPTS:
2162         case IPV6_2292HOPOPTS:
2163         case IPV6_RECVDSTOPTS:
2164         case IPV6_2292DSTOPTS:
2165         case IPV6_TCLASS:
2166         case IPV6_ADDR_PREFERENCES:
2167 #ifdef IPV6_RECVPATHMTU
2168         case IPV6_RECVPATHMTU:
2169 #endif
2170 #ifdef IPV6_TRANSPARENT
2171         case IPV6_TRANSPARENT:
2172 #endif
2173 #ifdef IPV6_FREEBIND
2174         case IPV6_FREEBIND:
2175 #endif
2176 #ifdef IPV6_RECVORIGDSTADDR
2177         case IPV6_RECVORIGDSTADDR:
2178 #endif
2179             val = 0;
2180             if (optlen < sizeof(uint32_t)) {
2181                 return -TARGET_EINVAL;
2182             }
2183             if (get_user_u32(val, optval_addr)) {
2184                 return -TARGET_EFAULT;
2185             }
2186             ret = get_errno(setsockopt(sockfd, level, optname,
2187                                        &val, sizeof(val)));
2188             break;
2189         case IPV6_PKTINFO:
2190         {
2191             struct in6_pktinfo pki;
2192 
2193             if (optlen < sizeof(pki)) {
2194                 return -TARGET_EINVAL;
2195             }
2196 
2197             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2198                 return -TARGET_EFAULT;
2199             }
2200 
2201             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2202 
2203             ret = get_errno(setsockopt(sockfd, level, optname,
2204                                        &pki, sizeof(pki)));
2205             break;
2206         }
2207         case IPV6_ADD_MEMBERSHIP:
2208         case IPV6_DROP_MEMBERSHIP:
2209         {
2210             struct ipv6_mreq ipv6mreq;
2211 
2212             if (optlen < sizeof(ipv6mreq)) {
2213                 return -TARGET_EINVAL;
2214             }
2215 
2216             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2217                 return -TARGET_EFAULT;
2218             }
2219 
2220             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2221 
2222             ret = get_errno(setsockopt(sockfd, level, optname,
2223                                        &ipv6mreq, sizeof(ipv6mreq)));
2224             break;
2225         }
2226         default:
2227             goto unimplemented;
2228         }
2229         break;
2230     case SOL_ICMPV6:
2231         switch (optname) {
2232         case ICMPV6_FILTER:
2233         {
2234             struct icmp6_filter icmp6f;
2235 
2236             if (optlen > sizeof(icmp6f)) {
2237                 optlen = sizeof(icmp6f);
2238             }
2239 
2240             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2241                 return -TARGET_EFAULT;
2242             }
2243 
2244             for (val = 0; val < 8; val++) {
2245                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2246             }
2247 
2248             ret = get_errno(setsockopt(sockfd, level, optname,
2249                                        &icmp6f, optlen));
2250             break;
2251         }
2252         default:
2253             goto unimplemented;
2254         }
2255         break;
2256     case SOL_RAW:
2257         switch (optname) {
2258         case ICMP_FILTER:
2259         case IPV6_CHECKSUM:
2260             /* those take an u32 value */
2261             if (optlen < sizeof(uint32_t)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (get_user_u32(val, optval_addr)) {
2266                 return -TARGET_EFAULT;
2267             }
2268             ret = get_errno(setsockopt(sockfd, level, optname,
2269                                        &val, sizeof(val)));
2270             break;
2271 
2272         default:
2273             goto unimplemented;
2274         }
2275         break;
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2277     case SOL_ALG:
2278         switch (optname) {
2279         case ALG_SET_KEY:
2280         {
2281             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2282             if (!alg_key) {
2283                 return -TARGET_EFAULT;
2284             }
2285             ret = get_errno(setsockopt(sockfd, level, optname,
2286                                        alg_key, optlen));
2287             unlock_user(alg_key, optval_addr, optlen);
2288             break;
2289         }
2290         case ALG_SET_AEAD_AUTHSIZE:
2291         {
2292             ret = get_errno(setsockopt(sockfd, level, optname,
2293                                        NULL, optlen));
2294             break;
2295         }
2296         default:
2297             goto unimplemented;
2298         }
2299         break;
2300 #endif
2301     case TARGET_SOL_SOCKET:
2302         switch (optname) {
2303         case TARGET_SO_RCVTIMEO:
2304         case TARGET_SO_SNDTIMEO:
2305         {
2306                 struct timeval tv;
2307 
2308                 if (optlen != sizeof(struct target_timeval)) {
2309                     return -TARGET_EINVAL;
2310                 }
2311 
2312                 if (copy_from_user_timeval(&tv, optval_addr)) {
2313                     return -TARGET_EFAULT;
2314                 }
2315 
2316                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2317                                 optname == TARGET_SO_RCVTIMEO ?
2318                                     SO_RCVTIMEO : SO_SNDTIMEO,
2319                                 &tv, sizeof(tv)));
2320                 return ret;
2321         }
2322         case TARGET_SO_ATTACH_FILTER:
2323         {
2324                 struct target_sock_fprog *tfprog;
2325                 struct target_sock_filter *tfilter;
2326                 struct sock_fprog fprog;
2327                 struct sock_filter *filter;
2328                 int i;
2329 
2330                 if (optlen != sizeof(*tfprog)) {
2331                     return -TARGET_EINVAL;
2332                 }
2333                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2334                     return -TARGET_EFAULT;
2335                 }
2336                 if (!lock_user_struct(VERIFY_READ, tfilter,
2337                                       tswapal(tfprog->filter), 0)) {
2338                     unlock_user_struct(tfprog, optval_addr, 1);
2339                     return -TARGET_EFAULT;
2340                 }
2341 
2342                 fprog.len = tswap16(tfprog->len);
2343                 filter = g_try_new(struct sock_filter, fprog.len);
2344                 if (filter == NULL) {
2345                     unlock_user_struct(tfilter, tfprog->filter, 1);
2346                     unlock_user_struct(tfprog, optval_addr, 1);
2347                     return -TARGET_ENOMEM;
2348                 }
2349                 for (i = 0; i < fprog.len; i++) {
2350                     filter[i].code = tswap16(tfilter[i].code);
2351                     filter[i].jt = tfilter[i].jt;
2352                     filter[i].jf = tfilter[i].jf;
2353                     filter[i].k = tswap32(tfilter[i].k);
2354                 }
2355                 fprog.filter = filter;
2356 
2357                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2358                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2359                 g_free(filter);
2360 
2361                 unlock_user_struct(tfilter, tfprog->filter, 1);
2362                 unlock_user_struct(tfprog, optval_addr, 1);
2363                 return ret;
2364         }
2365 	case TARGET_SO_BINDTODEVICE:
2366 	{
2367 		char *dev_ifname, *addr_ifname;
2368 
2369 		if (optlen > IFNAMSIZ - 1) {
2370 		    optlen = IFNAMSIZ - 1;
2371 		}
2372 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2373 		if (!dev_ifname) {
2374 		    return -TARGET_EFAULT;
2375 		}
2376 		optname = SO_BINDTODEVICE;
2377 		addr_ifname = alloca(IFNAMSIZ);
2378 		memcpy(addr_ifname, dev_ifname, optlen);
2379 		addr_ifname[optlen] = 0;
2380 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2381                                            addr_ifname, optlen));
2382 		unlock_user (dev_ifname, optval_addr, 0);
2383 		return ret;
2384 	}
2385         case TARGET_SO_LINGER:
2386         {
2387                 struct linger lg;
2388                 struct target_linger *tlg;
2389 
2390                 if (optlen != sizeof(struct target_linger)) {
2391                     return -TARGET_EINVAL;
2392                 }
2393                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2394                     return -TARGET_EFAULT;
2395                 }
2396                 __get_user(lg.l_onoff, &tlg->l_onoff);
2397                 __get_user(lg.l_linger, &tlg->l_linger);
2398                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2399                                 &lg, sizeof(lg)));
2400                 unlock_user_struct(tlg, optval_addr, 0);
2401                 return ret;
2402         }
2403             /* Options with 'int' argument.  */
2404         case TARGET_SO_DEBUG:
2405 		optname = SO_DEBUG;
2406 		break;
2407         case TARGET_SO_REUSEADDR:
2408 		optname = SO_REUSEADDR;
2409 		break;
2410 #ifdef SO_REUSEPORT
2411         case TARGET_SO_REUSEPORT:
2412                 optname = SO_REUSEPORT;
2413                 break;
2414 #endif
2415         case TARGET_SO_TYPE:
2416 		optname = SO_TYPE;
2417 		break;
2418         case TARGET_SO_ERROR:
2419 		optname = SO_ERROR;
2420 		break;
2421         case TARGET_SO_DONTROUTE:
2422 		optname = SO_DONTROUTE;
2423 		break;
2424         case TARGET_SO_BROADCAST:
2425 		optname = SO_BROADCAST;
2426 		break;
2427         case TARGET_SO_SNDBUF:
2428 		optname = SO_SNDBUF;
2429 		break;
2430         case TARGET_SO_SNDBUFFORCE:
2431                 optname = SO_SNDBUFFORCE;
2432                 break;
2433         case TARGET_SO_RCVBUF:
2434 		optname = SO_RCVBUF;
2435 		break;
2436         case TARGET_SO_RCVBUFFORCE:
2437                 optname = SO_RCVBUFFORCE;
2438                 break;
2439         case TARGET_SO_KEEPALIVE:
2440 		optname = SO_KEEPALIVE;
2441 		break;
2442         case TARGET_SO_OOBINLINE:
2443 		optname = SO_OOBINLINE;
2444 		break;
2445         case TARGET_SO_NO_CHECK:
2446 		optname = SO_NO_CHECK;
2447 		break;
2448         case TARGET_SO_PRIORITY:
2449 		optname = SO_PRIORITY;
2450 		break;
2451 #ifdef SO_BSDCOMPAT
2452         case TARGET_SO_BSDCOMPAT:
2453 		optname = SO_BSDCOMPAT;
2454 		break;
2455 #endif
2456         case TARGET_SO_PASSCRED:
2457 		optname = SO_PASSCRED;
2458 		break;
2459         case TARGET_SO_PASSSEC:
2460                 optname = SO_PASSSEC;
2461                 break;
2462         case TARGET_SO_TIMESTAMP:
2463 		optname = SO_TIMESTAMP;
2464 		break;
2465         case TARGET_SO_RCVLOWAT:
2466 		optname = SO_RCVLOWAT;
2467 		break;
2468         default:
2469             goto unimplemented;
2470         }
2471 	if (optlen < sizeof(uint32_t))
2472             return -TARGET_EINVAL;
2473 
2474 	if (get_user_u32(val, optval_addr))
2475             return -TARGET_EFAULT;
2476 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2477         break;
2478 #ifdef SOL_NETLINK
2479     case SOL_NETLINK:
2480         switch (optname) {
2481         case NETLINK_PKTINFO:
2482         case NETLINK_ADD_MEMBERSHIP:
2483         case NETLINK_DROP_MEMBERSHIP:
2484         case NETLINK_BROADCAST_ERROR:
2485         case NETLINK_NO_ENOBUFS:
2486 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2487         case NETLINK_LISTEN_ALL_NSID:
2488         case NETLINK_CAP_ACK:
2489 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2491         case NETLINK_EXT_ACK:
2492 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2493 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2494         case NETLINK_GET_STRICT_CHK:
2495 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2496             break;
2497         default:
2498             goto unimplemented;
2499         }
2500         val = 0;
2501         if (optlen < sizeof(uint32_t)) {
2502             return -TARGET_EINVAL;
2503         }
2504         if (get_user_u32(val, optval_addr)) {
2505             return -TARGET_EFAULT;
2506         }
2507         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2508                                    sizeof(val)));
2509         break;
2510 #endif /* SOL_NETLINK */
2511     default:
2512     unimplemented:
2513         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2514                       level, optname);
2515         ret = -TARGET_ENOPROTOOPT;
2516     }
2517     return ret;
2518 }
2519 
2520 /* do_getsockopt() Must return target values and target errnos. */
do_getsockopt(int sockfd,int level,int optname,abi_ulong optval_addr,abi_ulong optlen)2521 static abi_long do_getsockopt(int sockfd, int level, int optname,
2522                               abi_ulong optval_addr, abi_ulong optlen)
2523 {
2524     abi_long ret;
2525     int len, val;
2526     socklen_t lv;
2527 
2528     switch(level) {
2529     case TARGET_SOL_SOCKET:
2530         level = SOL_SOCKET;
2531         switch (optname) {
2532         /* These don't just return a single integer */
2533         case TARGET_SO_PEERNAME:
2534             goto unimplemented;
2535         case TARGET_SO_RCVTIMEO: {
2536             struct timeval tv;
2537             socklen_t tvlen;
2538 
2539             optname = SO_RCVTIMEO;
2540 
2541 get_timeout:
2542             if (get_user_u32(len, optlen)) {
2543                 return -TARGET_EFAULT;
2544             }
2545             if (len < 0) {
2546                 return -TARGET_EINVAL;
2547             }
2548 
2549             tvlen = sizeof(tv);
2550             ret = get_errno(getsockopt(sockfd, level, optname,
2551                                        &tv, &tvlen));
2552             if (ret < 0) {
2553                 return ret;
2554             }
2555             if (len > sizeof(struct target_timeval)) {
2556                 len = sizeof(struct target_timeval);
2557             }
2558             if (copy_to_user_timeval(optval_addr, &tv)) {
2559                 return -TARGET_EFAULT;
2560             }
2561             if (put_user_u32(len, optlen)) {
2562                 return -TARGET_EFAULT;
2563             }
2564             break;
2565         }
2566         case TARGET_SO_SNDTIMEO:
2567             optname = SO_SNDTIMEO;
2568             goto get_timeout;
2569         case TARGET_SO_PEERCRED: {
2570             struct ucred cr;
2571             socklen_t crlen;
2572             struct target_ucred *tcr;
2573 
2574             if (get_user_u32(len, optlen)) {
2575                 return -TARGET_EFAULT;
2576             }
2577             if (len < 0) {
2578                 return -TARGET_EINVAL;
2579             }
2580 
2581             crlen = sizeof(cr);
2582             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2583                                        &cr, &crlen));
2584             if (ret < 0) {
2585                 return ret;
2586             }
2587             if (len > crlen) {
2588                 len = crlen;
2589             }
2590             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2591                 return -TARGET_EFAULT;
2592             }
2593             __put_user(cr.pid, &tcr->pid);
2594             __put_user(cr.uid, &tcr->uid);
2595             __put_user(cr.gid, &tcr->gid);
2596             unlock_user_struct(tcr, optval_addr, 1);
2597             if (put_user_u32(len, optlen)) {
2598                 return -TARGET_EFAULT;
2599             }
2600             break;
2601         }
2602         case TARGET_SO_PEERSEC: {
2603             char *name;
2604 
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2612             if (!name) {
2613                 return -TARGET_EFAULT;
2614             }
2615             lv = len;
2616             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2617                                        name, &lv));
2618             if (put_user_u32(lv, optlen)) {
2619                 ret = -TARGET_EFAULT;
2620             }
2621             unlock_user(name, optval_addr, lv);
2622             break;
2623         }
2624         case TARGET_SO_LINGER:
2625         {
2626             struct linger lg;
2627             socklen_t lglen;
2628             struct target_linger *tlg;
2629 
2630             if (get_user_u32(len, optlen)) {
2631                 return -TARGET_EFAULT;
2632             }
2633             if (len < 0) {
2634                 return -TARGET_EINVAL;
2635             }
2636 
2637             lglen = sizeof(lg);
2638             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2639                                        &lg, &lglen));
2640             if (ret < 0) {
2641                 return ret;
2642             }
2643             if (len > lglen) {
2644                 len = lglen;
2645             }
2646             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2647                 return -TARGET_EFAULT;
2648             }
2649             __put_user(lg.l_onoff, &tlg->l_onoff);
2650             __put_user(lg.l_linger, &tlg->l_linger);
2651             unlock_user_struct(tlg, optval_addr, 1);
2652             if (put_user_u32(len, optlen)) {
2653                 return -TARGET_EFAULT;
2654             }
2655             break;
2656         }
2657         /* Options with 'int' argument.  */
2658         case TARGET_SO_DEBUG:
2659             optname = SO_DEBUG;
2660             goto int_case;
2661         case TARGET_SO_REUSEADDR:
2662             optname = SO_REUSEADDR;
2663             goto int_case;
2664 #ifdef SO_REUSEPORT
2665         case TARGET_SO_REUSEPORT:
2666             optname = SO_REUSEPORT;
2667             goto int_case;
2668 #endif
2669         case TARGET_SO_TYPE:
2670             optname = SO_TYPE;
2671             goto int_case;
2672         case TARGET_SO_ERROR:
2673             optname = SO_ERROR;
2674             goto int_case;
2675         case TARGET_SO_DONTROUTE:
2676             optname = SO_DONTROUTE;
2677             goto int_case;
2678         case TARGET_SO_BROADCAST:
2679             optname = SO_BROADCAST;
2680             goto int_case;
2681         case TARGET_SO_SNDBUF:
2682             optname = SO_SNDBUF;
2683             goto int_case;
2684         case TARGET_SO_RCVBUF:
2685             optname = SO_RCVBUF;
2686             goto int_case;
2687         case TARGET_SO_KEEPALIVE:
2688             optname = SO_KEEPALIVE;
2689             goto int_case;
2690         case TARGET_SO_OOBINLINE:
2691             optname = SO_OOBINLINE;
2692             goto int_case;
2693         case TARGET_SO_NO_CHECK:
2694             optname = SO_NO_CHECK;
2695             goto int_case;
2696         case TARGET_SO_PRIORITY:
2697             optname = SO_PRIORITY;
2698             goto int_case;
2699 #ifdef SO_BSDCOMPAT
2700         case TARGET_SO_BSDCOMPAT:
2701             optname = SO_BSDCOMPAT;
2702             goto int_case;
2703 #endif
2704         case TARGET_SO_PASSCRED:
2705             optname = SO_PASSCRED;
2706             goto int_case;
2707         case TARGET_SO_TIMESTAMP:
2708             optname = SO_TIMESTAMP;
2709             goto int_case;
2710         case TARGET_SO_RCVLOWAT:
2711             optname = SO_RCVLOWAT;
2712             goto int_case;
2713         case TARGET_SO_ACCEPTCONN:
2714             optname = SO_ACCEPTCONN;
2715             goto int_case;
2716         case TARGET_SO_PROTOCOL:
2717             optname = SO_PROTOCOL;
2718             goto int_case;
2719         case TARGET_SO_DOMAIN:
2720             optname = SO_DOMAIN;
2721             goto int_case;
2722         default:
2723             goto int_case;
2724         }
2725         break;
2726     case SOL_TCP:
2727     case SOL_UDP:
2728         /* TCP and UDP options all take an 'int' value.  */
2729     int_case:
2730         if (get_user_u32(len, optlen))
2731             return -TARGET_EFAULT;
2732         if (len < 0)
2733             return -TARGET_EINVAL;
2734         lv = sizeof(lv);
2735         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2736         if (ret < 0)
2737             return ret;
2738         switch (optname) {
2739         case SO_TYPE:
2740             val = host_to_target_sock_type(val);
2741             break;
2742         case SO_ERROR:
2743             val = host_to_target_errno(val);
2744             break;
2745         }
2746         if (len > lv)
2747             len = lv;
2748         if (len == 4) {
2749             if (put_user_u32(val, optval_addr))
2750                 return -TARGET_EFAULT;
2751         } else {
2752             if (put_user_u8(val, optval_addr))
2753                 return -TARGET_EFAULT;
2754         }
2755         if (put_user_u32(len, optlen))
2756             return -TARGET_EFAULT;
2757         break;
2758     case SOL_IP:
2759         switch(optname) {
2760         case IP_TOS:
2761         case IP_TTL:
2762         case IP_HDRINCL:
2763         case IP_ROUTER_ALERT:
2764         case IP_RECVOPTS:
2765         case IP_RETOPTS:
2766         case IP_PKTINFO:
2767         case IP_MTU_DISCOVER:
2768         case IP_RECVERR:
2769         case IP_RECVTOS:
2770 #ifdef IP_FREEBIND
2771         case IP_FREEBIND:
2772 #endif
2773         case IP_MULTICAST_TTL:
2774         case IP_MULTICAST_LOOP:
2775             if (get_user_u32(len, optlen))
2776                 return -TARGET_EFAULT;
2777             if (len < 0)
2778                 return -TARGET_EINVAL;
2779             lv = sizeof(lv);
2780             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2781             if (ret < 0)
2782                 return ret;
2783             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2784                 len = 1;
2785                 if (put_user_u32(len, optlen)
2786                     || put_user_u8(val, optval_addr))
2787                     return -TARGET_EFAULT;
2788             } else {
2789                 if (len > sizeof(int))
2790                     len = sizeof(int);
2791                 if (put_user_u32(len, optlen)
2792                     || put_user_u32(val, optval_addr))
2793                     return -TARGET_EFAULT;
2794             }
2795             break;
2796         default:
2797             ret = -TARGET_ENOPROTOOPT;
2798             break;
2799         }
2800         break;
2801     case SOL_IPV6:
2802         switch (optname) {
2803         case IPV6_MTU_DISCOVER:
2804         case IPV6_MTU:
2805         case IPV6_V6ONLY:
2806         case IPV6_RECVPKTINFO:
2807         case IPV6_UNICAST_HOPS:
2808         case IPV6_MULTICAST_HOPS:
2809         case IPV6_MULTICAST_LOOP:
2810         case IPV6_RECVERR:
2811         case IPV6_RECVHOPLIMIT:
2812         case IPV6_2292HOPLIMIT:
2813         case IPV6_CHECKSUM:
2814         case IPV6_ADDRFORM:
2815         case IPV6_2292PKTINFO:
2816         case IPV6_RECVTCLASS:
2817         case IPV6_RECVRTHDR:
2818         case IPV6_2292RTHDR:
2819         case IPV6_RECVHOPOPTS:
2820         case IPV6_2292HOPOPTS:
2821         case IPV6_RECVDSTOPTS:
2822         case IPV6_2292DSTOPTS:
2823         case IPV6_TCLASS:
2824         case IPV6_ADDR_PREFERENCES:
2825 #ifdef IPV6_RECVPATHMTU
2826         case IPV6_RECVPATHMTU:
2827 #endif
2828 #ifdef IPV6_TRANSPARENT
2829         case IPV6_TRANSPARENT:
2830 #endif
2831 #ifdef IPV6_FREEBIND
2832         case IPV6_FREEBIND:
2833 #endif
2834 #ifdef IPV6_RECVORIGDSTADDR
2835         case IPV6_RECVORIGDSTADDR:
2836 #endif
2837             if (get_user_u32(len, optlen))
2838                 return -TARGET_EFAULT;
2839             if (len < 0)
2840                 return -TARGET_EINVAL;
2841             lv = sizeof(lv);
2842             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2843             if (ret < 0)
2844                 return ret;
2845             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2846                 len = 1;
2847                 if (put_user_u32(len, optlen)
2848                     || put_user_u8(val, optval_addr))
2849                     return -TARGET_EFAULT;
2850             } else {
2851                 if (len > sizeof(int))
2852                     len = sizeof(int);
2853                 if (put_user_u32(len, optlen)
2854                     || put_user_u32(val, optval_addr))
2855                     return -TARGET_EFAULT;
2856             }
2857             break;
2858         default:
2859             ret = -TARGET_ENOPROTOOPT;
2860             break;
2861         }
2862         break;
2863 #ifdef SOL_NETLINK
2864     case SOL_NETLINK:
2865         switch (optname) {
2866         case NETLINK_PKTINFO:
2867         case NETLINK_BROADCAST_ERROR:
2868         case NETLINK_NO_ENOBUFS:
2869 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2870         case NETLINK_LISTEN_ALL_NSID:
2871         case NETLINK_CAP_ACK:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2874         case NETLINK_EXT_ACK:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2877         case NETLINK_GET_STRICT_CHK:
2878 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2879             if (get_user_u32(len, optlen)) {
2880                 return -TARGET_EFAULT;
2881             }
2882             if (len != sizeof(val)) {
2883                 return -TARGET_EINVAL;
2884             }
2885             lv = len;
2886             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2887             if (ret < 0) {
2888                 return ret;
2889             }
2890             if (put_user_u32(lv, optlen)
2891                 || put_user_u32(val, optval_addr)) {
2892                 return -TARGET_EFAULT;
2893             }
2894             break;
2895 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2896         case NETLINK_LIST_MEMBERSHIPS:
2897         {
2898             uint32_t *results;
2899             int i;
2900             if (get_user_u32(len, optlen)) {
2901                 return -TARGET_EFAULT;
2902             }
2903             if (len < 0) {
2904                 return -TARGET_EINVAL;
2905             }
2906             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2907             if (!results && len > 0) {
2908                 return -TARGET_EFAULT;
2909             }
2910             lv = len;
2911             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2912             if (ret < 0) {
2913                 unlock_user(results, optval_addr, 0);
2914                 return ret;
2915             }
2916             /* swap host endianness to target endianness. */
2917             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2918                 results[i] = tswap32(results[i]);
2919             }
2920             if (put_user_u32(lv, optlen)) {
2921                 return -TARGET_EFAULT;
2922             }
2923             unlock_user(results, optval_addr, 0);
2924             break;
2925         }
2926 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2927         default:
2928             goto unimplemented;
2929         }
2930         break;
2931 #endif /* SOL_NETLINK */
2932     default:
2933     unimplemented:
2934         qemu_log_mask(LOG_UNIMP,
2935                       "getsockopt level=%d optname=%d not yet supported\n",
2936                       level, optname);
2937         ret = -TARGET_EOPNOTSUPP;
2938         break;
2939     }
2940     return ret;
2941 }
2942 
2943 /* Convert target low/high pair representing file offset into the host
2944  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2945  * as the kernel doesn't handle them either.
2946  */
target_to_host_low_high(abi_ulong tlow,abi_ulong thigh,unsigned long * hlow,unsigned long * hhigh)2947 static void target_to_host_low_high(abi_ulong tlow,
2948                                     abi_ulong thigh,
2949                                     unsigned long *hlow,
2950                                     unsigned long *hhigh)
2951 {
2952     uint64_t off = tlow |
2953         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2954         TARGET_LONG_BITS / 2;
2955 
2956     *hlow = off;
2957     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2958 }
2959 
lock_iovec(int type,abi_ulong target_addr,abi_ulong count,int copy)2960 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2961                                 abi_ulong count, int copy)
2962 {
2963     struct target_iovec *target_vec;
2964     struct iovec *vec;
2965     abi_ulong total_len, max_len;
2966     int i;
2967     int err = 0;
2968     bool bad_address = false;
2969 
2970     if (count == 0) {
2971         errno = 0;
2972         return NULL;
2973     }
2974     if (count > IOV_MAX) {
2975         errno = EINVAL;
2976         return NULL;
2977     }
2978 
2979     vec = g_try_new0(struct iovec, count);
2980     if (vec == NULL) {
2981         errno = ENOMEM;
2982         return NULL;
2983     }
2984 
2985     target_vec = lock_user(VERIFY_READ, target_addr,
2986                            count * sizeof(struct target_iovec), 1);
2987     if (target_vec == NULL) {
2988         err = EFAULT;
2989         goto fail2;
2990     }
2991 
2992     /* ??? If host page size > target page size, this will result in a
2993        value larger than what we can actually support.  */
2994     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2995     total_len = 0;
2996 
2997     for (i = 0; i < count; i++) {
2998         abi_ulong base = tswapal(target_vec[i].iov_base);
2999         abi_long len = tswapal(target_vec[i].iov_len);
3000 
3001         if (len < 0) {
3002             err = EINVAL;
3003             goto fail;
3004         } else if (len == 0) {
3005             /* Zero length pointer is ignored.  */
3006             vec[i].iov_base = 0;
3007         } else {
3008             vec[i].iov_base = lock_user(type, base, len, copy);
3009             /* If the first buffer pointer is bad, this is a fault.  But
3010              * subsequent bad buffers will result in a partial write; this
3011              * is realized by filling the vector with null pointers and
3012              * zero lengths. */
3013             if (!vec[i].iov_base) {
3014                 if (i == 0) {
3015                     err = EFAULT;
3016                     goto fail;
3017                 } else {
3018                     bad_address = true;
3019                 }
3020             }
3021             if (bad_address) {
3022                 len = 0;
3023             }
3024             if (len > max_len - total_len) {
3025                 len = max_len - total_len;
3026             }
3027         }
3028         vec[i].iov_len = len;
3029         total_len += len;
3030     }
3031 
3032     unlock_user(target_vec, target_addr, 0);
3033     return vec;
3034 
3035  fail:
3036     while (--i >= 0) {
3037         if (tswapal(target_vec[i].iov_len) > 0) {
3038             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3039         }
3040     }
3041     unlock_user(target_vec, target_addr, 0);
3042  fail2:
3043     g_free(vec);
3044     errno = err;
3045     return NULL;
3046 }
3047 
3048 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3049                          abi_ulong count, int copy)
3050 {
3051     struct target_iovec *target_vec;
3052     int i;
3053 
3054     target_vec = lock_user(VERIFY_READ, target_addr,
3055                            count * sizeof(struct target_iovec), 1);
3056     if (target_vec) {
3057         for (i = 0; i < count; i++) {
3058             abi_ulong base = tswapal(target_vec[i].iov_base);
3059             abi_long len = tswapal(target_vec[i].iov_len);
3060             if (len < 0) {
3061                 break;
3062             }
3063             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3064         }
3065         unlock_user(target_vec, target_addr, 0);
3066     }
3067 
3068     g_free(vec);
3069 }
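/*
 * Minimal usage sketch (hypothetical caller, not a function from this file):
 * lock_iovec() validates and pins the guest iovec array, the host call runs
 * on the resulting struct iovec, and unlock_iovec() copies written data back
 * to the guest (copy != 0) before freeing everything.
 */
static abi_long example_readv(int host_fd, abi_ulong guest_iov, int iovcnt)
{
    struct iovec *vec = lock_iovec(VERIFY_WRITE, guest_iov, iovcnt, 0);
    abi_long ret;

    if (vec == NULL) {
        /* lock_iovec() reports EFAULT/EINVAL/ENOMEM through errno */
        return -host_to_target_errno(errno);
    }
    ret = get_errno(readv(host_fd, vec, iovcnt));
    /* copy=1: flush what the host wrote back into guest memory */
    unlock_iovec(vec, guest_iov, iovcnt, 1);
    return ret;
}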
3070 
3071 static inline int target_to_host_sock_type(int *type)
3072 {
3073     int host_type = 0;
3074     int target_type = *type;
3075 
3076     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3077     case TARGET_SOCK_DGRAM:
3078         host_type = SOCK_DGRAM;
3079         break;
3080     case TARGET_SOCK_STREAM:
3081         host_type = SOCK_STREAM;
3082         break;
3083     default:
3084         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3085         break;
3086     }
3087     if (target_type & TARGET_SOCK_CLOEXEC) {
3088 #if defined(SOCK_CLOEXEC)
3089         host_type |= SOCK_CLOEXEC;
3090 #else
3091         return -TARGET_EINVAL;
3092 #endif
3093     }
3094     if (target_type & TARGET_SOCK_NONBLOCK) {
3095 #if defined(SOCK_NONBLOCK)
3096         host_type |= SOCK_NONBLOCK;
3097 #elif !defined(O_NONBLOCK)
3098         return -TARGET_EINVAL;
3099 #endif
3100     }
3101     *type = host_type;
3102     return 0;
3103 }
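/*
 * Illustrative sketch (hypothetical helper): target_to_host_sock_type()
 * maps the base type and the CLOEXEC/NONBLOCK bits separately; a flag the
 * host cannot express yields -TARGET_EINVAL, and NONBLOCK-via-fcntl is
 * deferred to sock_flags_fixup() below.
 */
static int example_translate_stream_cloexec(void)
{
    int type = TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC;
    int ret = target_to_host_sock_type(&type);

    if (ret < 0) {
        return ret;                 /* e.g. host lacks SOCK_CLOEXEC */
    }
    /* type is now SOCK_STREAM | SOCK_CLOEXEC in host terms */
    return type;
}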
3104 
3105 /* Try to emulate socket type flags after socket creation.  */
3106 static int sock_flags_fixup(int fd, int target_type)
3107 {
3108 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3109     if (target_type & TARGET_SOCK_NONBLOCK) {
3110         int flags = fcntl(fd, F_GETFL);
3111         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3112             close(fd);
3113             return -TARGET_EINVAL;
3114         }
3115     }
3116 #endif
3117     return fd;
3118 }
3119 
3120 /* do_socket() Must return target values and target errnos. */
3121 static abi_long do_socket(int domain, int type, int protocol)
3122 {
3123     int target_type = type;
3124     int ret;
3125 
3126     ret = target_to_host_sock_type(&type);
3127     if (ret) {
3128         return ret;
3129     }
3130 
3131     if (domain == PF_NETLINK && !(
3132 #ifdef CONFIG_RTNETLINK
3133          protocol == NETLINK_ROUTE ||
3134 #endif
3135          protocol == NETLINK_KOBJECT_UEVENT ||
3136          protocol == NETLINK_AUDIT)) {
3137         return -TARGET_EPROTONOSUPPORT;
3138     }
3139 
3140     if (domain == AF_PACKET ||
3141         (domain == AF_INET && type == SOCK_PACKET)) {
3142         protocol = tswap16(protocol);
3143     }
3144 
3145     ret = get_errno(socket(domain, type, protocol));
3146     if (ret >= 0) {
3147         ret = sock_flags_fixup(ret, target_type);
3148         if (type == SOCK_PACKET) {
3149             /* Handle an obsolete case: if the socket type is
3150              * SOCK_PACKET, bind by name.
3151              */
3152             fd_trans_register(ret, &target_packet_trans);
3153         } else if (domain == PF_NETLINK) {
3154             switch (protocol) {
3155 #ifdef CONFIG_RTNETLINK
3156             case NETLINK_ROUTE:
3157                 fd_trans_register(ret, &target_netlink_route_trans);
3158                 break;
3159 #endif
3160             case NETLINK_KOBJECT_UEVENT:
3161                 /* nothing to do: messages are strings */
3162                 break;
3163             case NETLINK_AUDIT:
3164                 fd_trans_register(ret, &target_netlink_audit_trans);
3165                 break;
3166             default:
3167                 g_assert_not_reached();
3168             }
3169         }
3170     }
3171     return ret;
3172 }
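/*
 * Illustrative helper (not used above): the netlink check in do_socket()
 * amounts to a small whitelist, so any other protocol -- NETLINK_GENERIC,
 * for instance -- is rejected with -TARGET_EPROTONOSUPPORT before the host
 * kernel ever sees the socket.
 */
static bool example_netlink_protocol_emulated(int protocol)
{
    switch (protocol) {
#ifdef CONFIG_RTNETLINK
    case NETLINK_ROUTE:
#endif
    case NETLINK_KOBJECT_UEVENT:
    case NETLINK_AUDIT:
        return true;    /* ROUTE and AUDIT also get fd_trans converters */
    default:
        return false;
    }
}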
3173 
3174 /* do_bind() Must return target values and target errnos. */
3175 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3176                         socklen_t addrlen)
3177 {
3178     void *addr;
3179     abi_long ret;
3180 
3181     if ((int)addrlen < 0) {
3182         return -TARGET_EINVAL;
3183     }
3184 
3185     addr = alloca(addrlen+1);
3186 
3187     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3188     if (ret)
3189         return ret;
3190 
3191     return get_errno(bind(sockfd, addr, addrlen));
3192 }
3193 
3194 /* do_connect() Must return target values and target errnos. */
3195 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3196                            socklen_t addrlen)
3197 {
3198     void *addr;
3199     abi_long ret;
3200 
3201     if ((int)addrlen < 0) {
3202         return -TARGET_EINVAL;
3203     }
3204 
3205     addr = alloca(addrlen+1);
3206 
3207     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3208     if (ret)
3209         return ret;
3210 
3211     return get_errno(safe_connect(sockfd, addr, addrlen));
3212 }
3213 
3214 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3215 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3216                                       int flags, int send)
3217 {
3218     abi_long ret, len;
3219     struct msghdr msg;
3220     abi_ulong count;
3221     struct iovec *vec;
3222     abi_ulong target_vec;
3223 
3224     if (msgp->msg_name) {
3225         msg.msg_namelen = tswap32(msgp->msg_namelen);
3226         msg.msg_name = alloca(msg.msg_namelen+1);
3227         ret = target_to_host_sockaddr(fd, msg.msg_name,
3228                                       tswapal(msgp->msg_name),
3229                                       msg.msg_namelen);
3230         if (ret == -TARGET_EFAULT) {
3231             /* For connected sockets msg_name and msg_namelen must
3232              * be ignored, so returning EFAULT immediately is wrong.
3233              * Instead, pass a bad msg_name to the host kernel, and
3234              * let it decide whether to return EFAULT or not.
3235              */
3236             msg.msg_name = (void *)-1;
3237         } else if (ret) {
3238             goto out2;
3239         }
3240     } else {
3241         msg.msg_name = NULL;
3242         msg.msg_namelen = 0;
3243     }
3244     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3245     msg.msg_control = alloca(msg.msg_controllen);
3246     memset(msg.msg_control, 0, msg.msg_controllen);
3247 
3248     msg.msg_flags = tswap32(msgp->msg_flags);
3249 
3250     count = tswapal(msgp->msg_iovlen);
3251     target_vec = tswapal(msgp->msg_iov);
3252 
3253     if (count > IOV_MAX) {
3254         /* sendmsg/recvmsg return a different errno for this condition than
3255          * readv/writev, so we must catch it here before lock_iovec() does.
3256          */
3257         ret = -TARGET_EMSGSIZE;
3258         goto out2;
3259     }
3260 
3261     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3262                      target_vec, count, send);
3263     if (vec == NULL) {
3264         ret = -host_to_target_errno(errno);
3265         /* allow sending a packet without any iov, e.g. with the MSG_MORE flag */
3266         if (!send || ret) {
3267             goto out2;
3268         }
3269     }
3270     msg.msg_iovlen = count;
3271     msg.msg_iov = vec;
3272 
3273     if (send) {
3274         if (fd_trans_target_to_host_data(fd)) {
3275             void *host_msg;
3276 
3277             host_msg = g_malloc(msg.msg_iov->iov_len);
3278             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3279             ret = fd_trans_target_to_host_data(fd)(host_msg,
3280                                                    msg.msg_iov->iov_len);
3281             if (ret >= 0) {
3282                 msg.msg_iov->iov_base = host_msg;
3283                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3284             }
3285             g_free(host_msg);
3286         } else {
3287             ret = target_to_host_cmsg(&msg, msgp);
3288             if (ret == 0) {
3289                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3290             }
3291         }
3292     } else {
3293         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3294         if (!is_error(ret)) {
3295             len = ret;
3296             if (fd_trans_host_to_target_data(fd)) {
3297                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3298                                                MIN(msg.msg_iov->iov_len, len));
3299             }
3300             if (!is_error(ret)) {
3301                 ret = host_to_target_cmsg(msgp, &msg);
3302             }
3303             if (!is_error(ret)) {
3304                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3305                 msgp->msg_flags = tswap32(msg.msg_flags);
3306                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3307                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3308                                     msg.msg_name, msg.msg_namelen);
3309                     if (ret) {
3310                         goto out;
3311                     }
3312                 }
3313 
3314                 ret = len;
3315             }
3316         }
3317     }
3318 
3319 out:
3320     if (vec) {
3321         unlock_iovec(vec, target_vec, count, !send);
3322     }
3323 out2:
3324     return ret;
3325 }
3326 
3327 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3328                                int flags, int send)
3329 {
3330     abi_long ret;
3331     struct target_msghdr *msgp;
3332 
3333     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3334                           msgp,
3335                           target_msg,
3336                           send ? 1 : 0)) {
3337         return -TARGET_EFAULT;
3338     }
3339     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3340     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3341     return ret;
3342 }
3343 
3344 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3345  * so it might not have this *mmsg-specific flag either.
3346  */
3347 #ifndef MSG_WAITFORONE
3348 #define MSG_WAITFORONE 0x10000
3349 #endif
3350 
3351 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3352                                 unsigned int vlen, unsigned int flags,
3353                                 int send)
3354 {
3355     struct target_mmsghdr *mmsgp;
3356     abi_long ret = 0;
3357     int i;
3358 
3359     if (vlen > UIO_MAXIOV) {
3360         vlen = UIO_MAXIOV;
3361     }
3362 
3363     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3364     if (!mmsgp) {
3365         return -TARGET_EFAULT;
3366     }
3367 
3368     for (i = 0; i < vlen; i++) {
3369         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3370         if (is_error(ret)) {
3371             break;
3372         }
3373         mmsgp[i].msg_len = tswap32(ret);
3374         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3375         if (flags & MSG_WAITFORONE) {
3376             flags |= MSG_DONTWAIT;
3377         }
3378     }
3379 
3380     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3381 
3382     /* Return number of datagrams sent if we sent any at all;
3383      * otherwise return the error.
3384      */
3385     if (i) {
3386         return i;
3387     }
3388     return ret;
3389 }
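/*
 * Guest-side sketch (compiled in the guest, not in QEMU) of the semantics
 * the loop above reproduces: recvmmsg() with MSG_WAITFORONE blocks only for
 * the first datagram and then returns whatever else is already queued; the
 * emulation gets the same effect by OR-ing MSG_DONTWAIT into the flags
 * after the first successful receive.
 */
static int example_recv_batch(int sockfd, struct mmsghdr *msgs, unsigned int n)
{
    /* returns the number of datagrams received, or -1 with errno set */
    return recvmmsg(sockfd, msgs, n, MSG_WAITFORONE, NULL);
}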
3390 
3391 /* do_accept4() Must return target values and target errnos. */
3392 static abi_long do_accept4(int fd, abi_ulong target_addr,
3393                            abi_ulong target_addrlen_addr, int flags)
3394 {
3395     socklen_t addrlen, ret_addrlen;
3396     void *addr;
3397     abi_long ret;
3398     int host_flags;
3399 
3400     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3401         return -TARGET_EINVAL;
3402     }
3403 
3404     host_flags = 0;
3405     if (flags & TARGET_SOCK_NONBLOCK) {
3406         host_flags |= SOCK_NONBLOCK;
3407     }
3408     if (flags & TARGET_SOCK_CLOEXEC) {
3409         host_flags |= SOCK_CLOEXEC;
3410     }
3411 
3412     if (target_addr == 0) {
3413         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3414     }
3415 
3416     /* Linux returns EFAULT if the addrlen pointer is invalid */
3417     if (get_user_u32(addrlen, target_addrlen_addr))
3418         return -TARGET_EFAULT;
3419 
3420     if ((int)addrlen < 0) {
3421         return -TARGET_EINVAL;
3422     }
3423 
3424     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3425         return -TARGET_EFAULT;
3426     }
3427 
3428     addr = alloca(addrlen);
3429 
3430     ret_addrlen = addrlen;
3431     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3432     if (!is_error(ret)) {
3433         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3434         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3435             ret = -TARGET_EFAULT;
3436         }
3437     }
3438     return ret;
3439 }
3440 
3441 /* do_getpeername() Must return target values and target errnos. */
3442 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3443                                abi_ulong target_addrlen_addr)
3444 {
3445     socklen_t addrlen, ret_addrlen;
3446     void *addr;
3447     abi_long ret;
3448 
3449     if (get_user_u32(addrlen, target_addrlen_addr))
3450         return -TARGET_EFAULT;
3451 
3452     if ((int)addrlen < 0) {
3453         return -TARGET_EINVAL;
3454     }
3455 
3456     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3457         return -TARGET_EFAULT;
3458     }
3459 
3460     addr = alloca(addrlen);
3461 
3462     ret_addrlen = addrlen;
3463     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3464     if (!is_error(ret)) {
3465         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3466         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3467             ret = -TARGET_EFAULT;
3468         }
3469     }
3470     return ret;
3471 }
3472 
3473 /* do_getsockname() Must return target values and target errnos. */
3474 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3475                                abi_ulong target_addrlen_addr)
3476 {
3477     socklen_t addrlen, ret_addrlen;
3478     void *addr;
3479     abi_long ret;
3480 
3481     if (get_user_u32(addrlen, target_addrlen_addr))
3482         return -TARGET_EFAULT;
3483 
3484     if ((int)addrlen < 0) {
3485         return -TARGET_EINVAL;
3486     }
3487 
3488     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3489         return -TARGET_EFAULT;
3490     }
3491 
3492     addr = alloca(addrlen);
3493 
3494     ret_addrlen = addrlen;
3495     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3496     if (!is_error(ret)) {
3497         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3498         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3499             ret = -TARGET_EFAULT;
3500         }
3501     }
3502     return ret;
3503 }
3504 
3505 /* do_socketpair() Must return target values and target errnos. */
3506 static abi_long do_socketpair(int domain, int type, int protocol,
3507                               abi_ulong target_tab_addr)
3508 {
3509     int tab[2];
3510     abi_long ret;
3511 
3512     target_to_host_sock_type(&type);
3513 
3514     ret = get_errno(socketpair(domain, type, protocol, tab));
3515     if (!is_error(ret)) {
3516         if (put_user_s32(tab[0], target_tab_addr)
3517             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3518             ret = -TARGET_EFAULT;
3519     }
3520     return ret;
3521 }
3522 
3523 /* do_sendto() Must return target values and target errnos. */
3524 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3525                           abi_ulong target_addr, socklen_t addrlen)
3526 {
3527     void *addr;
3528     void *host_msg;
3529     void *copy_msg = NULL;
3530     abi_long ret;
3531 
3532     if ((int)addrlen < 0) {
3533         return -TARGET_EINVAL;
3534     }
3535 
3536     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3537     if (!host_msg)
3538         return -TARGET_EFAULT;
3539     if (fd_trans_target_to_host_data(fd)) {
3540         copy_msg = host_msg;
3541         host_msg = g_malloc(len);
3542         memcpy(host_msg, copy_msg, len);
3543         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3544         if (ret < 0) {
3545             goto fail;
3546         }
3547     }
3548     if (target_addr) {
3549         addr = alloca(addrlen+1);
3550         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3551         if (ret) {
3552             goto fail;
3553         }
3554         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3555     } else {
3556         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3557     }
3558 fail:
3559     if (copy_msg) {
3560         g_free(host_msg);
3561         host_msg = copy_msg;
3562     }
3563     unlock_user(host_msg, msg, 0);
3564     return ret;
3565 }
3566 
3567 /* do_recvfrom() Must return target values and target errnos. */
3568 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3569                             abi_ulong target_addr,
3570                             abi_ulong target_addrlen)
3571 {
3572     socklen_t addrlen, ret_addrlen;
3573     void *addr;
3574     void *host_msg;
3575     abi_long ret;
3576 
3577     if (!msg) {
3578         host_msg = NULL;
3579     } else {
3580         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3581         if (!host_msg) {
3582             return -TARGET_EFAULT;
3583         }
3584     }
3585     if (target_addr) {
3586         if (get_user_u32(addrlen, target_addrlen)) {
3587             ret = -TARGET_EFAULT;
3588             goto fail;
3589         }
3590         if ((int)addrlen < 0) {
3591             ret = -TARGET_EINVAL;
3592             goto fail;
3593         }
3594         addr = alloca(addrlen);
3595         ret_addrlen = addrlen;
3596         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3597                                       addr, &ret_addrlen));
3598     } else {
3599         addr = NULL; /* To keep compiler quiet.  */
3600         addrlen = 0; /* To keep compiler quiet.  */
3601         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3602     }
3603     if (!is_error(ret)) {
3604         if (fd_trans_host_to_target_data(fd)) {
3605             abi_long trans;
3606             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3607             if (is_error(trans)) {
3608                 ret = trans;
3609                 goto fail;
3610             }
3611         }
3612         if (target_addr) {
3613             host_to_target_sockaddr(target_addr, addr,
3614                                     MIN(addrlen, ret_addrlen));
3615             if (put_user_u32(ret_addrlen, target_addrlen)) {
3616                 ret = -TARGET_EFAULT;
3617                 goto fail;
3618             }
3619         }
3620         unlock_user(host_msg, msg, len);
3621     } else {
3622 fail:
3623         unlock_user(host_msg, msg, 0);
3624     }
3625     return ret;
3626 }
3627 
3628 #ifdef TARGET_NR_socketcall
3629 /* do_socketcall() must return target values and target errnos. */
3630 static abi_long do_socketcall(int num, abi_ulong vptr)
3631 {
3632     static const unsigned nargs[] = { /* number of arguments per operation */
3633         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3634         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3635         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3636         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3637         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3638         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3639         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3640         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3641         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3642         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3643         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3644         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3645         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3646         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3647         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3648         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3649         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3650         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3651         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3652         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3653     };
3654     abi_long a[6]; /* max 6 args */
3655     unsigned i;
3656 
3657     /* check the range of the first argument num */
3658     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3659     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3660         return -TARGET_EINVAL;
3661     }
3662     /* ensure we have space for args */
3663     if (nargs[num] > ARRAY_SIZE(a)) {
3664         return -TARGET_EINVAL;
3665     }
3666     /* collect the arguments in a[] according to nargs[] */
3667     for (i = 0; i < nargs[num]; ++i) {
3668         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3669             return -TARGET_EFAULT;
3670         }
3671     }
3672     /* now when we have the args, invoke the appropriate underlying function */
3673     switch (num) {
3674     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3675         return do_socket(a[0], a[1], a[2]);
3676     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3677         return do_bind(a[0], a[1], a[2]);
3678     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3679         return do_connect(a[0], a[1], a[2]);
3680     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3681         return get_errno(listen(a[0], a[1]));
3682     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3683         return do_accept4(a[0], a[1], a[2], 0);
3684     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3685         return do_getsockname(a[0], a[1], a[2]);
3686     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3687         return do_getpeername(a[0], a[1], a[2]);
3688     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3689         return do_socketpair(a[0], a[1], a[2], a[3]);
3690     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3691         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3692     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3693         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3694     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3695         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3696     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3697         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3698     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3699         return get_errno(shutdown(a[0], a[1]));
3700     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3701         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3702     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3703         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3704     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3705         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3706     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3707         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3708     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3709         return do_accept4(a[0], a[1], a[2], a[3]);
3710     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3711         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3712     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3713         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3714     default:
3715         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3716         return -TARGET_EINVAL;
3717     }
3718 }
3719 #endif
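/*
 * Guest-side sketch of the ABI handled by do_socketcall() above: 32-bit
 * guests without separate socket syscalls funnel every operation through
 * socketcall(2), passing the operation number plus a pointer to an array of
 * word-sized arguments.  A connect() in such a guest's libc reduces to
 * roughly (illustrative, not taken from any particular libc):
 *
 *     long args[3] = { sockfd, (long)addr, addrlen };
 *     syscall(__NR_socketcall, SYS_CONNECT, args);
 *
 * do_socketcall() then reads nargs[TARGET_SYS_CONNECT] == 3 guest words
 * from that array and hands them to do_connect().
 */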
3720 
3721 #ifndef TARGET_SEMID64_DS
3722 /* asm-generic version of this struct */
3723 struct target_semid64_ds
3724 {
3725   struct target_ipc_perm sem_perm;
3726   abi_ulong sem_otime;
3727 #if TARGET_ABI_BITS == 32
3728   abi_ulong __unused1;
3729 #endif
3730   abi_ulong sem_ctime;
3731 #if TARGET_ABI_BITS == 32
3732   abi_ulong __unused2;
3733 #endif
3734   abi_ulong sem_nsems;
3735   abi_ulong __unused3;
3736   abi_ulong __unused4;
3737 };
3738 #endif
3739 
3740 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3741                                                abi_ulong target_addr)
3742 {
3743     struct target_ipc_perm *target_ip;
3744     struct target_semid64_ds *target_sd;
3745 
3746     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3747         return -TARGET_EFAULT;
3748     target_ip = &(target_sd->sem_perm);
3749     host_ip->__key = tswap32(target_ip->__key);
3750     host_ip->uid = tswap32(target_ip->uid);
3751     host_ip->gid = tswap32(target_ip->gid);
3752     host_ip->cuid = tswap32(target_ip->cuid);
3753     host_ip->cgid = tswap32(target_ip->cgid);
3754 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3755     host_ip->mode = tswap32(target_ip->mode);
3756 #else
3757     host_ip->mode = tswap16(target_ip->mode);
3758 #endif
3759 #if defined(TARGET_PPC)
3760     host_ip->__seq = tswap32(target_ip->__seq);
3761 #else
3762     host_ip->__seq = tswap16(target_ip->__seq);
3763 #endif
3764     unlock_user_struct(target_sd, target_addr, 0);
3765     return 0;
3766 }
3767 
3768 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3769                                                struct ipc_perm *host_ip)
3770 {
3771     struct target_ipc_perm *target_ip;
3772     struct target_semid64_ds *target_sd;
3773 
3774     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3775         return -TARGET_EFAULT;
3776     target_ip = &(target_sd->sem_perm);
3777     target_ip->__key = tswap32(host_ip->__key);
3778     target_ip->uid = tswap32(host_ip->uid);
3779     target_ip->gid = tswap32(host_ip->gid);
3780     target_ip->cuid = tswap32(host_ip->cuid);
3781     target_ip->cgid = tswap32(host_ip->cgid);
3782 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3783     target_ip->mode = tswap32(host_ip->mode);
3784 #else
3785     target_ip->mode = tswap16(host_ip->mode);
3786 #endif
3787 #if defined(TARGET_PPC)
3788     target_ip->__seq = tswap32(host_ip->__seq);
3789 #else
3790     target_ip->__seq = tswap16(host_ip->__seq);
3791 #endif
3792     unlock_user_struct(target_sd, target_addr, 1);
3793     return 0;
3794 }
3795 
3796 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3797                                                abi_ulong target_addr)
3798 {
3799     struct target_semid64_ds *target_sd;
3800 
3801     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3802         return -TARGET_EFAULT;
3803     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3804         return -TARGET_EFAULT;
3805     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3806     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3807     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3808     unlock_user_struct(target_sd, target_addr, 0);
3809     return 0;
3810 }
3811 
3812 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3813                                                struct semid_ds *host_sd)
3814 {
3815     struct target_semid64_ds *target_sd;
3816 
3817     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3818         return -TARGET_EFAULT;
3819     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3820         return -TARGET_EFAULT;
3821     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3822     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3823     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3824     unlock_user_struct(target_sd, target_addr, 1);
3825     return 0;
3826 }
3827 
3828 struct target_seminfo {
3829     int semmap;
3830     int semmni;
3831     int semmns;
3832     int semmnu;
3833     int semmsl;
3834     int semopm;
3835     int semume;
3836     int semusz;
3837     int semvmx;
3838     int semaem;
3839 };
3840 
3841 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3842                                               struct seminfo *host_seminfo)
3843 {
3844     struct target_seminfo *target_seminfo;
3845     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3846         return -TARGET_EFAULT;
3847     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3848     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3849     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3850     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3851     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3852     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3853     __put_user(host_seminfo->semume, &target_seminfo->semume);
3854     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3855     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3856     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3857     unlock_user_struct(target_seminfo, target_addr, 1);
3858     return 0;
3859 }
3860 
3861 union semun {
3862 	int val;
3863 	struct semid_ds *buf;
3864 	unsigned short *array;
3865 	struct seminfo *__buf;
3866 };
3867 
3868 union target_semun {
3869 	int val;
3870 	abi_ulong buf;
3871 	abi_ulong array;
3872 	abi_ulong __buf;
3873 };
3874 
3875 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3876                                                abi_ulong target_addr)
3877 {
3878     int nsems;
3879     unsigned short *array;
3880     union semun semun;
3881     struct semid_ds semid_ds;
3882     int i, ret;
3883 
3884     semun.buf = &semid_ds;
3885 
3886     ret = semctl(semid, 0, IPC_STAT, semun);
3887     if (ret == -1)
3888         return get_errno(ret);
3889 
3890     nsems = semid_ds.sem_nsems;
3891 
3892     *host_array = g_try_new(unsigned short, nsems);
3893     if (!*host_array) {
3894         return -TARGET_ENOMEM;
3895     }
3896     array = lock_user(VERIFY_READ, target_addr,
3897                       nsems*sizeof(unsigned short), 1);
3898     if (!array) {
3899         g_free(*host_array);
3900         return -TARGET_EFAULT;
3901     }
3902 
3903     for(i=0; i<nsems; i++) {
3904         __get_user((*host_array)[i], &array[i]);
3905     }
3906     unlock_user(array, target_addr, 0);
3907 
3908     return 0;
3909 }
3910 
3911 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3912                                                unsigned short **host_array)
3913 {
3914     int nsems;
3915     unsigned short *array;
3916     union semun semun;
3917     struct semid_ds semid_ds;
3918     int i, ret;
3919 
3920     semun.buf = &semid_ds;
3921 
3922     ret = semctl(semid, 0, IPC_STAT, semun);
3923     if (ret == -1)
3924         return get_errno(ret);
3925 
3926     nsems = semid_ds.sem_nsems;
3927 
3928     array = lock_user(VERIFY_WRITE, target_addr,
3929                       nsems*sizeof(unsigned short), 0);
3930     if (!array)
3931         return -TARGET_EFAULT;
3932 
3933     for(i=0; i<nsems; i++) {
3934         __put_user((*host_array)[i], &array[i]);
3935     }
3936     g_free(*host_array);
3937     unlock_user(array, target_addr, 1);
3938 
3939     return 0;
3940 }
3941 
3942 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3943                                  abi_ulong target_arg)
3944 {
3945     union target_semun target_su = { .buf = target_arg };
3946     union semun arg;
3947     struct semid_ds dsarg;
3948     unsigned short *array = NULL;
3949     struct seminfo seminfo;
3950     abi_long ret = -TARGET_EINVAL;
3951     abi_long err;
3952     cmd &= 0xff;
3953 
3954     switch( cmd ) {
3955 	case GETVAL:
3956 	case SETVAL:
3957             /* In 64 bit cross-endian situations, we will erroneously pick up
3958              * the wrong half of the union for the "val" element.  To rectify
3959              * this, the entire 8-byte structure is byteswapped, followed by
3960 	     * a swap of the 4 byte val field. In other cases, the data is
3961 	     * already in proper host byte order. */
3962 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3963 		target_su.buf = tswapal(target_su.buf);
3964 		arg.val = tswap32(target_su.val);
3965 	    } else {
3966 		arg.val = target_su.val;
3967 	    }
3968             ret = get_errno(semctl(semid, semnum, cmd, arg));
3969             break;
3970 	case GETALL:
3971 	case SETALL:
3972             err = target_to_host_semarray(semid, &array, target_su.array);
3973             if (err)
3974                 return err;
3975             arg.array = array;
3976             ret = get_errno(semctl(semid, semnum, cmd, arg));
3977             err = host_to_target_semarray(semid, target_su.array, &array);
3978             if (err)
3979                 return err;
3980             break;
3981 	case IPC_STAT:
3982 	case IPC_SET:
3983 	case SEM_STAT:
3984             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3985             if (err)
3986                 return err;
3987             arg.buf = &dsarg;
3988             ret = get_errno(semctl(semid, semnum, cmd, arg));
3989             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3990             if (err)
3991                 return err;
3992             break;
3993 	case IPC_INFO:
3994 	case SEM_INFO:
3995             arg.__buf = &seminfo;
3996             ret = get_errno(semctl(semid, semnum, cmd, arg));
3997             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3998             if (err)
3999                 return err;
4000             break;
4001 	case IPC_RMID:
4002 	case GETPID:
4003 	case GETNCNT:
4004 	case GETZCNT:
4005             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4006             break;
4007     }
4008 
4009     return ret;
4010 }
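/*
 * Sketch (hypothetical helper): on the host side semctl() takes the
 * caller-defined union semun above by value.  The GETVAL/SETVAL special
 * case in do_semctl() exists because, as the comment there notes, an
 * 8-byte cross-endian union would otherwise hand us the wrong half for the
 * 4-byte val, hence the full-word swap followed by a 32-bit swap of val.
 */
static int example_setval(int semid, int semnum, int value)
{
    union semun arg;

    arg.val = value;
    return semctl(semid, semnum, SETVAL, arg);
}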
4011 
4012 struct target_sembuf {
4013     unsigned short sem_num;
4014     short sem_op;
4015     short sem_flg;
4016 };
4017 
4018 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4019                                              abi_ulong target_addr,
4020                                              unsigned nsops)
4021 {
4022     struct target_sembuf *target_sembuf;
4023     int i;
4024 
4025     target_sembuf = lock_user(VERIFY_READ, target_addr,
4026                               nsops*sizeof(struct target_sembuf), 1);
4027     if (!target_sembuf)
4028         return -TARGET_EFAULT;
4029 
4030     for(i=0; i<nsops; i++) {
4031         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4032         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4033         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4034     }
4035 
4036     unlock_user(target_sembuf, target_addr, 0);
4037 
4038     return 0;
4039 }
4040 
4041 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4042     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4043 
4044 /*
4045  * This macro is required to handle the s390 variants, which pass the
4046  * arguments in a different order than the default.
4047  */
4048 #ifdef __s390x__
4049 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4050   (__nsops), (__timeout), (__sops)
4051 #else
4052 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4053   (__nsops), 0, (__sops), (__timeout)
4054 #endif
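/*
 * Sketch of what the macro above produces at the safe_ipc() call site in
 * do_semtimedop() below:
 *
 *   generic:  safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 *   s390x:    safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 *
 * i.e. the five-parameter s390 sys_ipc takes the timeout in the "third"
 * slot and the sembuf pointer in the "ptr" slot, so the macro reorders the
 * arguments rather than adding a placeholder.
 */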
4055 
4056 static inline abi_long do_semtimedop(int semid,
4057                                      abi_long ptr,
4058                                      unsigned nsops,
4059                                      abi_long timeout, bool time64)
4060 {
4061     struct sembuf *sops;
4062     struct timespec ts, *pts = NULL;
4063     abi_long ret;
4064 
4065     if (timeout) {
4066         pts = &ts;
4067         if (time64) {
4068             if (target_to_host_timespec64(pts, timeout)) {
4069                 return -TARGET_EFAULT;
4070             }
4071         } else {
4072             if (target_to_host_timespec(pts, timeout)) {
4073                 return -TARGET_EFAULT;
4074             }
4075         }
4076     }
4077 
4078     if (nsops > TARGET_SEMOPM) {
4079         return -TARGET_E2BIG;
4080     }
4081 
4082     sops = g_new(struct sembuf, nsops);
4083 
4084     if (target_to_host_sembuf(sops, ptr, nsops)) {
4085         g_free(sops);
4086         return -TARGET_EFAULT;
4087     }
4088 
4089     ret = -TARGET_ENOSYS;
4090 #ifdef __NR_semtimedop
4091     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4092 #endif
4093 #ifdef __NR_ipc
4094     if (ret == -TARGET_ENOSYS) {
4095         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4096                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4097     }
4098 #endif
4099     g_free(sops);
4100     return ret;
4101 }
4102 #endif
4103 
4104 struct target_msqid_ds
4105 {
4106     struct target_ipc_perm msg_perm;
4107     abi_ulong msg_stime;
4108 #if TARGET_ABI_BITS == 32
4109     abi_ulong __unused1;
4110 #endif
4111     abi_ulong msg_rtime;
4112 #if TARGET_ABI_BITS == 32
4113     abi_ulong __unused2;
4114 #endif
4115     abi_ulong msg_ctime;
4116 #if TARGET_ABI_BITS == 32
4117     abi_ulong __unused3;
4118 #endif
4119     abi_ulong __msg_cbytes;
4120     abi_ulong msg_qnum;
4121     abi_ulong msg_qbytes;
4122     abi_ulong msg_lspid;
4123     abi_ulong msg_lrpid;
4124     abi_ulong __unused4;
4125     abi_ulong __unused5;
4126 };
4127 
4128 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4129                                                abi_ulong target_addr)
4130 {
4131     struct target_msqid_ds *target_md;
4132 
4133     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4134         return -TARGET_EFAULT;
4135     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4136         return -TARGET_EFAULT;
4137     host_md->msg_stime = tswapal(target_md->msg_stime);
4138     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4139     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4140     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4141     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4142     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4143     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4144     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4145     unlock_user_struct(target_md, target_addr, 0);
4146     return 0;
4147 }
4148 
4149 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4150                                                struct msqid_ds *host_md)
4151 {
4152     struct target_msqid_ds *target_md;
4153 
4154     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4155         return -TARGET_EFAULT;
4156     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4157         return -TARGET_EFAULT;
4158     target_md->msg_stime = tswapal(host_md->msg_stime);
4159     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4160     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4161     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4162     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4163     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4164     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4165     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4166     unlock_user_struct(target_md, target_addr, 1);
4167     return 0;
4168 }
4169 
4170 struct target_msginfo {
4171     int msgpool;
4172     int msgmap;
4173     int msgmax;
4174     int msgmnb;
4175     int msgmni;
4176     int msgssz;
4177     int msgtql;
4178     unsigned short int msgseg;
4179 };
4180 
4181 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4182                                               struct msginfo *host_msginfo)
4183 {
4184     struct target_msginfo *target_msginfo;
4185     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4186         return -TARGET_EFAULT;
4187     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4188     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4189     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4190     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4191     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4192     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4193     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4194     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4195     unlock_user_struct(target_msginfo, target_addr, 1);
4196     return 0;
4197 }
4198 
4199 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4200 {
4201     struct msqid_ds dsarg;
4202     struct msginfo msginfo;
4203     abi_long ret = -TARGET_EINVAL;
4204 
4205     cmd &= 0xff;
4206 
4207     switch (cmd) {
4208     case IPC_STAT:
4209     case IPC_SET:
4210     case MSG_STAT:
4211         if (target_to_host_msqid_ds(&dsarg,ptr))
4212             return -TARGET_EFAULT;
4213         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4214         if (host_to_target_msqid_ds(ptr,&dsarg))
4215             return -TARGET_EFAULT;
4216         break;
4217     case IPC_RMID:
4218         ret = get_errno(msgctl(msgid, cmd, NULL));
4219         break;
4220     case IPC_INFO:
4221     case MSG_INFO:
4222         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4223         if (host_to_target_msginfo(ptr, &msginfo))
4224             return -TARGET_EFAULT;
4225         break;
4226     }
4227 
4228     return ret;
4229 }
4230 
4231 struct target_msgbuf {
4232     abi_long mtype;
4233     char	mtext[1];
4234 };
4235 
4236 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4237                                  ssize_t msgsz, int msgflg)
4238 {
4239     struct target_msgbuf *target_mb;
4240     struct msgbuf *host_mb;
4241     abi_long ret = 0;
4242 
4243     if (msgsz < 0) {
4244         return -TARGET_EINVAL;
4245     }
4246 
4247     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4248         return -TARGET_EFAULT;
4249     host_mb = g_try_malloc(msgsz + sizeof(long));
4250     if (!host_mb) {
4251         unlock_user_struct(target_mb, msgp, 0);
4252         return -TARGET_ENOMEM;
4253     }
4254     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4255     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4256     ret = -TARGET_ENOSYS;
4257 #ifdef __NR_msgsnd
4258     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4259 #endif
4260 #ifdef __NR_ipc
4261     if (ret == -TARGET_ENOSYS) {
4262 #ifdef __s390x__
4263         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4264                                  host_mb));
4265 #else
4266         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4267                                  host_mb, 0));
4268 #endif
4269     }
4270 #endif
4271     g_free(host_mb);
4272     unlock_user_struct(target_mb, msgp, 0);
4273 
4274     return ret;
4275 }
4276 
4277 #ifdef __NR_ipc
4278 #if defined(__sparc__)
4279 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4280 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4281 #elif defined(__s390x__)
4282 /* The s390 sys_ipc variant has only five parameters.  */
4283 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4284     ((long int[]){(long int)__msgp, __msgtyp})
4285 #else
4286 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4287     ((long int[]){(long int)__msgp, __msgtyp}), 0
4288 #endif
4289 #endif
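/*
 * Sketch of the msgrcv "kludge" mentioned above: with the generic variant,
 * msgp and msgtyp are packed into a temporary two-word array that is passed
 * as the pointer argument, so the call in do_msgrcv() expands roughly to
 *
 *   safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *            ((long int[]){ (long int)host_mb, msgtyp }), 0);
 *
 * SPARC passes msgp and msgtyp as two plain arguments instead, and the
 * five-parameter s390 variant simply drops the trailing 0.
 */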
4290 
4291 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4292                                  ssize_t msgsz, abi_long msgtyp,
4293                                  int msgflg)
4294 {
4295     struct target_msgbuf *target_mb;
4296     char *target_mtext;
4297     struct msgbuf *host_mb;
4298     abi_long ret = 0;
4299 
4300     if (msgsz < 0) {
4301         return -TARGET_EINVAL;
4302     }
4303 
4304     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4305         return -TARGET_EFAULT;
4306 
4307     host_mb = g_try_malloc(msgsz + sizeof(long));
4308     if (!host_mb) {
4309         ret = -TARGET_ENOMEM;
4310         goto end;
4311     }
4312     ret = -TARGET_ENOSYS;
4313 #ifdef __NR_msgrcv
4314     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4315 #endif
4316 #ifdef __NR_ipc
4317     if (ret == -TARGET_ENOSYS) {
4318         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4319                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4320     }
4321 #endif
4322 
4323     if (ret > 0) {
4324         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4325         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4326         if (!target_mtext) {
4327             ret = -TARGET_EFAULT;
4328             goto end;
4329         }
4330         memcpy(target_mb->mtext, host_mb->mtext, ret);
4331         unlock_user(target_mtext, target_mtext_addr, ret);
4332     }
4333 
4334     target_mb->mtype = tswapal(host_mb->mtype);
4335 
4336 end:
4337     if (target_mb)
4338         unlock_user_struct(target_mb, msgp, 1);
4339     g_free(host_mb);
4340     return ret;
4341 }
4342 
4343 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4344                                                abi_ulong target_addr)
4345 {
4346     struct target_shmid_ds *target_sd;
4347 
4348     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4349         return -TARGET_EFAULT;
4350     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4351         return -TARGET_EFAULT;
4352     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4353     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4354     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4355     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4356     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4357     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4358     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4359     unlock_user_struct(target_sd, target_addr, 0);
4360     return 0;
4361 }
4362 
4363 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4364                                                struct shmid_ds *host_sd)
4365 {
4366     struct target_shmid_ds *target_sd;
4367 
4368     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4369         return -TARGET_EFAULT;
4370     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4371         return -TARGET_EFAULT;
4372     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4373     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4374     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4375     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4376     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4377     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4378     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4379     unlock_user_struct(target_sd, target_addr, 1);
4380     return 0;
4381 }
4382 
4383 struct  target_shminfo {
4384     abi_ulong shmmax;
4385     abi_ulong shmmin;
4386     abi_ulong shmmni;
4387     abi_ulong shmseg;
4388     abi_ulong shmall;
4389 };
4390 
4391 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4392                                               struct shminfo *host_shminfo)
4393 {
4394     struct target_shminfo *target_shminfo;
4395     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4396         return -TARGET_EFAULT;
4397     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4398     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4399     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4400     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4401     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4402     unlock_user_struct(target_shminfo, target_addr, 1);
4403     return 0;
4404 }
4405 
4406 struct target_shm_info {
4407     int used_ids;
4408     abi_ulong shm_tot;
4409     abi_ulong shm_rss;
4410     abi_ulong shm_swp;
4411     abi_ulong swap_attempts;
4412     abi_ulong swap_successes;
4413 };
4414 
4415 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4416                                                struct shm_info *host_shm_info)
4417 {
4418     struct target_shm_info *target_shm_info;
4419     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4420         return -TARGET_EFAULT;
4421     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4422     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4423     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4424     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4425     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4426     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4427     unlock_user_struct(target_shm_info, target_addr, 1);
4428     return 0;
4429 }
4430 
4431 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4432 {
4433     struct shmid_ds dsarg;
4434     struct shminfo shminfo;
4435     struct shm_info shm_info;
4436     abi_long ret = -TARGET_EINVAL;
4437 
4438     cmd &= 0xff;
4439 
4440     switch(cmd) {
4441     case IPC_STAT:
4442     case IPC_SET:
4443     case SHM_STAT:
4444         if (target_to_host_shmid_ds(&dsarg, buf))
4445             return -TARGET_EFAULT;
4446         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4447         if (host_to_target_shmid_ds(buf, &dsarg))
4448             return -TARGET_EFAULT;
4449         break;
4450     case IPC_INFO:
4451         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4452         if (host_to_target_shminfo(buf, &shminfo))
4453             return -TARGET_EFAULT;
4454         break;
4455     case SHM_INFO:
4456         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4457         if (host_to_target_shm_info(buf, &shm_info))
4458             return -TARGET_EFAULT;
4459         break;
4460     case IPC_RMID:
4461     case SHM_LOCK:
4462     case SHM_UNLOCK:
4463         ret = get_errno(shmctl(shmid, cmd, NULL));
4464         break;
4465     }
4466 
4467     return ret;
4468 }
4469 
4470 #ifdef TARGET_NR_ipc
4471 /* ??? This only works with linear mappings.  */
4472 /* do_ipc() must return target values and target errnos. */
4473 static abi_long do_ipc(CPUArchState *cpu_env,
4474                        unsigned int call, abi_long first,
4475                        abi_long second, abi_long third,
4476                        abi_long ptr, abi_long fifth)
4477 {
4478     int version;
4479     abi_long ret = 0;
4480 
4481     version = call >> 16;
4482     call &= 0xffff;
4483 
4484     switch (call) {
4485     case IPCOP_semop:
4486         ret = do_semtimedop(first, ptr, second, 0, false);
4487         break;
4488     case IPCOP_semtimedop:
4489     /*
4490      * The s390 sys_ipc variant has only five parameters instead of six
4491      * (as in the default variant); the only difference is the handling of
4492      * SEMTIMEDOP, where s390 uses the third parameter as a pointer to a
4493      * struct timespec while the generic variant uses the fifth parameter.
4494      */
4495 #if defined(TARGET_S390X)
4496         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4497 #else
4498         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4499 #endif
4500         break;
4501 
4502     case IPCOP_semget:
4503         ret = get_errno(semget(first, second, third));
4504         break;
4505 
4506     case IPCOP_semctl: {
4507         /* The semun argument to semctl is passed by value, so dereference the
4508          * ptr argument. */
4509         abi_ulong atptr;
4510         get_user_ual(atptr, ptr);
4511         ret = do_semctl(first, second, third, atptr);
4512         break;
4513     }
4514 
4515     case IPCOP_msgget:
4516         ret = get_errno(msgget(first, second));
4517         break;
4518 
4519     case IPCOP_msgsnd:
4520         ret = do_msgsnd(first, ptr, second, third);
4521         break;
4522 
4523     case IPCOP_msgctl:
4524         ret = do_msgctl(first, second, ptr);
4525         break;
4526 
4527     case IPCOP_msgrcv:
4528         switch (version) {
4529         case 0:
4530             {
4531                 struct target_ipc_kludge {
4532                     abi_long msgp;
4533                     abi_long msgtyp;
4534                 } *tmp;
4535 
4536                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4537                     ret = -TARGET_EFAULT;
4538                     break;
4539                 }
4540 
4541                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4542 
4543                 unlock_user_struct(tmp, ptr, 0);
4544                 break;
4545             }
4546         default:
4547             ret = do_msgrcv(first, ptr, second, fifth, third);
4548         }
4549         break;
4550 
4551     case IPCOP_shmat:
4552         switch (version) {
4553         default:
4554         {
4555             abi_ulong raddr;
4556             raddr = target_shmat(cpu_env, first, ptr, second);
4557             if (is_error(raddr))
4558                 return get_errno(raddr);
4559             if (put_user_ual(raddr, third))
4560                 return -TARGET_EFAULT;
4561             break;
4562         }
4563         case 1:
4564             ret = -TARGET_EINVAL;
4565             break;
4566         }
4567 	break;
4568     case IPCOP_shmdt:
4569         ret = target_shmdt(ptr);
4570 	break;
4571 
4572     case IPCOP_shmget:
4573 	/* IPC_* flag values are the same on all linux platforms */
4574 	ret = get_errno(shmget(first, second, third));
4575 	break;
4576 
4577 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4578     case IPCOP_shmctl:
4579         ret = do_shmctl(first, second, ptr);
4580         break;
4581     default:
4582         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4583                       call, version);
4584 	ret = -TARGET_ENOSYS;
4585 	break;
4586     }
4587     return ret;
4588 }
4589 #endif
4590 
4591 /* kernel structure types definitions */
4592 
4593 #define STRUCT(name, ...) STRUCT_ ## name,
4594 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4595 enum {
4596 #include "syscall_types.h"
4597 STRUCT_MAX
4598 };
4599 #undef STRUCT
4600 #undef STRUCT_SPECIAL
4601 
4602 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4603 #define STRUCT_SPECIAL(name)
4604 #include "syscall_types.h"
4605 #undef STRUCT
4606 #undef STRUCT_SPECIAL
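/*
 * The two passes over "syscall_types.h" above form an X-macro: the first
 * include turns each STRUCT(name, ...) entry into a STRUCT_name
 * enumerator, and the second emits a struct_name_def[] argtype array
 * terminated by TYPE_NULL (STRUCT_SPECIAL entries only get the
 * enumerator; their converters are written by hand).  A hypothetical
 * entry such as
 *
 *     STRUCT(foo, TYPE_INT, TYPE_LONG)
 *
 * would therefore produce STRUCT_foo and
 *
 *     static const argtype struct_foo_def[] = { TYPE_INT, TYPE_LONG, TYPE_NULL };
 */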
4607 
4608 #define MAX_STRUCT_SIZE 4096
4609 
4610 #ifdef CONFIG_FIEMAP
4611 /* So fiemap access checks don't overflow on 32 bit systems.
4612  * This is very slightly smaller than the limit imposed by
4613  * the underlying kernel.
4614  */
4615 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4616                             / sizeof(struct fiemap_extent))
4617 
4618 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4619                                        int fd, int cmd, abi_long arg)
4620 {
4621     /* The parameter for this ioctl is a struct fiemap followed
4622      * by an array of struct fiemap_extent whose size is set
4623      * in fiemap->fm_extent_count. The array is filled in by the
4624      * ioctl.
4625      */
4626     int target_size_in, target_size_out;
4627     struct fiemap *fm;
4628     const argtype *arg_type = ie->arg_type;
4629     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4630     void *argptr, *p;
4631     abi_long ret;
4632     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4633     uint32_t outbufsz;
4634     int free_fm = 0;
4635 
4636     assert(arg_type[0] == TYPE_PTR);
4637     assert(ie->access == IOC_RW);
4638     arg_type++;
4639     target_size_in = thunk_type_size(arg_type, 0);
4640     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4641     if (!argptr) {
4642         return -TARGET_EFAULT;
4643     }
4644     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4645     unlock_user(argptr, arg, 0);
4646     fm = (struct fiemap *)buf_temp;
4647     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4648         return -TARGET_EINVAL;
4649     }
4650 
4651     outbufsz = sizeof (*fm) +
4652         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4653 
4654     if (outbufsz > MAX_STRUCT_SIZE) {
4655         /* We can't fit all the extents into the fixed size buffer.
4656          * Allocate one that is large enough and use it instead.
4657          */
4658         fm = g_try_malloc(outbufsz);
4659         if (!fm) {
4660             return -TARGET_ENOMEM;
4661         }
4662         memcpy(fm, buf_temp, sizeof(struct fiemap));
4663         free_fm = 1;
4664     }
4665     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4666     if (!is_error(ret)) {
4667         target_size_out = target_size_in;
4668         /* An extent_count of 0 means we were only counting the extents
4669          * so there are no structs to copy
4670          */
4671         if (fm->fm_extent_count != 0) {
4672             target_size_out += fm->fm_mapped_extents * extent_size;
4673         }
4674         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4675         if (!argptr) {
4676             ret = -TARGET_EFAULT;
4677         } else {
4678             /* Convert the struct fiemap */
4679             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4680             if (fm->fm_extent_count != 0) {
4681                 p = argptr + target_size_in;
4682                 /* ...and then all the struct fiemap_extents */
4683                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4684                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4685                                   THUNK_TARGET);
4686                     p += extent_size;
4687                 }
4688             }
4689             unlock_user(argptr, arg, target_size_out);
4690         }
4691     }
4692     if (free_fm) {
4693         g_free(fm);
4694     }
4695     return ret;
4696 }
4697 #endif
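/*
 * do_ioctl_fs_ioc_fiemap() is the first of the special-case ioctl
 * handlers in this file.  They all share the same shape: they take the
 * IOCTLEntry, the scratch buffer buf_temp, the host fd and the raw
 * guest argument, and they must return target errnos.  Each one is
 * attached to its command via an IOCTL_SPECIAL() entry in "ioctls.h"
 * and invoked from do_ioctl() below when ie->do_ioctl is non-NULL.
 */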
4698 
4699 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4700                                 int fd, int cmd, abi_long arg)
4701 {
4702     const argtype *arg_type = ie->arg_type;
4703     int target_size;
4704     void *argptr;
4705     int ret;
4706     struct ifconf *host_ifconf;
4707     uint32_t outbufsz;
4708     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4709     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4710     int target_ifreq_size;
4711     int nb_ifreq;
4712     int free_buf = 0;
4713     int i;
4714     int target_ifc_len;
4715     abi_long target_ifc_buf;
4716     int host_ifc_len;
4717     char *host_ifc_buf;
4718 
4719     assert(arg_type[0] == TYPE_PTR);
4720     assert(ie->access == IOC_RW);
4721 
4722     arg_type++;
4723     target_size = thunk_type_size(arg_type, 0);
4724 
4725     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4726     if (!argptr)
4727         return -TARGET_EFAULT;
4728     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4729     unlock_user(argptr, arg, 0);
4730 
4731     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4732     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4733     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4734 
4735     if (target_ifc_buf != 0) {
4736         target_ifc_len = host_ifconf->ifc_len;
4737         nb_ifreq = target_ifc_len / target_ifreq_size;
4738         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4739 
4740         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4741         if (outbufsz > MAX_STRUCT_SIZE) {
4742             /*
4743              * We can't fit all the ifreq entries into the fixed size buffer.
4744              * Allocate one that is large enough and use it instead.
4745              */
4746             host_ifconf = g_try_malloc(outbufsz);
4747             if (!host_ifconf) {
4748                 return -TARGET_ENOMEM;
4749             }
4750             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4751             free_buf = 1;
4752         }
4753         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4754 
4755         host_ifconf->ifc_len = host_ifc_len;
4756     } else {
4757         host_ifc_buf = NULL;
4758     }
4759     host_ifconf->ifc_buf = host_ifc_buf;
4760 
4761     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4762     if (!is_error(ret)) {
4763 	/* convert host ifc_len to target ifc_len */
4764 
4765         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4766         target_ifc_len = nb_ifreq * target_ifreq_size;
4767         host_ifconf->ifc_len = target_ifc_len;
4768 
4769 	/* restore target ifc_buf */
4770 
4771         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4772 
4773 	/* copy struct ifconf to target user */
4774 
4775         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4776         if (!argptr)
4777             return -TARGET_EFAULT;
4778         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4779         unlock_user(argptr, arg, target_size);
4780 
4781         if (target_ifc_buf != 0) {
4782             /* copy ifreq[] to target user */
4783             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4784             for (i = 0; i < nb_ifreq ; i++) {
4785                 thunk_convert(argptr + i * target_ifreq_size,
4786                               host_ifc_buf + i * sizeof(struct ifreq),
4787                               ifreq_arg_type, THUNK_TARGET);
4788             }
4789             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4790         }
4791     }
4792 
4793     if (free_buf) {
4794         g_free(host_ifconf);
4795     }
4796 
4797     return ret;
4798 }
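/*
 * Note the two ifreq layouts used above: the guest's ifc_len is measured
 * in units of the largest target ifreq variant (STRUCT_ifmap_ifreq),
 * while the host array is sized with sizeof(struct ifreq).  The counts
 * are therefore rescaled before and after the host ioctl, and each
 * returned entry is converted with the sockaddr_ifreq layout when it is
 * copied back to the guest buffer.
 */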
4799 
4800 #if defined(CONFIG_USBFS)
4801 #if HOST_LONG_BITS > 64
4802 #error USBDEVFS thunks do not support >64 bit hosts yet.
4803 #endif
4804 struct live_urb {
4805     uint64_t target_urb_adr;
4806     uint64_t target_buf_adr;
4807     char *target_buf_ptr;
4808     struct usbdevfs_urb host_urb;
4809 };
4810 
4811 static GHashTable *usbdevfs_urb_hashtable(void)
4812 {
4813     static GHashTable *urb_hashtable;
4814 
4815     if (!urb_hashtable) {
4816         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4817     }
4818     return urb_hashtable;
4819 }
4820 
4821 static void urb_hashtable_insert(struct live_urb *urb)
4822 {
4823     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4824     g_hash_table_insert(urb_hashtable, urb, urb);
4825 }
4826 
4827 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4828 {
4829     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4830     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4831 }
4832 
4833 static void urb_hashtable_remove(struct live_urb *urb)
4834 {
4835     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4836     g_hash_table_remove(urb_hashtable, urb);
4837 }
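/*
 * URB bookkeeping for the usbdevfs ioctls: the kernel identifies an URB
 * by the host pointer it was submitted with, so each guest URB gets a
 * heap-allocated struct live_urb whose embedded host_urb is what is
 * handed to the kernel.  USBDEVFS_REAPURB returns that host pointer,
 * and offsetof(struct live_urb, host_urb) recovers the wrapper.  The
 * hash table is keyed on target_urb_adr; because it is the first member
 * of struct live_urb, the struct pointer itself works as a gint64 key
 * for g_int64_hash/g_int64_equal, so lookups can pass a plain
 * &target_urb_adr.
 */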
4838 
4839 static abi_long
4840 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4841                           int fd, int cmd, abi_long arg)
4842 {
4843     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4844     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4845     struct live_urb *lurb;
4846     void *argptr;
4847     uint64_t hurb;
4848     int target_size;
4849     uintptr_t target_urb_adr;
4850     abi_long ret;
4851 
4852     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4853 
4854     memset(buf_temp, 0, sizeof(uint64_t));
4855     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4856     if (is_error(ret)) {
4857         return ret;
4858     }
4859 
4860     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4861     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4862     if (!lurb->target_urb_adr) {
4863         return -TARGET_EFAULT;
4864     }
4865     urb_hashtable_remove(lurb);
4866     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4867         lurb->host_urb.buffer_length);
4868     lurb->target_buf_ptr = NULL;
4869 
4870     /* restore the guest buffer pointer */
4871     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4872 
4873     /* update the guest urb struct */
4874     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4875     if (!argptr) {
4876         g_free(lurb);
4877         return -TARGET_EFAULT;
4878     }
4879     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4880     unlock_user(argptr, lurb->target_urb_adr, target_size);
4881 
4882     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4883     /* write back the urb handle */
4884     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4885     if (!argptr) {
4886         g_free(lurb);
4887         return -TARGET_EFAULT;
4888     }
4889 
4890     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4891     target_urb_adr = lurb->target_urb_adr;
4892     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4893     unlock_user(argptr, arg, target_size);
4894 
4895     g_free(lurb);
4896     return ret;
4897 }
4898 
4899 static abi_long
4900 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4901                              uint8_t *buf_temp __attribute__((unused)),
4902                              int fd, int cmd, abi_long arg)
4903 {
4904     struct live_urb *lurb;
4905 
4906     /* map target address back to host URB with metadata. */
4907     lurb = urb_hashtable_lookup(arg);
4908     if (!lurb) {
4909         return -TARGET_EFAULT;
4910     }
4911     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4912 }
4913 
4914 static abi_long
4915 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4916                             int fd, int cmd, abi_long arg)
4917 {
4918     const argtype *arg_type = ie->arg_type;
4919     int target_size;
4920     abi_long ret;
4921     void *argptr;
4922     int rw_dir;
4923     struct live_urb *lurb;
4924 
4925     /*
4926      * each submitted URB needs to map to a unique ID for the
4927      * kernel, and that unique ID needs to be a pointer to
4928      * host memory.  hence, we need to malloc for each URB.
4929      * isochronous transfers have a variable length struct.
4930      */
4931     arg_type++;
4932     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4933 
4934     /* construct host copy of urb and metadata */
4935     lurb = g_try_new0(struct live_urb, 1);
4936     if (!lurb) {
4937         return -TARGET_ENOMEM;
4938     }
4939 
4940     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4941     if (!argptr) {
4942         g_free(lurb);
4943         return -TARGET_EFAULT;
4944     }
4945     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4946     unlock_user(argptr, arg, 0);
4947 
4948     lurb->target_urb_adr = arg;
4949     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4950 
4951     /* buffer space used depends on endpoint type so lock the entire buffer */
4952     /* control type urbs should check the buffer contents for true direction */
4953     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4954     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4955         lurb->host_urb.buffer_length, 1);
4956     if (lurb->target_buf_ptr == NULL) {
4957         g_free(lurb);
4958         return -TARGET_EFAULT;
4959     }
4960 
4961     /* update buffer pointer in host copy */
4962     lurb->host_urb.buffer = lurb->target_buf_ptr;
4963 
4964     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4965     if (is_error(ret)) {
4966         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4967         g_free(lurb);
4968     } else {
4969         urb_hashtable_insert(lurb);
4970     }
4971 
4972     return ret;
4973 }
4974 #endif /* CONFIG_USBFS */
4975 
4976 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4977                             int cmd, abi_long arg)
4978 {
4979     void *argptr;
4980     struct dm_ioctl *host_dm;
4981     abi_long guest_data;
4982     uint32_t guest_data_size;
4983     int target_size;
4984     const argtype *arg_type = ie->arg_type;
4985     abi_long ret;
4986     void *big_buf = NULL;
4987     char *host_data;
4988 
4989     arg_type++;
4990     target_size = thunk_type_size(arg_type, 0);
4991     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4992     if (!argptr) {
4993         ret = -TARGET_EFAULT;
4994         goto out;
4995     }
4996     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4997     unlock_user(argptr, arg, 0);
4998 
4999     /* buf_temp is too small, so fetch things into a bigger buffer */
5000     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5001     memcpy(big_buf, buf_temp, target_size);
5002     buf_temp = big_buf;
5003     host_dm = big_buf;
5004 
5005     guest_data = arg + host_dm->data_start;
5006     if ((guest_data - arg) < 0) {
5007         ret = -TARGET_EINVAL;
5008         goto out;
5009     }
5010     guest_data_size = host_dm->data_size - host_dm->data_start;
5011     host_data = (char*)host_dm + host_dm->data_start;
5012 
5013     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5014     if (!argptr) {
5015         ret = -TARGET_EFAULT;
5016         goto out;
5017     }
5018 
5019     switch (ie->host_cmd) {
5020     case DM_REMOVE_ALL:
5021     case DM_LIST_DEVICES:
5022     case DM_DEV_CREATE:
5023     case DM_DEV_REMOVE:
5024     case DM_DEV_SUSPEND:
5025     case DM_DEV_STATUS:
5026     case DM_DEV_WAIT:
5027     case DM_TABLE_STATUS:
5028     case DM_TABLE_CLEAR:
5029     case DM_TABLE_DEPS:
5030     case DM_LIST_VERSIONS:
5031         /* no input data */
5032         break;
5033     case DM_DEV_RENAME:
5034     case DM_DEV_SET_GEOMETRY:
5035         /* data contains only strings */
5036         memcpy(host_data, argptr, guest_data_size);
5037         break;
5038     case DM_TARGET_MSG:
5039         memcpy(host_data, argptr, guest_data_size);
5040         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5041         break;
5042     case DM_TABLE_LOAD:
5043     {
5044         void *gspec = argptr;
5045         void *cur_data = host_data;
5046         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5047         int spec_size = thunk_type_size(dm_arg_type, 0);
5048         int i;
5049 
5050         for (i = 0; i < host_dm->target_count; i++) {
5051             struct dm_target_spec *spec = cur_data;
5052             uint32_t next;
5053             int slen;
5054 
5055             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5056             slen = strlen((char*)gspec + spec_size) + 1;
5057             next = spec->next;
5058             spec->next = sizeof(*spec) + slen;
5059             strcpy((char*)&spec[1], gspec + spec_size);
5060             gspec += next;
5061             cur_data += spec->next;
5062         }
5063         break;
5064     }
5065     default:
5066         ret = -TARGET_EINVAL;
5067         unlock_user(argptr, guest_data, 0);
5068         goto out;
5069     }
5070     unlock_user(argptr, guest_data, 0);
5071 
5072     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5073     if (!is_error(ret)) {
5074         guest_data = arg + host_dm->data_start;
5075         guest_data_size = host_dm->data_size - host_dm->data_start;
5076         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5077         switch (ie->host_cmd) {
5078         case DM_REMOVE_ALL:
5079         case DM_DEV_CREATE:
5080         case DM_DEV_REMOVE:
5081         case DM_DEV_RENAME:
5082         case DM_DEV_SUSPEND:
5083         case DM_DEV_STATUS:
5084         case DM_TABLE_LOAD:
5085         case DM_TABLE_CLEAR:
5086         case DM_TARGET_MSG:
5087         case DM_DEV_SET_GEOMETRY:
5088             /* no return data */
5089             break;
5090         case DM_LIST_DEVICES:
5091         {
5092             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5093             uint32_t remaining_data = guest_data_size;
5094             void *cur_data = argptr;
5095             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5096             int nl_size = 12; /* can't use thunk_size due to alignment */
5097 
5098             while (1) {
5099                 uint32_t next = nl->next;
5100                 if (next) {
5101                     nl->next = nl_size + (strlen(nl->name) + 1);
5102                 }
5103                 if (remaining_data < nl->next) {
5104                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5105                     break;
5106                 }
5107                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5108                 strcpy(cur_data + nl_size, nl->name);
5109                 cur_data += nl->next;
5110                 remaining_data -= nl->next;
5111                 if (!next) {
5112                     break;
5113                 }
5114                 nl = (void*)nl + next;
5115             }
5116             break;
5117         }
5118         case DM_DEV_WAIT:
5119         case DM_TABLE_STATUS:
5120         {
5121             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5122             void *cur_data = argptr;
5123             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5124             int spec_size = thunk_type_size(dm_arg_type, 0);
5125             int i;
5126 
5127             for (i = 0; i < host_dm->target_count; i++) {
5128                 uint32_t next = spec->next;
5129                 int slen = strlen((char*)&spec[1]) + 1;
5130                 spec->next = (cur_data - argptr) + spec_size + slen;
5131                 if (guest_data_size < spec->next) {
5132                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5133                     break;
5134                 }
5135                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5136                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5137                 cur_data = argptr + spec->next;
5138                 spec = (void*)host_dm + host_dm->data_start + next;
5139             }
5140             break;
5141         }
5142         case DM_TABLE_DEPS:
5143         {
5144             void *hdata = (void*)host_dm + host_dm->data_start;
5145             int count = *(uint32_t*)hdata;
5146             uint64_t *hdev = hdata + 8;
5147             uint64_t *gdev = argptr + 8;
5148             int i;
5149 
5150             *(uint32_t*)argptr = tswap32(count);
5151             for (i = 0; i < count; i++) {
5152                 *gdev = tswap64(*hdev);
5153                 gdev++;
5154                 hdev++;
5155             }
5156             break;
5157         }
5158         case DM_LIST_VERSIONS:
5159         {
5160             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5161             uint32_t remaining_data = guest_data_size;
5162             void *cur_data = argptr;
5163             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5164             int vers_size = thunk_type_size(dm_arg_type, 0);
5165 
5166             while (1) {
5167                 uint32_t next = vers->next;
5168                 if (next) {
5169                     vers->next = vers_size + (strlen(vers->name) + 1);
5170                 }
5171                 if (remaining_data < vers->next) {
5172                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5173                     break;
5174                 }
5175                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5176                 strcpy(cur_data + vers_size, vers->name);
5177                 cur_data += vers->next;
5178                 remaining_data -= vers->next;
5179                 if (!next) {
5180                     break;
5181                 }
5182                 vers = (void*)vers + next;
5183             }
5184             break;
5185         }
5186         default:
5187             unlock_user(argptr, guest_data, 0);
5188             ret = -TARGET_EINVAL;
5189             goto out;
5190         }
5191         unlock_user(argptr, guest_data, guest_data_size);
5192 
5193         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5194         if (!argptr) {
5195             ret = -TARGET_EFAULT;
5196             goto out;
5197         }
5198         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5199         unlock_user(argptr, arg, target_size);
5200     }
5201 out:
5202     g_free(big_buf);
5203     return ret;
5204 }
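/*
 * struct dm_ioctl is a fixed header followed by a command-specific,
 * variable-sized payload starting at data_start.  The thunk machinery
 * only describes the fixed header, so the payload (dm_target_spec,
 * dm_name_list and dm_target_versions records plus their trailing
 * strings) is converted by hand in both directions above, with
 * DM_BUFFER_FULL_FLAG set whenever the translated data would not fit
 * into the guest-supplied buffer.
 */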
5205 
5206 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5207                                int cmd, abi_long arg)
5208 {
5209     void *argptr;
5210     int target_size;
5211     const argtype *arg_type = ie->arg_type;
5212     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5213     abi_long ret;
5214 
5215     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5216     struct blkpg_partition host_part;
5217 
5218     /* Read and convert blkpg */
5219     arg_type++;
5220     target_size = thunk_type_size(arg_type, 0);
5221     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5222     if (!argptr) {
5223         ret = -TARGET_EFAULT;
5224         goto out;
5225     }
5226     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5227     unlock_user(argptr, arg, 0);
5228 
5229     switch (host_blkpg->op) {
5230     case BLKPG_ADD_PARTITION:
5231     case BLKPG_DEL_PARTITION:
5232         /* payload is struct blkpg_partition */
5233         break;
5234     default:
5235         /* Unknown opcode */
5236         ret = -TARGET_EINVAL;
5237         goto out;
5238     }
5239 
5240     /* Read and convert blkpg->data */
5241     arg = (abi_long)(uintptr_t)host_blkpg->data;
5242     target_size = thunk_type_size(part_arg_type, 0);
5243     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5244     if (!argptr) {
5245         ret = -TARGET_EFAULT;
5246         goto out;
5247     }
5248     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5249     unlock_user(argptr, arg, 0);
5250 
5251     /* Swizzle the data pointer to our local copy and call! */
5252     host_blkpg->data = &host_part;
5253     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5254 
5255 out:
5256     return ret;
5257 }
5258 
5259 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5260                                 int fd, int cmd, abi_long arg)
5261 {
5262     const argtype *arg_type = ie->arg_type;
5263     const StructEntry *se;
5264     const argtype *field_types;
5265     const int *dst_offsets, *src_offsets;
5266     int target_size;
5267     void *argptr;
5268     abi_ulong *target_rt_dev_ptr = NULL;
5269     unsigned long *host_rt_dev_ptr = NULL;
5270     abi_long ret;
5271     int i;
5272 
5273     assert(ie->access == IOC_W);
5274     assert(*arg_type == TYPE_PTR);
5275     arg_type++;
5276     assert(*arg_type == TYPE_STRUCT);
5277     target_size = thunk_type_size(arg_type, 0);
5278     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5279     if (!argptr) {
5280         return -TARGET_EFAULT;
5281     }
5282     arg_type++;
5283     assert(*arg_type == (int)STRUCT_rtentry);
5284     se = struct_entries + *arg_type++;
5285     assert(se->convert[0] == NULL);
5286     /* convert struct here to be able to catch rt_dev string */
5287     field_types = se->field_types;
5288     dst_offsets = se->field_offsets[THUNK_HOST];
5289     src_offsets = se->field_offsets[THUNK_TARGET];
5290     for (i = 0; i < se->nb_fields; i++) {
5291         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5292             assert(*field_types == TYPE_PTRVOID);
5293             target_rt_dev_ptr = argptr + src_offsets[i];
5294             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5295             if (*target_rt_dev_ptr != 0) {
5296                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5297                                                   tswapal(*target_rt_dev_ptr));
5298                 if (!*host_rt_dev_ptr) {
5299                     unlock_user(argptr, arg, 0);
5300                     return -TARGET_EFAULT;
5301                 }
5302             } else {
5303                 *host_rt_dev_ptr = 0;
5304             }
5305             field_types++;
5306             continue;
5307         }
5308         field_types = thunk_convert(buf_temp + dst_offsets[i],
5309                                     argptr + src_offsets[i],
5310                                     field_types, THUNK_HOST);
5311     }
5312     unlock_user(argptr, arg, 0);
5313 
5314     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5315 
5316     assert(host_rt_dev_ptr != NULL);
5317     assert(target_rt_dev_ptr != NULL);
5318     if (*host_rt_dev_ptr != 0) {
5319         unlock_user((void *)*host_rt_dev_ptr,
5320                     *target_rt_dev_ptr, 0);
5321     }
5322     return ret;
5323 }
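/*
 * The rtentry-based routing ioctls need special handling because the
 * rt_dev field is a pointer to a device-name string in guest memory,
 * which the table-driven converter cannot follow.  The loop above walks
 * the field offsets from the STRUCT_rtentry description, converts every
 * other field generically, and locks/translates the rt_dev string
 * separately, unlocking it again after the host ioctl returns.
 */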
5324 
5325 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5326                                      int fd, int cmd, abi_long arg)
5327 {
5328     int sig = target_to_host_signal(arg);
5329     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5330 }
5331 
5332 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5333                                     int fd, int cmd, abi_long arg)
5334 {
5335     struct timeval tv;
5336     abi_long ret;
5337 
5338     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5339     if (is_error(ret)) {
5340         return ret;
5341     }
5342 
5343     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5344         if (copy_to_user_timeval(arg, &tv)) {
5345             return -TARGET_EFAULT;
5346         }
5347     } else {
5348         if (copy_to_user_timeval64(arg, &tv)) {
5349             return -TARGET_EFAULT;
5350         }
5351     }
5352 
5353     return ret;
5354 }
5355 
5356 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5357                                       int fd, int cmd, abi_long arg)
5358 {
5359     struct timespec ts;
5360     abi_long ret;
5361 
5362     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5363     if (is_error(ret)) {
5364         return ret;
5365     }
5366 
5367     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5368         if (host_to_target_timespec(arg, &ts)) {
5369             return -TARGET_EFAULT;
5370         }
5371     } else {
5372         if (host_to_target_timespec64(arg, &ts)) {
5373             return -TARGET_EFAULT;
5374         }
5375     }
5376 
5377     return ret;
5378 }
5379 
5380 #ifdef TIOCGPTPEER
5381 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5382                                      int fd, int cmd, abi_long arg)
5383 {
5384     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5385     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5386 }
5387 #endif
5388 
5389 #ifdef HAVE_DRM_H
5390 
5391 static void unlock_drm_version(struct drm_version *host_ver,
5392                                struct target_drm_version *target_ver,
5393                                bool copy)
5394 {
5395     unlock_user(host_ver->name, target_ver->name,
5396                                 copy ? host_ver->name_len : 0);
5397     unlock_user(host_ver->date, target_ver->date,
5398                                 copy ? host_ver->date_len : 0);
5399     unlock_user(host_ver->desc, target_ver->desc,
5400                                 copy ? host_ver->desc_len : 0);
5401 }
5402 
5403 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5404                                           struct target_drm_version *target_ver)
5405 {
5406     memset(host_ver, 0, sizeof(*host_ver));
5407 
5408     __get_user(host_ver->name_len, &target_ver->name_len);
5409     if (host_ver->name_len) {
5410         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5411                                    target_ver->name_len, 0);
5412         if (!host_ver->name) {
5413             return -EFAULT;
5414         }
5415     }
5416 
5417     __get_user(host_ver->date_len, &target_ver->date_len);
5418     if (host_ver->date_len) {
5419         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5420                                    target_ver->date_len, 0);
5421         if (!host_ver->date) {
5422             goto err;
5423         }
5424     }
5425 
5426     __get_user(host_ver->desc_len, &target_ver->desc_len);
5427     if (host_ver->desc_len) {
5428         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5429                                    target_ver->desc_len, 0);
5430         if (!host_ver->desc) {
5431             goto err;
5432         }
5433     }
5434 
5435     return 0;
5436 err:
5437     unlock_drm_version(host_ver, target_ver, false);
5438     return -EFAULT;
5439 }
5440 
5441 static inline void host_to_target_drmversion(
5442                                           struct target_drm_version *target_ver,
5443                                           struct drm_version *host_ver)
5444 {
5445     __put_user(host_ver->version_major, &target_ver->version_major);
5446     __put_user(host_ver->version_minor, &target_ver->version_minor);
5447     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5448     __put_user(host_ver->name_len, &target_ver->name_len);
5449     __put_user(host_ver->date_len, &target_ver->date_len);
5450     __put_user(host_ver->desc_len, &target_ver->desc_len);
5451     unlock_drm_version(host_ver, target_ver, true);
5452 }
5453 
5454 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5455                              int fd, int cmd, abi_long arg)
5456 {
5457     struct drm_version *ver;
5458     struct target_drm_version *target_ver;
5459     abi_long ret;
5460 
5461     switch (ie->host_cmd) {
5462     case DRM_IOCTL_VERSION:
5463         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5464             return -TARGET_EFAULT;
5465         }
5466         ver = (struct drm_version *)buf_temp;
5467         ret = target_to_host_drmversion(ver, target_ver);
5468         if (!is_error(ret)) {
5469             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5470             if (is_error(ret)) {
5471                 unlock_drm_version(ver, target_ver, false);
5472             } else {
5473                 host_to_target_drmversion(target_ver, ver);
5474             }
5475         }
5476         unlock_user_struct(target_ver, arg, 0);
5477         return ret;
5478     }
5479     return -TARGET_ENOSYS;
5480 }
5481 
5482 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5483                                            struct drm_i915_getparam *gparam,
5484                                            int fd, abi_long arg)
5485 {
5486     abi_long ret;
5487     int value;
5488     struct target_drm_i915_getparam *target_gparam;
5489 
5490     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5491         return -TARGET_EFAULT;
5492     }
5493 
5494     __get_user(gparam->param, &target_gparam->param);
5495     gparam->value = &value;
5496     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5497     put_user_s32(value, target_gparam->value);
5498 
5499     unlock_user_struct(target_gparam, arg, 0);
5500     return ret;
5501 }
5502 
5503 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5504                                   int fd, int cmd, abi_long arg)
5505 {
5506     switch (ie->host_cmd) {
5507     case DRM_IOCTL_I915_GETPARAM:
5508         return do_ioctl_drm_i915_getparam(ie,
5509                                           (struct drm_i915_getparam *)buf_temp,
5510                                           fd, arg);
5511     default:
5512         return -TARGET_ENOSYS;
5513     }
5514 }
5515 
5516 #endif
5517 
5518 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5519                                         int fd, int cmd, abi_long arg)
5520 {
5521     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5522     struct tun_filter *target_filter;
5523     char *target_addr;
5524 
5525     assert(ie->access == IOC_W);
5526 
5527     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5528     if (!target_filter) {
5529         return -TARGET_EFAULT;
5530     }
5531     filter->flags = tswap16(target_filter->flags);
5532     filter->count = tswap16(target_filter->count);
5533     unlock_user(target_filter, arg, 0);
5534 
5535     if (filter->count) {
5536         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5537             MAX_STRUCT_SIZE) {
5538             return -TARGET_EFAULT;
5539         }
5540 
5541         target_addr = lock_user(VERIFY_READ,
5542                                 arg + offsetof(struct tun_filter, addr),
5543                                 filter->count * ETH_ALEN, 1);
5544         if (!target_addr) {
5545             return -TARGET_EFAULT;
5546         }
5547         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5548         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5549     }
5550 
5551     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5552 }
5553 
5554 IOCTLEntry ioctl_entries[] = {
5555 #define IOCTL(cmd, access, ...) \
5556     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5557 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5558     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5559 #define IOCTL_IGNORE(cmd) \
5560     { TARGET_ ## cmd, 0, #cmd },
5561 #include "ioctls.h"
5562     { 0, 0, },
5563 };
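/*
 * ioctl_entries[] is generated from "ioctls.h": IOCTL() entries are
 * handled by the generic code in do_ioctl() below, IOCTL_SPECIAL()
 * entries carry one of the do_ioctl_*() helpers above, and
 * IOCTL_IGNORE() entries have host_cmd == 0 so the request fails with
 * -TARGET_ENOTTY.  As an illustration (not necessarily an exact entry
 * from ioctls.h), a line like
 *
 *     IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT))
 *
 * expands to
 *
 *     { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 */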
5564 
5565 /* ??? Implement proper locking for ioctls.  */
5566 /* do_ioctl() Must return target values and target errnos. */
5567 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5568 {
5569     const IOCTLEntry *ie;
5570     const argtype *arg_type;
5571     abi_long ret;
5572     uint8_t buf_temp[MAX_STRUCT_SIZE];
5573     int target_size;
5574     void *argptr;
5575 
5576     ie = ioctl_entries;
5577     for(;;) {
5578         if (ie->target_cmd == 0) {
5579             qemu_log_mask(
5580                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5581             return -TARGET_ENOTTY;
5582         }
5583         if (ie->target_cmd == cmd)
5584             break;
5585         ie++;
5586     }
5587     arg_type = ie->arg_type;
5588     if (ie->do_ioctl) {
5589         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5590     } else if (!ie->host_cmd) {
5591         /* Some architectures define BSD ioctls in their headers
5592            that are not implemented in Linux.  */
5593         return -TARGET_ENOTTY;
5594     }
5595 
5596     switch(arg_type[0]) {
5597     case TYPE_NULL:
5598         /* no argument */
5599         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5600         break;
5601     case TYPE_PTRVOID:
5602     case TYPE_INT:
5603     case TYPE_LONG:
5604     case TYPE_ULONG:
5605         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5606         break;
5607     case TYPE_PTR:
5608         arg_type++;
5609         target_size = thunk_type_size(arg_type, 0);
5610         switch(ie->access) {
5611         case IOC_R:
5612             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5613             if (!is_error(ret)) {
5614                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5615                 if (!argptr)
5616                     return -TARGET_EFAULT;
5617                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5618                 unlock_user(argptr, arg, target_size);
5619             }
5620             break;
5621         case IOC_W:
5622             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5623             if (!argptr)
5624                 return -TARGET_EFAULT;
5625             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5626             unlock_user(argptr, arg, 0);
5627             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5628             break;
5629         default:
5630         case IOC_RW:
5631             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5632             if (!argptr)
5633                 return -TARGET_EFAULT;
5634             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5635             unlock_user(argptr, arg, 0);
5636             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5637             if (!is_error(ret)) {
5638                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5639                 if (!argptr)
5640                     return -TARGET_EFAULT;
5641                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5642                 unlock_user(argptr, arg, target_size);
5643             }
5644             break;
5645         }
5646         break;
5647     default:
5648         qemu_log_mask(LOG_UNIMP,
5649                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5650                       (long)cmd, arg_type[0]);
5651         ret = -TARGET_ENOTTY;
5652         break;
5653     }
5654     return ret;
5655 }
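/*
 * For TYPE_PTR arguments the generic path above uses ie->access to
 * decide the copy direction: IOC_W converts the guest buffer to host
 * format before the call, IOC_R converts the host result back to guest
 * format after a successful call, and IOC_RW does both.  Scalar
 * argument types (TYPE_INT, TYPE_LONG, ...) are passed through to the
 * host ioctl unchanged.
 */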
5656 
5657 static const bitmask_transtbl iflag_tbl[] = {
5658         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5659         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5660         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5661         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5662         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5663         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5664         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5665         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5666         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5667         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5668         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5669         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5670         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5671         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5672         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5673 };
5674 
5675 static const bitmask_transtbl oflag_tbl[] = {
5676 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5677 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5678 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5679 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5680 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5681 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5682 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5683 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5684 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5685 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5686 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5687 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5688 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5689 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5690 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5691 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5692 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5693 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5694 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5695 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5696 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5697 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5698 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5699 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5700 };
5701 
5702 static const bitmask_transtbl cflag_tbl[] = {
5703 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5704 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5705 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5706 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5707 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5708 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5709 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5710 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5711 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5712 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5713 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5714 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5715 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5716 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5717 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5718 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5719 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5720 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5721 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5722 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5723 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5724 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5725 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5726 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5727 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5728 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5729 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5730 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5731 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5732 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5733 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5734 };
5735 
5736 static const bitmask_transtbl lflag_tbl[] = {
5737   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5738   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5739   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5740   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5741   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5742   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5743   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5744   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5745   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5746   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5747   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5748   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5749   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5750   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5751   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5752   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5753 };
5754 
5755 static void target_to_host_termios (void *dst, const void *src)
5756 {
5757     struct host_termios *host = dst;
5758     const struct target_termios *target = src;
5759 
5760     host->c_iflag =
5761         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5762     host->c_oflag =
5763         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5764     host->c_cflag =
5765         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5766     host->c_lflag =
5767         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5768     host->c_line = target->c_line;
5769 
5770     memset(host->c_cc, 0, sizeof(host->c_cc));
5771     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5772     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5773     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5774     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5775     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5776     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5777     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5778     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5779     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5780     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5781     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5782     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5783     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5784     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5785     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5786     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5787     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5788 }
5789 
5790 static void host_to_target_termios (void *dst, const void *src)
5791 {
5792     struct target_termios *target = dst;
5793     const struct host_termios *host = src;
5794 
5795     target->c_iflag =
5796         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5797     target->c_oflag =
5798         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5799     target->c_cflag =
5800         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5801     target->c_lflag =
5802         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5803     target->c_line = host->c_line;
5804 
5805     memset(target->c_cc, 0, sizeof(target->c_cc));
5806     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5807     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5808     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5809     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5810     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5811     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5812     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5813     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5814     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5815     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5816     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5817     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5818     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5819     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5820     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5821     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5822     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5823 }
5824 
5825 static const StructEntry struct_termios_def = {
5826     .convert = { host_to_target_termios, target_to_host_termios },
5827     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5828     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5829     .print = print_termios,
5830 };
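/*
 * termios is handled as one of the hand-converted (STRUCT_SPECIAL)
 * types, so instead of a field-by-field thunk description it gets the
 * two converters above.  That is needed because the c_iflag/c_oflag/
 * c_cflag/c_lflag bit encodings and the c_cc[] indices differ between
 * target and host; each bitmask_transtbl row pairs a target (mask,
 * value) with the corresponding host (mask, value) so the flag words
 * can be translated in either direction.
 */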
5831 
5832 /* If the host does not provide these bits, they may be safely discarded. */
5833 #ifndef MAP_SYNC
5834 #define MAP_SYNC 0
5835 #endif
5836 #ifndef MAP_UNINITIALIZED
5837 #define MAP_UNINITIALIZED 0
5838 #endif
5839 
5840 static const bitmask_transtbl mmap_flags_tbl[] = {
5841     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5842     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5843       MAP_ANONYMOUS, MAP_ANONYMOUS },
5844     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5845       MAP_GROWSDOWN, MAP_GROWSDOWN },
5846     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5847       MAP_DENYWRITE, MAP_DENYWRITE },
5848     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5849       MAP_EXECUTABLE, MAP_EXECUTABLE },
5850     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5851     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5852       MAP_NORESERVE, MAP_NORESERVE },
5853     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5854     /* MAP_STACK had been ignored by the kernel for quite some time.
5855        Recognize it for the target insofar as we do not want to pass
5856        it through to the host.  */
5857     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5858     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5859     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5860     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5861       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5862     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5863       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5864 };
5865 
5866 /*
5867  * Arrange for legacy / undefined architecture specific flags to be
5868  * ignored by mmap handling code.
5869  */
5870 #ifndef TARGET_MAP_32BIT
5871 #define TARGET_MAP_32BIT 0
5872 #endif
5873 #ifndef TARGET_MAP_HUGE_2MB
5874 #define TARGET_MAP_HUGE_2MB 0
5875 #endif
5876 #ifndef TARGET_MAP_HUGE_1GB
5877 #define TARGET_MAP_HUGE_1GB 0
5878 #endif
5879 
5880 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5881                         int target_flags, int fd, off_t offset)
5882 {
5883     /*
5884      * The historical set of flags that all mmap types implicitly support.
5885      */
5886     enum {
5887         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5888                                | TARGET_MAP_PRIVATE
5889                                | TARGET_MAP_FIXED
5890                                | TARGET_MAP_ANONYMOUS
5891                                | TARGET_MAP_DENYWRITE
5892                                | TARGET_MAP_EXECUTABLE
5893                                | TARGET_MAP_UNINITIALIZED
5894                                | TARGET_MAP_GROWSDOWN
5895                                | TARGET_MAP_LOCKED
5896                                | TARGET_MAP_NORESERVE
5897                                | TARGET_MAP_POPULATE
5898                                | TARGET_MAP_NONBLOCK
5899                                | TARGET_MAP_STACK
5900                                | TARGET_MAP_HUGETLB
5901                                | TARGET_MAP_32BIT
5902                                | TARGET_MAP_HUGE_2MB
5903                                | TARGET_MAP_HUGE_1GB
5904     };
5905     int host_flags;
5906 
5907     switch (target_flags & TARGET_MAP_TYPE) {
5908     case TARGET_MAP_PRIVATE:
5909         host_flags = MAP_PRIVATE;
5910         break;
5911     case TARGET_MAP_SHARED:
5912         host_flags = MAP_SHARED;
5913         break;
5914     case TARGET_MAP_SHARED_VALIDATE:
5915         /*
5916          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5917          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5918          */
5919         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5920             return -TARGET_EOPNOTSUPP;
5921         }
5922         host_flags = MAP_SHARED_VALIDATE;
5923         if (target_flags & TARGET_MAP_SYNC) {
5924             host_flags |= MAP_SYNC;
5925         }
5926         break;
5927     default:
5928         return -TARGET_EINVAL;
5929     }
5930     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5931 
5932     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5933 }
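/*
 * Illustrative example (editorial, not part of the original source): a guest
 *
 *     mmap(NULL, len, PROT_READ | PROT_WRITE,
 *          MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
 *
 * arrives here with TARGET_MAP_SHARED_VALIDATE | TARGET_MAP_SYNC set; the
 * switch above picks MAP_SHARED_VALIDATE and ORs in MAP_SYNC, while any flag
 * outside TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC yields -TARGET_EOPNOTSUPP.
 */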
5934 
5935 /*
5936  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5937  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5938  */
5939 #if defined(TARGET_I386)
5940 
5941 /* NOTE: there is really only one LDT, shared by all threads */
5942 static uint8_t *ldt_table;
5943 
5944 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5945 {
5946     int size;
5947     void *p;
5948 
5949     if (!ldt_table)
5950         return 0;
5951     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5952     if (size > bytecount)
5953         size = bytecount;
5954     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5955     if (!p)
5956         return -TARGET_EFAULT;
5957     /* ??? Should this be byteswapped?  */
5958     memcpy(p, ldt_table, size);
5959     unlock_user(p, ptr, size);
5960     return size;
5961 }
5962 
5963 /* XXX: add locking support */
5964 static abi_long write_ldt(CPUX86State *env,
5965                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5966 {
5967     struct target_modify_ldt_ldt_s ldt_info;
5968     struct target_modify_ldt_ldt_s *target_ldt_info;
5969     int seg_32bit, contents, read_exec_only, limit_in_pages;
5970     int seg_not_present, useable, lm;
5971     uint32_t *lp, entry_1, entry_2;
5972 
5973     if (bytecount != sizeof(ldt_info))
5974         return -TARGET_EINVAL;
5975     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5976         return -TARGET_EFAULT;
5977     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5978     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5979     ldt_info.limit = tswap32(target_ldt_info->limit);
5980     ldt_info.flags = tswap32(target_ldt_info->flags);
5981     unlock_user_struct(target_ldt_info, ptr, 0);
5982 
5983     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5984         return -TARGET_EINVAL;
5985     seg_32bit = ldt_info.flags & 1;
5986     contents = (ldt_info.flags >> 1) & 3;
5987     read_exec_only = (ldt_info.flags >> 3) & 1;
5988     limit_in_pages = (ldt_info.flags >> 4) & 1;
5989     seg_not_present = (ldt_info.flags >> 5) & 1;
5990     useable = (ldt_info.flags >> 6) & 1;
5991 #ifdef TARGET_ABI32
5992     lm = 0;
5993 #else
5994     lm = (ldt_info.flags >> 7) & 1;
5995 #endif
5996     if (contents == 3) {
5997         if (oldmode)
5998             return -TARGET_EINVAL;
5999         if (seg_not_present == 0)
6000             return -TARGET_EINVAL;
6001     }
6002     /* allocate the LDT */
6003     if (!ldt_table) {
6004         env->ldt.base = target_mmap(0,
6005                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6006                                     PROT_READ|PROT_WRITE,
6007                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6008         if (env->ldt.base == -1)
6009             return -TARGET_ENOMEM;
6010         memset(g2h_untagged(env->ldt.base), 0,
6011                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6012         env->ldt.limit = 0xffff;
6013         ldt_table = g2h_untagged(env->ldt.base);
6014     }
6015 
6016     /* NOTE: same code as Linux kernel */
6017     /* Allow LDTs to be cleared by the user. */
6018     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6019         if (oldmode ||
6020             (contents == 0		&&
6021              read_exec_only == 1	&&
6022              seg_32bit == 0		&&
6023              limit_in_pages == 0	&&
6024              seg_not_present == 1	&&
6025              useable == 0 )) {
6026             entry_1 = 0;
6027             entry_2 = 0;
6028             goto install;
6029         }
6030     }
6031 
6032     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6033         (ldt_info.limit & 0x0ffff);
6034     entry_2 = (ldt_info.base_addr & 0xff000000) |
6035         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6036         (ldt_info.limit & 0xf0000) |
6037         ((read_exec_only ^ 1) << 9) |
6038         (contents << 10) |
6039         ((seg_not_present ^ 1) << 15) |
6040         (seg_32bit << 22) |
6041         (limit_in_pages << 23) |
6042         (lm << 21) |
6043         0x7000;
6044     if (!oldmode)
6045         entry_2 |= (useable << 20);
6046 
6047     /* Install the new entry ...  */
6048 install:
6049     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6050     lp[0] = tswap32(entry_1);
6051     lp[1] = tswap32(entry_2);
6052     return 0;
6053 }
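/*
 * Encoding note (editorial): entry_1/entry_2 above follow the x86 segment
 * descriptor layout: the low 16 bits of base and limit go in entry_1, the
 * remaining base and limit bits plus the access/flag bits (type, present,
 * DB, granularity, AVL, L) are packed into entry_2, and both words are then
 * stored in target byte order via tswap32().
 */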
6054 
6055 /* specific and weird i386 syscalls */
6056 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6057                               unsigned long bytecount)
6058 {
6059     abi_long ret;
6060 
6061     switch (func) {
6062     case 0:
6063         ret = read_ldt(ptr, bytecount);
6064         break;
6065     case 1:
6066         ret = write_ldt(env, ptr, bytecount, 1);
6067         break;
6068     case 0x11:
6069         ret = write_ldt(env, ptr, bytecount, 0);
6070         break;
6071     default:
6072         ret = -TARGET_ENOSYS;
6073         break;
6074     }
6075     return ret;
6076 }
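/*
 * For reference (editorial): a guest modify_ldt(0, buf, n) reads the LDT via
 * read_ldt(), func == 1 writes an entry in the legacy format (oldmode set),
 * func == 0x11 uses the modern format, and any other func value returns
 * -TARGET_ENOSYS.
 */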
6077 
6078 #if defined(TARGET_ABI32)
6079 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6080 {
6081     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6082     struct target_modify_ldt_ldt_s ldt_info;
6083     struct target_modify_ldt_ldt_s *target_ldt_info;
6084     int seg_32bit, contents, read_exec_only, limit_in_pages;
6085     int seg_not_present, useable, lm;
6086     uint32_t *lp, entry_1, entry_2;
6087     int i;
6088 
6089     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6090     if (!target_ldt_info)
6091         return -TARGET_EFAULT;
6092     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6093     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6094     ldt_info.limit = tswap32(target_ldt_info->limit);
6095     ldt_info.flags = tswap32(target_ldt_info->flags);
6096     if (ldt_info.entry_number == -1) {
6097         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6098             if (gdt_table[i] == 0) {
6099                 ldt_info.entry_number = i;
6100                 target_ldt_info->entry_number = tswap32(i);
6101                 break;
6102             }
6103         }
6104     }
6105     unlock_user_struct(target_ldt_info, ptr, 1);
6106 
6107     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6108         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6109            return -TARGET_EINVAL;
6110     seg_32bit = ldt_info.flags & 1;
6111     contents = (ldt_info.flags >> 1) & 3;
6112     read_exec_only = (ldt_info.flags >> 3) & 1;
6113     limit_in_pages = (ldt_info.flags >> 4) & 1;
6114     seg_not_present = (ldt_info.flags >> 5) & 1;
6115     useable = (ldt_info.flags >> 6) & 1;
6116 #ifdef TARGET_ABI32
6117     lm = 0;
6118 #else
6119     lm = (ldt_info.flags >> 7) & 1;
6120 #endif
6121 
6122     if (contents == 3) {
6123         if (seg_not_present == 0)
6124             return -TARGET_EINVAL;
6125     }
6126 
6127     /* NOTE: same code as Linux kernel */
6128     /* Allow LDTs to be cleared by the user. */
6129     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6130         if ((contents == 0             &&
6131              read_exec_only == 1       &&
6132              seg_32bit == 0            &&
6133              limit_in_pages == 0       &&
6134              seg_not_present == 1      &&
6135              useable == 0 )) {
6136             entry_1 = 0;
6137             entry_2 = 0;
6138             goto install;
6139         }
6140     }
6141 
6142     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6143         (ldt_info.limit & 0x0ffff);
6144     entry_2 = (ldt_info.base_addr & 0xff000000) |
6145         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6146         (ldt_info.limit & 0xf0000) |
6147         ((read_exec_only ^ 1) << 9) |
6148         (contents << 10) |
6149         ((seg_not_present ^ 1) << 15) |
6150         (seg_32bit << 22) |
6151         (limit_in_pages << 23) |
6152         (useable << 20) |
6153         (lm << 21) |
6154         0x7000;
6155 
6156     /* Install the new entry ...  */
6157 install:
6158     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6159     lp[0] = tswap32(entry_1);
6160     lp[1] = tswap32(entry_2);
6161     return 0;
6162 }
6163 
6164 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6165 {
6166     struct target_modify_ldt_ldt_s *target_ldt_info;
6167     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6168     uint32_t base_addr, limit, flags;
6169     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6170     int seg_not_present, useable, lm;
6171     uint32_t *lp, entry_1, entry_2;
6172 
6173     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6174     if (!target_ldt_info)
6175         return -TARGET_EFAULT;
6176     idx = tswap32(target_ldt_info->entry_number);
6177     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6178         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6179         unlock_user_struct(target_ldt_info, ptr, 1);
6180         return -TARGET_EINVAL;
6181     }
6182     lp = (uint32_t *)(gdt_table + idx);
6183     entry_1 = tswap32(lp[0]);
6184     entry_2 = tswap32(lp[1]);
6185 
6186     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6187     contents = (entry_2 >> 10) & 3;
6188     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6189     seg_32bit = (entry_2 >> 22) & 1;
6190     limit_in_pages = (entry_2 >> 23) & 1;
6191     useable = (entry_2 >> 20) & 1;
6192 #ifdef TARGET_ABI32
6193     lm = 0;
6194 #else
6195     lm = (entry_2 >> 21) & 1;
6196 #endif
6197     flags = (seg_32bit << 0) | (contents << 1) |
6198         (read_exec_only << 3) | (limit_in_pages << 4) |
6199         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6200     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6201     base_addr = (entry_1 >> 16) |
6202         (entry_2 & 0xff000000) |
6203         ((entry_2 & 0xff) << 16);
6204     target_ldt_info->base_addr = tswapal(base_addr);
6205     target_ldt_info->limit = tswap32(limit);
6206     target_ldt_info->flags = tswap32(flags);
6207     unlock_user_struct(target_ldt_info, ptr, 1);
6208     return 0;
6209 }
6210 
6211 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6212 {
6213     return -TARGET_ENOSYS;
6214 }
6215 #else
6216 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6217 {
6218     abi_long ret = 0;
6219     abi_ulong val;
6220     int idx;
6221 
6222     switch(code) {
6223     case TARGET_ARCH_SET_GS:
6224     case TARGET_ARCH_SET_FS:
6225         if (code == TARGET_ARCH_SET_GS)
6226             idx = R_GS;
6227         else
6228             idx = R_FS;
6229         cpu_x86_load_seg(env, idx, 0);
6230         env->segs[idx].base = addr;
6231         break;
6232     case TARGET_ARCH_GET_GS:
6233     case TARGET_ARCH_GET_FS:
6234         if (code == TARGET_ARCH_GET_GS)
6235             idx = R_GS;
6236         else
6237             idx = R_FS;
6238         val = env->segs[idx].base;
6239         if (put_user(val, addr, abi_ulong))
6240             ret = -TARGET_EFAULT;
6241         break;
6242     default:
6243         ret = -TARGET_EINVAL;
6244         break;
6245     }
6246     return ret;
6247 }
6248 #endif /* defined(TARGET_ABI32) */
6249 #endif /* defined(TARGET_I386) */
6250 
6251 /*
6252  * These constants are generic.  Supply any that are missing from the host.
6253  */
6254 #ifndef PR_SET_NAME
6255 # define PR_SET_NAME    15
6256 # define PR_GET_NAME    16
6257 #endif
6258 #ifndef PR_SET_FP_MODE
6259 # define PR_SET_FP_MODE 45
6260 # define PR_GET_FP_MODE 46
6261 # define PR_FP_MODE_FR   (1 << 0)
6262 # define PR_FP_MODE_FRE  (1 << 1)
6263 #endif
6264 #ifndef PR_SVE_SET_VL
6265 # define PR_SVE_SET_VL  50
6266 # define PR_SVE_GET_VL  51
6267 # define PR_SVE_VL_LEN_MASK  0xffff
6268 # define PR_SVE_VL_INHERIT   (1 << 17)
6269 #endif
6270 #ifndef PR_PAC_RESET_KEYS
6271 # define PR_PAC_RESET_KEYS  54
6272 # define PR_PAC_APIAKEY   (1 << 0)
6273 # define PR_PAC_APIBKEY   (1 << 1)
6274 # define PR_PAC_APDAKEY   (1 << 2)
6275 # define PR_PAC_APDBKEY   (1 << 3)
6276 # define PR_PAC_APGAKEY   (1 << 4)
6277 #endif
6278 #ifndef PR_SET_TAGGED_ADDR_CTRL
6279 # define PR_SET_TAGGED_ADDR_CTRL 55
6280 # define PR_GET_TAGGED_ADDR_CTRL 56
6281 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6282 #endif
6283 #ifndef PR_MTE_TCF_SHIFT
6284 # define PR_MTE_TCF_SHIFT       1
6285 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6286 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6287 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6288 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6289 # define PR_MTE_TAG_SHIFT       3
6290 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6291 #endif
6292 #ifndef PR_SET_IO_FLUSHER
6293 # define PR_SET_IO_FLUSHER 57
6294 # define PR_GET_IO_FLUSHER 58
6295 #endif
6296 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6297 # define PR_SET_SYSCALL_USER_DISPATCH 59
6298 #endif
6299 #ifndef PR_SME_SET_VL
6300 # define PR_SME_SET_VL  63
6301 # define PR_SME_GET_VL  64
6302 # define PR_SME_VL_LEN_MASK  0xffff
6303 # define PR_SME_VL_INHERIT   (1 << 17)
6304 #endif
6305 
6306 #include "target_prctl.h"
6307 
6308 static abi_long do_prctl_inval0(CPUArchState *env)
6309 {
6310     return -TARGET_EINVAL;
6311 }
6312 
6313 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6314 {
6315     return -TARGET_EINVAL;
6316 }
6317 
6318 #ifndef do_prctl_get_fp_mode
6319 #define do_prctl_get_fp_mode do_prctl_inval0
6320 #endif
6321 #ifndef do_prctl_set_fp_mode
6322 #define do_prctl_set_fp_mode do_prctl_inval1
6323 #endif
6324 #ifndef do_prctl_sve_get_vl
6325 #define do_prctl_sve_get_vl do_prctl_inval0
6326 #endif
6327 #ifndef do_prctl_sve_set_vl
6328 #define do_prctl_sve_set_vl do_prctl_inval1
6329 #endif
6330 #ifndef do_prctl_reset_keys
6331 #define do_prctl_reset_keys do_prctl_inval1
6332 #endif
6333 #ifndef do_prctl_set_tagged_addr_ctrl
6334 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6335 #endif
6336 #ifndef do_prctl_get_tagged_addr_ctrl
6337 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6338 #endif
6339 #ifndef do_prctl_get_unalign
6340 #define do_prctl_get_unalign do_prctl_inval1
6341 #endif
6342 #ifndef do_prctl_set_unalign
6343 #define do_prctl_set_unalign do_prctl_inval1
6344 #endif
6345 #ifndef do_prctl_sme_get_vl
6346 #define do_prctl_sme_get_vl do_prctl_inval0
6347 #endif
6348 #ifndef do_prctl_sme_set_vl
6349 #define do_prctl_sme_set_vl do_prctl_inval1
6350 #endif
6351 
6352 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6353                          abi_long arg3, abi_long arg4, abi_long arg5)
6354 {
6355     abi_long ret;
6356 
6357     switch (option) {
6358     case PR_GET_PDEATHSIG:
6359         {
6360             int deathsig;
6361             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6362                                   arg3, arg4, arg5));
6363             if (!is_error(ret) &&
6364                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6365                 return -TARGET_EFAULT;
6366             }
6367             return ret;
6368         }
6369     case PR_SET_PDEATHSIG:
6370         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6371                                arg3, arg4, arg5));
6372     case PR_GET_NAME:
6373         {
6374             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6375             if (!name) {
6376                 return -TARGET_EFAULT;
6377             }
6378             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6379                                   arg3, arg4, arg5));
6380             unlock_user(name, arg2, 16);
6381             return ret;
6382         }
6383     case PR_SET_NAME:
6384         {
6385             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6386             if (!name) {
6387                 return -TARGET_EFAULT;
6388             }
6389             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6390                                   arg3, arg4, arg5));
6391             unlock_user(name, arg2, 0);
6392             return ret;
6393         }
6394     case PR_GET_FP_MODE:
6395         return do_prctl_get_fp_mode(env);
6396     case PR_SET_FP_MODE:
6397         return do_prctl_set_fp_mode(env, arg2);
6398     case PR_SVE_GET_VL:
6399         return do_prctl_sve_get_vl(env);
6400     case PR_SVE_SET_VL:
6401         return do_prctl_sve_set_vl(env, arg2);
6402     case PR_SME_GET_VL:
6403         return do_prctl_sme_get_vl(env);
6404     case PR_SME_SET_VL:
6405         return do_prctl_sme_set_vl(env, arg2);
6406     case PR_PAC_RESET_KEYS:
6407         if (arg3 || arg4 || arg5) {
6408             return -TARGET_EINVAL;
6409         }
6410         return do_prctl_reset_keys(env, arg2);
6411     case PR_SET_TAGGED_ADDR_CTRL:
6412         if (arg3 || arg4 || arg5) {
6413             return -TARGET_EINVAL;
6414         }
6415         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6416     case PR_GET_TAGGED_ADDR_CTRL:
6417         if (arg2 || arg3 || arg4 || arg5) {
6418             return -TARGET_EINVAL;
6419         }
6420         return do_prctl_get_tagged_addr_ctrl(env);
6421 
6422     case PR_GET_UNALIGN:
6423         return do_prctl_get_unalign(env, arg2);
6424     case PR_SET_UNALIGN:
6425         return do_prctl_set_unalign(env, arg2);
6426 
6427     case PR_CAP_AMBIENT:
6428     case PR_CAPBSET_READ:
6429     case PR_CAPBSET_DROP:
6430     case PR_GET_DUMPABLE:
6431     case PR_SET_DUMPABLE:
6432     case PR_GET_KEEPCAPS:
6433     case PR_SET_KEEPCAPS:
6434     case PR_GET_SECUREBITS:
6435     case PR_SET_SECUREBITS:
6436     case PR_GET_TIMING:
6437     case PR_SET_TIMING:
6438     case PR_GET_TIMERSLACK:
6439     case PR_SET_TIMERSLACK:
6440     case PR_MCE_KILL:
6441     case PR_MCE_KILL_GET:
6442     case PR_GET_NO_NEW_PRIVS:
6443     case PR_SET_NO_NEW_PRIVS:
6444     case PR_GET_IO_FLUSHER:
6445     case PR_SET_IO_FLUSHER:
6446     case PR_SET_CHILD_SUBREAPER:
6447     case PR_GET_SPECULATION_CTRL:
6448     case PR_SET_SPECULATION_CTRL:
6449         /* Some prctl options take no pointer arguments and can be passed straight through. */
6450         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6451 
6452     case PR_GET_CHILD_SUBREAPER:
6453         {
6454             int val;
6455             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6456                                   arg3, arg4, arg5));
6457             if (!is_error(ret) && put_user_s32(val, arg2)) {
6458                 return -TARGET_EFAULT;
6459             }
6460             return ret;
6461         }
6462 
6463     case PR_GET_TID_ADDRESS:
6464         {
6465             TaskState *ts = env_cpu(env)->opaque;
6466             return put_user_ual(ts->child_tidptr, arg2);
6467         }
6468 
6469     case PR_GET_FPEXC:
6470     case PR_SET_FPEXC:
6471         /* Was used for SPE on PowerPC. */
6472         return -TARGET_EINVAL;
6473 
6474     case PR_GET_ENDIAN:
6475     case PR_SET_ENDIAN:
6476     case PR_GET_FPEMU:
6477     case PR_SET_FPEMU:
6478     case PR_SET_MM:
6479     case PR_GET_SECCOMP:
6480     case PR_SET_SECCOMP:
6481     case PR_SET_SYSCALL_USER_DISPATCH:
6482     case PR_GET_THP_DISABLE:
6483     case PR_SET_THP_DISABLE:
6484     case PR_GET_TSC:
6485     case PR_SET_TSC:
6486         /* Refuse these to prevent the target from disabling functionality we need. */
6487         return -TARGET_EINVAL;
6488 
6489     default:
6490         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6491                       option);
6492         return -TARGET_EINVAL;
6493     }
6494 }
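/*
 * Summary (editorial): do_prctl() handles three classes of options --
 * options whose arguments need target<->host conversion (e.g.
 * PR_GET_PDEATHSIG, PR_GET_NAME), plain-integer options forwarded directly
 * to the host prctl(), and options rejected with -TARGET_EINVAL because
 * honouring them could interfere with the emulation (e.g. PR_SET_SECCOMP,
 * PR_SET_TSC).
 */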
6495 
6496 #define NEW_STACK_SIZE 0x40000
6497 
6498 
6499 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6500 typedef struct {
6501     CPUArchState *env;
6502     pthread_mutex_t mutex;
6503     pthread_cond_t cond;
6504     pthread_t thread;
6505     uint32_t tid;
6506     abi_ulong child_tidptr;
6507     abi_ulong parent_tidptr;
6508     sigset_t sigmask;
6509 } new_thread_info;
6510 
6511 static void *clone_func(void *arg)
6512 {
6513     new_thread_info *info = arg;
6514     CPUArchState *env;
6515     CPUState *cpu;
6516     TaskState *ts;
6517 
6518     rcu_register_thread();
6519     tcg_register_thread();
6520     env = info->env;
6521     cpu = env_cpu(env);
6522     thread_cpu = cpu;
6523     ts = get_task_state(cpu);
6524     info->tid = sys_gettid();
6525     task_settid(ts);
6526     if (info->child_tidptr)
6527         put_user_u32(info->tid, info->child_tidptr);
6528     if (info->parent_tidptr)
6529         put_user_u32(info->tid, info->parent_tidptr);
6530     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6531     /* Enable signals.  */
6532     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6533     /* Signal to the parent that we're ready.  */
6534     pthread_mutex_lock(&info->mutex);
6535     pthread_cond_broadcast(&info->cond);
6536     pthread_mutex_unlock(&info->mutex);
6537     /* Wait until the parent has finished initializing the tls state.  */
6538     pthread_mutex_lock(&clone_lock);
6539     pthread_mutex_unlock(&clone_lock);
6540     cpu_loop(env);
6541     /* never exits */
6542     return NULL;
6543 }
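/*
 * Handshake note (editorial): clone_func() publishes its TID, signals the
 * parent via info->cond, and then briefly acquires clone_lock; because the
 * parent holds clone_lock until it has finished setting up the new TaskState
 * and TLS, the child cannot enter cpu_loop() before that setup is complete.
 */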
6544 
6545 /* do_fork() must return host values and target errnos (unlike most
6546    do_*() functions). */
6547 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6548                    abi_ulong parent_tidptr, target_ulong newtls,
6549                    abi_ulong child_tidptr)
6550 {
6551     CPUState *cpu = env_cpu(env);
6552     int ret;
6553     TaskState *ts;
6554     CPUState *new_cpu;
6555     CPUArchState *new_env;
6556     sigset_t sigmask;
6557 
6558     flags &= ~CLONE_IGNORED_FLAGS;
6559 
6560     /* Emulate vfork() with fork() */
6561     if (flags & CLONE_VFORK)
6562         flags &= ~(CLONE_VFORK | CLONE_VM);
6563 
6564     if (flags & CLONE_VM) {
6565         TaskState *parent_ts = get_task_state(cpu);
6566         new_thread_info info;
6567         pthread_attr_t attr;
6568 
6569         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6570             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6571             return -TARGET_EINVAL;
6572         }
6573 
6574         ts = g_new0(TaskState, 1);
6575         init_task_state(ts);
6576 
6577         /* Grab a mutex so that thread setup appears atomic.  */
6578         pthread_mutex_lock(&clone_lock);
6579 
6580         /*
6581          * If this is our first additional thread, we need to ensure we
6582          * generate code for parallel execution and flush old translations.
6583          * Do this now so that the copy gets CF_PARALLEL too.
6584          */
6585         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6586             cpu->tcg_cflags |= CF_PARALLEL;
6587             tb_flush(cpu);
6588         }
6589 
6590         /* we create a new CPU instance. */
6591         new_env = cpu_copy(env);
6592         /* Init regs that differ from the parent.  */
6593         cpu_clone_regs_child(new_env, newsp, flags);
6594         cpu_clone_regs_parent(env, flags);
6595         new_cpu = env_cpu(new_env);
6596         new_cpu->opaque = ts;
6597         ts->bprm = parent_ts->bprm;
6598         ts->info = parent_ts->info;
6599         ts->signal_mask = parent_ts->signal_mask;
6600 
6601         if (flags & CLONE_CHILD_CLEARTID) {
6602             ts->child_tidptr = child_tidptr;
6603         }
6604 
6605         if (flags & CLONE_SETTLS) {
6606             cpu_set_tls (new_env, newtls);
6607         }
6608 
6609         memset(&info, 0, sizeof(info));
6610         pthread_mutex_init(&info.mutex, NULL);
6611         pthread_mutex_lock(&info.mutex);
6612         pthread_cond_init(&info.cond, NULL);
6613         info.env = new_env;
6614         if (flags & CLONE_CHILD_SETTID) {
6615             info.child_tidptr = child_tidptr;
6616         }
6617         if (flags & CLONE_PARENT_SETTID) {
6618             info.parent_tidptr = parent_tidptr;
6619         }
6620 
6621         ret = pthread_attr_init(&attr);
6622         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6623         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6624         /* It is not safe to deliver signals until the child has finished
6625            initializing, so temporarily block all signals.  */
6626         sigfillset(&sigmask);
6627         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6628         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6629 
6630         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6631         /* TODO: Free new CPU state if thread creation failed.  */
6632 
6633         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6634         pthread_attr_destroy(&attr);
6635         if (ret == 0) {
6636             /* Wait for the child to initialize.  */
6637             pthread_cond_wait(&info.cond, &info.mutex);
6638             ret = info.tid;
6639         } else {
6640             ret = -1;
6641         }
6642         pthread_mutex_unlock(&info.mutex);
6643         pthread_cond_destroy(&info.cond);
6644         pthread_mutex_destroy(&info.mutex);
6645         pthread_mutex_unlock(&clone_lock);
6646     } else {
6647         /* if CLONE_VM is not set, we treat this as a fork */
6648         if (flags & CLONE_INVALID_FORK_FLAGS) {
6649             return -TARGET_EINVAL;
6650         }
6651 
6652         /* We can't support custom termination signals */
6653         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6654             return -TARGET_EINVAL;
6655         }
6656 
6657 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6658         if (flags & CLONE_PIDFD) {
6659             return -TARGET_EINVAL;
6660         }
6661 #endif
6662 
6663         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6664         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6665             return -TARGET_EINVAL;
6666         }
6667 
6668         if (block_signals()) {
6669             return -QEMU_ERESTARTSYS;
6670         }
6671 
6672         fork_start();
6673         ret = fork();
6674         if (ret == 0) {
6675             /* Child Process.  */
6676             cpu_clone_regs_child(env, newsp, flags);
6677             fork_end(ret);
6678             /* There is a race condition here.  The parent process could
6679                theoretically read the TID in the child process before the child
6680                tid is set.  This would require using either ptrace
6681                (not implemented) or having *_tidptr point at a shared memory
6682                mapping.  We can't repeat the spinlock hack used above because
6683                the child process gets its own copy of the lock.  */
6684             if (flags & CLONE_CHILD_SETTID)
6685                 put_user_u32(sys_gettid(), child_tidptr);
6686             if (flags & CLONE_PARENT_SETTID)
6687                 put_user_u32(sys_gettid(), parent_tidptr);
6688             ts = get_task_state(cpu);
6689             if (flags & CLONE_SETTLS)
6690                 cpu_set_tls (env, newtls);
6691             if (flags & CLONE_CHILD_CLEARTID)
6692                 ts->child_tidptr = child_tidptr;
6693         } else {
6694             cpu_clone_regs_parent(env, flags);
6695             if (flags & CLONE_PIDFD) {
6696                 int pid_fd = 0;
6697 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6698                 int pid_child = ret;
6699                 pid_fd = pidfd_open(pid_child, 0);
6700                 if (pid_fd >= 0) {
6701                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6702                                                | FD_CLOEXEC);
6703                 } else {
6704                         pid_fd = 0;
6705                 }
6706 #endif
6707                 put_user_u32(pid_fd, parent_tidptr);
6708             }
6709             fork_end(ret);
6710         }
6711         g_assert(!cpu_in_exclusive_context(cpu));
6712     }
6713     return ret;
6714 }
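/*
 * Usage note (editorial): a guest pthread_create() reaches do_fork() with
 * CLONE_VM set and is emulated with a host pthread, while guest fork() and
 * vfork() take the host fork() path (vfork is downgraded to fork above); on
 * success the new thread's TID or the child's PID is returned.
 */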
6715 
6716 /* Warning: does not handle Linux-specific flags... */
6717 static int target_to_host_fcntl_cmd(int cmd)
6718 {
6719     int ret;
6720 
6721     switch(cmd) {
6722     case TARGET_F_DUPFD:
6723     case TARGET_F_GETFD:
6724     case TARGET_F_SETFD:
6725     case TARGET_F_GETFL:
6726     case TARGET_F_SETFL:
6727     case TARGET_F_OFD_GETLK:
6728     case TARGET_F_OFD_SETLK:
6729     case TARGET_F_OFD_SETLKW:
6730         ret = cmd;
6731         break;
6732     case TARGET_F_GETLK:
6733         ret = F_GETLK64;
6734         break;
6735     case TARGET_F_SETLK:
6736         ret = F_SETLK64;
6737         break;
6738     case TARGET_F_SETLKW:
6739         ret = F_SETLKW64;
6740         break;
6741     case TARGET_F_GETOWN:
6742         ret = F_GETOWN;
6743         break;
6744     case TARGET_F_SETOWN:
6745         ret = F_SETOWN;
6746         break;
6747     case TARGET_F_GETSIG:
6748         ret = F_GETSIG;
6749         break;
6750     case TARGET_F_SETSIG:
6751         ret = F_SETSIG;
6752         break;
6753 #if TARGET_ABI_BITS == 32
6754     case TARGET_F_GETLK64:
6755         ret = F_GETLK64;
6756         break;
6757     case TARGET_F_SETLK64:
6758         ret = F_SETLK64;
6759         break;
6760     case TARGET_F_SETLKW64:
6761         ret = F_SETLKW64;
6762         break;
6763 #endif
6764     case TARGET_F_SETLEASE:
6765         ret = F_SETLEASE;
6766         break;
6767     case TARGET_F_GETLEASE:
6768         ret = F_GETLEASE;
6769         break;
6770 #ifdef F_DUPFD_CLOEXEC
6771     case TARGET_F_DUPFD_CLOEXEC:
6772         ret = F_DUPFD_CLOEXEC;
6773         break;
6774 #endif
6775     case TARGET_F_NOTIFY:
6776         ret = F_NOTIFY;
6777         break;
6778 #ifdef F_GETOWN_EX
6779     case TARGET_F_GETOWN_EX:
6780         ret = F_GETOWN_EX;
6781         break;
6782 #endif
6783 #ifdef F_SETOWN_EX
6784     case TARGET_F_SETOWN_EX:
6785         ret = F_SETOWN_EX;
6786         break;
6787 #endif
6788 #ifdef F_SETPIPE_SZ
6789     case TARGET_F_SETPIPE_SZ:
6790         ret = F_SETPIPE_SZ;
6791         break;
6792     case TARGET_F_GETPIPE_SZ:
6793         ret = F_GETPIPE_SZ;
6794         break;
6795 #endif
6796 #ifdef F_ADD_SEALS
6797     case TARGET_F_ADD_SEALS:
6798         ret = F_ADD_SEALS;
6799         break;
6800     case TARGET_F_GET_SEALS:
6801         ret = F_GET_SEALS;
6802         break;
6803 #endif
6804     default:
6805         ret = -TARGET_EINVAL;
6806         break;
6807     }
6808 
6809 #if defined(__powerpc64__)
6810     /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13 and 14,
6811      * which the kernel does not support. The glibc fcntl wrapper adjusts
6812      * them to 5, 6 and 7 before making the syscall. Since we make the syscall
6813      * directly, adjust to what the kernel supports (see the worked example below).
6814      */
6815     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6816         ret -= F_GETLK64 - 5;
6817     }
6818 #endif
6819 
6820     return ret;
6821 }
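/*
 * Worked example (editorial), relying on the PPC64 comment above:
 * TARGET_F_GETLK maps to F_GETLK64, which the glibc headers define as 12;
 * "ret -= F_GETLK64 - 5" turns that into 5, the value the kernel implements,
 * and likewise 13 becomes 6 and 14 becomes 7.
 */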
6822 
6823 #define FLOCK_TRANSTBL \
6824     switch (type) { \
6825     TRANSTBL_CONVERT(F_RDLCK); \
6826     TRANSTBL_CONVERT(F_WRLCK); \
6827     TRANSTBL_CONVERT(F_UNLCK); \
6828     }
6829 
6830 static int target_to_host_flock(int type)
6831 {
6832 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6833     FLOCK_TRANSTBL
6834 #undef  TRANSTBL_CONVERT
6835     return -TARGET_EINVAL;
6836 }
6837 
6838 static int host_to_target_flock(int type)
6839 {
6840 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6841     FLOCK_TRANSTBL
6842 #undef  TRANSTBL_CONVERT
6843     /* If we don't know how to convert the value coming
6844      * from the host, we copy it to the target field as-is.
6845      */
6846     return type;
6847 }
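/*
 * Expansion sketch (editorial): inside target_to_host_flock(),
 * TRANSTBL_CONVERT(F_RDLCK) expands to "case TARGET_F_RDLCK: return F_RDLCK;",
 * so the single FLOCK_TRANSTBL table provides both directions of the l_type
 * translation.
 */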
6848 
6849 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6850                                             abi_ulong target_flock_addr)
6851 {
6852     struct target_flock *target_fl;
6853     int l_type;
6854 
6855     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6856         return -TARGET_EFAULT;
6857     }
6858 
6859     __get_user(l_type, &target_fl->l_type);
6860     l_type = target_to_host_flock(l_type);
6861     if (l_type < 0) {
6862         return l_type;
6863     }
6864     fl->l_type = l_type;
6865     __get_user(fl->l_whence, &target_fl->l_whence);
6866     __get_user(fl->l_start, &target_fl->l_start);
6867     __get_user(fl->l_len, &target_fl->l_len);
6868     __get_user(fl->l_pid, &target_fl->l_pid);
6869     unlock_user_struct(target_fl, target_flock_addr, 0);
6870     return 0;
6871 }
6872 
6873 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6874                                           const struct flock64 *fl)
6875 {
6876     struct target_flock *target_fl;
6877     short l_type;
6878 
6879     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6880         return -TARGET_EFAULT;
6881     }
6882 
6883     l_type = host_to_target_flock(fl->l_type);
6884     __put_user(l_type, &target_fl->l_type);
6885     __put_user(fl->l_whence, &target_fl->l_whence);
6886     __put_user(fl->l_start, &target_fl->l_start);
6887     __put_user(fl->l_len, &target_fl->l_len);
6888     __put_user(fl->l_pid, &target_fl->l_pid);
6889     unlock_user_struct(target_fl, target_flock_addr, 1);
6890     return 0;
6891 }
6892 
6893 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6894 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6895 
6896 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6897 struct target_oabi_flock64 {
6898     abi_short l_type;
6899     abi_short l_whence;
6900     abi_llong l_start;
6901     abi_llong l_len;
6902     abi_int   l_pid;
6903 } QEMU_PACKED;
6904 
6905 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6906                                                    abi_ulong target_flock_addr)
6907 {
6908     struct target_oabi_flock64 *target_fl;
6909     int l_type;
6910 
6911     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6912         return -TARGET_EFAULT;
6913     }
6914 
6915     __get_user(l_type, &target_fl->l_type);
6916     l_type = target_to_host_flock(l_type);
6917     if (l_type < 0) {
6918         return l_type;
6919     }
6920     fl->l_type = l_type;
6921     __get_user(fl->l_whence, &target_fl->l_whence);
6922     __get_user(fl->l_start, &target_fl->l_start);
6923     __get_user(fl->l_len, &target_fl->l_len);
6924     __get_user(fl->l_pid, &target_fl->l_pid);
6925     unlock_user_struct(target_fl, target_flock_addr, 0);
6926     return 0;
6927 }
6928 
6929 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6930                                                  const struct flock64 *fl)
6931 {
6932     struct target_oabi_flock64 *target_fl;
6933     short l_type;
6934 
6935     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6936         return -TARGET_EFAULT;
6937     }
6938 
6939     l_type = host_to_target_flock(fl->l_type);
6940     __put_user(l_type, &target_fl->l_type);
6941     __put_user(fl->l_whence, &target_fl->l_whence);
6942     __put_user(fl->l_start, &target_fl->l_start);
6943     __put_user(fl->l_len, &target_fl->l_len);
6944     __put_user(fl->l_pid, &target_fl->l_pid);
6945     unlock_user_struct(target_fl, target_flock_addr, 1);
6946     return 0;
6947 }
6948 #endif
6949 
6950 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6951                                               abi_ulong target_flock_addr)
6952 {
6953     struct target_flock64 *target_fl;
6954     int l_type;
6955 
6956     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     __get_user(l_type, &target_fl->l_type);
6961     l_type = target_to_host_flock(l_type);
6962     if (l_type < 0) {
6963         return l_type;
6964     }
6965     fl->l_type = l_type;
6966     __get_user(fl->l_whence, &target_fl->l_whence);
6967     __get_user(fl->l_start, &target_fl->l_start);
6968     __get_user(fl->l_len, &target_fl->l_len);
6969     __get_user(fl->l_pid, &target_fl->l_pid);
6970     unlock_user_struct(target_fl, target_flock_addr, 0);
6971     return 0;
6972 }
6973 
6974 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6975                                             const struct flock64 *fl)
6976 {
6977     struct target_flock64 *target_fl;
6978     short l_type;
6979 
6980     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6981         return -TARGET_EFAULT;
6982     }
6983 
6984     l_type = host_to_target_flock(fl->l_type);
6985     __put_user(l_type, &target_fl->l_type);
6986     __put_user(fl->l_whence, &target_fl->l_whence);
6987     __put_user(fl->l_start, &target_fl->l_start);
6988     __put_user(fl->l_len, &target_fl->l_len);
6989     __put_user(fl->l_pid, &target_fl->l_pid);
6990     unlock_user_struct(target_fl, target_flock_addr, 1);
6991     return 0;
6992 }
6993 
6994 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6995 {
6996     struct flock64 fl64;
6997 #ifdef F_GETOWN_EX
6998     struct f_owner_ex fox;
6999     struct target_f_owner_ex *target_fox;
7000 #endif
7001     abi_long ret;
7002     int host_cmd = target_to_host_fcntl_cmd(cmd);
7003 
7004     if (host_cmd == -TARGET_EINVAL)
7005 	    return host_cmd;
7006 
7007     switch(cmd) {
7008     case TARGET_F_GETLK:
7009         ret = copy_from_user_flock(&fl64, arg);
7010         if (ret) {
7011             return ret;
7012         }
7013         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7014         if (ret == 0) {
7015             ret = copy_to_user_flock(arg, &fl64);
7016         }
7017         break;
7018 
7019     case TARGET_F_SETLK:
7020     case TARGET_F_SETLKW:
7021         ret = copy_from_user_flock(&fl64, arg);
7022         if (ret) {
7023             return ret;
7024         }
7025         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7026         break;
7027 
7028     case TARGET_F_GETLK64:
7029     case TARGET_F_OFD_GETLK:
7030         ret = copy_from_user_flock64(&fl64, arg);
7031         if (ret) {
7032             return ret;
7033         }
7034         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7035         if (ret == 0) {
7036             ret = copy_to_user_flock64(arg, &fl64);
7037         }
7038         break;
7039     case TARGET_F_SETLK64:
7040     case TARGET_F_SETLKW64:
7041     case TARGET_F_OFD_SETLK:
7042     case TARGET_F_OFD_SETLKW:
7043         ret = copy_from_user_flock64(&fl64, arg);
7044         if (ret) {
7045             return ret;
7046         }
7047         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7048         break;
7049 
7050     case TARGET_F_GETFL:
7051         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7052         if (ret >= 0) {
7053             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7054             /* Tell 32-bit guests that the fd uses O_LARGEFILE on 64-bit hosts: */
7055             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7056                 ret |= TARGET_O_LARGEFILE;
7057             }
7058         }
7059         break;
7060 
7061     case TARGET_F_SETFL:
7062         ret = get_errno(safe_fcntl(fd, host_cmd,
7063                                    target_to_host_bitmask(arg,
7064                                                           fcntl_flags_tbl)));
7065         break;
7066 
7067 #ifdef F_GETOWN_EX
7068     case TARGET_F_GETOWN_EX:
7069         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7070         if (ret >= 0) {
7071             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7072                 return -TARGET_EFAULT;
7073             target_fox->type = tswap32(fox.type);
7074             target_fox->pid = tswap32(fox.pid);
7075             unlock_user_struct(target_fox, arg, 1);
7076         }
7077         break;
7078 #endif
7079 
7080 #ifdef F_SETOWN_EX
7081     case TARGET_F_SETOWN_EX:
7082         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7083             return -TARGET_EFAULT;
7084         fox.type = tswap32(target_fox->type);
7085         fox.pid = tswap32(target_fox->pid);
7086         unlock_user_struct(target_fox, arg, 0);
7087         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7088         break;
7089 #endif
7090 
7091     case TARGET_F_SETSIG:
7092         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7093         break;
7094 
7095     case TARGET_F_GETSIG:
7096         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7097         break;
7098 
7099     case TARGET_F_SETOWN:
7100     case TARGET_F_GETOWN:
7101     case TARGET_F_SETLEASE:
7102     case TARGET_F_GETLEASE:
7103     case TARGET_F_SETPIPE_SZ:
7104     case TARGET_F_GETPIPE_SZ:
7105     case TARGET_F_ADD_SEALS:
7106     case TARGET_F_GET_SEALS:
7107         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7108         break;
7109 
7110     default:
7111         ret = get_errno(safe_fcntl(fd, cmd, arg));
7112         break;
7113     }
7114     return ret;
7115 }
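/*
 * Example flow (editorial): for TARGET_F_GETLK the lock description is
 * copied in with copy_from_user_flock(), passed to the host fcntl(), and on
 * success copied back with copy_to_user_flock(), so the guest sees l_whence,
 * l_start, l_len, l_pid and an l_type in its own encoding.
 */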
7116 
7117 #ifdef USE_UID16
7118 
7119 static inline int high2lowuid(int uid)
7120 {
7121     if (uid > 65535)
7122         return 65534;
7123     else
7124         return uid;
7125 }
7126 
7127 static inline int high2lowgid(int gid)
7128 {
7129     if (gid > 65535)
7130         return 65534;
7131     else
7132         return gid;
7133 }
7134 
7135 static inline int low2highuid(int uid)
7136 {
7137     if ((int16_t)uid == -1)
7138         return -1;
7139     else
7140         return uid;
7141 }
7142 
7143 static inline int low2highgid(int gid)
7144 {
7145     if ((int16_t)gid == -1)
7146         return -1;
7147     else
7148         return gid;
7149 }
7150 static inline int tswapid(int id)
7151 {
7152     return tswap16(id);
7153 }
7154 
7155 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7156 
7157 #else /* !USE_UID16 */
7158 static inline int high2lowuid(int uid)
7159 {
7160     return uid;
7161 }
7162 static inline int high2lowgid(int gid)
7163 {
7164     return gid;
7165 }
7166 static inline int low2highuid(int uid)
7167 {
7168     return uid;
7169 }
7170 static inline int low2highgid(int gid)
7171 {
7172     return gid;
7173 }
7174 static inline int tswapid(int id)
7175 {
7176     return tswap32(id);
7177 }
7178 
7179 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7180 
7181 #endif /* USE_UID16 */
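/*
 * Worked example (editorial): with USE_UID16, high2lowuid(70000) returns
 * 65534 (the traditional overflow UID) because the value does not fit in 16
 * bits, and low2highuid(0xffff) returns -1 so that the "leave unchanged"
 * convention of 16-bit callers is preserved.
 */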
7182 
7183 /* We must do direct syscalls for setting UID/GID, because we want to
7184  * implement the Linux system call semantics of "change only for this thread",
7185  * not the libc/POSIX semantics of "change for all threads in process".
7186  * (See http://ewontfix.com/17/ for more details.)
7187  * We use the 32-bit version of the syscalls if present; if it is not
7188  * then either the host architecture supports 32-bit UIDs natively with
7189  * the standard syscall, or the 16-bit UID is the best we can do.
7190  */
7191 #ifdef __NR_setuid32
7192 #define __NR_sys_setuid __NR_setuid32
7193 #else
7194 #define __NR_sys_setuid __NR_setuid
7195 #endif
7196 #ifdef __NR_setgid32
7197 #define __NR_sys_setgid __NR_setgid32
7198 #else
7199 #define __NR_sys_setgid __NR_setgid
7200 #endif
7201 #ifdef __NR_setresuid32
7202 #define __NR_sys_setresuid __NR_setresuid32
7203 #else
7204 #define __NR_sys_setresuid __NR_setresuid
7205 #endif
7206 #ifdef __NR_setresgid32
7207 #define __NR_sys_setresgid __NR_setresgid32
7208 #else
7209 #define __NR_sys_setresgid __NR_setresgid
7210 #endif
7211 
7212 _syscall1(int, sys_setuid, uid_t, uid)
7213 _syscall1(int, sys_setgid, gid_t, gid)
7214 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7215 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
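/*
 * Usage note (editorial): the sys_setuid()/sys_setgid()/sys_setresuid()/
 * sys_setresgid() wrappers above issue the raw syscalls, so a guest setuid()
 * changes credentials only for the calling thread, matching kernel semantics
 * rather than glibc's process-wide behaviour described in the comment above.
 */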
7216 
7217 void syscall_init(void)
7218 {
7219     IOCTLEntry *ie;
7220     const argtype *arg_type;
7221     int size;
7222 
7223     thunk_init(STRUCT_MAX);
7224 
7225 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7226 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7227 #include "syscall_types.h"
7228 #undef STRUCT
7229 #undef STRUCT_SPECIAL
7230 
7231     /* We patch the ioctl size if necessary. We rely on the fact that
7232        no ioctl has all bits set to '1' in the size field. */
7233     ie = ioctl_entries;
7234     while (ie->target_cmd != 0) {
7235         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7236             TARGET_IOC_SIZEMASK) {
7237             arg_type = ie->arg_type;
7238             if (arg_type[0] != TYPE_PTR) {
7239                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7240                         ie->target_cmd);
7241                 exit(1);
7242             }
7243             arg_type++;
7244             size = thunk_type_size(arg_type, 0);
7245             ie->target_cmd = (ie->target_cmd &
7246                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7247                 (size << TARGET_IOC_SIZESHIFT);
7248         }
7249 
7250         /* automatic consistency check if same arch */
7251 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7252     (defined(__x86_64__) && defined(TARGET_X86_64))
7253         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7254             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7255                     ie->name, ie->target_cmd, ie->host_cmd);
7256         }
7257 #endif
7258         ie++;
7259     }
7260 }
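/*
 * Illustrative note (editorial): an ioctl entry whose size field is
 * TARGET_IOC_SIZEMASK acts as a "patch me" marker; syscall_init() recomputes
 * the size from the thunk description of the pointed-to structure and folds
 * it back into target_cmd, so the table can be written without hard-coding
 * target structure sizes.
 */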
7261 
7262 #ifdef TARGET_NR_truncate64
7263 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7264                                          abi_long arg2,
7265                                          abi_long arg3,
7266                                          abi_long arg4)
7267 {
7268     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7269         arg2 = arg3;
7270         arg3 = arg4;
7271     }
7272     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7273 }
7274 #endif
7275 
7276 #ifdef TARGET_NR_ftruncate64
7277 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7278                                           abi_long arg2,
7279                                           abi_long arg3,
7280                                           abi_long arg4)
7281 {
7282     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7283         arg2 = arg3;
7284         arg3 = arg4;
7285     }
7286     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7287 }
7288 #endif
7289 
7290 #if defined(TARGET_NR_timer_settime) || \
7291     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7292 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7293                                                  abi_ulong target_addr)
7294 {
7295     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7296                                 offsetof(struct target_itimerspec,
7297                                          it_interval)) ||
7298         target_to_host_timespec(&host_its->it_value, target_addr +
7299                                 offsetof(struct target_itimerspec,
7300                                          it_value))) {
7301         return -TARGET_EFAULT;
7302     }
7303 
7304     return 0;
7305 }
7306 #endif
7307 
7308 #if defined(TARGET_NR_timer_settime64) || \
7309     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7310 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7311                                                    abi_ulong target_addr)
7312 {
7313     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7314                                   offsetof(struct target__kernel_itimerspec,
7315                                            it_interval)) ||
7316         target_to_host_timespec64(&host_its->it_value, target_addr +
7317                                   offsetof(struct target__kernel_itimerspec,
7318                                            it_value))) {
7319         return -TARGET_EFAULT;
7320     }
7321 
7322     return 0;
7323 }
7324 #endif
7325 
7326 #if ((defined(TARGET_NR_timerfd_gettime) || \
7327       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7328       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7329 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7330                                                  struct itimerspec *host_its)
7331 {
7332     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7333                                                        it_interval),
7334                                 &host_its->it_interval) ||
7335         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7336                                                        it_value),
7337                                 &host_its->it_value)) {
7338         return -TARGET_EFAULT;
7339     }
7340     return 0;
7341 }
7342 #endif
7343 
7344 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7345       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7346       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7347 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7348                                                    struct itimerspec *host_its)
7349 {
7350     if (host_to_target_timespec64(target_addr +
7351                                   offsetof(struct target__kernel_itimerspec,
7352                                            it_interval),
7353                                   &host_its->it_interval) ||
7354         host_to_target_timespec64(target_addr +
7355                                   offsetof(struct target__kernel_itimerspec,
7356                                            it_value),
7357                                   &host_its->it_value)) {
7358         return -TARGET_EFAULT;
7359     }
7360     return 0;
7361 }
7362 #endif
7363 
7364 #if defined(TARGET_NR_adjtimex) || \
7365     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7366 static inline abi_long target_to_host_timex(struct timex *host_tx,
7367                                             abi_long target_addr)
7368 {
7369     struct target_timex *target_tx;
7370 
7371     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7372         return -TARGET_EFAULT;
7373     }
7374 
7375     __get_user(host_tx->modes, &target_tx->modes);
7376     __get_user(host_tx->offset, &target_tx->offset);
7377     __get_user(host_tx->freq, &target_tx->freq);
7378     __get_user(host_tx->maxerror, &target_tx->maxerror);
7379     __get_user(host_tx->esterror, &target_tx->esterror);
7380     __get_user(host_tx->status, &target_tx->status);
7381     __get_user(host_tx->constant, &target_tx->constant);
7382     __get_user(host_tx->precision, &target_tx->precision);
7383     __get_user(host_tx->tolerance, &target_tx->tolerance);
7384     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7385     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7386     __get_user(host_tx->tick, &target_tx->tick);
7387     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7388     __get_user(host_tx->jitter, &target_tx->jitter);
7389     __get_user(host_tx->shift, &target_tx->shift);
7390     __get_user(host_tx->stabil, &target_tx->stabil);
7391     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7392     __get_user(host_tx->calcnt, &target_tx->calcnt);
7393     __get_user(host_tx->errcnt, &target_tx->errcnt);
7394     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7395     __get_user(host_tx->tai, &target_tx->tai);
7396 
7397     unlock_user_struct(target_tx, target_addr, 0);
7398     return 0;
7399 }
7400 
7401 static inline abi_long host_to_target_timex(abi_long target_addr,
7402                                             struct timex *host_tx)
7403 {
7404     struct target_timex *target_tx;
7405 
7406     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7407         return -TARGET_EFAULT;
7408     }
7409 
7410     __put_user(host_tx->modes, &target_tx->modes);
7411     __put_user(host_tx->offset, &target_tx->offset);
7412     __put_user(host_tx->freq, &target_tx->freq);
7413     __put_user(host_tx->maxerror, &target_tx->maxerror);
7414     __put_user(host_tx->esterror, &target_tx->esterror);
7415     __put_user(host_tx->status, &target_tx->status);
7416     __put_user(host_tx->constant, &target_tx->constant);
7417     __put_user(host_tx->precision, &target_tx->precision);
7418     __put_user(host_tx->tolerance, &target_tx->tolerance);
7419     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7420     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7421     __put_user(host_tx->tick, &target_tx->tick);
7422     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7423     __put_user(host_tx->jitter, &target_tx->jitter);
7424     __put_user(host_tx->shift, &target_tx->shift);
7425     __put_user(host_tx->stabil, &target_tx->stabil);
7426     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7427     __put_user(host_tx->calcnt, &target_tx->calcnt);
7428     __put_user(host_tx->errcnt, &target_tx->errcnt);
7429     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7430     __put_user(host_tx->tai, &target_tx->tai);
7431 
7432     unlock_user_struct(target_tx, target_addr, 1);
7433     return 0;
7434 }
7435 #endif
7436 
7437 
7438 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7439 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7440                                               abi_long target_addr)
7441 {
7442     struct target__kernel_timex *target_tx;
7443 
7444     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7445                                  offsetof(struct target__kernel_timex,
7446                                           time))) {
7447         return -TARGET_EFAULT;
7448     }
7449 
7450     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7451         return -TARGET_EFAULT;
7452     }
7453 
7454     __get_user(host_tx->modes, &target_tx->modes);
7455     __get_user(host_tx->offset, &target_tx->offset);
7456     __get_user(host_tx->freq, &target_tx->freq);
7457     __get_user(host_tx->maxerror, &target_tx->maxerror);
7458     __get_user(host_tx->esterror, &target_tx->esterror);
7459     __get_user(host_tx->status, &target_tx->status);
7460     __get_user(host_tx->constant, &target_tx->constant);
7461     __get_user(host_tx->precision, &target_tx->precision);
7462     __get_user(host_tx->tolerance, &target_tx->tolerance);
7463     __get_user(host_tx->tick, &target_tx->tick);
7464     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7465     __get_user(host_tx->jitter, &target_tx->jitter);
7466     __get_user(host_tx->shift, &target_tx->shift);
7467     __get_user(host_tx->stabil, &target_tx->stabil);
7468     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7469     __get_user(host_tx->calcnt, &target_tx->calcnt);
7470     __get_user(host_tx->errcnt, &target_tx->errcnt);
7471     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7472     __get_user(host_tx->tai, &target_tx->tai);
7473 
7474     unlock_user_struct(target_tx, target_addr, 0);
7475     return 0;
7476 }
7477 
7478 static inline abi_long host_to_target_timex64(abi_long target_addr,
7479                                               struct timex *host_tx)
7480 {
7481     struct target__kernel_timex *target_tx;
7482 
7483     if (copy_to_user_timeval64(target_addr +
7484                                offsetof(struct target__kernel_timex, time),
7485                                &host_tx->time)) {
7486         return -TARGET_EFAULT;
7487     }
7488 
7489     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7490         return -TARGET_EFAULT;
7491     }
7492 
7493     __put_user(host_tx->modes, &target_tx->modes);
7494     __put_user(host_tx->offset, &target_tx->offset);
7495     __put_user(host_tx->freq, &target_tx->freq);
7496     __put_user(host_tx->maxerror, &target_tx->maxerror);
7497     __put_user(host_tx->esterror, &target_tx->esterror);
7498     __put_user(host_tx->status, &target_tx->status);
7499     __put_user(host_tx->constant, &target_tx->constant);
7500     __put_user(host_tx->precision, &target_tx->precision);
7501     __put_user(host_tx->tolerance, &target_tx->tolerance);
7502     __put_user(host_tx->tick, &target_tx->tick);
7503     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7504     __put_user(host_tx->jitter, &target_tx->jitter);
7505     __put_user(host_tx->shift, &target_tx->shift);
7506     __put_user(host_tx->stabil, &target_tx->stabil);
7507     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7508     __put_user(host_tx->calcnt, &target_tx->calcnt);
7509     __put_user(host_tx->errcnt, &target_tx->errcnt);
7510     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7511     __put_user(host_tx->tai, &target_tx->tai);
7512 
7513     unlock_user_struct(target_tx, target_addr, 1);
7514     return 0;
7515 }
7516 #endif
7517 
7518 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7519 #define sigev_notify_thread_id _sigev_un._tid
7520 #endif
7521 
7522 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7523                                                abi_ulong target_addr)
7524 {
7525     struct target_sigevent *target_sevp;
7526 
7527     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7528         return -TARGET_EFAULT;
7529     }
7530 
7531     /* This union is awkward on 64 bit systems because it has a 32 bit
7532      * integer and a pointer in it; we follow the conversion approach
7533      * used for handling sigval types in signal.c so the guest should get
7534      * the correct value back even if we did a 64 bit byteswap and it's
7535      * using the 32 bit integer.
7536      */
7537     host_sevp->sigev_value.sival_ptr =
7538         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7539     host_sevp->sigev_signo =
7540         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7541     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7542     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7543 
7544     unlock_user_struct(target_sevp, target_addr, 1);
7545     return 0;
7546 }
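/*
 * A hypothetical usage sketch for the helper above, roughly as a
 * timer_create-style handler would call it (names are illustrative only):
 *
 *     struct sigevent host_sevp, *phost_sevp = NULL;
 *
 *     if (guest_sevp_addr) {
 *         if (target_to_host_sigevent(&host_sevp, guest_sevp_addr)) {
 *             return -TARGET_EFAULT;
 *         }
 *         phost_sevp = &host_sevp;
 *     }
 *     ret = get_errno(timer_create(host_clkid, phost_sevp, &host_timer));
 */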
7547 
7548 #if defined(TARGET_NR_mlockall)
7549 static inline int target_to_host_mlockall_arg(int arg)
7550 {
7551     int result = 0;
7552 
7553     if (arg & TARGET_MCL_CURRENT) {
7554         result |= MCL_CURRENT;
7555     }
7556     if (arg & TARGET_MCL_FUTURE) {
7557         result |= MCL_FUTURE;
7558     }
7559 #ifdef MCL_ONFAULT
7560     if (arg & TARGET_MCL_ONFAULT) {
7561         result |= MCL_ONFAULT;
7562     }
7563 #endif
7564 
7565     return result;
7566 }
7567 #endif
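/*
 * Example of the flag translation above: a guest passing
 * TARGET_MCL_CURRENT | TARGET_MCL_FUTURE ends up with host
 * MCL_CURRENT | MCL_FUTURE; TARGET_MCL_ONFAULT is only forwarded when the
 * host headers define MCL_ONFAULT, otherwise that bit is dropped.
 */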
7568 
7569 static inline int target_to_host_msync_arg(abi_long arg)
7570 {
7571     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7572            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7573            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7574            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7575 }
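/*
 * Unlike the mlockall helper, unknown msync bits are passed through
 * unchanged (the final term above), so the host kernel rather than QEMU
 * rejects invalid combinations.  For example, a guest TARGET_MS_ASYNC
 * request becomes the host MS_ASYNC bit even if the numeric values of the
 * two constants differ.
 */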
7576 
7577 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7578      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7579      defined(TARGET_NR_newfstatat))
7580 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7581                                              abi_ulong target_addr,
7582                                              struct stat *host_st)
7583 {
7584 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7585     if (cpu_env->eabi) {
7586         struct target_eabi_stat64 *target_st;
7587 
7588         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7589             return -TARGET_EFAULT;
7590         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7591         __put_user(host_st->st_dev, &target_st->st_dev);
7592         __put_user(host_st->st_ino, &target_st->st_ino);
7593 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7594         __put_user(host_st->st_ino, &target_st->__st_ino);
7595 #endif
7596         __put_user(host_st->st_mode, &target_st->st_mode);
7597         __put_user(host_st->st_nlink, &target_st->st_nlink);
7598         __put_user(host_st->st_uid, &target_st->st_uid);
7599         __put_user(host_st->st_gid, &target_st->st_gid);
7600         __put_user(host_st->st_rdev, &target_st->st_rdev);
7601         __put_user(host_st->st_size, &target_st->st_size);
7602         __put_user(host_st->st_blksize, &target_st->st_blksize);
7603         __put_user(host_st->st_blocks, &target_st->st_blocks);
7604         __put_user(host_st->st_atime, &target_st->target_st_atime);
7605         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7606         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7607 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7608         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7609         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7610         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7611 #endif
7612         unlock_user_struct(target_st, target_addr, 1);
7613     } else
7614 #endif
7615     {
7616 #if defined(TARGET_HAS_STRUCT_STAT64)
7617         struct target_stat64 *target_st;
7618 #else
7619         struct target_stat *target_st;
7620 #endif
7621 
7622         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7623             return -TARGET_EFAULT;
7624         memset(target_st, 0, sizeof(*target_st));
7625         __put_user(host_st->st_dev, &target_st->st_dev);
7626         __put_user(host_st->st_ino, &target_st->st_ino);
7627 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7628         __put_user(host_st->st_ino, &target_st->__st_ino);
7629 #endif
7630         __put_user(host_st->st_mode, &target_st->st_mode);
7631         __put_user(host_st->st_nlink, &target_st->st_nlink);
7632         __put_user(host_st->st_uid, &target_st->st_uid);
7633         __put_user(host_st->st_gid, &target_st->st_gid);
7634         __put_user(host_st->st_rdev, &target_st->st_rdev);
7635         /* XXX: better use of kernel struct */
7636         __put_user(host_st->st_size, &target_st->st_size);
7637         __put_user(host_st->st_blksize, &target_st->st_blksize);
7638         __put_user(host_st->st_blocks, &target_st->st_blocks);
7639         __put_user(host_st->st_atime, &target_st->target_st_atime);
7640         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7641         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7642 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7643         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7644         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7645         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7646 #endif
7647         unlock_user_struct(target_st, target_addr, 1);
7648     }
7649 
7650     return 0;
7651 }
7652 #endif
7653 
7654 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7655 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7656                                             abi_ulong target_addr)
7657 {
7658     struct target_statx *target_stx;
7659 
7660     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7661         return -TARGET_EFAULT;
7662     }
7663     memset(target_stx, 0, sizeof(*target_stx));
7664 
7665     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7666     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7667     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7668     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7669     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7670     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7671     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7672     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7673     __put_user(host_stx->stx_size, &target_stx->stx_size);
7674     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7675     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7676     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7677     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7678     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7679     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7680     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7681     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7682     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7683     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7684     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7685     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7686     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7687     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7688 
7689     unlock_user_struct(target_stx, target_addr, 1);
7690 
7691     return 0;
7692 }
7693 #endif
7694 
7695 static int do_sys_futex(int *uaddr, int op, int val,
7696                          const struct timespec *timeout, int *uaddr2,
7697                          int val3)
7698 {
7699 #if HOST_LONG_BITS == 64
7700 #if defined(__NR_futex)
7701     /* 64-bit hosts always use a 64-bit time_t; no _time64 variant exists */
7702     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7703 
7704 #endif
7705 #else /* HOST_LONG_BITS == 64 */
7706 #if defined(__NR_futex_time64)
7707     if (sizeof(timeout->tv_sec) == 8) {
7708         /* _time64 function on 32bit arch */
7709         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7710     }
7711 #endif
7712 #if defined(__NR_futex)
7713     /* old function on 32bit arch */
7714     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7715 #endif
7716 #endif /* HOST_LONG_BITS == 64 */
7717     g_assert_not_reached();
7718 }
7719 
7720 static int do_safe_futex(int *uaddr, int op, int val,
7721                          const struct timespec *timeout, int *uaddr2,
7722                          int val3)
7723 {
7724 #if HOST_LONG_BITS == 64
7725 #if defined(__NR_futex)
7726     /* 64-bit hosts always use a 64-bit time_t; no _time64 variant exists */
7727     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7728 #endif
7729 #else /* HOST_LONG_BITS == 64 */
7730 #if defined(__NR_futex_time64)
7731     if (sizeof(timeout->tv_sec) == 8) {
7732         /* _time64 function on 32bit arch */
7733         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7734                                            val3));
7735     }
7736 #endif
7737 #if defined(__NR_futex)
7738     /* old function on 32bit arch */
7739     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7740 #endif
7741 #endif /* HOST_LONG_BITS == 64 */
7742     return -TARGET_ENOSYS;
7743 }
7744 
7745 /* ??? Using host futex calls even when target atomic operations
7746    are not really atomic probably breaks things.  However, implementing
7747    futexes locally would make futexes shared between multiple processes
7748    tricky.  In any case they are probably useless, because guest atomic
7749    operations won't work either.  */
7750 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7751 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7752                     int op, int val, target_ulong timeout,
7753                     target_ulong uaddr2, int val3)
7754 {
7755     struct timespec ts, *pts = NULL;
7756     void *haddr2 = NULL;
7757     int base_op;
7758 
7759     /* We assume FUTEX_* constants are the same on both host and target. */
7760 #ifdef FUTEX_CMD_MASK
7761     base_op = op & FUTEX_CMD_MASK;
7762 #else
7763     base_op = op;
7764 #endif
7765     switch (base_op) {
7766     case FUTEX_WAIT:
7767     case FUTEX_WAIT_BITSET:
7768         val = tswap32(val);
7769         break;
7770     case FUTEX_WAIT_REQUEUE_PI:
7771         val = tswap32(val);
7772         haddr2 = g2h(cpu, uaddr2);
7773         break;
7774     case FUTEX_LOCK_PI:
7775     case FUTEX_LOCK_PI2:
7776         break;
7777     case FUTEX_WAKE:
7778     case FUTEX_WAKE_BITSET:
7779     case FUTEX_TRYLOCK_PI:
7780     case FUTEX_UNLOCK_PI:
7781         timeout = 0;
7782         break;
7783     case FUTEX_FD:
7784         val = target_to_host_signal(val);
7785         timeout = 0;
7786         break;
7787     case FUTEX_CMP_REQUEUE:
7788     case FUTEX_CMP_REQUEUE_PI:
7789         val3 = tswap32(val3);
7790         /* fall through */
7791     case FUTEX_REQUEUE:
7792     case FUTEX_WAKE_OP:
7793         /*
7794          * For these, the 4th argument is not TIMEOUT, but VAL2.
7795          * But the prototype of do_safe_futex takes a pointer, so
7796          * insert casts to satisfy the compiler.  We do not need
7797          * to tswap VAL2 since it's not compared to guest memory.
7798          */
7799         pts = (struct timespec *)(uintptr_t)timeout;
7800         timeout = 0;
7801         haddr2 = g2h(cpu, uaddr2);
7802         break;
7803     default:
7804         return -TARGET_ENOSYS;
7805     }
7806     if (timeout) {
7807         pts = &ts;
7808         if (time64
7809             ? target_to_host_timespec64(pts, timeout)
7810             : target_to_host_timespec(pts, timeout)) {
7811             return -TARGET_EFAULT;
7812         }
7813     }
7814     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7815 }
7816 #endif
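/*
 * Summary of the argument juggling above: for the requeue/wake-op family
 * the 4th syscall argument is really an integer VAL2, so it is smuggled
 * through the timespec pointer without ever being dereferenced; for the
 * waiting operations the guest timespec is converted according to
 * "time64".  An illustrative guest call that takes the FUTEX_WAIT path:
 *
 *     syscall(__NR_futex, uaddr, FUTEX_WAIT, expected, &timeout, NULL, 0);
 */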
7817 
7818 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7819 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7820                                      abi_long handle, abi_long mount_id,
7821                                      abi_long flags)
7822 {
7823     struct file_handle *target_fh;
7824     struct file_handle *fh;
7825     int mid = 0;
7826     abi_long ret;
7827     char *name;
7828     unsigned int size, total_size;
7829 
7830     if (get_user_s32(size, handle)) {
7831         return -TARGET_EFAULT;
7832     }
7833 
7834     name = lock_user_string(pathname);
7835     if (!name) {
7836         return -TARGET_EFAULT;
7837     }
7838 
7839     total_size = sizeof(struct file_handle) + size;
7840     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7841     if (!target_fh) {
7842         unlock_user(name, pathname, 0);
7843         return -TARGET_EFAULT;
7844     }
7845 
7846     fh = g_malloc0(total_size);
7847     fh->handle_bytes = size;
7848 
7849     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7850     unlock_user(name, pathname, 0);
7851 
7852     /* man name_to_handle_at(2):
7853      * Other than the use of the handle_bytes field, the caller should treat
7854      * the file_handle structure as an opaque data type
7855      */
7856 
7857     memcpy(target_fh, fh, total_size);
7858     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7859     target_fh->handle_type = tswap32(fh->handle_type);
7860     g_free(fh);
7861     unlock_user(target_fh, handle, total_size);
7862 
7863     if (put_user_s32(mid, mount_id)) {
7864         return -TARGET_EFAULT;
7865     }
7866 
7867     return ret;
7868 
7869 }
7870 #endif
7871 
7872 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7873 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7874                                      abi_long flags)
7875 {
7876     struct file_handle *target_fh;
7877     struct file_handle *fh;
7878     unsigned int size, total_size;
7879     abi_long ret;
7880 
7881     if (get_user_s32(size, handle)) {
7882         return -TARGET_EFAULT;
7883     }
7884 
7885     total_size = sizeof(struct file_handle) + size;
7886     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7887     if (!target_fh) {
7888         return -TARGET_EFAULT;
7889     }
7890 
7891     fh = g_memdup(target_fh, total_size);
7892     fh->handle_bytes = size;
7893     fh->handle_type = tswap32(target_fh->handle_type);
7894 
7895     ret = get_errno(open_by_handle_at(mount_fd, fh,
7896                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7897 
7898     g_free(fh);
7899 
7900     unlock_user(target_fh, handle, total_size);
7901 
7902     return ret;
7903 }
7904 #endif
7905 
7906 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7907 
7908 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7909 {
7910     int host_flags;
7911     target_sigset_t *target_mask;
7912     sigset_t host_mask;
7913     abi_long ret;
7914 
7915     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7916         return -TARGET_EINVAL;
7917     }
7918     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7919         return -TARGET_EFAULT;
7920     }
7921 
7922     target_to_host_sigset(&host_mask, target_mask);
7923 
7924     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7925 
7926     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7927     if (ret >= 0) {
7928         fd_trans_register(ret, &target_signalfd_trans);
7929     }
7930 
7931     unlock_user_struct(target_mask, mask, 0);
7932 
7933     return ret;
7934 }
7935 #endif
7936 
7937 /* Map host to target signal numbers for the wait family of syscalls.
7938    Assume all other status bits are the same.  */
7939 int host_to_target_waitstatus(int status)
7940 {
7941     if (WIFSIGNALED(status)) {
7942         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7943     }
7944     if (WIFSTOPPED(status)) {
7945         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7946                | (status & 0xff);
7947     }
7948     return status;
7949 }
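/*
 * Worked example for the conversion above: if a host child died from host
 * signal 10 while the guest numbers that signal as 30, the low 7 bits of
 * the status are rewritten from 10 to 30 and the remaining bits (e.g. the
 * core-dump flag, 0x80) are preserved.  For stopped children the low byte
 * stays 0x7f and only the signal number in bits 8-15 is remapped.
 */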
7950 
7951 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7952 {
7953     CPUState *cpu = env_cpu(cpu_env);
7954     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7955     int i;
7956 
7957     for (i = 0; i < bprm->argc; i++) {
7958         size_t len = strlen(bprm->argv[i]) + 1;
7959 
7960         if (write(fd, bprm->argv[i], len) != len) {
7961             return -1;
7962         }
7963     }
7964 
7965     return 0;
7966 }
7967 
7968 struct open_self_maps_data {
7969     TaskState *ts;
7970     IntervalTreeRoot *host_maps;
7971     int fd;
7972     bool smaps;
7973 };
7974 
7975 /*
7976  * Subroutine to output one line of /proc/self/maps,
7977  * or one region of /proc/self/smaps.
7978  */
7979 
7980 #ifdef TARGET_HPPA
7981 # define test_stack(S, E, L)  (E == L)
7982 #else
7983 # define test_stack(S, E, L)  (S == L)
7984 #endif
7985 
7986 static void open_self_maps_4(const struct open_self_maps_data *d,
7987                              const MapInfo *mi, abi_ptr start,
7988                              abi_ptr end, unsigned flags)
7989 {
7990     const struct image_info *info = d->ts->info;
7991     const char *path = mi->path;
7992     uint64_t offset;
7993     int fd = d->fd;
7994     int count;
7995 
7996     if (test_stack(start, end, info->stack_limit)) {
7997         path = "[stack]";
7998     } else if (start == info->brk) {
7999         path = "[heap]";
8000     } else if (start == info->vdso) {
8001         path = "[vdso]";
8002 #ifdef TARGET_X86_64
8003     } else if (start == TARGET_VSYSCALL_PAGE) {
8004         path = "[vsyscall]";
8005 #endif
8006     }
8007 
8008     /* Adjust the offset for this fragment, except for anonymous mappings. */
8009     offset = mi->offset;
8010     if (mi->dev) {
8011         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8012         offset += hstart - mi->itree.start;
8013     }
8014 
8015     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8016                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8017                     start, end,
8018                     (flags & PAGE_READ) ? 'r' : '-',
8019                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8020                     (flags & PAGE_EXEC) ? 'x' : '-',
8021                     mi->is_priv ? 'p' : 's',
8022                     offset, major(mi->dev), minor(mi->dev),
8023                     (uint64_t)mi->inode);
8024     if (path) {
8025         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8026     } else {
8027         dprintf(fd, "\n");
8028     }
8029 
8030     if (d->smaps) {
8031         unsigned long size = end - start;
8032         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8033         unsigned long size_kb = size >> 10;
8034 
8035         dprintf(fd, "Size:                  %lu kB\n"
8036                 "KernelPageSize:        %lu kB\n"
8037                 "MMUPageSize:           %lu kB\n"
8038                 "Rss:                   0 kB\n"
8039                 "Pss:                   0 kB\n"
8040                 "Pss_Dirty:             0 kB\n"
8041                 "Shared_Clean:          0 kB\n"
8042                 "Shared_Dirty:          0 kB\n"
8043                 "Private_Clean:         0 kB\n"
8044                 "Private_Dirty:         0 kB\n"
8045                 "Referenced:            0 kB\n"
8046                 "Anonymous:             %lu kB\n"
8047                 "LazyFree:              0 kB\n"
8048                 "AnonHugePages:         0 kB\n"
8049                 "ShmemPmdMapped:        0 kB\n"
8050                 "FilePmdMapped:         0 kB\n"
8051                 "Shared_Hugetlb:        0 kB\n"
8052                 "Private_Hugetlb:       0 kB\n"
8053                 "Swap:                  0 kB\n"
8054                 "SwapPss:               0 kB\n"
8055                 "Locked:                0 kB\n"
8056                 "THPeligible:    0\n"
8057                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8058                 size_kb, page_size_kb, page_size_kb,
8059                 (flags & PAGE_ANON ? size_kb : 0),
8060                 (flags & PAGE_READ) ? " rd" : "",
8061                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8062                 (flags & PAGE_EXEC) ? " ex" : "",
8063                 mi->is_priv ? "" : " sh",
8064                 (flags & PAGE_READ) ? " mr" : "",
8065                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8066                 (flags & PAGE_EXEC) ? " me" : "",
8067                 mi->is_priv ? "" : " ms");
8068     }
8069 }
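/*
 * The dprintf above emits lines in the usual /proc/<pid>/maps shape, for
 * example (values purely illustrative):
 *
 *     00400000-00452000 r-xp 00000000 08:02 173521     /usr/bin/foo
 *
 * with the pathname column padded so that it starts 73 characters into the
 * line, mimicking the kernel's own alignment.
 */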
8070 
8071 /*
8072  * Callback for walk_memory_regions, when read_self_maps() fails.
8073  * Proceed without the benefit of host /proc/self/maps cross-check.
8074  */
8075 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8076                             target_ulong guest_end, unsigned long flags)
8077 {
8078     static const MapInfo mi = { .is_priv = true };
8079 
8080     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8081     return 0;
8082 }
8083 
8084 /*
8085  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8086  */
8087 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8088                             target_ulong guest_end, unsigned long flags)
8089 {
8090     const struct open_self_maps_data *d = opaque;
8091     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8092     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8093 
8094 #ifdef TARGET_X86_64
8095     /*
8096      * Because of the extremely high position of the page within the guest
8097      * virtual address space, this is not backed by host memory at all.
8098      * Therefore the loop below would fail.  This is the only instance
8099      * of not having host backing memory.
8100      */
8101     if (guest_start == TARGET_VSYSCALL_PAGE) {
8102         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8103     }
8104 #endif
8105 
8106     while (1) {
8107         IntervalTreeNode *n =
8108             interval_tree_iter_first(d->host_maps, host_start, host_start);
8109         MapInfo *mi = container_of(n, MapInfo, itree);
8110         uintptr_t this_hlast = MIN(host_last, n->last);
8111         target_ulong this_gend = h2g(this_hlast) + 1;
8112 
8113         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8114 
8115         if (this_hlast == host_last) {
8116             return 0;
8117         }
8118         host_start = this_hlast + 1;
8119         guest_start = h2g(host_start);
8120     }
8121 }
8122 
8123 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8124 {
8125     struct open_self_maps_data d = {
8126         .ts = env_cpu(env)->opaque,
8127         .host_maps = read_self_maps(),
8128         .fd = fd,
8129         .smaps = smaps
8130     };
8131 
8132     if (d.host_maps) {
8133         walk_memory_regions(&d, open_self_maps_2);
8134         free_self_maps(d.host_maps);
8135     } else {
8136         walk_memory_regions(&d, open_self_maps_3);
8137     }
8138     return 0;
8139 }
8140 
8141 static int open_self_maps(CPUArchState *cpu_env, int fd)
8142 {
8143     return open_self_maps_1(cpu_env, fd, false);
8144 }
8145 
8146 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8147 {
8148     return open_self_maps_1(cpu_env, fd, true);
8149 }
8150 
8151 static int open_self_stat(CPUArchState *cpu_env, int fd)
8152 {
8153     CPUState *cpu = env_cpu(cpu_env);
8154     TaskState *ts = get_task_state(cpu);
8155     g_autoptr(GString) buf = g_string_new(NULL);
8156     int i;
8157 
8158     for (i = 0; i < 44; i++) {
8159         if (i == 0) {
8160             /* pid */
8161             g_string_printf(buf, FMT_pid " ", getpid());
8162         } else if (i == 1) {
8163             /* app name */
8164             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8165             bin = bin ? bin + 1 : ts->bprm->argv[0];
8166             g_string_printf(buf, "(%.15s) ", bin);
8167         } else if (i == 2) {
8168             /* task state */
8169             g_string_assign(buf, "R "); /* we are running right now */
8170         } else if (i == 3) {
8171             /* ppid */
8172             g_string_printf(buf, FMT_pid " ", getppid());
8173         } else if (i == 21) {
8174             /* starttime */
8175             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8176         } else if (i == 27) {
8177             /* stack bottom */
8178             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8179         } else {
8180             /* for the rest, there is MasterCard */
8181             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8182         }
8183 
8184         if (write(fd, buf->str, buf->len) != buf->len) {
8185             return -1;
8186         }
8187     }
8188 
8189     return 0;
8190 }
8191 
8192 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8193 {
8194     CPUState *cpu = env_cpu(cpu_env);
8195     TaskState *ts = get_task_state(cpu);
8196     abi_ulong auxv = ts->info->saved_auxv;
8197     abi_ulong len = ts->info->auxv_len;
8198     char *ptr;
8199 
8200     /*
8201      * The auxiliary vector is stored on the target process stack.
8202      * Read the whole auxv vector and copy it to the file.
8203      */
8204     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8205     if (ptr != NULL) {
8206         while (len > 0) {
8207             ssize_t r;
8208             r = write(fd, ptr, len);
8209             if (r <= 0) {
8210                 break;
8211             }
8212             len -= r;
8213             ptr += r;
8214         }
8215         lseek(fd, 0, SEEK_SET);
8216         unlock_user(ptr, auxv, len);
8217     }
8218 
8219     return 0;
8220 }
8221 
8222 static int is_proc_myself(const char *filename, const char *entry)
8223 {
8224     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8225         filename += strlen("/proc/");
8226         if (!strncmp(filename, "self/", strlen("self/"))) {
8227             filename += strlen("self/");
8228         } else if (*filename >= '1' && *filename <= '9') {
8229             char myself[80];
8230             snprintf(myself, sizeof(myself), "%d/", getpid());
8231             if (!strncmp(filename, myself, strlen(myself))) {
8232                 filename += strlen(myself);
8233             } else {
8234                 return 0;
8235             }
8236         } else {
8237             return 0;
8238         }
8239         if (!strcmp(filename, entry)) {
8240             return 1;
8241         }
8242     }
8243     return 0;
8244 }
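/*
 * Matching examples for the helper above, assuming getpid() returns 42:
 * "/proc/self/maps" and "/proc/42/maps" both match entry "maps", while
 * "/proc/43/maps" and "/proc/meminfo" do not.
 */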
8245 
8246 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8247                       const char *fmt, int code)
8248 {
8249     if (logfile) {
8250         CPUState *cs = env_cpu(env);
8251 
8252         fprintf(logfile, fmt, code);
8253         fprintf(logfile, "Failing executable: %s\n", exec_path);
8254         cpu_dump_state(cs, logfile, 0);
8255         open_self_maps(env, fileno(logfile));
8256     }
8257 }
8258 
8259 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8260 {
8261     /* dump to console */
8262     excp_dump_file(stderr, env, fmt, code);
8263 
8264     /* dump to log file */
8265     if (qemu_log_separate()) {
8266         FILE *logfile = qemu_log_trylock();
8267 
8268         excp_dump_file(logfile, env, fmt, code);
8269         qemu_log_unlock(logfile);
8270     }
8271 }
8272 
8273 #include "target_proc.h"
8274 
8275 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8276     defined(HAVE_ARCH_PROC_CPUINFO) || \
8277     defined(HAVE_ARCH_PROC_HARDWARE)
8278 static int is_proc(const char *filename, const char *entry)
8279 {
8280     return strcmp(filename, entry) == 0;
8281 }
8282 #endif
8283 
8284 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8285 static int open_net_route(CPUArchState *cpu_env, int fd)
8286 {
8287     FILE *fp;
8288     char *line = NULL;
8289     size_t len = 0;
8290     ssize_t read;
8291 
8292     fp = fopen("/proc/net/route", "r");
8293     if (fp == NULL) {
8294         return -1;
8295     }
8296 
8297     /* read header */
8298 
8299     read = getline(&line, &len, fp);
8300     dprintf(fd, "%s", line);
8301 
8302     /* read routes */
8303 
8304     while ((read = getline(&line, &len, fp)) != -1) {
8305         char iface[16];
8306         uint32_t dest, gw, mask;
8307         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8308         int fields;
8309 
8310         fields = sscanf(line,
8311                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8312                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8313                         &mask, &mtu, &window, &irtt);
8314         if (fields != 11) {
8315             continue;
8316         }
8317         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8318                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8319                 metric, tswap32(mask), mtu, window, irtt);
8320     }
8321 
8322     free(line);
8323     fclose(fp);
8324 
8325     return 0;
8326 }
8327 #endif
8328 
8329 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *fname,
8330                     int flags, mode_t mode, bool safe)
8331 {
8332     g_autofree char *proc_name = NULL;
8333     const char *pathname;
8334     struct fake_open {
8335         const char *filename;
8336         int (*fill)(CPUArchState *cpu_env, int fd);
8337         int (*cmp)(const char *s1, const char *s2);
8338     };
8339     const struct fake_open *fake_open;
8340     static const struct fake_open fakes[] = {
8341         { "maps", open_self_maps, is_proc_myself },
8342         { "smaps", open_self_smaps, is_proc_myself },
8343         { "stat", open_self_stat, is_proc_myself },
8344         { "auxv", open_self_auxv, is_proc_myself },
8345         { "cmdline", open_self_cmdline, is_proc_myself },
8346 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8347         { "/proc/net/route", open_net_route, is_proc },
8348 #endif
8349 #if defined(HAVE_ARCH_PROC_CPUINFO)
8350         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8351 #endif
8352 #if defined(HAVE_ARCH_PROC_HARDWARE)
8353         { "/proc/hardware", open_hardware, is_proc },
8354 #endif
8355         { NULL, NULL, NULL }
8356     };
8357 
8358     /* if this is a file from the /proc/ filesystem, expand to its full name */
8359     proc_name = realpath(fname, NULL);
8360     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8361         pathname = proc_name;
8362     } else {
8363         pathname = fname;
8364     }
8365 
8366     if (is_proc_myself(pathname, "exe")) {
8367         if (safe) {
8368             return safe_openat(dirfd, exec_path, flags, mode);
8369         } else {
8370             return openat(dirfd, exec_path, flags, mode);
8371         }
8372     }
8373 
8374     for (fake_open = fakes; fake_open->filename; fake_open++) {
8375         if (fake_open->cmp(pathname, fake_open->filename)) {
8376             break;
8377         }
8378     }
8379 
8380     if (fake_open->filename) {
8381         const char *tmpdir;
8382         char filename[PATH_MAX];
8383         int fd, r;
8384 
8385         fd = memfd_create("qemu-open", 0);
8386         if (fd < 0) {
8387             if (errno != ENOSYS) {
8388                 return fd;
8389             }
8390             /* create temporary file to map stat to */
8391             /* fall back to a temporary file when memfd_create() is unavailable */
8392             if (!tmpdir)
8393                 tmpdir = "/tmp";
8394             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8395             fd = mkstemp(filename);
8396             if (fd < 0) {
8397                 return fd;
8398             }
8399             unlink(filename);
8400         }
8401 
8402         if ((r = fake_open->fill(cpu_env, fd))) {
8403             int e = errno;
8404             close(fd);
8405             errno = e;
8406             return r;
8407         }
8408         lseek(fd, 0, SEEK_SET);
8409 
8410         return fd;
8411     }
8412 
8413     if (safe) {
8414         return safe_openat(dirfd, path(pathname), flags, mode);
8415     } else {
8416         return openat(dirfd, path(pathname), flags, mode);
8417     }
8418 }
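/*
 * Effect of the fake_open table above, seen from the guest (illustrative):
 * a guest open("/proc/self/maps", O_RDONLY) is answered from a memfd (or
 * an unlinked temporary file) that QEMU fills with guest-view contents,
 * instead of exposing the host's own /proc/self/maps:
 *
 *     int fd = open("/proc/self/maps", O_RDONLY);
 *     // reads now return guest mappings in guest address terms
 */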
8419 
8420 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8421 {
8422     ssize_t ret;
8423 
8424     if (!pathname || !buf) {
8425         errno = EFAULT;
8426         return -1;
8427     }
8428 
8429     if (!bufsiz) {
8430         /* Short circuit this for the magic exe check. */
8431         errno = EINVAL;
8432         return -1;
8433     }
8434 
8435     if (is_proc_myself((const char *)pathname, "exe")) {
8436         /*
8437          * Don't worry about sign mismatch as earlier mapping
8438          * logic would have thrown a bad address error.
8439          */
8440         ret = MIN(strlen(exec_path), bufsiz);
8441         /* We cannot NUL terminate the string. */
8442         memcpy(buf, exec_path, ret);
8443     } else {
8444         ret = readlink(path(pathname), buf, bufsiz);
8445     }
8446 
8447     return ret;
8448 }
8449 
8450 static int do_execv(CPUArchState *cpu_env, int dirfd,
8451                     abi_long pathname, abi_long guest_argp,
8452                     abi_long guest_envp, int flags, bool is_execveat)
8453 {
8454     int ret;
8455     char **argp, **envp;
8456     int argc, envc;
8457     abi_ulong gp;
8458     abi_ulong addr;
8459     char **q;
8460     void *p;
8461 
8462     argc = 0;
8463 
8464     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8465         if (get_user_ual(addr, gp)) {
8466             return -TARGET_EFAULT;
8467         }
8468         if (!addr) {
8469             break;
8470         }
8471         argc++;
8472     }
8473     envc = 0;
8474     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8475         if (get_user_ual(addr, gp)) {
8476             return -TARGET_EFAULT;
8477         }
8478         if (!addr) {
8479             break;
8480         }
8481         envc++;
8482     }
8483 
8484     argp = g_new0(char *, argc + 1);
8485     envp = g_new0(char *, envc + 1);
8486 
8487     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8488         if (get_user_ual(addr, gp)) {
8489             goto execve_efault;
8490         }
8491         if (!addr) {
8492             break;
8493         }
8494         *q = lock_user_string(addr);
8495         if (!*q) {
8496             goto execve_efault;
8497         }
8498     }
8499     *q = NULL;
8500 
8501     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8502         if (get_user_ual(addr, gp)) {
8503             goto execve_efault;
8504         }
8505         if (!addr) {
8506             break;
8507         }
8508         *q = lock_user_string(addr);
8509         if (!*q) {
8510             goto execve_efault;
8511         }
8512     }
8513     *q = NULL;
8514 
8515     /*
8516      * Although execve() is not an interruptible syscall it is
8517      * a special case where we must use the safe_syscall wrapper:
8518      * if we allow a signal to happen before we make the host
8519      * syscall then we will 'lose' it, because at the point of
8520      * execve the process leaves QEMU's control. So we use the
8521      * safe syscall wrapper to ensure that we either take the
8522      * signal as a guest signal, or else it does not happen
8523      * before the execve completes and makes it the other
8524      * program's problem.
8525      */
8526     p = lock_user_string(pathname);
8527     if (!p) {
8528         goto execve_efault;
8529     }
8530 
8531     const char *exe = p;
8532     if (is_proc_myself(p, "exe")) {
8533         exe = exec_path;
8534     }
8535     ret = is_execveat
8536         ? safe_execveat(dirfd, exe, argp, envp, flags)
8537         : safe_execve(exe, argp, envp);
8538     ret = get_errno(ret);
8539 
8540     unlock_user(p, pathname, 0);
8541 
8542     goto execve_end;
8543 
8544 execve_efault:
8545     ret = -TARGET_EFAULT;
8546 
8547 execve_end:
8548     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8549         if (get_user_ual(addr, gp) || !addr) {
8550             break;
8551         }
8552         unlock_user(*q, addr, 0);
8553     }
8554     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8555         if (get_user_ual(addr, gp) || !addr) {
8556             break;
8557         }
8558         unlock_user(*q, addr, 0);
8559     }
8560 
8561     g_free(argp);
8562     g_free(envp);
8563     return ret;
8564 }
8565 
8566 #define TIMER_MAGIC 0x0caf0000
8567 #define TIMER_MAGIC_MASK 0xffff0000
8568 
8569 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8570 static target_timer_t get_timer_id(abi_long arg)
8571 {
8572     target_timer_t timerid = arg;
8573 
8574     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8575         return -TARGET_EINVAL;
8576     }
8577 
8578     timerid &= 0xffff;
8579 
8580     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8581         return -TARGET_EINVAL;
8582     }
8583 
8584     return timerid;
8585 }
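/*
 * Example of the encoding above: the timer stored at g_posix_timers[3] is
 * exposed to the guest as 0x0caf0003, and get_timer_id(0x0caf0003) yields
 * index 3; any value without the 0x0caf0000 magic, or with an out-of-range
 * index, yields -TARGET_EINVAL.
 */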
8586 
8587 static int target_to_host_cpu_mask(unsigned long *host_mask,
8588                                    size_t host_size,
8589                                    abi_ulong target_addr,
8590                                    size_t target_size)
8591 {
8592     unsigned target_bits = sizeof(abi_ulong) * 8;
8593     unsigned host_bits = sizeof(*host_mask) * 8;
8594     abi_ulong *target_mask;
8595     unsigned i, j;
8596 
8597     assert(host_size >= target_size);
8598 
8599     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8600     if (!target_mask) {
8601         return -TARGET_EFAULT;
8602     }
8603     memset(host_mask, 0, host_size);
8604 
8605     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8606         unsigned bit = i * target_bits;
8607         abi_ulong val;
8608 
8609         __get_user(val, &target_mask[i]);
8610         for (j = 0; j < target_bits; j++, bit++) {
8611             if (val & (1UL << j)) {
8612                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8613             }
8614         }
8615     }
8616 
8617     unlock_user(target_mask, target_addr, 0);
8618     return 0;
8619 }
8620 
8621 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8622                                    size_t host_size,
8623                                    abi_ulong target_addr,
8624                                    size_t target_size)
8625 {
8626     unsigned target_bits = sizeof(abi_ulong) * 8;
8627     unsigned host_bits = sizeof(*host_mask) * 8;
8628     abi_ulong *target_mask;
8629     unsigned i, j;
8630 
8631     assert(host_size >= target_size);
8632 
8633     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8634     if (!target_mask) {
8635         return -TARGET_EFAULT;
8636     }
8637 
8638     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8639         unsigned bit = i * target_bits;
8640         abi_ulong val = 0;
8641 
8642         for (j = 0; j < target_bits; j++, bit++) {
8643             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8644                 val |= 1UL << j;
8645             }
8646         }
8647         __put_user(val, &target_mask[i]);
8648     }
8649 
8650     unlock_user(target_mask, target_addr, target_size);
8651     return 0;
8652 }
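/*
 * The two helpers above repack affinity masks bit by bit, so they remain
 * correct when abi_ulong and the host's unsigned long differ in width.
 * For a 32-bit guest on a 64-bit host, guest bit 35 (word 1, bit 3) maps
 * to host bit 35 (word 0, bit 35), and the reverse direction splits the
 * host word back into two guest words.
 */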
8653 
8654 #ifdef TARGET_NR_getdents
8655 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8656 {
8657     g_autofree void *hdirp = NULL;
8658     void *tdirp;
8659     int hlen, hoff, toff;
8660     int hreclen, treclen;
8661     off64_t prev_diroff = 0;
8662 
8663     hdirp = g_try_malloc(count);
8664     if (!hdirp) {
8665         return -TARGET_ENOMEM;
8666     }
8667 
8668 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8669     hlen = sys_getdents(dirfd, hdirp, count);
8670 #else
8671     hlen = sys_getdents64(dirfd, hdirp, count);
8672 #endif
8673 
8674     hlen = get_errno(hlen);
8675     if (is_error(hlen)) {
8676         return hlen;
8677     }
8678 
8679     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8680     if (!tdirp) {
8681         return -TARGET_EFAULT;
8682     }
8683 
8684     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8685 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8686         struct linux_dirent *hde = hdirp + hoff;
8687 #else
8688         struct linux_dirent64 *hde = hdirp + hoff;
8689 #endif
8690         struct target_dirent *tde = tdirp + toff;
8691         int namelen;
8692         uint8_t type;
8693 
8694         namelen = strlen(hde->d_name);
8695         hreclen = hde->d_reclen;
8696         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8697         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8698 
8699         if (toff + treclen > count) {
8700             /*
8701              * If the host struct is smaller than the target struct, or
8702              * requires less alignment and thus packs into less space,
8703              * then the host can return more entries than we can pass
8704              * on to the guest.
8705              */
8706             if (toff == 0) {
8707                 toff = -TARGET_EINVAL; /* result buffer is too small */
8708                 break;
8709             }
8710             /*
8711              * Return what we have, resetting the file pointer to the
8712              * location of the first record not returned.
8713              */
8714             lseek64(dirfd, prev_diroff, SEEK_SET);
8715             break;
8716         }
8717 
8718         prev_diroff = hde->d_off;
8719         tde->d_ino = tswapal(hde->d_ino);
8720         tde->d_off = tswapal(hde->d_off);
8721         tde->d_reclen = tswap16(treclen);
8722         memcpy(tde->d_name, hde->d_name, namelen + 1);
8723 
8724         /*
8725          * The getdents type is in what was formerly a padding byte at the
8726          * end of the structure.
8727          */
8728 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8729         type = *((uint8_t *)hde + hreclen - 1);
8730 #else
8731         type = hde->d_type;
8732 #endif
8733         *((uint8_t *)tde + treclen - 1) = type;
8734     }
8735 
8736     unlock_user(tdirp, arg2, toff);
8737     return toff;
8738 }
8739 #endif /* TARGET_NR_getdents */
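/*
 * Note on the repacking above: host and target dirent layouts may differ
 * in size and alignment, so d_reclen is recomputed for the target layout
 * and, when the guest buffer fills up early, the directory position is
 * rewound (lseek64) to the first host record that was not returned.
 */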
8740 
8741 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8742 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8743 {
8744     g_autofree void *hdirp = NULL;
8745     void *tdirp;
8746     int hlen, hoff, toff;
8747     int hreclen, treclen;
8748     off64_t prev_diroff = 0;
8749 
8750     hdirp = g_try_malloc(count);
8751     if (!hdirp) {
8752         return -TARGET_ENOMEM;
8753     }
8754 
8755     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8756     if (is_error(hlen)) {
8757         return hlen;
8758     }
8759 
8760     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8761     if (!tdirp) {
8762         return -TARGET_EFAULT;
8763     }
8764 
8765     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8766         struct linux_dirent64 *hde = hdirp + hoff;
8767         struct target_dirent64 *tde = tdirp + toff;
8768         int namelen;
8769 
8770         namelen = strlen(hde->d_name) + 1;
8771         hreclen = hde->d_reclen;
8772         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8773         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8774 
8775         if (toff + treclen > count) {
8776             /*
8777              * If the host struct is smaller than the target struct, or
8778              * requires less alignment and thus packs into less space,
8779              * then the host can return more entries than we can pass
8780              * on to the guest.
8781              */
8782             if (toff == 0) {
8783                 toff = -TARGET_EINVAL; /* result buffer is too small */
8784                 break;
8785             }
8786             /*
8787              * Return what we have, resetting the file pointer to the
8788              * location of the first record not returned.
8789              */
8790             lseek64(dirfd, prev_diroff, SEEK_SET);
8791             break;
8792         }
8793 
8794         prev_diroff = hde->d_off;
8795         tde->d_ino = tswap64(hde->d_ino);
8796         tde->d_off = tswap64(hde->d_off);
8797         tde->d_reclen = tswap16(treclen);
8798         tde->d_type = hde->d_type;
8799         memcpy(tde->d_name, hde->d_name, namelen);
8800     }
8801 
8802     unlock_user(tdirp, arg2, toff);
8803     return toff;
8804 }
8805 #endif /* TARGET_NR_getdents64 */
8806 
8807 #if defined(TARGET_NR_riscv_hwprobe)
8808 
8809 #define RISCV_HWPROBE_KEY_MVENDORID     0
8810 #define RISCV_HWPROBE_KEY_MARCHID       1
8811 #define RISCV_HWPROBE_KEY_MIMPID        2
8812 
8813 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8814 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8815 
8816 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8817 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8818 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8819 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8820 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8821 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8822 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8823 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8824 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8825 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8826 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8827 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8828 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8829 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8830 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8831 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8832 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8833 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8834 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8835 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8836 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8837 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8838 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8839 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8840 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8841 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8842 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8843 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8844 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8845 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8846 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8847 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8848 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1 << 31)
8849 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8850 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8851 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8852 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8853 
8854 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8855 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8856 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8857 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8858 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8859 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8860 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8861 
8862 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8863 
8864 struct riscv_hwprobe {
8865     abi_llong  key;
8866     abi_ullong value;
8867 };
8868 
8869 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8870                                     struct riscv_hwprobe *pair,
8871                                     size_t pair_count)
8872 {
8873     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8874 
8875     for (; pair_count > 0; pair_count--, pair++) {
8876         abi_llong key;
8877         abi_ullong value;
8878         __put_user(0, &pair->value);
8879         __get_user(key, &pair->key);
8880         switch (key) {
8881         case RISCV_HWPROBE_KEY_MVENDORID:
8882             __put_user(cfg->mvendorid, &pair->value);
8883             break;
8884         case RISCV_HWPROBE_KEY_MARCHID:
8885             __put_user(cfg->marchid, &pair->value);
8886             break;
8887         case RISCV_HWPROBE_KEY_MIMPID:
8888             __put_user(cfg->mimpid, &pair->value);
8889             break;
8890         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
8891             value = riscv_has_ext(env, RVI) &&
8892                     riscv_has_ext(env, RVM) &&
8893                     riscv_has_ext(env, RVA) ?
8894                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
8895             __put_user(value, &pair->value);
8896             break;
8897         case RISCV_HWPROBE_KEY_IMA_EXT_0:
8898             value = riscv_has_ext(env, RVF) &&
8899                     riscv_has_ext(env, RVD) ?
8900                     RISCV_HWPROBE_IMA_FD : 0;
8901             value |= riscv_has_ext(env, RVC) ?
8902                      RISCV_HWPROBE_IMA_C : 0;
8903             value |= riscv_has_ext(env, RVV) ?
8904                      RISCV_HWPROBE_IMA_V : 0;
8905             value |= cfg->ext_zba ?
8906                      RISCV_HWPROBE_EXT_ZBA : 0;
8907             value |= cfg->ext_zbb ?
8908                      RISCV_HWPROBE_EXT_ZBB : 0;
8909             value |= cfg->ext_zbs ?
8910                      RISCV_HWPROBE_EXT_ZBS : 0;
8911             value |= cfg->ext_zicboz ?
8912                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
8913             value |= cfg->ext_zbc ?
8914                      RISCV_HWPROBE_EXT_ZBC : 0;
8915             value |= cfg->ext_zbkb ?
8916                      RISCV_HWPROBE_EXT_ZBKB : 0;
8917             value |= cfg->ext_zbkc ?
8918                      RISCV_HWPROBE_EXT_ZBKC : 0;
8919             value |= cfg->ext_zbkx ?
8920                      RISCV_HWPROBE_EXT_ZBKX : 0;
8921             value |= cfg->ext_zknd ?
8922                      RISCV_HWPROBE_EXT_ZKND : 0;
8923             value |= cfg->ext_zkne ?
8924                      RISCV_HWPROBE_EXT_ZKNE : 0;
8925             value |= cfg->ext_zknh ?
8926                      RISCV_HWPROBE_EXT_ZKNH : 0;
8927             value |= cfg->ext_zksed ?
8928                      RISCV_HWPROBE_EXT_ZKSED : 0;
8929             value |= cfg->ext_zksh ?
8930                      RISCV_HWPROBE_EXT_ZKSH : 0;
8931             value |= cfg->ext_zkt ?
8932                      RISCV_HWPROBE_EXT_ZKT : 0;
8933             value |= cfg->ext_zvbb ?
8934                      RISCV_HWPROBE_EXT_ZVBB : 0;
8935             value |= cfg->ext_zvbc ?
8936                      RISCV_HWPROBE_EXT_ZVBC : 0;
8937             value |= cfg->ext_zvkb ?
8938                      RISCV_HWPROBE_EXT_ZVKB : 0;
8939             value |= cfg->ext_zvkg ?
8940                      RISCV_HWPROBE_EXT_ZVKG : 0;
8941             value |= cfg->ext_zvkned ?
8942                      RISCV_HWPROBE_EXT_ZVKNED : 0;
8943             value |= cfg->ext_zvknha ?
8944                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
8945             value |= cfg->ext_zvknhb ?
8946                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
8947             value |= cfg->ext_zvksed ?
8948                      RISCV_HWPROBE_EXT_ZVKSED : 0;
8949             value |= cfg->ext_zvksh ?
8950                      RISCV_HWPROBE_EXT_ZVKSH : 0;
8951             value |= cfg->ext_zvkt ?
8952                      RISCV_HWPROBE_EXT_ZVKT : 0;
8953             value |= cfg->ext_zfh ?
8954                      RISCV_HWPROBE_EXT_ZFH : 0;
8955             value |= cfg->ext_zfhmin ?
8956                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
8957             value |= cfg->ext_zihintntl ?
8958                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
8959             value |= cfg->ext_zvfh ?
8960                      RISCV_HWPROBE_EXT_ZVFH : 0;
8961             value |= cfg->ext_zvfhmin ?
8962                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
8963             value |= cfg->ext_zfa ?
8964                      RISCV_HWPROBE_EXT_ZFA : 0;
8965             value |= cfg->ext_ztso ?
8966                      RISCV_HWPROBE_EXT_ZTSO : 0;
8967             value |= cfg->ext_zacas ?
8968                      RISCV_HWPROBE_EXT_ZACAS : 0;
8969             value |= cfg->ext_zicond ?
8970                      RISCV_HWPROBE_EXT_ZICOND : 0;
8971             __put_user(value, &pair->value);
8972             break;
8973         case RISCV_HWPROBE_KEY_CPUPERF_0:
8974             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
8975             break;
8976         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
8977             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
8978             __put_user(value, &pair->value);
8979             break;
8980         default:
8981             __put_user(-1, &pair->key);
8982             break;
8983         }
8984     }
8985 }
8986 
8987 static int cpu_set_valid(abi_long arg3, abi_long arg4)
8988 {
8989     int ret, i, tmp;
8990     size_t host_mask_size, target_mask_size;
8991     unsigned long *host_mask;
8992 
8993     /*
8994      * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
8995      * arg3 contains the cpu count.
8996      */
8997     tmp = (8 * sizeof(abi_ulong));
8998     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
8999     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9000                      ~(sizeof(*host_mask) - 1);
9001 
9002     host_mask = alloca(host_mask_size);
9003 
9004     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9005                                   arg4, target_mask_size);
9006     if (ret != 0) {
9007         return ret;
9008     }
9009 
9010     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9011         if (host_mask[i] != 0) {
9012             return 0;
9013         }
9014     }
9015     return -TARGET_EINVAL;
9016 }
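
/*
 * Worked example of the sizing in cpu_set_valid() above (illustrative;
 * assumes a 32-bit guest ABI on a 64-bit host): for arg3 = 72 CPUs,
 * tmp = 8 * sizeof(abi_ulong) = 32, so
 * target_mask_size = ((72 + 31) / 32) * sizeof(abi_ulong) = 12 bytes,
 * and rounding that up to a multiple of sizeof(unsigned long) = 8 gives
 * host_mask_size = 16 bytes.
 */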
9017 
9018 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9019                                  abi_long arg2, abi_long arg3,
9020                                  abi_long arg4, abi_long arg5)
9021 {
9022     int ret;
9023     struct riscv_hwprobe *host_pairs;
9024 
9025     /* flags must be 0 */
9026     if (arg5 != 0) {
9027         return -TARGET_EINVAL;
9028     }
9029 
9030     /* check cpu_set */
9031     if (arg3 != 0) {
9032         ret = cpu_set_valid(arg3, arg4);
9033         if (ret != 0) {
9034             return ret;
9035         }
9036     } else if (arg4 != 0) {
9037         return -TARGET_EINVAL;
9038     }
9039 
9040     /* no pairs */
9041     if (arg2 == 0) {
9042         return 0;
9043     }
9044 
9045     host_pairs = lock_user(VERIFY_WRITE, arg1,
9046                            sizeof(*host_pairs) * (size_t)arg2, 0);
9047     if (host_pairs == NULL) {
9048         return -TARGET_EFAULT;
9049     }
9050     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9051     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9052     return 0;
9053 }
9054 #endif /* TARGET_NR_riscv_hwprobe */
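
/*
 * Illustrative guest-side sketch of the syscall emulated above (assumes a
 * riscv64 guest whose kernel headers and libc provide <asm/hwprobe.h>,
 * __NR_riscv_hwprobe and the syscall(2) wrapper):
 *
 *     #include <asm/hwprobe.h>
 *     #include <stdio.h>
 *     #include <sys/syscall.h>
 *     #include <unistd.h>
 *
 *     int main(void)
 *     {
 *         struct riscv_hwprobe pairs[2] = {
 *             { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *             { .key = RISCV_HWPROBE_KEY_CPUPERF_0 },
 *         };
 *
 *         // pairs, pair_count, cpusetsize, cpus (NULL = no filtering), flags
 *         if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0) == 0) {
 *             printf("vector: %s\n",
 *                    (pairs[0].value & RISCV_HWPROBE_IMA_V) ? "yes" : "no");
 *             printf("fast misaligned access: %s\n",
 *                    (pairs[1].value & RISCV_HWPROBE_MISALIGNED_MASK) ==
 *                    RISCV_HWPROBE_MISALIGNED_FAST ? "yes" : "no");
 *         }
 *         return 0;
 *     }
 *
 * do_riscv_hwprobe() services exactly this request: flags must be 0, a zero
 * cpusetsize with a NULL mask means no CPU filtering, and any key QEMU does
 * not recognise is reported back with its key set to -1.
 */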
9055 
9056 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9057 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9058 #endif
9059 
9060 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9061 #define __NR_sys_open_tree __NR_open_tree
9062 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9063           unsigned int, __flags)
9064 #endif
9065 
9066 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9067 #define __NR_sys_move_mount __NR_move_mount
9068 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9069            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9070 #endif
9071 
9072 /* This is an internal helper for do_syscall so that it is easier
9073  * to have a single return point, so that actions, such as logging
9074  * of syscall results, can be performed.
9075  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9076  */
9077 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9078                             abi_long arg2, abi_long arg3, abi_long arg4,
9079                             abi_long arg5, abi_long arg6, abi_long arg7,
9080                             abi_long arg8)
9081 {
9082     CPUState *cpu = env_cpu(cpu_env);
9083     abi_long ret;
9084 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9085     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9086     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9087     || defined(TARGET_NR_statx)
9088     struct stat st;
9089 #endif
9090 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9091     || defined(TARGET_NR_fstatfs)
9092     struct statfs stfs;
9093 #endif
9094     void *p;
9095 
9096     switch(num) {
9097     case TARGET_NR_exit:
9098         /* In old applications this may be used to implement _exit(2).
9099            However in threaded applications it is used for thread termination,
9100            and _exit_group is used for application termination.
9101            Do thread termination if we have more than one thread.  */
9102 
9103         if (block_signals()) {
9104             return -QEMU_ERESTARTSYS;
9105         }
9106 
9107         pthread_mutex_lock(&clone_lock);
9108 
9109         if (CPU_NEXT(first_cpu)) {
9110             TaskState *ts = get_task_state(cpu);
9111 
9112             if (ts->child_tidptr) {
9113                 put_user_u32(0, ts->child_tidptr);
9114                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9115                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9116             }
9117 
9118             object_unparent(OBJECT(cpu));
9119             object_unref(OBJECT(cpu));
9120             /*
9121              * At this point the CPU should be unrealized and removed
9122              * from cpu lists. We can clean-up the rest of the thread
9123              * data without the lock held.
9124              */
9125 
9126             pthread_mutex_unlock(&clone_lock);
9127 
9128             thread_cpu = NULL;
9129             g_free(ts);
9130             rcu_unregister_thread();
9131             pthread_exit(NULL);
9132         }
9133 
9134         pthread_mutex_unlock(&clone_lock);
9135         preexit_cleanup(cpu_env, arg1);
9136         _exit(arg1);
9137         return 0; /* avoid warning */
9138     case TARGET_NR_read:
9139         if (arg2 == 0 && arg3 == 0) {
9140             return get_errno(safe_read(arg1, 0, 0));
9141         } else {
9142             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9143                 return -TARGET_EFAULT;
9144             ret = get_errno(safe_read(arg1, p, arg3));
9145             if (ret >= 0 &&
9146                 fd_trans_host_to_target_data(arg1)) {
9147                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9148             }
9149             unlock_user(p, arg2, ret);
9150         }
9151         return ret;
9152     case TARGET_NR_write:
9153         if (arg2 == 0 && arg3 == 0) {
9154             return get_errno(safe_write(arg1, 0, 0));
9155         }
9156         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9157             return -TARGET_EFAULT;
9158         if (fd_trans_target_to_host_data(arg1)) {
9159             void *copy = g_malloc(arg3);
9160             memcpy(copy, p, arg3);
9161             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9162             if (ret >= 0) {
9163                 ret = get_errno(safe_write(arg1, copy, ret));
9164             }
9165             g_free(copy);
9166         } else {
9167             ret = get_errno(safe_write(arg1, p, arg3));
9168         }
9169         unlock_user(p, arg2, 0);
9170         return ret;
9171 
9172 #ifdef TARGET_NR_open
9173     case TARGET_NR_open:
9174         if (!(p = lock_user_string(arg1)))
9175             return -TARGET_EFAULT;
9176         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9177                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9178                                   arg3, true));
9179         fd_trans_unregister(ret);
9180         unlock_user(p, arg1, 0);
9181         return ret;
9182 #endif
9183     case TARGET_NR_openat:
9184         if (!(p = lock_user_string(arg2)))
9185             return -TARGET_EFAULT;
9186         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9187                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9188                                   arg4, true));
9189         fd_trans_unregister(ret);
9190         unlock_user(p, arg2, 0);
9191         return ret;
9192 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9193     case TARGET_NR_name_to_handle_at:
9194         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9195         return ret;
9196 #endif
9197 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9198     case TARGET_NR_open_by_handle_at:
9199         ret = do_open_by_handle_at(arg1, arg2, arg3);
9200         fd_trans_unregister(ret);
9201         return ret;
9202 #endif
9203 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9204     case TARGET_NR_pidfd_open:
9205         return get_errno(pidfd_open(arg1, arg2));
9206 #endif
9207 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9208     case TARGET_NR_pidfd_send_signal:
9209         {
9210             siginfo_t uinfo, *puinfo;
9211 
9212             if (arg3) {
9213                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9214                 if (!p) {
9215                     return -TARGET_EFAULT;
9216                  }
9217                  target_to_host_siginfo(&uinfo, p);
9218                  unlock_user(p, arg3, 0);
9219                  puinfo = &uinfo;
9220             } else {
9221                  puinfo = NULL;
9222             }
9223             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9224                                               puinfo, arg4));
9225         }
9226         return ret;
9227 #endif
9228 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9229     case TARGET_NR_pidfd_getfd:
9230         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9231 #endif
9232     case TARGET_NR_close:
9233         fd_trans_unregister(arg1);
9234         return get_errno(close(arg1));
9235 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9236     case TARGET_NR_close_range:
9237         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9238         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9239             abi_long fd, maxfd;
9240             maxfd = MIN(arg2, target_fd_max);
9241             for (fd = arg1; fd < maxfd; fd++) {
9242                 fd_trans_unregister(fd);
9243             }
9244         }
9245         return ret;
9246 #endif
9247 
9248     case TARGET_NR_brk:
9249         return do_brk(arg1);
9250 #ifdef TARGET_NR_fork
9251     case TARGET_NR_fork:
9252         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9253 #endif
9254 #ifdef TARGET_NR_waitpid
9255     case TARGET_NR_waitpid:
9256         {
9257             int status;
9258             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9259             if (!is_error(ret) && arg2 && ret
9260                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9261                 return -TARGET_EFAULT;
9262         }
9263         return ret;
9264 #endif
9265 #ifdef TARGET_NR_waitid
9266     case TARGET_NR_waitid:
9267         {
9268             struct rusage ru;
9269             siginfo_t info;
9270 
9271             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9272                                         arg4, (arg5 ? &ru : NULL)));
9273             if (!is_error(ret)) {
9274                 if (arg3) {
9275                     p = lock_user(VERIFY_WRITE, arg3,
9276                                   sizeof(target_siginfo_t), 0);
9277                     if (!p) {
9278                         return -TARGET_EFAULT;
9279                     }
9280                     host_to_target_siginfo(p, &info);
9281                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9282                 }
9283                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9284                     return -TARGET_EFAULT;
9285                 }
9286             }
9287         }
9288         return ret;
9289 #endif
9290 #ifdef TARGET_NR_creat /* not on alpha */
9291     case TARGET_NR_creat:
9292         if (!(p = lock_user_string(arg1)))
9293             return -TARGET_EFAULT;
9294         ret = get_errno(creat(p, arg2));
9295         fd_trans_unregister(ret);
9296         unlock_user(p, arg1, 0);
9297         return ret;
9298 #endif
9299 #ifdef TARGET_NR_link
9300     case TARGET_NR_link:
9301         {
9302             void * p2;
9303             p = lock_user_string(arg1);
9304             p2 = lock_user_string(arg2);
9305             if (!p || !p2)
9306                 ret = -TARGET_EFAULT;
9307             else
9308                 ret = get_errno(link(p, p2));
9309             unlock_user(p2, arg2, 0);
9310             unlock_user(p, arg1, 0);
9311         }
9312         return ret;
9313 #endif
9314 #if defined(TARGET_NR_linkat)
9315     case TARGET_NR_linkat:
9316         {
9317             void * p2 = NULL;
9318             if (!arg2 || !arg4)
9319                 return -TARGET_EFAULT;
9320             p  = lock_user_string(arg2);
9321             p2 = lock_user_string(arg4);
9322             if (!p || !p2)
9323                 ret = -TARGET_EFAULT;
9324             else
9325                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9326             unlock_user(p, arg2, 0);
9327             unlock_user(p2, arg4, 0);
9328         }
9329         return ret;
9330 #endif
9331 #ifdef TARGET_NR_unlink
9332     case TARGET_NR_unlink:
9333         if (!(p = lock_user_string(arg1)))
9334             return -TARGET_EFAULT;
9335         ret = get_errno(unlink(p));
9336         unlock_user(p, arg1, 0);
9337         return ret;
9338 #endif
9339 #if defined(TARGET_NR_unlinkat)
9340     case TARGET_NR_unlinkat:
9341         if (!(p = lock_user_string(arg2)))
9342             return -TARGET_EFAULT;
9343         ret = get_errno(unlinkat(arg1, p, arg3));
9344         unlock_user(p, arg2, 0);
9345         return ret;
9346 #endif
9347     case TARGET_NR_execveat:
9348         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9349     case TARGET_NR_execve:
9350         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9351     case TARGET_NR_chdir:
9352         if (!(p = lock_user_string(arg1)))
9353             return -TARGET_EFAULT;
9354         ret = get_errno(chdir(p));
9355         unlock_user(p, arg1, 0);
9356         return ret;
9357 #ifdef TARGET_NR_time
9358     case TARGET_NR_time:
9359         {
9360             time_t host_time;
9361             ret = get_errno(time(&host_time));
9362             if (!is_error(ret)
9363                 && arg1
9364                 && put_user_sal(host_time, arg1))
9365                 return -TARGET_EFAULT;
9366         }
9367         return ret;
9368 #endif
9369 #ifdef TARGET_NR_mknod
9370     case TARGET_NR_mknod:
9371         if (!(p = lock_user_string(arg1)))
9372             return -TARGET_EFAULT;
9373         ret = get_errno(mknod(p, arg2, arg3));
9374         unlock_user(p, arg1, 0);
9375         return ret;
9376 #endif
9377 #if defined(TARGET_NR_mknodat)
9378     case TARGET_NR_mknodat:
9379         if (!(p = lock_user_string(arg2)))
9380             return -TARGET_EFAULT;
9381         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9382         unlock_user(p, arg2, 0);
9383         return ret;
9384 #endif
9385 #ifdef TARGET_NR_chmod
9386     case TARGET_NR_chmod:
9387         if (!(p = lock_user_string(arg1)))
9388             return -TARGET_EFAULT;
9389         ret = get_errno(chmod(p, arg2));
9390         unlock_user(p, arg1, 0);
9391         return ret;
9392 #endif
9393 #ifdef TARGET_NR_lseek
9394     case TARGET_NR_lseek:
9395         return get_errno(lseek(arg1, arg2, arg3));
9396 #endif
9397 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9398     /* Alpha specific */
9399     case TARGET_NR_getxpid:
9400         cpu_env->ir[IR_A4] = getppid();
9401         return get_errno(getpid());
9402 #endif
9403 #ifdef TARGET_NR_getpid
9404     case TARGET_NR_getpid:
9405         return get_errno(getpid());
9406 #endif
9407     case TARGET_NR_mount:
9408         {
9409             /* need to look at the data field */
9410             void *p2, *p3;
9411 
9412             if (arg1) {
9413                 p = lock_user_string(arg1);
9414                 if (!p) {
9415                     return -TARGET_EFAULT;
9416                 }
9417             } else {
9418                 p = NULL;
9419             }
9420 
9421             p2 = lock_user_string(arg2);
9422             if (!p2) {
9423                 if (arg1) {
9424                     unlock_user(p, arg1, 0);
9425                 }
9426                 return -TARGET_EFAULT;
9427             }
9428 
9429             if (arg3) {
9430                 p3 = lock_user_string(arg3);
9431                 if (!p3) {
9432                     if (arg1) {
9433                         unlock_user(p, arg1, 0);
9434                     }
9435                     unlock_user(p2, arg2, 0);
9436                     return -TARGET_EFAULT;
9437                 }
9438             } else {
9439                 p3 = NULL;
9440             }
9441 
9442             /* FIXME - arg5 should be locked, but it isn't clear how to
9443              * do that since it's not guaranteed to be a NULL-terminated
9444              * string.
9445              */
9446             if (!arg5) {
9447                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9448             } else {
9449                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9450             }
9451             ret = get_errno(ret);
9452 
9453             if (arg1) {
9454                 unlock_user(p, arg1, 0);
9455             }
9456             unlock_user(p2, arg2, 0);
9457             if (arg3) {
9458                 unlock_user(p3, arg3, 0);
9459             }
9460         }
9461         return ret;
9462 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9463 #if defined(TARGET_NR_umount)
9464     case TARGET_NR_umount:
9465 #endif
9466 #if defined(TARGET_NR_oldumount)
9467     case TARGET_NR_oldumount:
9468 #endif
9469         if (!(p = lock_user_string(arg1)))
9470             return -TARGET_EFAULT;
9471         ret = get_errno(umount(p));
9472         unlock_user(p, arg1, 0);
9473         return ret;
9474 #endif
9475 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9476     case TARGET_NR_move_mount:
9477         {
9478             void *p2, *p4;
9479 
9480             if (!arg2 || !arg4) {
9481                 return -TARGET_EFAULT;
9482             }
9483 
9484             p2 = lock_user_string(arg2);
9485             if (!p2) {
9486                 return -TARGET_EFAULT;
9487             }
9488 
9489             p4 = lock_user_string(arg4);
9490             if (!p4) {
9491                 unlock_user(p2, arg2, 0);
9492                 return -TARGET_EFAULT;
9493             }
9494             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9495 
9496             unlock_user(p2, arg2, 0);
9497             unlock_user(p4, arg4, 0);
9498 
9499             return ret;
9500         }
9501 #endif
9502 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9503     case TARGET_NR_open_tree:
9504         {
9505             void *p2;
9506             int host_flags;
9507 
9508             if (!arg2) {
9509                 return -TARGET_EFAULT;
9510             }
9511 
9512             p2 = lock_user_string(arg2);
9513             if (!p2) {
9514                 return -TARGET_EFAULT;
9515             }
9516 
9517             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9518             if (arg3 & TARGET_O_CLOEXEC) {
9519                 host_flags |= O_CLOEXEC;
9520             }
9521 
9522             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9523 
9524             unlock_user(p2, arg2, 0);
9525 
9526             return ret;
9527         }
9528 #endif
9529 #ifdef TARGET_NR_stime /* not on alpha */
9530     case TARGET_NR_stime:
9531         {
9532             struct timespec ts;
9533             ts.tv_nsec = 0;
9534             if (get_user_sal(ts.tv_sec, arg1)) {
9535                 return -TARGET_EFAULT;
9536             }
9537             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9538         }
9539 #endif
9540 #ifdef TARGET_NR_alarm /* not on alpha */
9541     case TARGET_NR_alarm:
9542         return alarm(arg1);
9543 #endif
9544 #ifdef TARGET_NR_pause /* not on alpha */
9545     case TARGET_NR_pause:
9546         if (!block_signals()) {
9547             sigsuspend(&get_task_state(cpu)->signal_mask);
9548         }
9549         return -TARGET_EINTR;
9550 #endif
9551 #ifdef TARGET_NR_utime
9552     case TARGET_NR_utime:
9553         {
9554             struct utimbuf tbuf, *host_tbuf;
9555             struct target_utimbuf *target_tbuf;
9556             if (arg2) {
9557                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9558                     return -TARGET_EFAULT;
9559                 tbuf.actime = tswapal(target_tbuf->actime);
9560                 tbuf.modtime = tswapal(target_tbuf->modtime);
9561                 unlock_user_struct(target_tbuf, arg2, 0);
9562                 host_tbuf = &tbuf;
9563             } else {
9564                 host_tbuf = NULL;
9565             }
9566             if (!(p = lock_user_string(arg1)))
9567                 return -TARGET_EFAULT;
9568             ret = get_errno(utime(p, host_tbuf));
9569             unlock_user(p, arg1, 0);
9570         }
9571         return ret;
9572 #endif
9573 #ifdef TARGET_NR_utimes
9574     case TARGET_NR_utimes:
9575         {
9576             struct timeval *tvp, tv[2];
9577             if (arg2) {
9578                 if (copy_from_user_timeval(&tv[0], arg2)
9579                     || copy_from_user_timeval(&tv[1],
9580                                               arg2 + sizeof(struct target_timeval)))
9581                     return -TARGET_EFAULT;
9582                 tvp = tv;
9583             } else {
9584                 tvp = NULL;
9585             }
9586             if (!(p = lock_user_string(arg1)))
9587                 return -TARGET_EFAULT;
9588             ret = get_errno(utimes(p, tvp));
9589             unlock_user(p, arg1, 0);
9590         }
9591         return ret;
9592 #endif
9593 #if defined(TARGET_NR_futimesat)
9594     case TARGET_NR_futimesat:
9595         {
9596             struct timeval *tvp, tv[2];
9597             if (arg3) {
9598                 if (copy_from_user_timeval(&tv[0], arg3)
9599                     || copy_from_user_timeval(&tv[1],
9600                                               arg3 + sizeof(struct target_timeval)))
9601                     return -TARGET_EFAULT;
9602                 tvp = tv;
9603             } else {
9604                 tvp = NULL;
9605             }
9606             if (!(p = lock_user_string(arg2))) {
9607                 return -TARGET_EFAULT;
9608             }
9609             ret = get_errno(futimesat(arg1, path(p), tvp));
9610             unlock_user(p, arg2, 0);
9611         }
9612         return ret;
9613 #endif
9614 #ifdef TARGET_NR_access
9615     case TARGET_NR_access:
9616         if (!(p = lock_user_string(arg1))) {
9617             return -TARGET_EFAULT;
9618         }
9619         ret = get_errno(access(path(p), arg2));
9620         unlock_user(p, arg1, 0);
9621         return ret;
9622 #endif
9623 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9624     case TARGET_NR_faccessat:
9625         if (!(p = lock_user_string(arg2))) {
9626             return -TARGET_EFAULT;
9627         }
9628         ret = get_errno(faccessat(arg1, p, arg3, 0));
9629         unlock_user(p, arg2, 0);
9630         return ret;
9631 #endif
9632 #if defined(TARGET_NR_faccessat2)
9633     case TARGET_NR_faccessat2:
9634         if (!(p = lock_user_string(arg2))) {
9635             return -TARGET_EFAULT;
9636         }
9637         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9638         unlock_user(p, arg2, 0);
9639         return ret;
9640 #endif
9641 #ifdef TARGET_NR_nice /* not on alpha */
9642     case TARGET_NR_nice:
9643         return get_errno(nice(arg1));
9644 #endif
9645     case TARGET_NR_sync:
9646         sync();
9647         return 0;
9648 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9649     case TARGET_NR_syncfs:
9650         return get_errno(syncfs(arg1));
9651 #endif
9652     case TARGET_NR_kill:
9653         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9654 #ifdef TARGET_NR_rename
9655     case TARGET_NR_rename:
9656         {
9657             void *p2;
9658             p = lock_user_string(arg1);
9659             p2 = lock_user_string(arg2);
9660             if (!p || !p2)
9661                 ret = -TARGET_EFAULT;
9662             else
9663                 ret = get_errno(rename(p, p2));
9664             unlock_user(p2, arg2, 0);
9665             unlock_user(p, arg1, 0);
9666         }
9667         return ret;
9668 #endif
9669 #if defined(TARGET_NR_renameat)
9670     case TARGET_NR_renameat:
9671         {
9672             void *p2;
9673             p  = lock_user_string(arg2);
9674             p2 = lock_user_string(arg4);
9675             if (!p || !p2)
9676                 ret = -TARGET_EFAULT;
9677             else
9678                 ret = get_errno(renameat(arg1, p, arg3, p2));
9679             unlock_user(p2, arg4, 0);
9680             unlock_user(p, arg2, 0);
9681         }
9682         return ret;
9683 #endif
9684 #if defined(TARGET_NR_renameat2)
9685     case TARGET_NR_renameat2:
9686         {
9687             void *p2;
9688             p  = lock_user_string(arg2);
9689             p2 = lock_user_string(arg4);
9690             if (!p || !p2) {
9691                 ret = -TARGET_EFAULT;
9692             } else {
9693                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9694             }
9695             unlock_user(p2, arg4, 0);
9696             unlock_user(p, arg2, 0);
9697         }
9698         return ret;
9699 #endif
9700 #ifdef TARGET_NR_mkdir
9701     case TARGET_NR_mkdir:
9702         if (!(p = lock_user_string(arg1)))
9703             return -TARGET_EFAULT;
9704         ret = get_errno(mkdir(p, arg2));
9705         unlock_user(p, arg1, 0);
9706         return ret;
9707 #endif
9708 #if defined(TARGET_NR_mkdirat)
9709     case TARGET_NR_mkdirat:
9710         if (!(p = lock_user_string(arg2)))
9711             return -TARGET_EFAULT;
9712         ret = get_errno(mkdirat(arg1, p, arg3));
9713         unlock_user(p, arg2, 0);
9714         return ret;
9715 #endif
9716 #ifdef TARGET_NR_rmdir
9717     case TARGET_NR_rmdir:
9718         if (!(p = lock_user_string(arg1)))
9719             return -TARGET_EFAULT;
9720         ret = get_errno(rmdir(p));
9721         unlock_user(p, arg1, 0);
9722         return ret;
9723 #endif
9724     case TARGET_NR_dup:
9725         ret = get_errno(dup(arg1));
9726         if (ret >= 0) {
9727             fd_trans_dup(arg1, ret);
9728         }
9729         return ret;
9730 #ifdef TARGET_NR_pipe
9731     case TARGET_NR_pipe:
9732         return do_pipe(cpu_env, arg1, 0, 0);
9733 #endif
9734 #ifdef TARGET_NR_pipe2
9735     case TARGET_NR_pipe2:
9736         return do_pipe(cpu_env, arg1,
9737                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9738 #endif
9739     case TARGET_NR_times:
9740         {
9741             struct target_tms *tmsp;
9742             struct tms tms;
9743             ret = get_errno(times(&tms));
9744             if (arg1) {
9745                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9746                 if (!tmsp)
9747                     return -TARGET_EFAULT;
9748                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9749                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9750                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9751                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9752             }
9753             if (!is_error(ret))
9754                 ret = host_to_target_clock_t(ret);
9755         }
9756         return ret;
9757     case TARGET_NR_acct:
9758         if (arg1 == 0) {
9759             ret = get_errno(acct(NULL));
9760         } else {
9761             if (!(p = lock_user_string(arg1))) {
9762                 return -TARGET_EFAULT;
9763             }
9764             ret = get_errno(acct(path(p)));
9765             unlock_user(p, arg1, 0);
9766         }
9767         return ret;
9768 #ifdef TARGET_NR_umount2
9769     case TARGET_NR_umount2:
9770         if (!(p = lock_user_string(arg1)))
9771             return -TARGET_EFAULT;
9772         ret = get_errno(umount2(p, arg2));
9773         unlock_user(p, arg1, 0);
9774         return ret;
9775 #endif
9776     case TARGET_NR_ioctl:
9777         return do_ioctl(arg1, arg2, arg3);
9778 #ifdef TARGET_NR_fcntl
9779     case TARGET_NR_fcntl:
9780         return do_fcntl(arg1, arg2, arg3);
9781 #endif
9782     case TARGET_NR_setpgid:
9783         return get_errno(setpgid(arg1, arg2));
9784     case TARGET_NR_umask:
9785         return get_errno(umask(arg1));
9786     case TARGET_NR_chroot:
9787         if (!(p = lock_user_string(arg1)))
9788             return -TARGET_EFAULT;
9789         ret = get_errno(chroot(p));
9790         unlock_user(p, arg1, 0);
9791         return ret;
9792 #ifdef TARGET_NR_dup2
9793     case TARGET_NR_dup2:
9794         ret = get_errno(dup2(arg1, arg2));
9795         if (ret >= 0) {
9796             fd_trans_dup(arg1, arg2);
9797         }
9798         return ret;
9799 #endif
9800 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9801     case TARGET_NR_dup3:
9802     {
9803         int host_flags;
9804 
9805         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9806             return -EINVAL;
9807         }
9808         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9809         ret = get_errno(dup3(arg1, arg2, host_flags));
9810         if (ret >= 0) {
9811             fd_trans_dup(arg1, arg2);
9812         }
9813         return ret;
9814     }
9815 #endif
9816 #ifdef TARGET_NR_getppid /* not on alpha */
9817     case TARGET_NR_getppid:
9818         return get_errno(getppid());
9819 #endif
9820 #ifdef TARGET_NR_getpgrp
9821     case TARGET_NR_getpgrp:
9822         return get_errno(getpgrp());
9823 #endif
9824     case TARGET_NR_setsid:
9825         return get_errno(setsid());
9826 #ifdef TARGET_NR_sigaction
9827     case TARGET_NR_sigaction:
9828         {
9829 #if defined(TARGET_MIPS)
9830 	    struct target_sigaction act, oact, *pact, *old_act;
9831 
9832 	    if (arg2) {
9833                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9834                     return -TARGET_EFAULT;
9835 		act._sa_handler = old_act->_sa_handler;
9836 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9837 		act.sa_flags = old_act->sa_flags;
9838 		unlock_user_struct(old_act, arg2, 0);
9839 		pact = &act;
9840 	    } else {
9841 		pact = NULL;
9842 	    }
9843 
9844         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9845 
9846 	    if (!is_error(ret) && arg3) {
9847                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9848                     return -TARGET_EFAULT;
9849 		old_act->_sa_handler = oact._sa_handler;
9850 		old_act->sa_flags = oact.sa_flags;
9851 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9852 		old_act->sa_mask.sig[1] = 0;
9853 		old_act->sa_mask.sig[2] = 0;
9854 		old_act->sa_mask.sig[3] = 0;
9855 		unlock_user_struct(old_act, arg3, 1);
9856 	    }
9857 #else
9858             struct target_old_sigaction *old_act;
9859             struct target_sigaction act, oact, *pact;
9860             if (arg2) {
9861                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9862                     return -TARGET_EFAULT;
9863                 act._sa_handler = old_act->_sa_handler;
9864                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9865                 act.sa_flags = old_act->sa_flags;
9866 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9867                 act.sa_restorer = old_act->sa_restorer;
9868 #endif
9869                 unlock_user_struct(old_act, arg2, 0);
9870                 pact = &act;
9871             } else {
9872                 pact = NULL;
9873             }
9874             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9875             if (!is_error(ret) && arg3) {
9876                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9877                     return -TARGET_EFAULT;
9878                 old_act->_sa_handler = oact._sa_handler;
9879                 old_act->sa_mask = oact.sa_mask.sig[0];
9880                 old_act->sa_flags = oact.sa_flags;
9881 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9882                 old_act->sa_restorer = oact.sa_restorer;
9883 #endif
9884                 unlock_user_struct(old_act, arg3, 1);
9885             }
9886 #endif
9887         }
9888         return ret;
9889 #endif
9890     case TARGET_NR_rt_sigaction:
9891         {
9892             /*
9893              * For Alpha and SPARC this is a 5 argument syscall, with
9894              * a 'restorer' parameter which must be copied into the
9895              * sa_restorer field of the sigaction struct.
9896              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9897              * and arg5 is the sigsetsize.
9898              */
9899 #if defined(TARGET_ALPHA)
9900             target_ulong sigsetsize = arg4;
9901             target_ulong restorer = arg5;
9902 #elif defined(TARGET_SPARC)
9903             target_ulong restorer = arg4;
9904             target_ulong sigsetsize = arg5;
9905 #else
9906             target_ulong sigsetsize = arg4;
9907             target_ulong restorer = 0;
9908 #endif
9909             struct target_sigaction *act = NULL;
9910             struct target_sigaction *oact = NULL;
9911 
9912             if (sigsetsize != sizeof(target_sigset_t)) {
9913                 return -TARGET_EINVAL;
9914             }
9915             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9916                 return -TARGET_EFAULT;
9917             }
9918             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9919                 ret = -TARGET_EFAULT;
9920             } else {
9921                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9922                 if (oact) {
9923                     unlock_user_struct(oact, arg3, 1);
9924                 }
9925             }
9926             if (act) {
9927                 unlock_user_struct(act, arg2, 0);
9928             }
9929         }
9930         return ret;
9931 #ifdef TARGET_NR_sgetmask /* not on alpha */
9932     case TARGET_NR_sgetmask:
9933         {
9934             sigset_t cur_set;
9935             abi_ulong target_set;
9936             ret = do_sigprocmask(0, NULL, &cur_set);
9937             if (!ret) {
9938                 host_to_target_old_sigset(&target_set, &cur_set);
9939                 ret = target_set;
9940             }
9941         }
9942         return ret;
9943 #endif
9944 #ifdef TARGET_NR_ssetmask /* not on alpha */
9945     case TARGET_NR_ssetmask:
9946         {
9947             sigset_t set, oset;
9948             abi_ulong target_set = arg1;
9949             target_to_host_old_sigset(&set, &target_set);
9950             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9951             if (!ret) {
9952                 host_to_target_old_sigset(&target_set, &oset);
9953                 ret = target_set;
9954             }
9955         }
9956         return ret;
9957 #endif
9958 #ifdef TARGET_NR_sigprocmask
9959     case TARGET_NR_sigprocmask:
9960         {
9961 #if defined(TARGET_ALPHA)
9962             sigset_t set, oldset;
9963             abi_ulong mask;
9964             int how;
9965 
9966             switch (arg1) {
9967             case TARGET_SIG_BLOCK:
9968                 how = SIG_BLOCK;
9969                 break;
9970             case TARGET_SIG_UNBLOCK:
9971                 how = SIG_UNBLOCK;
9972                 break;
9973             case TARGET_SIG_SETMASK:
9974                 how = SIG_SETMASK;
9975                 break;
9976             default:
9977                 return -TARGET_EINVAL;
9978             }
9979             mask = arg2;
9980             target_to_host_old_sigset(&set, &mask);
9981 
9982             ret = do_sigprocmask(how, &set, &oldset);
9983             if (!is_error(ret)) {
9984                 host_to_target_old_sigset(&mask, &oldset);
9985                 ret = mask;
9986                 cpu_env->ir[IR_V0] = 0; /* force no error */
9987             }
9988 #else
9989             sigset_t set, oldset, *set_ptr;
9990             int how;
9991 
9992             if (arg2) {
9993                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9994                 if (!p) {
9995                     return -TARGET_EFAULT;
9996                 }
9997                 target_to_host_old_sigset(&set, p);
9998                 unlock_user(p, arg2, 0);
9999                 set_ptr = &set;
10000                 switch (arg1) {
10001                 case TARGET_SIG_BLOCK:
10002                     how = SIG_BLOCK;
10003                     break;
10004                 case TARGET_SIG_UNBLOCK:
10005                     how = SIG_UNBLOCK;
10006                     break;
10007                 case TARGET_SIG_SETMASK:
10008                     how = SIG_SETMASK;
10009                     break;
10010                 default:
10011                     return -TARGET_EINVAL;
10012                 }
10013             } else {
10014                 how = 0;
10015                 set_ptr = NULL;
10016             }
10017             ret = do_sigprocmask(how, set_ptr, &oldset);
10018             if (!is_error(ret) && arg3) {
10019                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10020                     return -TARGET_EFAULT;
10021                 host_to_target_old_sigset(p, &oldset);
10022                 unlock_user(p, arg3, sizeof(target_sigset_t));
10023             }
10024 #endif
10025         }
10026         return ret;
10027 #endif
10028     case TARGET_NR_rt_sigprocmask:
10029         {
10030             int how = arg1;
10031             sigset_t set, oldset, *set_ptr;
10032 
10033             if (arg4 != sizeof(target_sigset_t)) {
10034                 return -TARGET_EINVAL;
10035             }
10036 
10037             if (arg2) {
10038                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10039                 if (!p) {
10040                     return -TARGET_EFAULT;
10041                 }
10042                 target_to_host_sigset(&set, p);
10043                 unlock_user(p, arg2, 0);
10044                 set_ptr = &set;
10045                 switch(how) {
10046                 case TARGET_SIG_BLOCK:
10047                     how = SIG_BLOCK;
10048                     break;
10049                 case TARGET_SIG_UNBLOCK:
10050                     how = SIG_UNBLOCK;
10051                     break;
10052                 case TARGET_SIG_SETMASK:
10053                     how = SIG_SETMASK;
10054                     break;
10055                 default:
10056                     return -TARGET_EINVAL;
10057                 }
10058             } else {
10059                 how = 0;
10060                 set_ptr = NULL;
10061             }
10062             ret = do_sigprocmask(how, set_ptr, &oldset);
10063             if (!is_error(ret) && arg3) {
10064                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10065                     return -TARGET_EFAULT;
10066                 host_to_target_sigset(p, &oldset);
10067                 unlock_user(p, arg3, sizeof(target_sigset_t));
10068             }
10069         }
10070         return ret;
10071 #ifdef TARGET_NR_sigpending
10072     case TARGET_NR_sigpending:
10073         {
10074             sigset_t set;
10075             ret = get_errno(sigpending(&set));
10076             if (!is_error(ret)) {
10077                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10078                     return -TARGET_EFAULT;
10079                 host_to_target_old_sigset(p, &set);
10080                 unlock_user(p, arg1, sizeof(target_sigset_t));
10081             }
10082         }
10083         return ret;
10084 #endif
10085     case TARGET_NR_rt_sigpending:
10086         {
10087             sigset_t set;
10088 
10089             /* Yes, this check is >, not != like most. We follow the kernel's
10090              * logic and it does it like this because it implements
10091              * NR_sigpending through the same code path, and in that case
10092              * the old_sigset_t is smaller in size.
10093              */
10094             if (arg2 > sizeof(target_sigset_t)) {
10095                 return -TARGET_EINVAL;
10096             }
10097 
10098             ret = get_errno(sigpending(&set));
10099             if (!is_error(ret)) {
10100                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10101                     return -TARGET_EFAULT;
10102                 host_to_target_sigset(p, &set);
10103                 unlock_user(p, arg1, sizeof(target_sigset_t));
10104             }
10105         }
10106         return ret;
10107 #ifdef TARGET_NR_sigsuspend
10108     case TARGET_NR_sigsuspend:
10109         {
10110             sigset_t *set;
10111 
10112 #if defined(TARGET_ALPHA)
10113             TaskState *ts = get_task_state(cpu);
10114             /* target_to_host_old_sigset will bswap back */
10115             abi_ulong mask = tswapal(arg1);
10116             set = &ts->sigsuspend_mask;
10117             target_to_host_old_sigset(set, &mask);
10118 #else
10119             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10120             if (ret != 0) {
10121                 return ret;
10122             }
10123 #endif
10124             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10125             finish_sigsuspend_mask(ret);
10126         }
10127         return ret;
10128 #endif
10129     case TARGET_NR_rt_sigsuspend:
10130         {
10131             sigset_t *set;
10132 
10133             ret = process_sigsuspend_mask(&set, arg1, arg2);
10134             if (ret != 0) {
10135                 return ret;
10136             }
10137             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10138             finish_sigsuspend_mask(ret);
10139         }
10140         return ret;
10141 #ifdef TARGET_NR_rt_sigtimedwait
10142     case TARGET_NR_rt_sigtimedwait:
10143         {
10144             sigset_t set;
10145             struct timespec uts, *puts;
10146             siginfo_t uinfo;
10147 
10148             if (arg4 != sizeof(target_sigset_t)) {
10149                 return -TARGET_EINVAL;
10150             }
10151 
10152             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10153                 return -TARGET_EFAULT;
10154             target_to_host_sigset(&set, p);
10155             unlock_user(p, arg1, 0);
10156             if (arg3) {
10157                 puts = &uts;
10158                 if (target_to_host_timespec(puts, arg3)) {
10159                     return -TARGET_EFAULT;
10160                 }
10161             } else {
10162                 puts = NULL;
10163             }
10164             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10165                                                  SIGSET_T_SIZE));
10166             if (!is_error(ret)) {
10167                 if (arg2) {
10168                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10169                                   0);
10170                     if (!p) {
10171                         return -TARGET_EFAULT;
10172                     }
10173                     host_to_target_siginfo(p, &uinfo);
10174                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10175                 }
10176                 ret = host_to_target_signal(ret);
10177             }
10178         }
10179         return ret;
10180 #endif
10181 #ifdef TARGET_NR_rt_sigtimedwait_time64
10182     case TARGET_NR_rt_sigtimedwait_time64:
10183         {
10184             sigset_t set;
10185             struct timespec uts, *puts;
10186             siginfo_t uinfo;
10187 
10188             if (arg4 != sizeof(target_sigset_t)) {
10189                 return -TARGET_EINVAL;
10190             }
10191 
10192             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10193             if (!p) {
10194                 return -TARGET_EFAULT;
10195             }
10196             target_to_host_sigset(&set, p);
10197             unlock_user(p, arg1, 0);
10198             if (arg3) {
10199                 puts = &uts;
10200                 if (target_to_host_timespec64(puts, arg3)) {
10201                     return -TARGET_EFAULT;
10202                 }
10203             } else {
10204                 puts = NULL;
10205             }
10206             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10207                                                  SIGSET_T_SIZE));
10208             if (!is_error(ret)) {
10209                 if (arg2) {
10210                     p = lock_user(VERIFY_WRITE, arg2,
10211                                   sizeof(target_siginfo_t), 0);
10212                     if (!p) {
10213                         return -TARGET_EFAULT;
10214                     }
10215                     host_to_target_siginfo(p, &uinfo);
10216                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10217                 }
10218                 ret = host_to_target_signal(ret);
10219             }
10220         }
10221         return ret;
10222 #endif
10223     case TARGET_NR_rt_sigqueueinfo:
10224         {
10225             siginfo_t uinfo;
10226 
10227             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10228             if (!p) {
10229                 return -TARGET_EFAULT;
10230             }
10231             target_to_host_siginfo(&uinfo, p);
10232             unlock_user(p, arg3, 0);
10233             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10234         }
10235         return ret;
10236     case TARGET_NR_rt_tgsigqueueinfo:
10237         {
10238             siginfo_t uinfo;
10239 
10240             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10241             if (!p) {
10242                 return -TARGET_EFAULT;
10243             }
10244             target_to_host_siginfo(&uinfo, p);
10245             unlock_user(p, arg4, 0);
10246             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10247         }
10248         return ret;
10249 #ifdef TARGET_NR_sigreturn
10250     case TARGET_NR_sigreturn:
10251         if (block_signals()) {
10252             return -QEMU_ERESTARTSYS;
10253         }
10254         return do_sigreturn(cpu_env);
10255 #endif
10256     case TARGET_NR_rt_sigreturn:
10257         if (block_signals()) {
10258             return -QEMU_ERESTARTSYS;
10259         }
10260         return do_rt_sigreturn(cpu_env);
10261     case TARGET_NR_sethostname:
10262         if (!(p = lock_user_string(arg1)))
10263             return -TARGET_EFAULT;
10264         ret = get_errno(sethostname(p, arg2));
10265         unlock_user(p, arg1, 0);
10266         return ret;
10267 #ifdef TARGET_NR_setrlimit
10268     case TARGET_NR_setrlimit:
10269         {
10270             int resource = target_to_host_resource(arg1);
10271             struct target_rlimit *target_rlim;
10272             struct rlimit rlim;
10273             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10274                 return -TARGET_EFAULT;
10275             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10276             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10277             unlock_user_struct(target_rlim, arg2, 0);
10278             /*
10279              * If we just passed through resource limit settings for memory then
10280              * they would also apply to QEMU's own allocations, and QEMU will
10281              * crash or hang or die if its allocations fail. Ideally we would
10282              * track the guest allocations in QEMU and apply the limits ourselves.
10283              * For now, just tell the guest the call succeeded but don't actually
10284              * limit anything.
10285              */
10286             if (resource != RLIMIT_AS &&
10287                 resource != RLIMIT_DATA &&
10288                 resource != RLIMIT_STACK) {
10289                 return get_errno(setrlimit(resource, &rlim));
10290             } else {
10291                 return 0;
10292             }
10293         }
10294 #endif
10295 #ifdef TARGET_NR_getrlimit
10296     case TARGET_NR_getrlimit:
10297         {
10298             int resource = target_to_host_resource(arg1);
10299             struct target_rlimit *target_rlim;
10300             struct rlimit rlim;
10301 
10302             ret = get_errno(getrlimit(resource, &rlim));
10303             if (!is_error(ret)) {
10304                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10305                     return -TARGET_EFAULT;
10306                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10307                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10308                 unlock_user_struct(target_rlim, arg2, 1);
10309             }
10310         }
10311         return ret;
10312 #endif
10313     case TARGET_NR_getrusage:
10314         {
10315             struct rusage rusage;
10316             ret = get_errno(getrusage(arg1, &rusage));
10317             if (!is_error(ret)) {
10318                 ret = host_to_target_rusage(arg2, &rusage);
10319             }
10320         }
10321         return ret;
10322 #if defined(TARGET_NR_gettimeofday)
10323     case TARGET_NR_gettimeofday:
10324         {
10325             struct timeval tv;
10326             struct timezone tz;
10327 
10328             ret = get_errno(gettimeofday(&tv, &tz));
10329             if (!is_error(ret)) {
10330                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10331                     return -TARGET_EFAULT;
10332                 }
10333                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10334                     return -TARGET_EFAULT;
10335                 }
10336             }
10337         }
10338         return ret;
10339 #endif
10340 #if defined(TARGET_NR_settimeofday)
10341     case TARGET_NR_settimeofday:
10342         {
10343             struct timeval tv, *ptv = NULL;
10344             struct timezone tz, *ptz = NULL;
10345 
10346             if (arg1) {
10347                 if (copy_from_user_timeval(&tv, arg1)) {
10348                     return -TARGET_EFAULT;
10349                 }
10350                 ptv = &tv;
10351             }
10352 
10353             if (arg2) {
10354                 if (copy_from_user_timezone(&tz, arg2)) {
10355                     return -TARGET_EFAULT;
10356                 }
10357                 ptz = &tz;
10358             }
10359 
10360             return get_errno(settimeofday(ptv, ptz));
10361         }
10362 #endif
10363 #if defined(TARGET_NR_select)
10364     case TARGET_NR_select:
10365 #if defined(TARGET_WANT_NI_OLD_SELECT)
10366         /* some architectures used to have old_select here
10367          * but now ENOSYS it.
10368          */
10369         ret = -TARGET_ENOSYS;
10370 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10371         ret = do_old_select(arg1);
10372 #else
10373         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10374 #endif
10375         return ret;
10376 #endif
10377 #ifdef TARGET_NR_pselect6
10378     case TARGET_NR_pselect6:
10379         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10380 #endif
10381 #ifdef TARGET_NR_pselect6_time64
10382     case TARGET_NR_pselect6_time64:
10383         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10384 #endif
10385 #ifdef TARGET_NR_symlink
10386     case TARGET_NR_symlink:
10387         {
10388             void *p2;
10389             p = lock_user_string(arg1);
10390             p2 = lock_user_string(arg2);
10391             if (!p || !p2)
10392                 ret = -TARGET_EFAULT;
10393             else
10394                 ret = get_errno(symlink(p, p2));
10395             unlock_user(p2, arg2, 0);
10396             unlock_user(p, arg1, 0);
10397         }
10398         return ret;
10399 #endif
10400 #if defined(TARGET_NR_symlinkat)
10401     case TARGET_NR_symlinkat:
10402         {
10403             void *p2;
10404             p  = lock_user_string(arg1);
10405             p2 = lock_user_string(arg3);
10406             if (!p || !p2)
10407                 ret = -TARGET_EFAULT;
10408             else
10409                 ret = get_errno(symlinkat(p, arg2, p2));
10410             unlock_user(p2, arg3, 0);
10411             unlock_user(p, arg1, 0);
10412         }
10413         return ret;
10414 #endif
10415 #ifdef TARGET_NR_readlink
10416     case TARGET_NR_readlink:
10417         {
10418             void *p2;
10419             p = lock_user_string(arg1);
10420             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10421             ret = get_errno(do_guest_readlink(p, p2, arg3));
10422             unlock_user(p2, arg2, ret);
10423             unlock_user(p, arg1, 0);
10424         }
10425         return ret;
10426 #endif
10427 #if defined(TARGET_NR_readlinkat)
10428     case TARGET_NR_readlinkat:
10429         {
10430             void *p2;
10431             p  = lock_user_string(arg2);
10432             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10433             if (!p || !p2) {
10434                 ret = -TARGET_EFAULT;
10435             } else if (!arg4) {
10436                 /* Short circuit this for the magic exe check. */
10437                 ret = -TARGET_EINVAL;
10438             } else if (is_proc_myself((const char *)p, "exe")) {
10439                 /*
10440                  * Don't worry about sign mismatch as earlier mapping
10441                  * logic would have thrown a bad address error.
10442                  */
10443                 ret = MIN(strlen(exec_path), arg4);
10444                 /* We cannot NUL terminate the string. */
10445                 memcpy(p2, exec_path, ret);
10446             } else {
10447                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10448             }
10449             unlock_user(p2, arg3, ret);
10450             unlock_user(p, arg2, 0);
10451         }
10452         return ret;
10453 #endif
10454 #ifdef TARGET_NR_swapon
10455     case TARGET_NR_swapon:
10456         if (!(p = lock_user_string(arg1)))
10457             return -TARGET_EFAULT;
10458         ret = get_errno(swapon(p, arg2));
10459         unlock_user(p, arg1, 0);
10460         return ret;
10461 #endif
10462     case TARGET_NR_reboot:
10463         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10464            /* arg4 must be ignored in all other cases */
10465            p = lock_user_string(arg4);
10466            if (!p) {
10467                return -TARGET_EFAULT;
10468            }
10469            ret = get_errno(reboot(arg1, arg2, arg3, p));
10470            unlock_user(p, arg4, 0);
10471         } else {
10472            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10473         }
10474         return ret;
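    /*
     * Old-style mmap: several 32-bit targets pass a single guest pointer to
     * a block of six abi_ulong arguments rather than passing them in
     * registers, so the block is locked, byte-swapped with tswapal() and
     * unpacked before calling do_mmap().
     */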
10475 #ifdef TARGET_NR_mmap
10476     case TARGET_NR_mmap:
10477 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10478     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10479     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10480     || defined(TARGET_S390X)
10481         {
10482             abi_ulong *v;
10483             abi_ulong v1, v2, v3, v4, v5, v6;
10484             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10485                 return -TARGET_EFAULT;
10486             v1 = tswapal(v[0]);
10487             v2 = tswapal(v[1]);
10488             v3 = tswapal(v[2]);
10489             v4 = tswapal(v[3]);
10490             v5 = tswapal(v[4]);
10491             v6 = tswapal(v[5]);
10492             unlock_user(v, arg1, 0);
10493             return do_mmap(v1, v2, v3, v4, v5, v6);
10494         }
10495 #else
10496         /* mmap pointers are always untagged */
10497         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10498 #endif
10499 #endif
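    /*
     * mmap2 takes its file offset in units of (1 << MMAP_SHIFT) bytes
     * (4096 by default), which lets 32-bit guests reach offsets beyond 4GB.
     */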
10500 #ifdef TARGET_NR_mmap2
10501     case TARGET_NR_mmap2:
10502 #ifndef MMAP_SHIFT
10503 #define MMAP_SHIFT 12
10504 #endif
10505         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10506                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10507 #endif
10508     case TARGET_NR_munmap:
10509         arg1 = cpu_untagged_addr(cpu, arg1);
10510         return get_errno(target_munmap(arg1, arg2));
10511     case TARGET_NR_mprotect:
10512         arg1 = cpu_untagged_addr(cpu, arg1);
10513         {
10514             TaskState *ts = get_task_state(cpu);
10515             /* Special hack to detect libc making the stack executable.  */
10516             if ((arg3 & PROT_GROWSDOWN)
10517                 && arg1 >= ts->info->stack_limit
10518                 && arg1 <= ts->info->start_stack) {
10519                 arg3 &= ~PROT_GROWSDOWN;
10520                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10521                 arg1 = ts->info->stack_limit;
10522             }
10523         }
10524         return get_errno(target_mprotect(arg1, arg2, arg3));
10525 #ifdef TARGET_NR_mremap
10526     case TARGET_NR_mremap:
10527         arg1 = cpu_untagged_addr(cpu, arg1);
10528         /* mremap new_addr (arg5) is always untagged */
10529         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10530 #endif
10531         /* ??? msync/mlock/munlock are broken for softmmu.  */
10532 #ifdef TARGET_NR_msync
10533     case TARGET_NR_msync:
10534         return get_errno(msync(g2h(cpu, arg1), arg2,
10535                                target_to_host_msync_arg(arg3)));
10536 #endif
10537 #ifdef TARGET_NR_mlock
10538     case TARGET_NR_mlock:
10539         return get_errno(mlock(g2h(cpu, arg1), arg2));
10540 #endif
10541 #ifdef TARGET_NR_munlock
10542     case TARGET_NR_munlock:
10543         return get_errno(munlock(g2h(cpu, arg1), arg2));
10544 #endif
10545 #ifdef TARGET_NR_mlockall
10546     case TARGET_NR_mlockall:
10547         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10548 #endif
10549 #ifdef TARGET_NR_munlockall
10550     case TARGET_NR_munlockall:
10551         return get_errno(munlockall());
10552 #endif
10553 #ifdef TARGET_NR_truncate
10554     case TARGET_NR_truncate:
10555         if (!(p = lock_user_string(arg1)))
10556             return -TARGET_EFAULT;
10557         ret = get_errno(truncate(p, arg2));
10558         unlock_user(p, arg1, 0);
10559         return ret;
10560 #endif
10561 #ifdef TARGET_NR_ftruncate
10562     case TARGET_NR_ftruncate:
10563         return get_errno(ftruncate(arg1, arg2));
10564 #endif
10565     case TARGET_NR_fchmod:
10566         return get_errno(fchmod(arg1, arg2));
10567 #if defined(TARGET_NR_fchmodat)
10568     case TARGET_NR_fchmodat:
10569         if (!(p = lock_user_string(arg2)))
10570             return -TARGET_EFAULT;
10571         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10572         unlock_user(p, arg2, 0);
10573         return ret;
10574 #endif
10575     case TARGET_NR_getpriority:
10576         /* Note that negative values are valid for getpriority, so we must
10577            differentiate based on errno settings.  */
10578         errno = 0;
10579         ret = getpriority(arg1, arg2);
10580         if (ret == -1 && errno != 0) {
10581             return -host_to_target_errno(errno);
10582         }
10583 #ifdef TARGET_ALPHA
10584         /* Return value is the unbiased priority.  Signal no error.  */
10585         cpu_env->ir[IR_V0] = 0;
10586 #else
10587         /* Return value is a biased priority to avoid negative numbers.  */
10588         ret = 20 - ret;
10589 #endif
10590         return ret;
10591     case TARGET_NR_setpriority:
10592         return get_errno(setpriority(arg1, arg2, arg3));
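    /*
     * statfs/fstatfs and their 64-bit variants share the host-to-target
     * conversion of struct statfs through the convert_statfs labels,
     * reached with a goto from the fstatfs cases.
     */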
10593 #ifdef TARGET_NR_statfs
10594     case TARGET_NR_statfs:
10595         if (!(p = lock_user_string(arg1))) {
10596             return -TARGET_EFAULT;
10597         }
10598         ret = get_errno(statfs(path(p), &stfs));
10599         unlock_user(p, arg1, 0);
10600     convert_statfs:
10601         if (!is_error(ret)) {
10602             struct target_statfs *target_stfs;
10603 
10604             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10605                 return -TARGET_EFAULT;
10606             __put_user(stfs.f_type, &target_stfs->f_type);
10607             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10608             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10609             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10610             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10611             __put_user(stfs.f_files, &target_stfs->f_files);
10612             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10613             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10614             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10615             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10616             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10617 #ifdef _STATFS_F_FLAGS
10618             __put_user(stfs.f_flags, &target_stfs->f_flags);
10619 #else
10620             __put_user(0, &target_stfs->f_flags);
10621 #endif
10622             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10623             unlock_user_struct(target_stfs, arg2, 1);
10624         }
10625         return ret;
10626 #endif
10627 #ifdef TARGET_NR_fstatfs
10628     case TARGET_NR_fstatfs:
10629         ret = get_errno(fstatfs(arg1, &stfs));
10630         goto convert_statfs;
10631 #endif
10632 #ifdef TARGET_NR_statfs64
10633     case TARGET_NR_statfs64:
10634         if (!(p = lock_user_string(arg1))) {
10635             return -TARGET_EFAULT;
10636         }
10637         ret = get_errno(statfs(path(p), &stfs));
10638         unlock_user(p, arg1, 0);
10639     convert_statfs64:
10640         if (!is_error(ret)) {
10641             struct target_statfs64 *target_stfs;
10642 
10643             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10644                 return -TARGET_EFAULT;
10645             __put_user(stfs.f_type, &target_stfs->f_type);
10646             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10647             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10648             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10649             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10650             __put_user(stfs.f_files, &target_stfs->f_files);
10651             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10652             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10653             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10654             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10655             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10656 #ifdef _STATFS_F_FLAGS
10657             __put_user(stfs.f_flags, &target_stfs->f_flags);
10658 #else
10659             __put_user(0, &target_stfs->f_flags);
10660 #endif
10661             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10662             unlock_user_struct(target_stfs, arg3, 1);
10663         }
10664         return ret;
10665     case TARGET_NR_fstatfs64:
10666         ret = get_errno(fstatfs(arg1, &stfs));
10667         goto convert_statfs64;
10668 #endif
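    /*
     * Socket family: most of these delegate to do_* helpers that translate
     * target sockaddr/msghdr layouts and flag values to the host before
     * issuing the real call; listen() and shutdown() need no translation
     * and are invoked directly.
     */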
10669 #ifdef TARGET_NR_socketcall
10670     case TARGET_NR_socketcall:
10671         return do_socketcall(arg1, arg2);
10672 #endif
10673 #ifdef TARGET_NR_accept
10674     case TARGET_NR_accept:
10675         return do_accept4(arg1, arg2, arg3, 0);
10676 #endif
10677 #ifdef TARGET_NR_accept4
10678     case TARGET_NR_accept4:
10679         return do_accept4(arg1, arg2, arg3, arg4);
10680 #endif
10681 #ifdef TARGET_NR_bind
10682     case TARGET_NR_bind:
10683         return do_bind(arg1, arg2, arg3);
10684 #endif
10685 #ifdef TARGET_NR_connect
10686     case TARGET_NR_connect:
10687         return do_connect(arg1, arg2, arg3);
10688 #endif
10689 #ifdef TARGET_NR_getpeername
10690     case TARGET_NR_getpeername:
10691         return do_getpeername(arg1, arg2, arg3);
10692 #endif
10693 #ifdef TARGET_NR_getsockname
10694     case TARGET_NR_getsockname:
10695         return do_getsockname(arg1, arg2, arg3);
10696 #endif
10697 #ifdef TARGET_NR_getsockopt
10698     case TARGET_NR_getsockopt:
10699         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10700 #endif
10701 #ifdef TARGET_NR_listen
10702     case TARGET_NR_listen:
10703         return get_errno(listen(arg1, arg2));
10704 #endif
10705 #ifdef TARGET_NR_recv
10706     case TARGET_NR_recv:
10707         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10708 #endif
10709 #ifdef TARGET_NR_recvfrom
10710     case TARGET_NR_recvfrom:
10711         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10712 #endif
10713 #ifdef TARGET_NR_recvmsg
10714     case TARGET_NR_recvmsg:
10715         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10716 #endif
10717 #ifdef TARGET_NR_send
10718     case TARGET_NR_send:
10719         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10720 #endif
10721 #ifdef TARGET_NR_sendmsg
10722     case TARGET_NR_sendmsg:
10723         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10724 #endif
10725 #ifdef TARGET_NR_sendmmsg
10726     case TARGET_NR_sendmmsg:
10727         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10728 #endif
10729 #ifdef TARGET_NR_recvmmsg
10730     case TARGET_NR_recvmmsg:
10731         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10732 #endif
10733 #ifdef TARGET_NR_sendto
10734     case TARGET_NR_sendto:
10735         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10736 #endif
10737 #ifdef TARGET_NR_shutdown
10738     case TARGET_NR_shutdown:
10739         return get_errno(shutdown(arg1, arg2));
10740 #endif
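    /*
     * getrandom: the guest buffer is locked for writing, and on success the
     * byte count returned by the host is passed to unlock_user() so only
     * the bytes actually produced are copied back to guest memory.
     */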
10741 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10742     case TARGET_NR_getrandom:
10743         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10744         if (!p) {
10745             return -TARGET_EFAULT;
10746         }
10747         ret = get_errno(getrandom(p, arg2, arg3));
10748         unlock_user(p, arg1, ret);
10749         return ret;
10750 #endif
10751 #ifdef TARGET_NR_socket
10752     case TARGET_NR_socket:
10753         return do_socket(arg1, arg2, arg3);
10754 #endif
10755 #ifdef TARGET_NR_socketpair
10756     case TARGET_NR_socketpair:
10757         return do_socketpair(arg1, arg2, arg3, arg4);
10758 #endif
10759 #ifdef TARGET_NR_setsockopt
10760     case TARGET_NR_setsockopt:
10761         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10762 #endif
10763 #if defined(TARGET_NR_syslog)
10764     case TARGET_NR_syslog:
10765         {
10766             int len = arg3;
10767 
10768             switch (arg1) {
10769             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10770             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10771             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10772             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10773             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10774             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10775             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10776             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10777                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10778             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10779             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10780             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10781                 {
10782                     if (len < 0) {
10783                         return -TARGET_EINVAL;
10784                     }
10785                     if (len == 0) {
10786                         return 0;
10787                     }
10788                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10789                     if (!p) {
10790                         return -TARGET_EFAULT;
10791                     }
10792                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10793                     unlock_user(p, arg2, arg3);
10794                 }
10795                 return ret;
10796             default:
10797                 return -TARGET_EINVAL;
10798             }
10799         }
10800         break;
10801 #endif
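    /*
     * setitimer/getitimer: struct itimerval embeds two timevals, so each is
     * converted separately with copy_from/to_user_timeval at consecutive
     * guest offsets.
     */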
10802     case TARGET_NR_setitimer:
10803         {
10804             struct itimerval value, ovalue, *pvalue;
10805 
10806             if (arg2) {
10807                 pvalue = &value;
10808                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10809                     || copy_from_user_timeval(&pvalue->it_value,
10810                                               arg2 + sizeof(struct target_timeval)))
10811                     return -TARGET_EFAULT;
10812             } else {
10813                 pvalue = NULL;
10814             }
10815             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10816             if (!is_error(ret) && arg3) {
10817                 if (copy_to_user_timeval(arg3,
10818                                          &ovalue.it_interval)
10819                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10820                                             &ovalue.it_value))
10821                     return -TARGET_EFAULT;
10822             }
10823         }
10824         return ret;
10825     case TARGET_NR_getitimer:
10826         {
10827             struct itimerval value;
10828 
10829             ret = get_errno(getitimer(arg1, &value));
10830             if (!is_error(ret) && arg2) {
10831                 if (copy_to_user_timeval(arg2,
10832                                          &value.it_interval)
10833                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10834                                             &value.it_value))
10835                     return -TARGET_EFAULT;
10836             }
10837         }
10838         return ret;
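    /*
     * stat/lstat/fstat share the do_stat conversion path: the host struct
     * stat is copied field by field into a zeroed target_stat, including
     * nanosecond timestamps when both host and target provide them.
     */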
10839 #ifdef TARGET_NR_stat
10840     case TARGET_NR_stat:
10841         if (!(p = lock_user_string(arg1))) {
10842             return -TARGET_EFAULT;
10843         }
10844         ret = get_errno(stat(path(p), &st));
10845         unlock_user(p, arg1, 0);
10846         goto do_stat;
10847 #endif
10848 #ifdef TARGET_NR_lstat
10849     case TARGET_NR_lstat:
10850         if (!(p = lock_user_string(arg1))) {
10851             return -TARGET_EFAULT;
10852         }
10853         ret = get_errno(lstat(path(p), &st));
10854         unlock_user(p, arg1, 0);
10855         goto do_stat;
10856 #endif
10857 #ifdef TARGET_NR_fstat
10858     case TARGET_NR_fstat:
10859         {
10860             ret = get_errno(fstat(arg1, &st));
10861 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10862         do_stat:
10863 #endif
10864             if (!is_error(ret)) {
10865                 struct target_stat *target_st;
10866 
10867                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10868                     return -TARGET_EFAULT;
10869                 memset(target_st, 0, sizeof(*target_st));
10870                 __put_user(st.st_dev, &target_st->st_dev);
10871                 __put_user(st.st_ino, &target_st->st_ino);
10872                 __put_user(st.st_mode, &target_st->st_mode);
10873                 __put_user(st.st_uid, &target_st->st_uid);
10874                 __put_user(st.st_gid, &target_st->st_gid);
10875                 __put_user(st.st_nlink, &target_st->st_nlink);
10876                 __put_user(st.st_rdev, &target_st->st_rdev);
10877                 __put_user(st.st_size, &target_st->st_size);
10878                 __put_user(st.st_blksize, &target_st->st_blksize);
10879                 __put_user(st.st_blocks, &target_st->st_blocks);
10880                 __put_user(st.st_atime, &target_st->target_st_atime);
10881                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10882                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10883 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10884                 __put_user(st.st_atim.tv_nsec,
10885                            &target_st->target_st_atime_nsec);
10886                 __put_user(st.st_mtim.tv_nsec,
10887                            &target_st->target_st_mtime_nsec);
10888                 __put_user(st.st_ctim.tv_nsec,
10889                            &target_st->target_st_ctime_nsec);
10890 #endif
10891                 unlock_user_struct(target_st, arg2, 1);
10892             }
10893         }
10894         return ret;
10895 #endif
10896     case TARGET_NR_vhangup:
10897         return get_errno(vhangup());
10898 #ifdef TARGET_NR_syscall
10899     case TARGET_NR_syscall:
10900         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10901                           arg6, arg7, arg8, 0);
10902 #endif
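    /*
     * wait4: status and rusage are written back only if the guest supplied
     * pointers for them; host_to_target_waitstatus() adjusts the status
     * encoding (e.g. signal numbers) for the target.
     */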
10903 #if defined(TARGET_NR_wait4)
10904     case TARGET_NR_wait4:
10905         {
10906             int status;
10907             abi_long status_ptr = arg2;
10908             struct rusage rusage, *rusage_ptr;
10909             abi_ulong target_rusage = arg4;
10910             abi_long rusage_err;
10911             if (target_rusage)
10912                 rusage_ptr = &rusage;
10913             else
10914                 rusage_ptr = NULL;
10915             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10916             if (!is_error(ret)) {
10917                 if (status_ptr && ret) {
10918                     status = host_to_target_waitstatus(status);
10919                     if (put_user_s32(status, status_ptr))
10920                         return -TARGET_EFAULT;
10921                 }
10922                 if (target_rusage) {
10923                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10924                     if (rusage_err) {
10925                         ret = rusage_err;
10926                     }
10927                 }
10928             }
10929         }
10930         return ret;
10931 #endif
10932 #ifdef TARGET_NR_swapoff
10933     case TARGET_NR_swapoff:
10934         if (!(p = lock_user_string(arg1)))
10935             return -TARGET_EFAULT;
10936         ret = get_errno(swapoff(p));
10937         unlock_user(p, arg1, 0);
10938         return ret;
10939 #endif
10940     case TARGET_NR_sysinfo:
10941         {
10942             struct target_sysinfo *target_value;
10943             struct sysinfo value;
10944             ret = get_errno(sysinfo(&value));
10945             if (!is_error(ret) && arg1)
10946             {
10947                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10948                     return -TARGET_EFAULT;
10949                 __put_user(value.uptime, &target_value->uptime);
10950                 __put_user(value.loads[0], &target_value->loads[0]);
10951                 __put_user(value.loads[1], &target_value->loads[1]);
10952                 __put_user(value.loads[2], &target_value->loads[2]);
10953                 __put_user(value.totalram, &target_value->totalram);
10954                 __put_user(value.freeram, &target_value->freeram);
10955                 __put_user(value.sharedram, &target_value->sharedram);
10956                 __put_user(value.bufferram, &target_value->bufferram);
10957                 __put_user(value.totalswap, &target_value->totalswap);
10958                 __put_user(value.freeswap, &target_value->freeswap);
10959                 __put_user(value.procs, &target_value->procs);
10960                 __put_user(value.totalhigh, &target_value->totalhigh);
10961                 __put_user(value.freehigh, &target_value->freehigh);
10962                 __put_user(value.mem_unit, &target_value->mem_unit);
10963                 unlock_user_struct(target_value, arg1, 1);
10964             }
10965         }
10966         return ret;
10967 #ifdef TARGET_NR_ipc
10968     case TARGET_NR_ipc:
10969         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10970 #endif
10971 #ifdef TARGET_NR_semget
10972     case TARGET_NR_semget:
10973         return get_errno(semget(arg1, arg2, arg3));
10974 #endif
10975 #ifdef TARGET_NR_semop
10976     case TARGET_NR_semop:
10977         return do_semtimedop(arg1, arg2, arg3, 0, false);
10978 #endif
10979 #ifdef TARGET_NR_semtimedop
10980     case TARGET_NR_semtimedop:
10981         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10982 #endif
10983 #ifdef TARGET_NR_semtimedop_time64
10984     case TARGET_NR_semtimedop_time64:
10985         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10986 #endif
10987 #ifdef TARGET_NR_semctl
10988     case TARGET_NR_semctl:
10989         return do_semctl(arg1, arg2, arg3, arg4);
10990 #endif
10991 #ifdef TARGET_NR_msgctl
10992     case TARGET_NR_msgctl:
10993         return do_msgctl(arg1, arg2, arg3);
10994 #endif
10995 #ifdef TARGET_NR_msgget
10996     case TARGET_NR_msgget:
10997         return get_errno(msgget(arg1, arg2));
10998 #endif
10999 #ifdef TARGET_NR_msgrcv
11000     case TARGET_NR_msgrcv:
11001         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11002 #endif
11003 #ifdef TARGET_NR_msgsnd
11004     case TARGET_NR_msgsnd:
11005         return do_msgsnd(arg1, arg2, arg3, arg4);
11006 #endif
11007 #ifdef TARGET_NR_shmget
11008     case TARGET_NR_shmget:
11009         return get_errno(shmget(arg1, arg2, arg3));
11010 #endif
11011 #ifdef TARGET_NR_shmctl
11012     case TARGET_NR_shmctl:
11013         return do_shmctl(arg1, arg2, arg3);
11014 #endif
11015 #ifdef TARGET_NR_shmat
11016     case TARGET_NR_shmat:
11017         return target_shmat(cpu_env, arg1, arg2, arg3);
11018 #endif
11019 #ifdef TARGET_NR_shmdt
11020     case TARGET_NR_shmdt:
11021         return target_shmdt(arg1);
11022 #endif
11023     case TARGET_NR_fsync:
11024         return get_errno(fsync(arg1));
11025     case TARGET_NR_clone:
11026         /* Linux manages to have three different orderings for its
11027          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11028          * match the kernel's CONFIG_CLONE_* settings.
11029          * Microblaze is further special in that it uses a sixth
11030          * implicit argument to clone for the TLS pointer.
11031          */
11032 #if defined(TARGET_MICROBLAZE)
11033         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11034 #elif defined(TARGET_CLONE_BACKWARDS)
11035         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11036 #elif defined(TARGET_CLONE_BACKWARDS2)
11037         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11038 #else
11039         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11040 #endif
11041         return ret;
11042 #ifdef __NR_exit_group
11043         /* new thread calls */
11044     case TARGET_NR_exit_group:
11045         preexit_cleanup(cpu_env, arg1);
11046         return get_errno(exit_group(arg1));
11047 #endif
11048     case TARGET_NR_setdomainname:
11049         if (!(p = lock_user_string(arg1)))
11050             return -TARGET_EFAULT;
11051         ret = get_errno(setdomainname(p, arg2));
11052         unlock_user(p, arg1, 0);
11053         return ret;
11054     case TARGET_NR_uname:
11055         /* no need to transcode because we use the linux syscall */
11056         {
11057             struct new_utsname * buf;
11058 
11059             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11060                 return -TARGET_EFAULT;
11061             ret = get_errno(sys_uname(buf));
11062             if (!is_error(ret)) {
11063                 /* Overwrite the native machine name with whatever is being
11064                    emulated. */
11065                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11066                           sizeof(buf->machine));
11067                 /* Allow the user to override the reported release.  */
11068                 if (qemu_uname_release && *qemu_uname_release) {
11069                     g_strlcpy(buf->release, qemu_uname_release,
11070                               sizeof(buf->release));
11071                 }
11072             }
11073             unlock_user_struct(buf, arg1, 1);
11074         }
11075         return ret;
11076 #ifdef TARGET_I386
11077     case TARGET_NR_modify_ldt:
11078         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11079 #if !defined(TARGET_X86_64)
11080     case TARGET_NR_vm86:
11081         return do_vm86(cpu_env, arg1, arg2);
11082 #endif
11083 #endif
11084 #if defined(TARGET_NR_adjtimex)
11085     case TARGET_NR_adjtimex:
11086         {
11087             struct timex host_buf;
11088 
11089             if (target_to_host_timex(&host_buf, arg1) != 0) {
11090                 return -TARGET_EFAULT;
11091             }
11092             ret = get_errno(adjtimex(&host_buf));
11093             if (!is_error(ret)) {
11094                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11095                     return -TARGET_EFAULT;
11096                 }
11097             }
11098         }
11099         return ret;
11100 #endif
11101 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11102     case TARGET_NR_clock_adjtime:
11103         {
11104             struct timex htx;
11105 
11106             if (target_to_host_timex(&htx, arg2) != 0) {
11107                 return -TARGET_EFAULT;
11108             }
11109             ret = get_errno(clock_adjtime(arg1, &htx));
11110             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11111                 return -TARGET_EFAULT;
11112             }
11113         }
11114         return ret;
11115 #endif
11116 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11117     case TARGET_NR_clock_adjtime64:
11118         {
11119             struct timex htx;
11120 
11121             if (target_to_host_timex64(&htx, arg2) != 0) {
11122                 return -TARGET_EFAULT;
11123             }
11124             ret = get_errno(clock_adjtime(arg1, &htx));
11125             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11126                 return -TARGET_EFAULT;
11127             }
11128         }
11129         return ret;
11130 #endif
11131     case TARGET_NR_getpgid:
11132         return get_errno(getpgid(arg1));
11133     case TARGET_NR_fchdir:
11134         return get_errno(fchdir(arg1));
11135     case TARGET_NR_personality:
11136         return get_errno(personality(arg1));
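    /*
     * _llseek: the 64-bit offset arrives as two 32-bit halves (arg2:arg3)
     * and the resulting position is written back through arg4 with
     * put_user_s64(); hosts without __NR_llseek fall back to plain lseek().
     */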
11137 #ifdef TARGET_NR__llseek /* Not on alpha */
11138     case TARGET_NR__llseek:
11139         {
11140             int64_t res;
11141 #if !defined(__NR_llseek)
11142             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11143             if (res == -1) {
11144                 ret = get_errno(res);
11145             } else {
11146                 ret = 0;
11147             }
11148 #else
11149             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11150 #endif
11151             if ((ret == 0) && put_user_s64(res, arg4)) {
11152                 return -TARGET_EFAULT;
11153             }
11154         }
11155         return ret;
11156 #endif
11157 #ifdef TARGET_NR_getdents
11158     case TARGET_NR_getdents:
11159         return do_getdents(arg1, arg2, arg3);
11160 #endif /* TARGET_NR_getdents */
11161 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11162     case TARGET_NR_getdents64:
11163         return do_getdents64(arg1, arg2, arg3);
11164 #endif /* TARGET_NR_getdents64 */
11165 #if defined(TARGET_NR__newselect)
11166     case TARGET_NR__newselect:
11167         return do_select(arg1, arg2, arg3, arg4, arg5);
11168 #endif
11169 #ifdef TARGET_NR_poll
11170     case TARGET_NR_poll:
11171         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11172 #endif
11173 #ifdef TARGET_NR_ppoll
11174     case TARGET_NR_ppoll:
11175         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11176 #endif
11177 #ifdef TARGET_NR_ppoll_time64
11178     case TARGET_NR_ppoll_time64:
11179         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11180 #endif
11181     case TARGET_NR_flock:
11182         /* NOTE: the flock constant seems to be the same for every
11183            Linux platform */
11184         return get_errno(safe_flock(arg1, arg2));
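    /*
     * readv/writev and the positioned variants below: lock_iovec() maps the
     * guest iovec array into host iovecs, validating each base/len pair; if
     * it fails, the host errno it set is converted and returned.
     */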
11185     case TARGET_NR_readv:
11186         {
11187             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11188             if (vec != NULL) {
11189                 ret = get_errno(safe_readv(arg1, vec, arg3));
11190                 unlock_iovec(vec, arg2, arg3, 1);
11191             } else {
11192                 ret = -host_to_target_errno(errno);
11193             }
11194         }
11195         return ret;
11196     case TARGET_NR_writev:
11197         {
11198             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11199             if (vec != NULL) {
11200                 ret = get_errno(safe_writev(arg1, vec, arg3));
11201                 unlock_iovec(vec, arg2, arg3, 0);
11202             } else {
11203                 ret = -host_to_target_errno(errno);
11204             }
11205         }
11206         return ret;
11207 #if defined(TARGET_NR_preadv)
11208     case TARGET_NR_preadv:
11209         {
11210             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11211             if (vec != NULL) {
11212                 unsigned long low, high;
11213 
11214                 target_to_host_low_high(arg4, arg5, &low, &high);
11215                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11216                 unlock_iovec(vec, arg2, arg3, 1);
11217             } else {
11218                 ret = -host_to_target_errno(errno);
11219             }
11220         }
11221         return ret;
11222 #endif
11223 #if defined(TARGET_NR_pwritev)
11224     case TARGET_NR_pwritev:
11225         {
11226             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11227             if (vec != NULL) {
11228                 unsigned long low, high;
11229 
11230                 target_to_host_low_high(arg4, arg5, &low, &high);
11231                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11232                 unlock_iovec(vec, arg2, arg3, 0);
11233             } else {
11234                 ret = -host_to_target_errno(errno);
11235             }
11236         }
11237         return ret;
11238 #endif
11239     case TARGET_NR_getsid:
11240         return get_errno(getsid(arg1));
11241 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11242     case TARGET_NR_fdatasync:
11243         return get_errno(fdatasync(arg1));
11244 #endif
11245     case TARGET_NR_sched_getaffinity:
11246         {
11247             unsigned int mask_size;
11248             unsigned long *mask;
11249 
11250             /*
11251              * sched_getaffinity needs multiples of ulong, so we need to take
11252              * care of mismatches between target ulong and host ulong sizes.
11253              */
11254             if (arg2 & (sizeof(abi_ulong) - 1)) {
11255                 return -TARGET_EINVAL;
11256             }
11257             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11258 
11259             mask = alloca(mask_size);
11260             memset(mask, 0, mask_size);
11261             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11262 
11263             if (!is_error(ret)) {
11264                 if (ret > arg2) {
11265                     /* More data was returned than will fit in the caller's buffer.
11266                      * This only happens if sizeof(abi_long) < sizeof(long)
11267                      * and the caller passed us a buffer holding an odd number
11268                      * of abi_longs. If the host kernel is actually using the
11269                      * extra 4 bytes then fail EINVAL; otherwise we can just
11270                      * ignore them and only copy the interesting part.
11271                      */
11272                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11273                     if (numcpus > arg2 * 8) {
11274                         return -TARGET_EINVAL;
11275                     }
11276                     ret = arg2;
11277                 }
11278 
11279                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11280                     return -TARGET_EFAULT;
11281                 }
11282             }
11283         }
11284         return ret;
11285     case TARGET_NR_sched_setaffinity:
11286         {
11287             unsigned int mask_size;
11288             unsigned long *mask;
11289 
11290             /*
11291              * sched_setaffinity needs multiples of ulong, so we need to take
11292              * care of mismatches between target ulong and host ulong sizes.
11293              */
11294             if (arg2 & (sizeof(abi_ulong) - 1)) {
11295                 return -TARGET_EINVAL;
11296             }
11297             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11298             mask = alloca(mask_size);
11299 
11300             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11301             if (ret) {
11302                 return ret;
11303             }
11304 
11305             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11306         }
11307     case TARGET_NR_getcpu:
11308         {
11309             unsigned cpuid, node;
11310             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11311                                        arg2 ? &node : NULL,
11312                                        NULL));
11313             if (is_error(ret)) {
11314                 return ret;
11315             }
11316             if (arg1 && put_user_u32(cpuid, arg1)) {
11317                 return -TARGET_EFAULT;
11318             }
11319             if (arg2 && put_user_u32(node, arg2)) {
11320                 return -TARGET_EFAULT;
11321             }
11322         }
11323         return ret;
11324     case TARGET_NR_sched_setparam:
11325         {
11326             struct target_sched_param *target_schp;
11327             struct sched_param schp;
11328 
11329             if (arg2 == 0) {
11330                 return -TARGET_EINVAL;
11331             }
11332             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11333                 return -TARGET_EFAULT;
11334             }
11335             schp.sched_priority = tswap32(target_schp->sched_priority);
11336             unlock_user_struct(target_schp, arg2, 0);
11337             return get_errno(sys_sched_setparam(arg1, &schp));
11338         }
11339     case TARGET_NR_sched_getparam:
11340         {
11341             struct target_sched_param *target_schp;
11342             struct sched_param schp;
11343 
11344             if (arg2 == 0) {
11345                 return -TARGET_EINVAL;
11346             }
11347             ret = get_errno(sys_sched_getparam(arg1, &schp));
11348             if (!is_error(ret)) {
11349                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11350                     return -TARGET_EFAULT;
11351                 }
11352                 target_schp->sched_priority = tswap32(schp.sched_priority);
11353                 unlock_user_struct(target_schp, arg2, 1);
11354             }
11355         }
11356         return ret;
11357     case TARGET_NR_sched_setscheduler:
11358         {
11359             struct target_sched_param *target_schp;
11360             struct sched_param schp;
11361             if (arg3 == 0) {
11362                 return -TARGET_EINVAL;
11363             }
11364             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11365                 return -TARGET_EFAULT;
11366             }
11367             schp.sched_priority = tswap32(target_schp->sched_priority);
11368             unlock_user_struct(target_schp, arg3, 0);
11369             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11370         }
11371     case TARGET_NR_sched_getscheduler:
11372         return get_errno(sys_sched_getscheduler(arg1));
11373     case TARGET_NR_sched_getattr:
11374         {
11375             struct target_sched_attr *target_scha;
11376             struct sched_attr scha;
11377             if (arg2 == 0) {
11378                 return -TARGET_EINVAL;
11379             }
11380             if (arg3 > sizeof(scha)) {
11381                 arg3 = sizeof(scha);
11382             }
11383             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11384             if (!is_error(ret)) {
11385                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11386                 if (!target_scha) {
11387                     return -TARGET_EFAULT;
11388                 }
11389                 target_scha->size = tswap32(scha.size);
11390                 target_scha->sched_policy = tswap32(scha.sched_policy);
11391                 target_scha->sched_flags = tswap64(scha.sched_flags);
11392                 target_scha->sched_nice = tswap32(scha.sched_nice);
11393                 target_scha->sched_priority = tswap32(scha.sched_priority);
11394                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11395                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11396                 target_scha->sched_period = tswap64(scha.sched_period);
11397                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11398                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11399                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11400                 }
11401                 unlock_user(target_scha, arg2, arg3);
11402             }
11403             return ret;
11404         }
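    /*
     * sched_setattr: the guest-supplied size field is validated first: zero
     * means the original struct layout, anything smaller than that minimum
     * gets E2BIG with the supported size written back, and any tail bytes
     * beyond struct target_sched_attr must be zero (check_zeroed_user) or
     * the same E2BIG treatment applies.
     */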
11405     case TARGET_NR_sched_setattr:
11406         {
11407             struct target_sched_attr *target_scha;
11408             struct sched_attr scha;
11409             uint32_t size;
11410             int zeroed;
11411             if (arg2 == 0) {
11412                 return -TARGET_EINVAL;
11413             }
11414             if (get_user_u32(size, arg2)) {
11415                 return -TARGET_EFAULT;
11416             }
11417             if (!size) {
11418                 size = offsetof(struct target_sched_attr, sched_util_min);
11419             }
11420             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11421                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11422                     return -TARGET_EFAULT;
11423                 }
11424                 return -TARGET_E2BIG;
11425             }
11426 
11427             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11428             if (zeroed < 0) {
11429                 return zeroed;
11430             } else if (zeroed == 0) {
11431                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11432                     return -TARGET_EFAULT;
11433                 }
11434                 return -TARGET_E2BIG;
11435             }
11436             if (size > sizeof(struct target_sched_attr)) {
11437                 size = sizeof(struct target_sched_attr);
11438             }
11439 
11440             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11441             if (!target_scha) {
11442                 return -TARGET_EFAULT;
11443             }
11444             scha.size = size;
11445             scha.sched_policy = tswap32(target_scha->sched_policy);
11446             scha.sched_flags = tswap64(target_scha->sched_flags);
11447             scha.sched_nice = tswap32(target_scha->sched_nice);
11448             scha.sched_priority = tswap32(target_scha->sched_priority);
11449             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11450             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11451             scha.sched_period = tswap64(target_scha->sched_period);
11452             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11453                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11454                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11455             }
11456             unlock_user(target_scha, arg2, 0);
11457             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11458         }
11459     case TARGET_NR_sched_yield:
11460         return get_errno(sched_yield());
11461     case TARGET_NR_sched_get_priority_max:
11462         return get_errno(sched_get_priority_max(arg1));
11463     case TARGET_NR_sched_get_priority_min:
11464         return get_errno(sched_get_priority_min(arg1));
11465 #ifdef TARGET_NR_sched_rr_get_interval
11466     case TARGET_NR_sched_rr_get_interval:
11467         {
11468             struct timespec ts;
11469             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11470             if (!is_error(ret)) {
11471                 ret = host_to_target_timespec(arg2, &ts);
11472             }
11473         }
11474         return ret;
11475 #endif
11476 #ifdef TARGET_NR_sched_rr_get_interval_time64
11477     case TARGET_NR_sched_rr_get_interval_time64:
11478         {
11479             struct timespec ts;
11480             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11481             if (!is_error(ret)) {
11482                 ret = host_to_target_timespec64(arg2, &ts);
11483             }
11484         }
11485         return ret;
11486 #endif
11487 #if defined(TARGET_NR_nanosleep)
11488     case TARGET_NR_nanosleep:
11489         {
11490             struct timespec req, rem;
11491             target_to_host_timespec(&req, arg1);
11492             ret = get_errno(safe_nanosleep(&req, &rem));
11493             if (is_error(ret) && arg2) {
11494                 host_to_target_timespec(arg2, &rem);
11495             }
11496         }
11497         return ret;
11498 #endif
11499     case TARGET_NR_prctl:
11500         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11502 #ifdef TARGET_NR_arch_prctl
11503     case TARGET_NR_arch_prctl:
11504         return do_arch_prctl(cpu_env, arg1, arg2);
11505 #endif
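    /*
     * pread64/pwrite64: on targets that pass 64-bit values in aligned
     * register pairs, regpairs_aligned() shifts the offset halves up by one
     * argument slot before target_offset64() reassembles them.
     */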
11506 #ifdef TARGET_NR_pread64
11507     case TARGET_NR_pread64:
11508         if (regpairs_aligned(cpu_env, num)) {
11509             arg4 = arg5;
11510             arg5 = arg6;
11511         }
11512         if (arg2 == 0 && arg3 == 0) {
11513             /* Special-case NULL buffer and zero length, which should succeed */
11514             p = 0;
11515         } else {
11516             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11517             if (!p) {
11518                 return -TARGET_EFAULT;
11519             }
11520         }
11521         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11522         unlock_user(p, arg2, ret);
11523         return ret;
11524     case TARGET_NR_pwrite64:
11525         if (regpairs_aligned(cpu_env, num)) {
11526             arg4 = arg5;
11527             arg5 = arg6;
11528         }
11529         if (arg2 == 0 && arg3 == 0) {
11530             /* Special-case NULL buffer and zero length, which should succeed */
11531             p = 0;
11532         } else {
11533             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11534             if (!p) {
11535                 return -TARGET_EFAULT;
11536             }
11537         }
11538         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11539         unlock_user(p, arg2, 0);
11540         return ret;
11541 #endif
11542     case TARGET_NR_getcwd:
11543         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11544             return -TARGET_EFAULT;
11545         ret = get_errno(sys_getcwd1(p, arg2));
11546         unlock_user(p, arg1, ret);
11547         return ret;
11548     case TARGET_NR_capget:
11549     case TARGET_NR_capset:
11550     {
11551         struct target_user_cap_header *target_header;
11552         struct target_user_cap_data *target_data = NULL;
11553         struct __user_cap_header_struct header;
11554         struct __user_cap_data_struct data[2];
11555         struct __user_cap_data_struct *dataptr = NULL;
11556         int i, target_datalen;
11557         int data_items = 1;
11558 
11559         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11560             return -TARGET_EFAULT;
11561         }
11562         header.version = tswap32(target_header->version);
11563         header.pid = tswap32(target_header->pid);
11564 
11565         if (header.version != _LINUX_CAPABILITY_VERSION) {
11566             /* Versions 2 and up take a pointer to two user_data structs */
11567             data_items = 2;
11568         }
11569 
11570         target_datalen = sizeof(*target_data) * data_items;
11571 
11572         if (arg2) {
11573             if (num == TARGET_NR_capget) {
11574                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11575             } else {
11576                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11577             }
11578             if (!target_data) {
11579                 unlock_user_struct(target_header, arg1, 0);
11580                 return -TARGET_EFAULT;
11581             }
11582 
11583             if (num == TARGET_NR_capset) {
11584                 for (i = 0; i < data_items; i++) {
11585                     data[i].effective = tswap32(target_data[i].effective);
11586                     data[i].permitted = tswap32(target_data[i].permitted);
11587                     data[i].inheritable = tswap32(target_data[i].inheritable);
11588                 }
11589             }
11590 
11591             dataptr = data;
11592         }
11593 
11594         if (num == TARGET_NR_capget) {
11595             ret = get_errno(capget(&header, dataptr));
11596         } else {
11597             ret = get_errno(capset(&header, dataptr));
11598         }
11599 
11600         /* The kernel always updates version for both capget and capset */
11601         target_header->version = tswap32(header.version);
11602         unlock_user_struct(target_header, arg1, 1);
11603 
11604         if (arg2) {
11605             if (num == TARGET_NR_capget) {
11606                 for (i = 0; i < data_items; i++) {
11607                     target_data[i].effective = tswap32(data[i].effective);
11608                     target_data[i].permitted = tswap32(data[i].permitted);
11609                     target_data[i].inheritable = tswap32(data[i].inheritable);
11610                 }
11611                 unlock_user(target_data, arg2, target_datalen);
11612             } else {
11613                 unlock_user(target_data, arg2, 0);
11614             }
11615         }
11616         return ret;
11617     }
11618     case TARGET_NR_sigaltstack:
11619         return do_sigaltstack(arg1, arg2, cpu_env);
11620 
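    /*
     * sendfile and sendfile64 differ only in how the optional offset is
     * read from and written back to guest memory: an abi_long for sendfile,
     * a 64-bit value for sendfile64.
     */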
11621 #ifdef CONFIG_SENDFILE
11622 #ifdef TARGET_NR_sendfile
11623     case TARGET_NR_sendfile:
11624     {
11625         off_t *offp = NULL;
11626         off_t off;
11627         if (arg3) {
11628             ret = get_user_sal(off, arg3);
11629             if (is_error(ret)) {
11630                 return ret;
11631             }
11632             offp = &off;
11633         }
11634         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11635         if (!is_error(ret) && arg3) {
11636             abi_long ret2 = put_user_sal(off, arg3);
11637             if (is_error(ret2)) {
11638                 ret = ret2;
11639             }
11640         }
11641         return ret;
11642     }
11643 #endif
11644 #ifdef TARGET_NR_sendfile64
11645     case TARGET_NR_sendfile64:
11646     {
11647         off_t *offp = NULL;
11648         off_t off;
11649         if (arg3) {
11650             ret = get_user_s64(off, arg3);
11651             if (is_error(ret)) {
11652                 return ret;
11653             }
11654             offp = &off;
11655         }
11656         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11657         if (!is_error(ret) && arg3) {
11658             abi_long ret2 = put_user_s64(off, arg3);
11659             if (is_error(ret2)) {
11660                 ret = ret2;
11661             }
11662         }
11663         return ret;
11664     }
11665 #endif
11666 #endif
11667 #ifdef TARGET_NR_vfork
11668     case TARGET_NR_vfork:
11669         return get_errno(do_fork(cpu_env,
11670                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11671                          0, 0, 0, 0));
11672 #endif
11673 #ifdef TARGET_NR_ugetrlimit
11674     case TARGET_NR_ugetrlimit:
11675     {
11676         struct rlimit rlim;
11677         int resource = target_to_host_resource(arg1);
11678         ret = get_errno(getrlimit(resource, &rlim));
11679         if (!is_error(ret)) {
11680             struct target_rlimit *target_rlim;
11681             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11682                 return -TARGET_EFAULT;
11683             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11684             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11685             unlock_user_struct(target_rlim, arg2, 1);
11686         }
11687         return ret;
11688     }
11689 #endif
11690 #ifdef TARGET_NR_truncate64
11691     case TARGET_NR_truncate64:
11692         if (!(p = lock_user_string(arg1)))
11693             return -TARGET_EFAULT;
11694         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11695         unlock_user(p, arg1, 0);
11696         return ret;
11697 #endif
11698 #ifdef TARGET_NR_ftruncate64
11699     case TARGET_NR_ftruncate64:
11700         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11701 #endif
11702 #ifdef TARGET_NR_stat64
11703     case TARGET_NR_stat64:
11704         if (!(p = lock_user_string(arg1))) {
11705             return -TARGET_EFAULT;
11706         }
11707         ret = get_errno(stat(path(p), &st));
11708         unlock_user(p, arg1, 0);
11709         if (!is_error(ret))
11710             ret = host_to_target_stat64(cpu_env, arg2, &st);
11711         return ret;
11712 #endif
11713 #ifdef TARGET_NR_lstat64
11714     case TARGET_NR_lstat64:
11715         if (!(p = lock_user_string(arg1))) {
11716             return -TARGET_EFAULT;
11717         }
11718         ret = get_errno(lstat(path(p), &st));
11719         unlock_user(p, arg1, 0);
11720         if (!is_error(ret))
11721             ret = host_to_target_stat64(cpu_env, arg2, &st);
11722         return ret;
11723 #endif
11724 #ifdef TARGET_NR_fstat64
11725     case TARGET_NR_fstat64:
11726         ret = get_errno(fstat(arg1, &st));
11727         if (!is_error(ret))
11728             ret = host_to_target_stat64(cpu_env, arg2, &st);
11729         return ret;
11730 #endif
11731 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11732 #ifdef TARGET_NR_fstatat64
11733     case TARGET_NR_fstatat64:
11734 #endif
11735 #ifdef TARGET_NR_newfstatat
11736     case TARGET_NR_newfstatat:
11737 #endif
11738         if (!(p = lock_user_string(arg2))) {
11739             return -TARGET_EFAULT;
11740         }
11741         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11742         unlock_user(p, arg2, 0);
11743         if (!is_error(ret))
11744             ret = host_to_target_stat64(cpu_env, arg3, &st);
11745         return ret;
11746 #endif
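    /*
     * statx: prefer the host statx syscall when available; if the host
     * kernel reports ENOSYS, fall back to fstatat() and synthesize a
     * target_statx with only the basic stat fields filled in.
     */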
11747 #if defined(TARGET_NR_statx)
11748     case TARGET_NR_statx:
11749         {
11750             struct target_statx *target_stx;
11751             int dirfd = arg1;
11752             int flags = arg3;
11753 
11754             p = lock_user_string(arg2);
11755             if (p == NULL) {
11756                 return -TARGET_EFAULT;
11757             }
11758 #if defined(__NR_statx)
11759             {
11760                 /*
11761                  * It is assumed that struct statx is architecture independent.
11762                  */
11763                 struct target_statx host_stx;
11764                 int mask = arg4;
11765 
11766                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11767                 if (!is_error(ret)) {
11768                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11769                         unlock_user(p, arg2, 0);
11770                         return -TARGET_EFAULT;
11771                     }
11772                 }
11773 
11774                 if (ret != -TARGET_ENOSYS) {
11775                     unlock_user(p, arg2, 0);
11776                     return ret;
11777                 }
11778             }
11779 #endif
11780             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11781             unlock_user(p, arg2, 0);
11782 
11783             if (!is_error(ret)) {
11784                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11785                     return -TARGET_EFAULT;
11786                 }
11787                 memset(target_stx, 0, sizeof(*target_stx));
11788                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11789                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11790                 __put_user(st.st_ino, &target_stx->stx_ino);
11791                 __put_user(st.st_mode, &target_stx->stx_mode);
11792                 __put_user(st.st_uid, &target_stx->stx_uid);
11793                 __put_user(st.st_gid, &target_stx->stx_gid);
11794                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11795                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11796                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11797                 __put_user(st.st_size, &target_stx->stx_size);
11798                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11799                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11800                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11801                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11802                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11803                 unlock_user_struct(target_stx, arg5, 1);
11804             }
11805         }
11806         return ret;
11807 #endif
11808 #ifdef TARGET_NR_lchown
11809     case TARGET_NR_lchown:
11810         if (!(p = lock_user_string(arg1)))
11811             return -TARGET_EFAULT;
11812         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11813         unlock_user(p, arg1, 0);
11814         return ret;
11815 #endif
11816 #ifdef TARGET_NR_getuid
11817     case TARGET_NR_getuid:
11818         return get_errno(high2lowuid(getuid()));
11819 #endif
11820 #ifdef TARGET_NR_getgid
11821     case TARGET_NR_getgid:
11822         return get_errno(high2lowgid(getgid()));
11823 #endif
11824 #ifdef TARGET_NR_geteuid
11825     case TARGET_NR_geteuid:
11826         return get_errno(high2lowuid(geteuid()));
11827 #endif
11828 #ifdef TARGET_NR_getegid
11829     case TARGET_NR_getegid:
11830         return get_errno(high2lowgid(getegid()));
11831 #endif
11832     case TARGET_NR_setreuid:
11833         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11834     case TARGET_NR_setregid:
11835         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11836     case TARGET_NR_getgroups:
11837         { /* the same code as for TARGET_NR_getgroups32 */
11838             int gidsetsize = arg1;
11839             target_id *target_grouplist;
11840             g_autofree gid_t *grouplist = NULL;
11841             int i;
11842 
11843             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11844                 return -TARGET_EINVAL;
11845             }
11846             if (gidsetsize > 0) {
11847                 grouplist = g_try_new(gid_t, gidsetsize);
11848                 if (!grouplist) {
11849                     return -TARGET_ENOMEM;
11850                 }
11851             }
11852             ret = get_errno(getgroups(gidsetsize, grouplist));
11853             if (!is_error(ret) && gidsetsize > 0) {
11854                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11855                                              gidsetsize * sizeof(target_id), 0);
11856                 if (!target_grouplist) {
11857                     return -TARGET_EFAULT;
11858                 }
11859                 for (i = 0; i < ret; i++) {
11860                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11861                 }
11862                 unlock_user(target_grouplist, arg2,
11863                             gidsetsize * sizeof(target_id));
11864             }
11865             return ret;
11866         }
11867     case TARGET_NR_setgroups:
11868         { /* the same code as for TARGET_NR_setgroups32 */
11869             int gidsetsize = arg1;
11870             target_id *target_grouplist;
11871             g_autofree gid_t *grouplist = NULL;
11872             int i;
11873 
11874             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11875                 return -TARGET_EINVAL;
11876             }
11877             if (gidsetsize > 0) {
11878                 grouplist = g_try_new(gid_t, gidsetsize);
11879                 if (!grouplist) {
11880                     return -TARGET_ENOMEM;
11881                 }
11882                 target_grouplist = lock_user(VERIFY_READ, arg2,
11883                                              gidsetsize * sizeof(target_id), 1);
11884                 if (!target_grouplist) {
11885                     return -TARGET_EFAULT;
11886                 }
11887                 for (i = 0; i < gidsetsize; i++) {
11888                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11889                 }
11890                 unlock_user(target_grouplist, arg2,
11891                             gidsetsize * sizeof(target_id));
11892             }
11893             return get_errno(setgroups(gidsetsize, grouplist));
11894         }
11895     case TARGET_NR_fchown:
11896         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11897 #if defined(TARGET_NR_fchownat)
11898     case TARGET_NR_fchownat:
11899         if (!(p = lock_user_string(arg2)))
11900             return -TARGET_EFAULT;
11901         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11902                                  low2highgid(arg4), arg5));
11903         unlock_user(p, arg2, 0);
11904         return ret;
11905 #endif
11906 #ifdef TARGET_NR_setresuid
11907     case TARGET_NR_setresuid:
11908         return get_errno(sys_setresuid(low2highuid(arg1),
11909                                        low2highuid(arg2),
11910                                        low2highuid(arg3)));
11911 #endif
11912 #ifdef TARGET_NR_getresuid
11913     case TARGET_NR_getresuid:
11914         {
11915             uid_t ruid, euid, suid;
11916             ret = get_errno(getresuid(&ruid, &euid, &suid));
11917             if (!is_error(ret)) {
11918                 if (put_user_id(high2lowuid(ruid), arg1)
11919                     || put_user_id(high2lowuid(euid), arg2)
11920                     || put_user_id(high2lowuid(suid), arg3))
11921                     return -TARGET_EFAULT;
11922             }
11923         }
11924         return ret;
11925 #endif
11926 #ifdef TARGET_NR_setresgid
11927     case TARGET_NR_setresgid:
11928         return get_errno(sys_setresgid(low2highgid(arg1),
11929                                        low2highgid(arg2),
11930                                        low2highgid(arg3)));
11931 #endif
11932 #ifdef TARGET_NR_getresgid
11933     case TARGET_NR_getresgid:
11934         {
11935             gid_t rgid, egid, sgid;
11936             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11937             if (!is_error(ret)) {
11938                 if (put_user_id(high2lowgid(rgid), arg1)
11939                     || put_user_id(high2lowgid(egid), arg2)
11940                     || put_user_id(high2lowgid(sgid), arg3))
11941                     return -TARGET_EFAULT;
11942             }
11943         }
11944         return ret;
11945 #endif
11946 #ifdef TARGET_NR_chown
11947     case TARGET_NR_chown:
11948         if (!(p = lock_user_string(arg1)))
11949             return -TARGET_EFAULT;
11950         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11951         unlock_user(p, arg1, 0);
11952         return ret;
11953 #endif
11954     case TARGET_NR_setuid:
11955         return get_errno(sys_setuid(low2highuid(arg1)));
11956     case TARGET_NR_setgid:
11957         return get_errno(sys_setgid(low2highgid(arg1)));
11958     case TARGET_NR_setfsuid:
11959         return get_errno(setfsuid(arg1));
11960     case TARGET_NR_setfsgid:
11961         return get_errno(setfsgid(arg1));
11962 
11963 #ifdef TARGET_NR_lchown32
11964     case TARGET_NR_lchown32:
11965         if (!(p = lock_user_string(arg1)))
11966             return -TARGET_EFAULT;
11967         ret = get_errno(lchown(p, arg2, arg3));
11968         unlock_user(p, arg1, 0);
11969         return ret;
11970 #endif
11971 #ifdef TARGET_NR_getuid32
11972     case TARGET_NR_getuid32:
11973         return get_errno(getuid());
11974 #endif
11975 
11976 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11977     /* Alpha specific */
11978     case TARGET_NR_getxuid:
11979         {
11980             uid_t euid;
11981             euid = geteuid();
11982             cpu_env->ir[IR_A4] = euid;
11983         }
11984         return get_errno(getuid());
11985 #endif
11986 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11987     /* Alpha specific */
11988     case TARGET_NR_getxgid:
11989         {
11990             gid_t egid;
11991             egid = getegid();
11992             cpu_env->ir[IR_A4] = egid;
11993         }
11994         return get_errno(getgid());
11995 #endif
11996 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11997     /* Alpha specific */
11998     case TARGET_NR_osf_getsysinfo:
11999         ret = -TARGET_EOPNOTSUPP;
12000         switch (arg1) {
12001           case TARGET_GSI_IEEE_FP_CONTROL:
12002             {
12003                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12004                 uint64_t swcr = cpu_env->swcr;
12005 
12006                 swcr &= ~SWCR_STATUS_MASK;
12007                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12008 
12009                 if (put_user_u64(swcr, arg2))
12010                     return -TARGET_EFAULT;
12011                 ret = 0;
12012             }
12013             break;
12014 
12015           /* case GSI_IEEE_STATE_AT_SIGNAL:
12016              -- Not implemented in linux kernel.
12017              case GSI_UACPROC:
12018              -- Retrieves current unaligned access state; not much used.
12019              case GSI_PROC_TYPE:
12020              -- Retrieves implver information; surely not used.
12021              case GSI_GET_HWRPB:
12022              -- Grabs a copy of the HWRPB; surely not used.
12023           */
12024         }
12025         return ret;
12026 #endif
12027 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12028     /* Alpha specific */
12029     case TARGET_NR_osf_setsysinfo:
12030         ret = -TARGET_EOPNOTSUPP;
12031         switch (arg1) {
12032           case TARGET_SSI_IEEE_FP_CONTROL:
12033             {
12034                 uint64_t swcr, fpcr;
12035 
12036                 if (get_user_u64(swcr, arg2)) {
12037                     return -TARGET_EFAULT;
12038                 }
12039 
12040                 /*
12041                  * The kernel calls swcr_update_status to update the
12042                  * status bits from the fpcr at every point that it
12043                  * could be queried.  Therefore, we store the status
12044                  * bits only in FPCR.
12045                  */
12046                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12047 
12048                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12049                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12050                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12051                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12052                 ret = 0;
12053             }
12054             break;
12055 
12056           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12057             {
12058                 uint64_t exc, fpcr, fex;
12059 
12060                 if (get_user_u64(exc, arg2)) {
12061                     return -TARGET_EFAULT;
12062                 }
12063                 exc &= SWCR_STATUS_MASK;
12064                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12065 
12066                 /* Old exceptions are not signaled.  */
12067                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12068                 fex = exc & ~fex;
12069                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12070                 fex &= (cpu_env)->swcr;
12071 
12072                 /* Update the hardware fpcr.  */
12073                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12074                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12075 
12076                 if (fex) {
12077                     int si_code = TARGET_FPE_FLTUNK;
12078                     target_siginfo_t info;
12079 
12080                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12081                         si_code = TARGET_FPE_FLTUND;
12082                     }
12083                     if (fex & SWCR_TRAP_ENABLE_INE) {
12084                         si_code = TARGET_FPE_FLTRES;
12085                     }
12086                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12087                         si_code = TARGET_FPE_FLTUND;
12088                     }
12089                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12090                         si_code = TARGET_FPE_FLTOVF;
12091                     }
12092                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12093                         si_code = TARGET_FPE_FLTDIV;
12094                     }
12095                     if (fex & SWCR_TRAP_ENABLE_INV) {
12096                         si_code = TARGET_FPE_FLTINV;
12097                     }
12098 
12099                     info.si_signo = SIGFPE;
12100                     info.si_errno = 0;
12101                     info.si_code = si_code;
12102                     info._sifields._sigfault._addr = (cpu_env)->pc;
12103                     queue_signal(cpu_env, info.si_signo,
12104                                  QEMU_SI_FAULT, &info);
12105                 }
12106                 ret = 0;
12107             }
12108             break;
12109 
12110           /* case SSI_NVPAIRS:
12111              -- Used with SSIN_UACPROC to enable unaligned accesses.
12112              case SSI_IEEE_STATE_AT_SIGNAL:
12113              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12114              -- Not implemented in linux kernel
12115           */
12116         }
12117         return ret;
12118 #endif
12119 #ifdef TARGET_NR_osf_sigprocmask
12120     /* Alpha specific.  */
12121     case TARGET_NR_osf_sigprocmask:
12122         {
12123             abi_ulong mask;
12124             int how;
12125             sigset_t set, oldset;
12126 
12127             switch (arg1) {
12128             case TARGET_SIG_BLOCK:
12129                 how = SIG_BLOCK;
12130                 break;
12131             case TARGET_SIG_UNBLOCK:
12132                 how = SIG_UNBLOCK;
12133                 break;
12134             case TARGET_SIG_SETMASK:
12135                 how = SIG_SETMASK;
12136                 break;
12137             default:
12138                 return -TARGET_EINVAL;
12139             }
12140             mask = arg2;
12141             target_to_host_old_sigset(&set, &mask);
12142             ret = do_sigprocmask(how, &set, &oldset);
12143             if (!ret) {
12144                 host_to_target_old_sigset(&mask, &oldset);
12145                 ret = mask;
12146             }
12147         }
12148         return ret;
12149 #endif
12150 
12151 #ifdef TARGET_NR_getgid32
12152     case TARGET_NR_getgid32:
12153         return get_errno(getgid());
12154 #endif
12155 #ifdef TARGET_NR_geteuid32
12156     case TARGET_NR_geteuid32:
12157         return get_errno(geteuid());
12158 #endif
12159 #ifdef TARGET_NR_getegid32
12160     case TARGET_NR_getegid32:
12161         return get_errno(getegid());
12162 #endif
12163 #ifdef TARGET_NR_setreuid32
12164     case TARGET_NR_setreuid32:
12165         return get_errno(setreuid(arg1, arg2));
12166 #endif
12167 #ifdef TARGET_NR_setregid32
12168     case TARGET_NR_setregid32:
12169         return get_errno(setregid(arg1, arg2));
12170 #endif
12171 #ifdef TARGET_NR_getgroups32
12172     case TARGET_NR_getgroups32:
12173         { /* the same code as for TARGET_NR_getgroups */
12174             int gidsetsize = arg1;
12175             uint32_t *target_grouplist;
12176             g_autofree gid_t *grouplist = NULL;
12177             int i;
12178 
12179             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12180                 return -TARGET_EINVAL;
12181             }
12182             if (gidsetsize > 0) {
12183                 grouplist = g_try_new(gid_t, gidsetsize);
12184                 if (!grouplist) {
12185                     return -TARGET_ENOMEM;
12186                 }
12187             }
12188             ret = get_errno(getgroups(gidsetsize, grouplist));
12189             if (!is_error(ret) && gidsetsize > 0) {
12190                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12191                                              gidsetsize * 4, 0);
12192                 if (!target_grouplist) {
12193                     return -TARGET_EFAULT;
12194                 }
12195                 for (i = 0; i < ret; i++) {
12196                     target_grouplist[i] = tswap32(grouplist[i]);
12197                 }
12198                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12199             }
12200             return ret;
12201         }
12202 #endif
12203 #ifdef TARGET_NR_setgroups32
12204     case TARGET_NR_setgroups32:
12205         { /* the same code as for TARGET_NR_setgroups */
12206             int gidsetsize = arg1;
12207             uint32_t *target_grouplist;
12208             g_autofree gid_t *grouplist = NULL;
12209             int i;
12210 
12211             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12212                 return -TARGET_EINVAL;
12213             }
12214             if (gidsetsize > 0) {
12215                 grouplist = g_try_new(gid_t, gidsetsize);
12216                 if (!grouplist) {
12217                     return -TARGET_ENOMEM;
12218                 }
12219                 target_grouplist = lock_user(VERIFY_READ, arg2,
12220                                              gidsetsize * 4, 1);
12221                 if (!target_grouplist) {
12222                     return -TARGET_EFAULT;
12223                 }
12224                 for (i = 0; i < gidsetsize; i++) {
12225                     grouplist[i] = tswap32(target_grouplist[i]);
12226                 }
12227                 unlock_user(target_grouplist, arg2, 0);
12228             }
12229             return get_errno(setgroups(gidsetsize, grouplist));
12230         }
12231 #endif
12232 #ifdef TARGET_NR_fchown32
12233     case TARGET_NR_fchown32:
12234         return get_errno(fchown(arg1, arg2, arg3));
12235 #endif
12236 #ifdef TARGET_NR_setresuid32
12237     case TARGET_NR_setresuid32:
12238         return get_errno(sys_setresuid(arg1, arg2, arg3));
12239 #endif
12240 #ifdef TARGET_NR_getresuid32
12241     case TARGET_NR_getresuid32:
12242         {
12243             uid_t ruid, euid, suid;
12244             ret = get_errno(getresuid(&ruid, &euid, &suid));
12245             if (!is_error(ret)) {
12246                 if (put_user_u32(ruid, arg1)
12247                     || put_user_u32(euid, arg2)
12248                     || put_user_u32(suid, arg3))
12249                     return -TARGET_EFAULT;
12250             }
12251         }
12252         return ret;
12253 #endif
12254 #ifdef TARGET_NR_setresgid32
12255     case TARGET_NR_setresgid32:
12256         return get_errno(sys_setresgid(arg1, arg2, arg3));
12257 #endif
12258 #ifdef TARGET_NR_getresgid32
12259     case TARGET_NR_getresgid32:
12260         {
12261             gid_t rgid, egid, sgid;
12262             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12263             if (!is_error(ret)) {
12264                 if (put_user_u32(rgid, arg1)
12265                     || put_user_u32(egid, arg2)
12266                     || put_user_u32(sgid, arg3))
12267                     return -TARGET_EFAULT;
12268             }
12269         }
12270         return ret;
12271 #endif
12272 #ifdef TARGET_NR_chown32
12273     case TARGET_NR_chown32:
12274         if (!(p = lock_user_string(arg1)))
12275             return -TARGET_EFAULT;
12276         ret = get_errno(chown(p, arg2, arg3));
12277         unlock_user(p, arg1, 0);
12278         return ret;
12279 #endif
12280 #ifdef TARGET_NR_setuid32
12281     case TARGET_NR_setuid32:
12282         return get_errno(sys_setuid(arg1));
12283 #endif
12284 #ifdef TARGET_NR_setgid32
12285     case TARGET_NR_setgid32:
12286         return get_errno(sys_setgid(arg1));
12287 #endif
12288 #ifdef TARGET_NR_setfsuid32
12289     case TARGET_NR_setfsuid32:
12290         return get_errno(setfsuid(arg1));
12291 #endif
12292 #ifdef TARGET_NR_setfsgid32
12293     case TARGET_NR_setfsgid32:
12294         return get_errno(setfsgid(arg1));
12295 #endif
12296 #ifdef TARGET_NR_mincore
12297     case TARGET_NR_mincore:
12298         {
12299             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12300             if (!a) {
12301                 return -TARGET_ENOMEM;
12302             }
12303             p = lock_user_string(arg3);
12304             if (!p) {
12305                 ret = -TARGET_EFAULT;
12306             } else {
12307                 ret = get_errno(mincore(a, arg2, p));
12308                 unlock_user(p, arg3, ret);
12309             }
12310             unlock_user(a, arg1, 0);
12311         }
12312         return ret;
12313 #endif
12314 #ifdef TARGET_NR_arm_fadvise64_64
12315     case TARGET_NR_arm_fadvise64_64:
12316         /* arm_fadvise64_64 looks like fadvise64_64 but
12317          * with different argument order: fd, advice, offset, len
12318          * rather than the usual fd, offset, len, advice.
12319          * Note that offset and len are both 64-bit so appear as
12320          * pairs of 32-bit registers.
12321          */
12322         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12323                             target_offset64(arg5, arg6), arg2);
12324         return -host_to_target_errno(ret);
12325 #endif
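/*
 * Illustrative sketch only, not the helper actually defined elsewhere in
 * this file: on 32-bit ABIs a 64-bit file offset arrives as two 32-bit
 * register halves, and target_offset64() is assumed to recombine them
 * according to the guest's word order, roughly:
 *
 *     static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
 *     {
 *     #if TARGET_BIG_ENDIAN
 *         return ((uint64_t)word0 << 32) | word1;
 *     #else
 *         return ((uint64_t)word1 << 32) | word0;
 *     #endif
 *     }
 */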
12326 
12327 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12328 
12329 #ifdef TARGET_NR_fadvise64_64
12330     case TARGET_NR_fadvise64_64:
12331 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12332         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12333         ret = arg2;
12334         arg2 = arg3;
12335         arg3 = arg4;
12336         arg4 = arg5;
12337         arg5 = arg6;
12338         arg6 = ret;
12339 #else
12340         /* 6 args: fd, offset (high, low), len (high, low), advice */
12341         if (regpairs_aligned(cpu_env, num)) {
12342             /* offset is in (3,4), len in (5,6) and advice in 7 */
12343             arg2 = arg3;
12344             arg3 = arg4;
12345             arg4 = arg5;
12346             arg5 = arg6;
12347             arg6 = arg7;
12348         }
12349 #endif
12350         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12351                             target_offset64(arg4, arg5), arg6);
12352         return -host_to_target_errno(ret);
12353 #endif
12354 
12355 #ifdef TARGET_NR_fadvise64
12356     case TARGET_NR_fadvise64:
12357         /* 5 args: fd, offset (high, low), len, advice */
12358         if (regpairs_aligned(cpu_env, num)) {
12359             /* offset is in (3,4), len in 5 and advice in 6 */
12360             arg2 = arg3;
12361             arg3 = arg4;
12362             arg4 = arg5;
12363             arg5 = arg6;
12364         }
12365         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12366         return -host_to_target_errno(ret);
12367 #endif
12368 
12369 #else /* not a 32-bit ABI */
12370 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12371 #ifdef TARGET_NR_fadvise64_64
12372     case TARGET_NR_fadvise64_64:
12373 #endif
12374 #ifdef TARGET_NR_fadvise64
12375     case TARGET_NR_fadvise64:
12376 #endif
12377 #ifdef TARGET_S390X
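        /*
         * The s390 kernel ABI defines POSIX_FADV_DONTNEED/NOREUSE as 6/7
         * instead of the generic 4/5, so translate the guest's 6/7 to the
         * host constants and turn 4/5 (which are not valid advice values
         * for an s390 guest) into values the host will reject.
         */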
12378         switch (arg4) {
12379         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12380         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12381         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12382         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12383         default: break;
12384         }
12385 #endif
12386         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12387 #endif
12388 #endif /* end of 64-bit ABI fadvise handling */
12389 
12390 #ifdef TARGET_NR_madvise
12391     case TARGET_NR_madvise:
12392         return target_madvise(arg1, arg2, arg3);
12393 #endif
12394 #ifdef TARGET_NR_fcntl64
12395     case TARGET_NR_fcntl64:
12396     {
12397         int cmd;
12398         struct flock64 fl;
12399         from_flock64_fn *copyfrom = copy_from_user_flock64;
12400         to_flock64_fn *copyto = copy_to_user_flock64;
12401 
12402 #ifdef TARGET_ARM
12403         if (!cpu_env->eabi) {
12404             copyfrom = copy_from_user_oabi_flock64;
12405             copyto = copy_to_user_oabi_flock64;
12406         }
12407 #endif
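        /*
         * The old ARM OABI aligns 64-bit members on 4 bytes rather than 8,
         * so the guest's struct flock64 has a different layout there; the
         * *_oabi_* copy helpers selected above are assumed to account for
         * that difference.
         */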
12408 
12409         cmd = target_to_host_fcntl_cmd(arg2);
12410         if (cmd == -TARGET_EINVAL) {
12411             return cmd;
12412         }
12413 
12414         switch (arg2) {
12415         case TARGET_F_GETLK64:
12416             ret = copyfrom(&fl, arg3);
12417             if (ret) {
12418                 break;
12419             }
12420             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12421             if (ret == 0) {
12422                 ret = copyto(arg3, &fl);
12423             }
12424             break;
12425 
12426         case TARGET_F_SETLK64:
12427         case TARGET_F_SETLKW64:
12428             ret = copyfrom(&fl, arg3);
12429             if (ret) {
12430                 break;
12431             }
12432             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12433             break;
12434         default:
12435             ret = do_fcntl(arg1, arg2, arg3);
12436             break;
12437         }
12438         return ret;
12439     }
12440 #endif
12441 #ifdef TARGET_NR_cacheflush
12442     case TARGET_NR_cacheflush:
12443         /* self-modifying code is handled automatically, so nothing needed */
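        /*
         * TCG invalidates the affected translation blocks whenever a guest
         * code page is written, so no explicit flush is required here.
         */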
12444         return 0;
12445 #endif
12446 #ifdef TARGET_NR_getpagesize
12447     case TARGET_NR_getpagesize:
12448         return TARGET_PAGE_SIZE;
12449 #endif
12450     case TARGET_NR_gettid:
12451         return get_errno(sys_gettid());
12452 #ifdef TARGET_NR_readahead
12453     case TARGET_NR_readahead:
12454 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12455         if (regpairs_aligned(cpu_env, num)) {
12456             arg2 = arg3;
12457             arg3 = arg4;
12458             arg4 = arg5;
12459         }
12460         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12461 #else
12462         ret = get_errno(readahead(arg1, arg2, arg3));
12463 #endif
12464         return ret;
12465 #endif
12466 #ifdef CONFIG_ATTR
12467 #ifdef TARGET_NR_setxattr
12468     case TARGET_NR_listxattr:
12469     case TARGET_NR_llistxattr:
12470     {
12471         void *b = 0;
12472         if (arg2) {
12473             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12474             if (!b) {
12475                 return -TARGET_EFAULT;
12476             }
12477         }
12478         p = lock_user_string(arg1);
12479         if (p) {
12480             if (num == TARGET_NR_listxattr) {
12481                 ret = get_errno(listxattr(p, b, arg3));
12482             } else {
12483                 ret = get_errno(llistxattr(p, b, arg3));
12484             }
12485         } else {
12486             ret = -TARGET_EFAULT;
12487         }
12488         unlock_user(p, arg1, 0);
12489         unlock_user(b, arg2, arg3);
12490         return ret;
12491     }
12492     case TARGET_NR_flistxattr:
12493     {
12494         void *b = 0;
12495         if (arg2) {
12496             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12497             if (!b) {
12498                 return -TARGET_EFAULT;
12499             }
12500         }
12501         ret = get_errno(flistxattr(arg1, b, arg3));
12502         unlock_user(b, arg2, arg3);
12503         return ret;
12504     }
12505     case TARGET_NR_setxattr:
12506     case TARGET_NR_lsetxattr:
12507         {
12508             void *n, *v = 0;
12509             if (arg3) {
12510                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12511                 if (!v) {
12512                     return -TARGET_EFAULT;
12513                 }
12514             }
12515             p = lock_user_string(arg1);
12516             n = lock_user_string(arg2);
12517             if (p && n) {
12518                 if (num == TARGET_NR_setxattr) {
12519                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12520                 } else {
12521                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12522                 }
12523             } else {
12524                 ret = -TARGET_EFAULT;
12525             }
12526             unlock_user(p, arg1, 0);
12527             unlock_user(n, arg2, 0);
12528             unlock_user(v, arg3, 0);
12529         }
12530         return ret;
12531     case TARGET_NR_fsetxattr:
12532         {
12533             void *n, *v = 0;
12534             if (arg3) {
12535                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12536                 if (!v) {
12537                     return -TARGET_EFAULT;
12538                 }
12539             }
12540             n = lock_user_string(arg2);
12541             if (n) {
12542                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12543             } else {
12544                 ret = -TARGET_EFAULT;
12545             }
12546             unlock_user(n, arg2, 0);
12547             unlock_user(v, arg3, 0);
12548         }
12549         return ret;
12550     case TARGET_NR_getxattr:
12551     case TARGET_NR_lgetxattr:
12552         {
12553             void *n, *v = 0;
12554             if (arg3) {
12555                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12556                 if (!v) {
12557                     return -TARGET_EFAULT;
12558                 }
12559             }
12560             p = lock_user_string(arg1);
12561             n = lock_user_string(arg2);
12562             if (p && n) {
12563                 if (num == TARGET_NR_getxattr) {
12564                     ret = get_errno(getxattr(p, n, v, arg4));
12565                 } else {
12566                     ret = get_errno(lgetxattr(p, n, v, arg4));
12567                 }
12568             } else {
12569                 ret = -TARGET_EFAULT;
12570             }
12571             unlock_user(p, arg1, 0);
12572             unlock_user(n, arg2, 0);
12573             unlock_user(v, arg3, arg4);
12574         }
12575         return ret;
12576     case TARGET_NR_fgetxattr:
12577         {
12578             void *n, *v = 0;
12579             if (arg3) {
12580                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12581                 if (!v) {
12582                     return -TARGET_EFAULT;
12583                 }
12584             }
12585             n = lock_user_string(arg2);
12586             if (n) {
12587                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12588             } else {
12589                 ret = -TARGET_EFAULT;
12590             }
12591             unlock_user(n, arg2, 0);
12592             unlock_user(v, arg3, arg4);
12593         }
12594         return ret;
12595     case TARGET_NR_removexattr:
12596     case TARGET_NR_lremovexattr:
12597         {
12598             void *n;
12599             p = lock_user_string(arg1);
12600             n = lock_user_string(arg2);
12601             if (p && n) {
12602                 if (num == TARGET_NR_removexattr) {
12603                     ret = get_errno(removexattr(p, n));
12604                 } else {
12605                     ret = get_errno(lremovexattr(p, n));
12606                 }
12607             } else {
12608                 ret = -TARGET_EFAULT;
12609             }
12610             unlock_user(p, arg1, 0);
12611             unlock_user(n, arg2, 0);
12612         }
12613         return ret;
12614     case TARGET_NR_fremovexattr:
12615         {
12616             void *n;
12617             n = lock_user_string(arg2);
12618             if (n) {
12619                 ret = get_errno(fremovexattr(arg1, n));
12620             } else {
12621                 ret = -TARGET_EFAULT;
12622             }
12623             unlock_user(n, arg2, 0);
12624         }
12625         return ret;
12626 #endif
12627 #endif /* CONFIG_ATTR */
12628 #ifdef TARGET_NR_set_thread_area
12629     case TARGET_NR_set_thread_area:
12630 #if defined(TARGET_MIPS)
12631         cpu_env->active_tc.CP0_UserLocal = arg1;
12632         return 0;
12633 #elif defined(TARGET_CRIS)
12634         if (arg1 & 0xff) {
12635             ret = -TARGET_EINVAL;
12636         } else {
12637             cpu_env->pregs[PR_PID] = arg1;
12638             ret = 0;
12639         }
12640         return ret;
12641 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12642         return do_set_thread_area(cpu_env, arg1);
12643 #elif defined(TARGET_M68K)
12644         {
12645             TaskState *ts = get_task_state(cpu);
12646             ts->tp_value = arg1;
12647             return 0;
12648         }
12649 #else
12650         return -TARGET_ENOSYS;
12651 #endif
12652 #endif
12653 #ifdef TARGET_NR_get_thread_area
12654     case TARGET_NR_get_thread_area:
12655 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12656         return do_get_thread_area(cpu_env, arg1);
12657 #elif defined(TARGET_M68K)
12658         {
12659             TaskState *ts = get_task_state(cpu);
12660             return ts->tp_value;
12661         }
12662 #else
12663         return -TARGET_ENOSYS;
12664 #endif
12665 #endif
12666 #ifdef TARGET_NR_getdomainname
12667     case TARGET_NR_getdomainname:
12668         return -TARGET_ENOSYS;
12669 #endif
12670 
12671 #ifdef TARGET_NR_clock_settime
12672     case TARGET_NR_clock_settime:
12673     {
12674         struct timespec ts;
12675 
12676         ret = target_to_host_timespec(&ts, arg2);
12677         if (!is_error(ret)) {
12678             ret = get_errno(clock_settime(arg1, &ts));
12679         }
12680         return ret;
12681     }
12682 #endif
12683 #ifdef TARGET_NR_clock_settime64
12684     case TARGET_NR_clock_settime64:
12685     {
12686         struct timespec ts;
12687 
12688         ret = target_to_host_timespec64(&ts, arg2);
12689         if (!is_error(ret)) {
12690             ret = get_errno(clock_settime(arg1, &ts));
12691         }
12692         return ret;
12693     }
12694 #endif
12695 #ifdef TARGET_NR_clock_gettime
12696     case TARGET_NR_clock_gettime:
12697     {
12698         struct timespec ts;
12699         ret = get_errno(clock_gettime(arg1, &ts));
12700         if (!is_error(ret)) {
12701             ret = host_to_target_timespec(arg2, &ts);
12702         }
12703         return ret;
12704     }
12705 #endif
12706 #ifdef TARGET_NR_clock_gettime64
12707     case TARGET_NR_clock_gettime64:
12708     {
12709         struct timespec ts;
12710         ret = get_errno(clock_gettime(arg1, &ts));
12711         if (!is_error(ret)) {
12712             ret = host_to_target_timespec64(arg2, &ts);
12713         }
12714         return ret;
12715     }
12716 #endif
12717 #ifdef TARGET_NR_clock_getres
12718     case TARGET_NR_clock_getres:
12719     {
12720         struct timespec ts;
12721         ret = get_errno(clock_getres(arg1, &ts));
12722         if (!is_error(ret)) {
12723             host_to_target_timespec(arg2, &ts);
12724         }
12725         return ret;
12726     }
12727 #endif
12728 #ifdef TARGET_NR_clock_getres_time64
12729     case TARGET_NR_clock_getres_time64:
12730     {
12731         struct timespec ts;
12732         ret = get_errno(clock_getres(arg1, &ts));
12733         if (!is_error(ret)) {
12734             host_to_target_timespec64(arg2, &ts);
12735         }
12736         return ret;
12737     }
12738 #endif
12739 #ifdef TARGET_NR_clock_nanosleep
12740     case TARGET_NR_clock_nanosleep:
12741     {
12742         struct timespec ts;
12743         if (target_to_host_timespec(&ts, arg3)) {
12744             return -TARGET_EFAULT;
12745         }
12746         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12747                                              &ts, arg4 ? &ts : NULL));
12748         /*
12749          * If the call is interrupted by a signal handler, it fails with
12750          * -TARGET_EINTR; if arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12751          * the remaining unslept time is returned in arg4.
12752          */
12753         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12754             host_to_target_timespec(arg4, &ts)) {
12755               return -TARGET_EFAULT;
12756         }
12757 
12758         return ret;
12759     }
12760 #endif
12761 #ifdef TARGET_NR_clock_nanosleep_time64
12762     case TARGET_NR_clock_nanosleep_time64:
12763     {
12764         struct timespec ts;
12765 
12766         if (target_to_host_timespec64(&ts, arg3)) {
12767             return -TARGET_EFAULT;
12768         }
12769 
12770         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12771                                              &ts, arg4 ? &ts : NULL));
12772 
12773         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12774             host_to_target_timespec64(arg4, &ts)) {
12775             return -TARGET_EFAULT;
12776         }
12777         return ret;
12778     }
12779 #endif
12780 
12781 #if defined(TARGET_NR_set_tid_address)
12782     case TARGET_NR_set_tid_address:
12783     {
12784         TaskState *ts = get_task_state(cpu);
12785         ts->child_tidptr = arg1;
12786         /* do not call host set_tid_address() syscall, instead return tid() */
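        /*
         * The clear-and-wake semantics normally attached to this pointer
         * (zeroing the word and doing a FUTEX_WAKE when the thread exits) are
         * expected to be handled by QEMU's own thread-exit path using
         * ts->child_tidptr, which is why recording the pointer is enough.
         */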
12787         return get_errno(sys_gettid());
12788     }
12789 #endif
12790 
12791     case TARGET_NR_tkill:
12792         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12793 
12794     case TARGET_NR_tgkill:
12795         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12796                          target_to_host_signal(arg3)));
12797 
12798 #ifdef TARGET_NR_set_robust_list
12799     case TARGET_NR_set_robust_list:
12800     case TARGET_NR_get_robust_list:
12801         /* The ABI for supporting robust futexes has userspace pass
12802          * the kernel a pointer to a linked list which is updated by
12803          * userspace after the syscall; the list is walked by the kernel
12804          * when the thread exits. Since the linked list in QEMU guest
12805          * memory isn't a valid linked list for the host and we have
12806          * no way to reliably intercept the thread-death event, we can't
12807          * support these. Silently return ENOSYS so that guest userspace
12808          * falls back to a non-robust futex implementation (which should
12809          * be OK except in the corner case of the guest crashing while
12810          * holding a mutex that is shared with another process via
12811          * shared memory).
12812          */
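        /*
         * For reference, the host kernel would expect the head of that list
         * to look roughly like the Linux UAPI layout below; every field is a
         * native-sized pointer or long referring to guest memory, which is
         * exactly what the host kernel cannot follow on the guest's behalf:
         *
         *     struct robust_list { struct robust_list *next; };
         *     struct robust_list_head {
         *         struct robust_list list;
         *         long futex_offset;
         *         struct robust_list *list_op_pending;
         *     };
         */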
12813         return -TARGET_ENOSYS;
12814 #endif
12815 
12816 #if defined(TARGET_NR_utimensat)
12817     case TARGET_NR_utimensat:
12818         {
12819             struct timespec *tsp, ts[2];
12820             if (!arg3) {
12821                 tsp = NULL;
12822             } else {
12823                 if (target_to_host_timespec(ts, arg3)) {
12824                     return -TARGET_EFAULT;
12825                 }
12826                 if (target_to_host_timespec(ts + 1, arg3 +
12827                                             sizeof(struct target_timespec))) {
12828                     return -TARGET_EFAULT;
12829                 }
12830                 tsp = ts;
12831             }
12832             if (!arg2) {
12833                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12834             } else {
12835                 if (!(p = lock_user_string(arg2))) {
12836                     return -TARGET_EFAULT;
12837                 }
12838                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12839                 unlock_user(p, arg2, 0);
12840             }
12841         }
12842         return ret;
12843 #endif
12844 #ifdef TARGET_NR_utimensat_time64
12845     case TARGET_NR_utimensat_time64:
12846         {
12847             struct timespec *tsp, ts[2];
12848             if (!arg3) {
12849                 tsp = NULL;
12850             } else {
12851                 if (target_to_host_timespec64(ts, arg3)) {
12852                     return -TARGET_EFAULT;
12853                 }
12854                 if (target_to_host_timespec64(ts + 1, arg3 +
12855                                      sizeof(struct target__kernel_timespec))) {
12856                     return -TARGET_EFAULT;
12857                 }
12858                 tsp = ts;
12859             }
12860             if (!arg2) {
12861                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12862             } else {
12863                 p = lock_user_string(arg2);
12864                 if (!p) {
12865                     return -TARGET_EFAULT;
12866                 }
12867                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12868                 unlock_user(p, arg2, 0);
12869             }
12870         }
12871         return ret;
12872 #endif
12873 #ifdef TARGET_NR_futex
12874     case TARGET_NR_futex:
12875         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12876 #endif
12877 #ifdef TARGET_NR_futex_time64
12878     case TARGET_NR_futex_time64:
12879         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12880 #endif
12881 #ifdef CONFIG_INOTIFY
12882 #if defined(TARGET_NR_inotify_init)
12883     case TARGET_NR_inotify_init:
12884         ret = get_errno(inotify_init());
12885         if (ret >= 0) {
12886             fd_trans_register(ret, &target_inotify_trans);
12887         }
12888         return ret;
12889 #endif
12890 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12891     case TARGET_NR_inotify_init1:
12892         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12893                                           fcntl_flags_tbl)));
12894         if (ret >= 0) {
12895             fd_trans_register(ret, &target_inotify_trans);
12896         }
12897         return ret;
12898 #endif
12899 #if defined(TARGET_NR_inotify_add_watch)
12900     case TARGET_NR_inotify_add_watch:
12901         p = lock_user_string(arg2);
12902         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12903         unlock_user(p, arg2, 0);
12904         return ret;
12905 #endif
12906 #if defined(TARGET_NR_inotify_rm_watch)
12907     case TARGET_NR_inotify_rm_watch:
12908         return get_errno(inotify_rm_watch(arg1, arg2));
12909 #endif
12910 #endif
12911 
12912 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12913     case TARGET_NR_mq_open:
12914         {
12915             struct mq_attr posix_mq_attr;
12916             struct mq_attr *pposix_mq_attr;
12917             int host_flags;
12918 
12919             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12920             pposix_mq_attr = NULL;
12921             if (arg4) {
12922                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12923                     return -TARGET_EFAULT;
12924                 }
12925                 pposix_mq_attr = &posix_mq_attr;
12926             }
12927             p = lock_user_string(arg1 - 1);
12928             if (!p) {
12929                 return -TARGET_EFAULT;
12930             }
12931             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12932             unlock_user(p, arg1, 0);
12933         }
12934         return ret;
12935 
12936     case TARGET_NR_mq_unlink:
12937         p = lock_user_string(arg1 - 1);
12938         if (!p) {
12939             return -TARGET_EFAULT;
12940         }
12941         ret = get_errno(mq_unlink(p));
12942         unlock_user(p, arg1, 0);
12943         return ret;
12944 
12945 #ifdef TARGET_NR_mq_timedsend
12946     case TARGET_NR_mq_timedsend:
12947         {
12948             struct timespec ts;
12949 
12950             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12951             if (arg5 != 0) {
12952                 if (target_to_host_timespec(&ts, arg5)) {
12953                     return -TARGET_EFAULT;
12954                 }
12955                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12956                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12957                     return -TARGET_EFAULT;
12958                 }
12959             } else {
12960                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12961             }
12962             unlock_user(p, arg2, arg3);
12963         }
12964         return ret;
12965 #endif
12966 #ifdef TARGET_NR_mq_timedsend_time64
12967     case TARGET_NR_mq_timedsend_time64:
12968         {
12969             struct timespec ts;
12970 
12971             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12972             if (arg5 != 0) {
12973                 if (target_to_host_timespec64(&ts, arg5)) {
12974                     return -TARGET_EFAULT;
12975                 }
12976                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12977                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12978                     return -TARGET_EFAULT;
12979                 }
12980             } else {
12981                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12982             }
12983             unlock_user(p, arg2, arg3);
12984         }
12985         return ret;
12986 #endif
12987 
12988 #ifdef TARGET_NR_mq_timedreceive
12989     case TARGET_NR_mq_timedreceive:
12990         {
12991             struct timespec ts;
12992             unsigned int prio;
12993 
12994             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12995             if (arg5 != 0) {
12996                 if (target_to_host_timespec(&ts, arg5)) {
12997                     return -TARGET_EFAULT;
12998                 }
12999                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13000                                                      &prio, &ts));
13001                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13002                     return -TARGET_EFAULT;
13003                 }
13004             } else {
13005                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13006                                                      &prio, NULL));
13007             }
13008             unlock_user(p, arg2, arg3);
13009             if (arg4 != 0)
13010                 put_user_u32(prio, arg4);
13011         }
13012         return ret;
13013 #endif
13014 #ifdef TARGET_NR_mq_timedreceive_time64
13015     case TARGET_NR_mq_timedreceive_time64:
13016         {
13017             struct timespec ts;
13018             unsigned int prio;
13019 
13020             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13021             if (arg5 != 0) {
13022                 if (target_to_host_timespec64(&ts, arg5)) {
13023                     return -TARGET_EFAULT;
13024                 }
13025                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13026                                                      &prio, &ts));
13027                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13028                     return -TARGET_EFAULT;
13029                 }
13030             } else {
13031                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13032                                                      &prio, NULL));
13033             }
13034             unlock_user(p, arg2, arg3);
13035             if (arg4 != 0) {
13036                 put_user_u32(prio, arg4);
13037             }
13038         }
13039         return ret;
13040 #endif
13041 
13042     /* Not implemented for now... */
13043 /*     case TARGET_NR_mq_notify: */
13044 /*         break; */
13045 
13046     case TARGET_NR_mq_getsetattr:
13047         {
13048             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13049             ret = 0;
13050             if (arg2 != 0) {
13051                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13052                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13053                                            &posix_mq_attr_out));
13054             } else if (arg3 != 0) {
13055                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13056             }
13057             if (ret == 0 && arg3 != 0) {
13058                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13059             }
13060         }
13061         return ret;
13062 #endif
13063 
13064 #ifdef CONFIG_SPLICE
13065 #ifdef TARGET_NR_tee
13066     case TARGET_NR_tee:
13067         {
13068             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13069         }
13070         return ret;
13071 #endif
13072 #ifdef TARGET_NR_splice
13073     case TARGET_NR_splice:
13074         {
13075             loff_t loff_in, loff_out;
13076             loff_t *ploff_in = NULL, *ploff_out = NULL;
13077             if (arg2) {
13078                 if (get_user_u64(loff_in, arg2)) {
13079                     return -TARGET_EFAULT;
13080                 }
13081                 ploff_in = &loff_in;
13082             }
13083             if (arg4) {
13084                 if (get_user_u64(loff_out, arg4)) {
13085                     return -TARGET_EFAULT;
13086                 }
13087                 ploff_out = &loff_out;
13088             }
13089             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13090             if (arg2) {
13091                 if (put_user_u64(loff_in, arg2)) {
13092                     return -TARGET_EFAULT;
13093                 }
13094             }
13095             if (arg4) {
13096                 if (put_user_u64(loff_out, arg4)) {
13097                     return -TARGET_EFAULT;
13098                 }
13099             }
13100         }
13101         return ret;
13102 #endif
13103 #ifdef TARGET_NR_vmsplice
13104     case TARGET_NR_vmsplice:
13105         {
13106             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13107             if (vec != NULL) {
13108                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13109                 unlock_iovec(vec, arg2, arg3, 0);
13110             } else {
13111                 ret = -host_to_target_errno(errno);
13112             }
13113         }
13114         return ret;
13115 #endif
13116 #endif /* CONFIG_SPLICE */
13117 #ifdef CONFIG_EVENTFD
13118 #if defined(TARGET_NR_eventfd)
13119     case TARGET_NR_eventfd:
13120         ret = get_errno(eventfd(arg1, 0));
13121         if (ret >= 0) {
13122             fd_trans_register(ret, &target_eventfd_trans);
13123         }
13124         return ret;
13125 #endif
13126 #if defined(TARGET_NR_eventfd2)
13127     case TARGET_NR_eventfd2:
13128     {
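        /*
         * The numeric value of O_NONBLOCK (and of other TARGET_O_* bits)
         * differs between guest ABIs, so translate the known flag bits
         * individually instead of passing arg2 through unchanged.
         */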
13129         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13130         if (arg2 & TARGET_O_NONBLOCK) {
13131             host_flags |= O_NONBLOCK;
13132         }
13133         if (arg2 & TARGET_O_CLOEXEC) {
13134             host_flags |= O_CLOEXEC;
13135         }
13136         ret = get_errno(eventfd(arg1, host_flags));
13137         if (ret >= 0) {
13138             fd_trans_register(ret, &target_eventfd_trans);
13139         }
13140         return ret;
13141     }
13142 #endif
13143 #endif /* CONFIG_EVENTFD  */
13144 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13145     case TARGET_NR_fallocate:
13146 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13147         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13148                                   target_offset64(arg5, arg6)));
13149 #else
13150         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13151 #endif
13152         return ret;
13153 #endif
13154 #if defined(CONFIG_SYNC_FILE_RANGE)
13155 #if defined(TARGET_NR_sync_file_range)
13156     case TARGET_NR_sync_file_range:
13157 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13158 #if defined(TARGET_MIPS)
13159         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13160                                         target_offset64(arg5, arg6), arg7));
13161 #else
13162         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13163                                         target_offset64(arg4, arg5), arg6));
13164 #endif /* !TARGET_MIPS */
13165 #else
13166         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13167 #endif
13168         return ret;
13169 #endif
13170 #if defined(TARGET_NR_sync_file_range2) || \
13171     defined(TARGET_NR_arm_sync_file_range)
13172 #if defined(TARGET_NR_sync_file_range2)
13173     case TARGET_NR_sync_file_range2:
13174 #endif
13175 #if defined(TARGET_NR_arm_sync_file_range)
13176     case TARGET_NR_arm_sync_file_range:
13177 #endif
13178         /* This is like sync_file_range but the arguments are reordered */
13179 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13180         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13181                                         target_offset64(arg5, arg6), arg2));
13182 #else
13183         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13184 #endif
13185         return ret;
13186 #endif
13187 #endif
13188 #if defined(TARGET_NR_signalfd4)
13189     case TARGET_NR_signalfd4:
13190         return do_signalfd4(arg1, arg2, arg4);
13191 #endif
13192 #if defined(TARGET_NR_signalfd)
13193     case TARGET_NR_signalfd:
13194         return do_signalfd4(arg1, arg2, 0);
13195 #endif
13196 #if defined(CONFIG_EPOLL)
13197 #if defined(TARGET_NR_epoll_create)
13198     case TARGET_NR_epoll_create:
13199         return get_errno(epoll_create(arg1));
13200 #endif
13201 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13202     case TARGET_NR_epoll_create1:
13203         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13204 #endif
13205 #if defined(TARGET_NR_epoll_ctl)
13206     case TARGET_NR_epoll_ctl:
13207     {
13208         struct epoll_event ep;
13209         struct epoll_event *epp = 0;
13210         if (arg4) {
13211             if (arg2 != EPOLL_CTL_DEL) {
13212                 struct target_epoll_event *target_ep;
13213                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13214                     return -TARGET_EFAULT;
13215                 }
13216                 ep.events = tswap32(target_ep->events);
13217                 /*
13218                  * The epoll_data_t union is just opaque data to the kernel,
13219                  * so we transfer all 64 bits across and need not worry what
13220                  * actual data type it is.
13221                  */
13222                 ep.data.u64 = tswap64(target_ep->data.u64);
13223                 unlock_user_struct(target_ep, arg4, 0);
13224             }
13225             /*
13226              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
13227              * non-null event pointer even though the argument is ignored,
13228              * so always pass &ep whenever arg4 is set.
13229              */
13230             epp = &ep;
13231         }
13232         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13233     }
13234 #endif
13235 
13236 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13237 #if defined(TARGET_NR_epoll_wait)
13238     case TARGET_NR_epoll_wait:
13239 #endif
13240 #if defined(TARGET_NR_epoll_pwait)
13241     case TARGET_NR_epoll_pwait:
13242 #endif
13243     {
13244         struct target_epoll_event *target_ep;
13245         struct epoll_event *ep;
13246         int epfd = arg1;
13247         int maxevents = arg3;
13248         int timeout = arg4;
13249 
13250         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13251             return -TARGET_EINVAL;
13252         }
13253 
13254         target_ep = lock_user(VERIFY_WRITE, arg2,
13255                               maxevents * sizeof(struct target_epoll_event), 1);
13256         if (!target_ep) {
13257             return -TARGET_EFAULT;
13258         }
13259 
13260         ep = g_try_new(struct epoll_event, maxevents);
13261         if (!ep) {
13262             unlock_user(target_ep, arg2, 0);
13263             return -TARGET_ENOMEM;
13264         }
13265 
13266         switch (num) {
13267 #if defined(TARGET_NR_epoll_pwait)
13268         case TARGET_NR_epoll_pwait:
13269         {
13270             sigset_t *set = NULL;
13271 
13272             if (arg5) {
13273                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13274                 if (ret != 0) {
13275                     break;
13276                 }
13277             }
13278 
13279             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13280                                              set, SIGSET_T_SIZE));
13281 
13282             if (set) {
13283                 finish_sigsuspend_mask(ret);
13284             }
13285             break;
13286         }
13287 #endif
13288 #if defined(TARGET_NR_epoll_wait)
13289         case TARGET_NR_epoll_wait:
13290             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13291                                              NULL, 0));
13292             break;
13293 #endif
13294         default:
13295             ret = -TARGET_ENOSYS;
13296         }
13297         if (!is_error(ret)) {
13298             int i;
13299             for (i = 0; i < ret; i++) {
13300                 target_ep[i].events = tswap32(ep[i].events);
13301                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13302             }
13303             unlock_user(target_ep, arg2,
13304                         ret * sizeof(struct target_epoll_event));
13305         } else {
13306             unlock_user(target_ep, arg2, 0);
13307         }
13308         g_free(ep);
13309         return ret;
13310     }
13311 #endif
13312 #endif
13313 #ifdef TARGET_NR_prlimit64
13314     case TARGET_NR_prlimit64:
13315     {
13316         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13317         struct target_rlimit64 *target_rnew, *target_rold;
13318         struct host_rlimit64 rnew, rold, *rnewp = NULL;
13319         int resource = target_to_host_resource(arg2);
13320 
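        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * forwarded to the host, since they would constrain QEMU's own
         * process as well; in that case only the old limits are queried.
         */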
13321         if (arg3 && (resource != RLIMIT_AS &&
13322                      resource != RLIMIT_DATA &&
13323                      resource != RLIMIT_STACK)) {
13324             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13325                 return -TARGET_EFAULT;
13326             }
13327             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13328             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13329             unlock_user_struct(target_rnew, arg3, 0);
13330             rnewp = &rnew;
13331         }
13332 
13333         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13334         if (!is_error(ret) && arg4) {
13335             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13336                 return -TARGET_EFAULT;
13337             }
13338             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13339             __put_user(rold.rlim_max, &target_rold->rlim_max);
13340             unlock_user_struct(target_rold, arg4, 1);
13341         }
13342         return ret;
13343     }
13344 #endif
13345 #ifdef TARGET_NR_gethostname
13346     case TARGET_NR_gethostname:
13347     {
13348         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13349         if (name) {
13350             ret = get_errno(gethostname(name, arg2));
13351             unlock_user(name, arg1, arg2);
13352         } else {
13353             ret = -TARGET_EFAULT;
13354         }
13355         return ret;
13356     }
13357 #endif
13358 #ifdef TARGET_NR_atomic_cmpxchg_32
13359     case TARGET_NR_atomic_cmpxchg_32:
13360     {
13361         /* should use start_exclusive from main.c */
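        /*
         * Kernel-assisted compare-and-exchange (m68k): read the 32-bit
         * word at guest address arg6 and, if it equals the expected
         * value in arg2, store arg1 there; the old value is returned.
         * A faulting guest address queues a SIGSEGV for the guest.
         */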
13362         abi_ulong mem_value;
13363         if (get_user_u32(mem_value, arg6)) {
13364             target_siginfo_t info;
13365             info.si_signo = SIGSEGV;
13366             info.si_errno = 0;
13367             info.si_code = TARGET_SEGV_MAPERR;
13368             info._sifields._sigfault._addr = arg6;
13369             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13370             ret = 0xdeadbeef;
13371             return ret; /* mem_value is not valid here */
13372         }
13373         if (mem_value == arg2) {
13374             put_user_u32(arg1, arg6);
        }
13375         return mem_value;
13376     }
13377 #endif
13378 #ifdef TARGET_NR_atomic_barrier
13379     case TARGET_NR_atomic_barrier:
13380         /* Like the kernel implementation and the QEMU Arm barrier,
13381          * this is treated as a no-op. */
13382         return 0;
13383 #endif
13384 
13385 #ifdef TARGET_NR_timer_create
13386     case TARGET_NR_timer_create:
13387     {
13388         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13389 
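        /*
         * Host POSIX timers live in the g_posix_timers[] slot table; the
         * id written back to the guest is the slot index tagged with
         * TIMER_MAGIC, which the other timer_* handlers validate and
         * strip via get_timer_id().
         */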
13390         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13391 
13392         int clkid = arg1;
13393         int timer_index = next_free_host_timer();
13394 
13395         if (timer_index < 0) {
13396             ret = -TARGET_EAGAIN;
13397         } else {
13398             timer_t *phtimer = g_posix_timers + timer_index;
13399 
13400             if (arg2) {
13401                 phost_sevp = &host_sevp;
13402                 ret = target_to_host_sigevent(phost_sevp, arg2);
13403                 if (ret != 0) {
13404                     free_host_timer_slot(timer_index);
13405                     return ret;
13406                 }
13407             }
13408 
13409             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13410             if (ret) {
13411                 free_host_timer_slot(timer_index);
13412             } else {
13413                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13414                     timer_delete(*phtimer);
13415                     free_host_timer_slot(timer_index);
13416                     return -TARGET_EFAULT;
13417                 }
13418             }
13419         }
13420         return ret;
13421     }
13422 #endif
13423 
13424 #ifdef TARGET_NR_timer_settime
13425     case TARGET_NR_timer_settime:
13426     {
13427         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13428          * struct itimerspec * old_value */
13429         target_timer_t timerid = get_timer_id(arg1);
13430 
13431         if (timerid < 0) {
13432             ret = timerid;
13433         } else if (arg3 == 0) {
13434             ret = -TARGET_EINVAL;
13435         } else {
13436             timer_t htimer = g_posix_timers[timerid];
13437             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13438 
13439             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13440                 return -TARGET_EFAULT;
13441             }
13442             ret = get_errno(
13443                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13444             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13445                 return -TARGET_EFAULT;
13446             }
13447         }
13448         return ret;
13449     }
13450 #endif
13451 
13452 #ifdef TARGET_NR_timer_settime64
13453     case TARGET_NR_timer_settime64:
13454     {
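        /* args: timer_t timerid, int flags,
         * const struct itimerspec64 *new_value,
         * struct itimerspec64 *old_value */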
13455         target_timer_t timerid = get_timer_id(arg1);
13456 
13457         if (timerid < 0) {
13458             ret = timerid;
13459         } else if (arg3 == 0) {
13460             ret = -TARGET_EINVAL;
13461         } else {
13462             timer_t htimer = g_posix_timers[timerid];
13463             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13464 
13465             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13466                 return -TARGET_EFAULT;
13467             }
13468             ret = get_errno(
13469                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13470             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13471                 return -TARGET_EFAULT;
13472             }
13473         }
13474         return ret;
13475     }
13476 #endif
13477 
13478 #ifdef TARGET_NR_timer_gettime
13479     case TARGET_NR_timer_gettime:
13480     {
13481         /* args: timer_t timerid, struct itimerspec *curr_value */
13482         target_timer_t timerid = get_timer_id(arg1);
13483 
13484         if (timerid < 0) {
13485             ret = timerid;
13486         } else if (!arg2) {
13487             ret = -TARGET_EFAULT;
13488         } else {
13489             timer_t htimer = g_posix_timers[timerid];
13490             struct itimerspec hspec;
13491             ret = get_errno(timer_gettime(htimer, &hspec));
13492 
13493             if (host_to_target_itimerspec(arg2, &hspec)) {
13494                 ret = -TARGET_EFAULT;
13495             }
13496         }
13497         return ret;
13498     }
13499 #endif
13500 
13501 #ifdef TARGET_NR_timer_gettime64
13502     case TARGET_NR_timer_gettime64:
13503     {
13504         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13505         target_timer_t timerid = get_timer_id(arg1);
13506 
13507         if (timerid < 0) {
13508             ret = timerid;
13509         } else if (!arg2) {
13510             ret = -TARGET_EFAULT;
13511         } else {
13512             timer_t htimer = g_posix_timers[timerid];
13513             struct itimerspec hspec;
13514             ret = get_errno(timer_gettime(htimer, &hspec));
13515 
13516             if (host_to_target_itimerspec64(arg2, &hspec)) {
13517                 ret = -TARGET_EFAULT;
13518             }
13519         }
13520         return ret;
13521     }
13522 #endif
13523 
13524 #ifdef TARGET_NR_timer_getoverrun
13525     case TARGET_NR_timer_getoverrun:
13526     {
13527         /* args: timer_t timerid */
13528         target_timer_t timerid = get_timer_id(arg1);
13529 
13530         if (timerid < 0) {
13531             ret = timerid;
13532         } else {
13533             timer_t htimer = g_posix_timers[timerid];
13534             ret = get_errno(timer_getoverrun(htimer));
13535         }
13536         return ret;
13537     }
13538 #endif
13539 
13540 #ifdef TARGET_NR_timer_delete
13541     case TARGET_NR_timer_delete:
13542     {
13543         /* args: timer_t timerid */
13544         target_timer_t timerid = get_timer_id(arg1);
13545 
13546         if (timerid < 0) {
13547             ret = timerid;
13548         } else {
13549             timer_t htimer = g_posix_timers[timerid];
13550             ret = get_errno(timer_delete(htimer));
13551             free_host_timer_slot(timerid);
13552         }
13553         return ret;
13554     }
13555 #endif
13556 
13557 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
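    /*
     * The new timerfd is registered with an fd translator so that the
     * 64-bit expiration count read from it is converted to guest byte
     * order.
     */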
13558     case TARGET_NR_timerfd_create:
13559         ret = get_errno(timerfd_create(arg1,
13560                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13561         if (ret >= 0) {
13562             fd_trans_register(ret, &target_timerfd_trans);
13563         }
13564         return ret;
13565 #endif
13566 
13567 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13568     case TARGET_NR_timerfd_gettime:
13569         {
13570             struct itimerspec its_curr;
13571 
13572             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13573 
13574             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13575                 return -TARGET_EFAULT;
13576             }
13577         }
13578         return ret;
13579 #endif
13580 
13581 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13582     case TARGET_NR_timerfd_gettime64:
13583         {
13584             struct itimerspec its_curr;
13585 
13586             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13587 
13588             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13589                 return -TARGET_EFAULT;
13590             }
13591         }
13592         return ret;
13593 #endif
13594 
13595 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13596     case TARGET_NR_timerfd_settime:
13597         {
13598             struct itimerspec its_new, its_old, *p_new;
13599 
13600             if (arg3) {
13601                 if (target_to_host_itimerspec(&its_new, arg3)) {
13602                     return -TARGET_EFAULT;
13603                 }
13604                 p_new = &its_new;
13605             } else {
13606                 p_new = NULL;
13607             }
13608 
13609             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13610 
13611             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13612                 return -TARGET_EFAULT;
13613             }
13614         }
13615         return ret;
13616 #endif
13617 
13618 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13619     case TARGET_NR_timerfd_settime64:
13620         {
13621             struct itimerspec its_new, its_old, *p_new;
13622 
13623             if (arg3) {
13624                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13625                     return -TARGET_EFAULT;
13626                 }
13627                 p_new = &its_new;
13628             } else {
13629                 p_new = NULL;
13630             }
13631 
13632             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13633 
13634             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13635                 return -TARGET_EFAULT;
13636             }
13637         }
13638         return ret;
13639 #endif
13640 
13641 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13642     case TARGET_NR_ioprio_get:
13643         return get_errno(ioprio_get(arg1, arg2));
13644 #endif
13645 
13646 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13647     case TARGET_NR_ioprio_set:
13648         return get_errno(ioprio_set(arg1, arg2, arg3));
13649 #endif
13650 
13651 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13652     case TARGET_NR_setns:
13653         return get_errno(setns(arg1, arg2));
13654 #endif
13655 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13656     case TARGET_NR_unshare:
13657         return get_errno(unshare(arg1));
13658 #endif
13659 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13660     case TARGET_NR_kcmp:
13661         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13662 #endif
13663 #ifdef TARGET_NR_swapcontext
13664     case TARGET_NR_swapcontext:
13665         /* PowerPC specific.  */
13666         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13667 #endif
13668 #ifdef TARGET_NR_memfd_create
13669     case TARGET_NR_memfd_create:
13670         p = lock_user_string(arg1);
13671         if (!p) {
13672             return -TARGET_EFAULT;
13673         }
13674         ret = get_errno(memfd_create(p, arg2));
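        /*
         * Drop any fd translator still registered for this descriptor
         * number by a previously closed fd.
         */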
13675         fd_trans_unregister(ret);
13676         unlock_user(p, arg1, 0);
13677         return ret;
13678 #endif
13679 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13680     case TARGET_NR_membarrier:
13681         return get_errno(membarrier(arg1, arg2));
13682 #endif
13683 
13684 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13685     case TARGET_NR_copy_file_range:
13686         {
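            /*
             * Optional in/out offsets are copied in from guest memory up
             * front and written back only if the host call transferred
             * any data.
             */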
13687             loff_t inoff, outoff;
13688             loff_t *pinoff = NULL, *poutoff = NULL;
13689 
13690             if (arg2) {
13691                 if (get_user_u64(inoff, arg2)) {
13692                     return -TARGET_EFAULT;
13693                 }
13694                 pinoff = &inoff;
13695             }
13696             if (arg4) {
13697                 if (get_user_u64(outoff, arg4)) {
13698                     return -TARGET_EFAULT;
13699                 }
13700                 poutoff = &outoff;
13701             }
13702             /* Do not sign-extend the count parameter. */
13703             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13704                                                  (abi_ulong)arg5, arg6));
13705             if (!is_error(ret) && ret > 0) {
13706                 if (arg2) {
13707                     if (put_user_u64(inoff, arg2)) {
13708                         return -TARGET_EFAULT;
13709                     }
13710                 }
13711                 if (arg4) {
13712                     if (put_user_u64(outoff, arg4)) {
13713                         return -TARGET_EFAULT;
13714                     }
13715                 }
13716             }
13717         }
13718         return ret;
13719 #endif
13720 
13721 #if defined(TARGET_NR_pivot_root)
13722     case TARGET_NR_pivot_root:
13723         {
13724             void *p2;
13725             p = lock_user_string(arg1); /* new_root */
13726             p2 = lock_user_string(arg2); /* put_old */
13727             if (!p || !p2) {
13728                 ret = -TARGET_EFAULT;
13729             } else {
13730                 ret = get_errno(pivot_root(p, p2));
13731             }
13732             unlock_user(p2, arg2, 0);
13733             unlock_user(p, arg1, 0);
13734         }
13735         return ret;
13736 #endif
13737 
13738 #if defined(TARGET_NR_riscv_hwprobe)
13739     case TARGET_NR_riscv_hwprobe:
13740         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13741 #endif
13742 
13743     default:
13744         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13745         return -TARGET_ENOSYS;
13746     }
13747     return ret;
13748 }
13749 
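/*
 * Top-level syscall entry point: record the syscall for plugins, emit
 * strace-style logging when enabled, dispatch to do_syscall1() for the
 * per-syscall handling, and record the return value.
 */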
13750 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13751                     abi_long arg2, abi_long arg3, abi_long arg4,
13752                     abi_long arg5, abi_long arg6, abi_long arg7,
13753                     abi_long arg8)
13754 {
13755     CPUState *cpu = env_cpu(cpu_env);
13756     abi_long ret;
13757 
13758 #ifdef DEBUG_ERESTARTSYS
13759     /* Debug-only code for exercising the syscall-restart code paths
13760      * in the per-architecture cpu main loops: restart every syscall
13761      * the guest makes once before letting it through.
13762      */
13763     {
13764         static bool flag;
13765         flag = !flag;
13766         if (flag) {
13767             return -QEMU_ERESTARTSYS;
13768         }
13769     }
13770 #endif
13771 
13772     record_syscall_start(cpu, num, arg1,
13773                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13774 
13775     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13776         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13777     }
13778 
13779     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13780                       arg5, arg6, arg7, arg8);
13781 
13782     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13783         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13784                           arg3, arg4, arg5, arg6);
13785     }
13786 
13787     record_syscall_return(cpu, num, ret);
13788     return ret;
13789 }
13790