/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
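/*
 * Illustrative sketch only (the real validation lives in do_fork() later
 * in this file, and "flags" here is just a placeholder name): the masks
 * above are meant to be used roughly like this.
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         // pthread_create()-like: reject bits in CLONE_INVALID_THREAD_FLAGS
 *     } else if (!(flags & CLONE_THREAD_FLAGS)) {
 *         // fork()-like: reject bits in CLONE_INVALID_FORK_FLAGS
 *     } else {
 *         // a partial set of thread flags is not supported
 *     }
 */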

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
                  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
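/*
 * For reference, a sketch of what these macros expand to: for example,
 * _syscall2(int, sys_getcwd1, char *, buf, size_t, size) (used below)
 * becomes approximately
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * where __NR_sys_getcwd1 is #defined just below to the host __NR_getcwd.
 */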


#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
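/*
 * Concrete example of the guard above (illustrative only): for a 64-bit
 * guest (TARGET_ABI_BITS == 64) on a 32-bit host (HOST_LONG_BITS == 32),
 * EMULATE_GETDENTS_WITH_GETDENTS stays undefined, so the guest's getdents
 * is emulated via the host getdents64 wrapper below. With a host at least
 * as wide as the guest, the host getdents wrapper is used directly.
 */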

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
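/*
 * Sketch of how this table is meant to be consumed (assuming the generic
 * bitmask translation helpers declared in the user-mode headers): each row
 * is {target_mask, target_bits, host_mask, host_bits}, so converting guest
 * open(2) flags looks roughly like
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * with the reverse helper used when reporting F_GETFL results back to the
 * guest.
 */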

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif


#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
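/*
 * Typical usage pattern throughout this file (illustrative, not a new API):
 * wrap the raw host result so callers see either a non-negative value or a
 * negative *target* errno, e.g.
 *
 *     ret = get_errno(safe_read(fd, p, count));
 *     if (is_error(ret)) {
 *         // ret is -TARGET_Exxx here
 *     }
 */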

const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
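/*
 * Illustrative caller pattern for the rule stated above (variable names
 * here are made up): lock requests always go through the 64-bit interface,
 * e.g.
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * so that file offsets keep 64 bits on 32-bit hosts as well.
 */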

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data left over from previous heap usage (the
         * heap may have grown and then shrunk).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
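/*
 * Worked example for the fd_set conversion above (illustrative numbers):
 * with TARGET_ABI_BITS == 32 and n == 70 descriptors, nw is
 * DIV_ROUND_UP(70, 32) == 3 guest words. Guest fd k is stored as bit
 * (k % 32) of word (k / 32), so e.g. fd 33 is bit 1 of word 1; the loops
 * above simply walk k across all nw * 32 bit positions of that layout.
 */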

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
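/*
 * Example of the scaling above (illustrative): on an Alpha host
 * (HOST_HZ == 1024) with a target using TARGET_HZ == 100, a host value of
 * 2048 ticks is reported to the guest as (2048 * 100) / 1024 == 200 ticks;
 * when the two rates match, the value passes through unchanged.
 */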

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
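/*
 * Note on the padding line above (illustrative): target__kernel_timespec
 * always carries a 64-bit tv_nsec slot, but a 32-bit guest ABI only
 * defines the low 32 bits of it. Casting through abi_long truncates the
 * value to the guest word size and then sign-extends it back to the host
 * 'long', discarding whatever the guest left in the upper half.
 */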
1167 
host_to_target_timespec(abi_ulong target_addr,struct timespec * host_ts)1168 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1169                                                struct timespec *host_ts)
1170 {
1171     struct target_timespec *target_ts;
1172 
1173     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1174         return -TARGET_EFAULT;
1175     }
1176     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1177     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1178     unlock_user_struct(target_ts, target_addr, 1);
1179     return 0;
1180 }
1181 
host_to_target_timespec64(abi_ulong target_addr,struct timespec * host_ts)1182 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1183                                                  struct timespec *host_ts)
1184 {
1185     struct target__kernel_timespec *target_ts;
1186 
1187     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1188         return -TARGET_EFAULT;
1189     }
1190     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1191     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1192     unlock_user_struct(target_ts, target_addr, 1);
1193     return 0;
1194 }
1195 
1196 #if defined(TARGET_NR_gettimeofday)
copy_to_user_timezone(abi_ulong target_tz_addr,struct timezone * tz)1197 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1198                                              struct timezone *tz)
1199 {
1200     struct target_timezone *target_tz;
1201 
1202     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1203         return -TARGET_EFAULT;
1204     }
1205 
1206     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1207     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1208 
1209     unlock_user_struct(target_tz, target_tz_addr, 1);
1210 
1211     return 0;
1212 }
1213 #endif
1214 
1215 #if defined(TARGET_NR_settimeofday)
copy_from_user_timezone(struct timezone * tz,abi_ulong target_tz_addr)1216 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1217                                                abi_ulong target_tz_addr)
1218 {
1219     struct target_timezone *target_tz;
1220 
1221     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1222         return -TARGET_EFAULT;
1223     }
1224 
1225     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1226     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1227 
1228     unlock_user_struct(target_tz, target_tz_addr, 0);
1229 
1230     return 0;
1231 }
1232 #endif
1233 
1234 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1235 #include <mqueue.h>
1236 
copy_from_user_mq_attr(struct mq_attr * attr,abi_ulong target_mq_attr_addr)1237 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1238                                               abi_ulong target_mq_attr_addr)
1239 {
1240     struct target_mq_attr *target_mq_attr;
1241 
1242     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1243                           target_mq_attr_addr, 1))
1244         return -TARGET_EFAULT;
1245 
1246     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1247     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1248     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1249     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1250 
1251     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1252 
1253     return 0;
1254 }
1255 
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,const struct mq_attr * attr)1256 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1257                                             const struct mq_attr *attr)
1258 {
1259     struct target_mq_attr *target_mq_attr;
1260 
1261     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1262                           target_mq_attr_addr, 0))
1263         return -TARGET_EFAULT;
1264 
1265     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1266     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1267     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1268     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1269 
1270     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1271 
1272     return 0;
1273 }
1274 #endif
1275 
1276 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1277 /* do_select() must return target values and target errnos. */
do_select(int n,abi_ulong rfd_addr,abi_ulong wfd_addr,abi_ulong efd_addr,abi_ulong target_tv_addr)1278 static abi_long do_select(int n,
1279                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1280                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1281 {
1282     fd_set rfds, wfds, efds;
1283     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1284     struct timeval tv;
1285     struct timespec ts, *ts_ptr;
1286     abi_long ret;
1287 
1288     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1289     if (ret) {
1290         return ret;
1291     }
1292     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1293     if (ret) {
1294         return ret;
1295     }
1296     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1297     if (ret) {
1298         return ret;
1299     }
1300 
1301     if (target_tv_addr) {
1302         if (copy_from_user_timeval(&tv, target_tv_addr))
1303             return -TARGET_EFAULT;
1304         ts.tv_sec = tv.tv_sec;
1305         ts.tv_nsec = tv.tv_usec * 1000;
1306         ts_ptr = &ts;
1307     } else {
1308         ts_ptr = NULL;
1309     }
1310 
1311     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1312                                   ts_ptr, NULL));
1313 
1314     if (!is_error(ret)) {
1315         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1316             return -TARGET_EFAULT;
1317         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1318             return -TARGET_EFAULT;
1319         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1320             return -TARGET_EFAULT;
1321 
1322         if (target_tv_addr) {
1323             tv.tv_sec = ts.tv_sec;
1324             tv.tv_usec = ts.tv_nsec / 1000;
1325             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1326                 return -TARGET_EFAULT;
1327             }
1328         }
1329     }
1330 
1331     return ret;
1332 }
1333 
1334 #if defined(TARGET_WANT_OLD_SYS_SELECT)
do_old_select(abi_ulong arg1)1335 static abi_long do_old_select(abi_ulong arg1)
1336 {
1337     struct target_sel_arg_struct *sel;
1338     abi_ulong inp, outp, exp, tvp;
1339     long nsel;
1340 
1341     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1342         return -TARGET_EFAULT;
1343     }
1344 
1345     nsel = tswapal(sel->n);
1346     inp = tswapal(sel->inp);
1347     outp = tswapal(sel->outp);
1348     exp = tswapal(sel->exp);
1349     tvp = tswapal(sel->tvp);
1350 
1351     unlock_user_struct(sel, arg1, 0);
1352 
1353     return do_select(nsel, inp, outp, exp, tvp);
1354 }
1355 #endif
1356 #endif
1357 
1358 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
do_pselect6(abi_long arg1,abi_long arg2,abi_long arg3,abi_long arg4,abi_long arg5,abi_long arg6,bool time64)1359 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1360                             abi_long arg4, abi_long arg5, abi_long arg6,
1361                             bool time64)
1362 {
1363     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1364     fd_set rfds, wfds, efds;
1365     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1366     struct timespec ts, *ts_ptr;
1367     abi_long ret;
1368 
1369     /*
1370      * The 6th arg is actually two args smashed together,
1371      * so we cannot use the C library.
1372      */
1373     sigset_t set;
1374     struct {
1375         sigset_t *set;
1376         size_t size;
1377     } sig, *sig_ptr;
1378 
1379     abi_ulong arg_sigset, arg_sigsize, *arg7;
1380     target_sigset_t *target_sigset;
1381 
1382     n = arg1;
1383     rfd_addr = arg2;
1384     wfd_addr = arg3;
1385     efd_addr = arg4;
1386     ts_addr = arg5;
1387 
1388     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1389     if (ret) {
1390         return ret;
1391     }
1392     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1393     if (ret) {
1394         return ret;
1395     }
1396     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1397     if (ret) {
1398         return ret;
1399     }
1400 
1401     /*
1402      * This takes a timespec, and not a timeval, so we cannot
1403      * use the do_select() helper ...
1404      */
1405     if (ts_addr) {
1406         if (time64) {
1407             if (target_to_host_timespec64(&ts, ts_addr)) {
1408                 return -TARGET_EFAULT;
1409             }
1410         } else {
1411             if (target_to_host_timespec(&ts, ts_addr)) {
1412                 return -TARGET_EFAULT;
1413             }
1414         }
1415             ts_ptr = &ts;
1416     } else {
1417         ts_ptr = NULL;
1418     }
1419 
1420     /* Extract the two packed args for the sigset */
1421     if (arg6) {
1422         sig_ptr = &sig;
1423         sig.size = SIGSET_T_SIZE;
1424 
1425         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1426         if (!arg7) {
1427             return -TARGET_EFAULT;
1428         }
1429         arg_sigset = tswapal(arg7[0]);
1430         arg_sigsize = tswapal(arg7[1]);
1431         unlock_user(arg7, arg6, 0);
1432 
1433         if (arg_sigset) {
1434             sig.set = &set;
1435             if (arg_sigsize != sizeof(*target_sigset)) {
1436                 /* Like the kernel, we enforce correct size sigsets */
1437                 return -TARGET_EINVAL;
1438             }
1439             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1440                                       sizeof(*target_sigset), 1);
1441             if (!target_sigset) {
1442                 return -TARGET_EFAULT;
1443             }
1444             target_to_host_sigset(&set, target_sigset);
1445             unlock_user(target_sigset, arg_sigset, 0);
1446         } else {
1447             sig.set = NULL;
1448         }
1449     } else {
1450         sig_ptr = NULL;
1451     }
1452 
1453     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1454                                   ts_ptr, sig_ptr));
1455 
1456     if (!is_error(ret)) {
1457         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1458             return -TARGET_EFAULT;
1459         }
1460         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1461             return -TARGET_EFAULT;
1462         }
1463         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1464             return -TARGET_EFAULT;
1465         }
1466         if (time64) {
1467             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1468                 return -TARGET_EFAULT;
1469             }
1470         } else {
1471             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1472                 return -TARGET_EFAULT;
1473             }
1474         }
1475     }
1476     return ret;
1477 }
1478 #endif
1479 
1480 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1481     defined(TARGET_NR_ppoll_time64)
do_ppoll(abi_long arg1,abi_long arg2,abi_long arg3,abi_long arg4,abi_long arg5,bool ppoll,bool time64)1482 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1483                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1484 {
1485     struct target_pollfd *target_pfd;
1486     unsigned int nfds = arg2;
1487     struct pollfd *pfd;
1488     unsigned int i;
1489     abi_long ret;
1490 
1491     pfd = NULL;
1492     target_pfd = NULL;
1493     if (nfds) {
1494         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1495             return -TARGET_EINVAL;
1496         }
1497         target_pfd = lock_user(VERIFY_WRITE, arg1,
1498                                sizeof(struct target_pollfd) * nfds, 1);
1499         if (!target_pfd) {
1500             return -TARGET_EFAULT;
1501         }
1502 
1503         pfd = alloca(sizeof(struct pollfd) * nfds);
1504         for (i = 0; i < nfds; i++) {
1505             pfd[i].fd = tswap32(target_pfd[i].fd);
1506             pfd[i].events = tswap16(target_pfd[i].events);
1507         }
1508     }
1509     if (ppoll) {
1510         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1511         target_sigset_t *target_set;
1512         sigset_t _set, *set = &_set;
1513 
1514         if (arg3) {
1515             if (time64) {
1516                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1517                     unlock_user(target_pfd, arg1, 0);
1518                     return -TARGET_EFAULT;
1519                 }
1520             } else {
1521                 if (target_to_host_timespec(timeout_ts, arg3)) {
1522                     unlock_user(target_pfd, arg1, 0);
1523                     return -TARGET_EFAULT;
1524                 }
1525             }
1526         } else {
1527             timeout_ts = NULL;
1528         }
1529 
1530         if (arg4) {
1531             if (arg5 != sizeof(target_sigset_t)) {
1532                 unlock_user(target_pfd, arg1, 0);
1533                 return -TARGET_EINVAL;
1534             }
1535 
1536             target_set = lock_user(VERIFY_READ, arg4,
1537                                    sizeof(target_sigset_t), 1);
1538             if (!target_set) {
1539                 unlock_user(target_pfd, arg1, 0);
1540                 return -TARGET_EFAULT;
1541             }
1542             target_to_host_sigset(set, target_set);
1543         } else {
1544             set = NULL;
1545         }
1546 
1547         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1548                                    set, SIGSET_T_SIZE));
1549 
1550         if (!is_error(ret) && arg3) {
1551             if (time64) {
1552                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1553                     return -TARGET_EFAULT;
1554                 }
1555             } else {
1556                 if (host_to_target_timespec(arg3, timeout_ts)) {
1557                     return -TARGET_EFAULT;
1558                 }
1559             }
1560         }
1561         if (arg4) {
1562             unlock_user(target_set, arg4, 0);
1563         }
1564     } else {
1565         struct timespec ts, *pts;
1566 
1567         if (arg3 >= 0) {
1568             /* Convert ms to secs, ns */
1569             ts.tv_sec = arg3 / 1000;
1570             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1571             pts = &ts;
1572         } else {
1573             /* A negative poll() timeout means "infinite" */
1574             pts = NULL;
1575         }
1576         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1577     }
1578 
1579     if (!is_error(ret)) {
1580         for (i = 0; i < nfds; i++) {
1581             target_pfd[i].revents = tswap16(pfd[i].revents);
1582         }
1583     }
1584     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1585     return ret;
1586 }
1587 #endif
1588 
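/* Thin wrapper around the host pipe2(); returns -ENOSYS if CONFIG_PIPE2 is not set. */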
1589 static abi_long do_pipe2(int host_pipe[], int flags)
1590 {
1591 #ifdef CONFIG_PIPE2
1592     return pipe2(host_pipe, flags);
1593 #else
1594     return -ENOSYS;
1595 #endif
1596 }
1597 
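/*
 * Emulate pipe() and pipe2(). For the original pipe syscall, Alpha, MIPS,
 * SH4 and SPARC return the second descriptor in a register rather than
 * through memory; pipe2 always stores both descriptors at 'pipedes'.
 */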
1598 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1599                         int flags, int is_pipe2)
1600 {
1601     int host_pipe[2];
1602     abi_long ret;
1603     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1604 
1605     if (is_error(ret))
1606         return get_errno(ret);
1607 
1608     /* Several targets have special calling conventions for the original
1609        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1610     if (!is_pipe2) {
1611 #if defined(TARGET_ALPHA)
1612         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1613         return host_pipe[0];
1614 #elif defined(TARGET_MIPS)
1615         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1616         return host_pipe[0];
1617 #elif defined(TARGET_SH4)
1618         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1619         return host_pipe[0];
1620 #elif defined(TARGET_SPARC)
1621         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1622         return host_pipe[0];
1623 #endif
1624     }
1625 
1626     if (put_user_s32(host_pipe[0], pipedes)
1627         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1628         return -TARGET_EFAULT;
1629     return get_errno(ret);
1630 }
1631 
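/*
 * Copy a guest ip_mreq/ip_mreqn structure into a host ip_mreqn; the
 * imr_ifindex field is only read (and byte-swapped) when the guest
 * passed the larger ip_mreqn layout.
 */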
1632 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1633                                               abi_ulong target_addr,
1634                                               socklen_t len)
1635 {
1636     struct target_ip_mreqn *target_smreqn;
1637 
1638     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1639     if (!target_smreqn)
1640         return -TARGET_EFAULT;
1641     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1642     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1643     if (len == sizeof(struct target_ip_mreqn))
1644         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1645     unlock_user(target_smreqn, target_addr, 0);
1646 
1647     return 0;
1648 }
1649 
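/*
 * Convert a guest sockaddr into host format. Fd-specific translators
 * take precedence; otherwise the family is byte-swapped, AF_UNIX
 * sun_path lengths are fixed up, and AF_NETLINK / AF_PACKET fields
 * are converted.
 */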
1650 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1651                                                abi_ulong target_addr,
1652                                                socklen_t len)
1653 {
1654     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1655     sa_family_t sa_family;
1656     struct target_sockaddr *target_saddr;
1657 
1658     if (fd_trans_target_to_host_addr(fd)) {
1659         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1660     }
1661 
1662     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1663     if (!target_saddr)
1664         return -TARGET_EFAULT;
1665 
1666     sa_family = tswap16(target_saddr->sa_family);
1667 
1668     /* The caller might send an incomplete sun_path; sun_path
1669      * must be terminated by \0 (see the manual page), but
1670      * unfortunately it is quite common to specify the sockaddr_un
1671      * length as "strlen(x->sun_path)" when it should be
1672      * "strlen(...) + 1". We fix that here if needed.
1673      * The Linux kernel has a similar feature.
1674      */
1675 
1676     if (sa_family == AF_UNIX) {
1677         if (len < unix_maxlen && len > 0) {
1678             char *cp = (char *)target_saddr;
1679 
1680             if (cp[len - 1] && !cp[len])
1681                 len++;
1682         }
1683         if (len > unix_maxlen)
1684             len = unix_maxlen;
1685     }
1686 
1687     memcpy(addr, target_saddr, len);
1688     addr->sa_family = sa_family;
1689     if (sa_family == AF_NETLINK) {
1690         struct sockaddr_nl *nladdr;
1691 
1692         nladdr = (struct sockaddr_nl *)addr;
1693         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1694         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1695     } else if (sa_family == AF_PACKET) {
1696         struct target_sockaddr_ll *lladdr;
1697 
1698         lladdr = (struct target_sockaddr_ll *)addr;
1699         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1700         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1701     }
1702     unlock_user(target_saddr, target_addr, 0);
1703 
1704     return 0;
1705 }
1706 
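/*
 * Copy a host sockaddr back into guest memory, byte-swapping the family
 * and the AF_NETLINK, AF_PACKET and AF_INET6 fields the guest expects;
 * a zero length is a no-op.
 */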
1707 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1708                                                struct sockaddr *addr,
1709                                                socklen_t len)
1710 {
1711     struct target_sockaddr *target_saddr;
1712 
1713     if (len == 0) {
1714         return 0;
1715     }
1716     assert(addr);
1717 
1718     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1719     if (!target_saddr)
1720         return -TARGET_EFAULT;
1721     memcpy(target_saddr, addr, len);
1722     if (len >= offsetof(struct target_sockaddr, sa_family) +
1723         sizeof(target_saddr->sa_family)) {
1724         target_saddr->sa_family = tswap16(addr->sa_family);
1725     }
1726     if (addr->sa_family == AF_NETLINK &&
1727         len >= sizeof(struct target_sockaddr_nl)) {
1728         struct target_sockaddr_nl *target_nl =
1729                (struct target_sockaddr_nl *)target_saddr;
1730         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1731         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1732     } else if (addr->sa_family == AF_PACKET) {
1733         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1734         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1735         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1736     } else if (addr->sa_family == AF_INET6 &&
1737                len >= sizeof(struct target_sockaddr_in6)) {
1738         struct target_sockaddr_in6 *target_in6 =
1739                (struct target_sockaddr_in6 *)target_saddr;
1740         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1741     }
1742     unlock_user(target_saddr, target_addr, len);
1743 
1744     return 0;
1745 }
1746 
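/*
 * Convert the control messages of a guest msghdr into the host cmsg
 * buffer. SCM_RIGHTS and SCM_CREDENTIALS payloads are converted field
 * by field; unknown types are logged and copied verbatim.
 */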
1747 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1748                                            struct target_msghdr *target_msgh)
1749 {
1750     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1751     abi_long msg_controllen;
1752     abi_ulong target_cmsg_addr;
1753     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1754     socklen_t space = 0;
1755 
1756     msg_controllen = tswapal(target_msgh->msg_controllen);
1757     if (msg_controllen < sizeof (struct target_cmsghdr))
1758         goto the_end;
1759     target_cmsg_addr = tswapal(target_msgh->msg_control);
1760     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1761     target_cmsg_start = target_cmsg;
1762     if (!target_cmsg)
1763         return -TARGET_EFAULT;
1764 
1765     while (cmsg && target_cmsg) {
1766         void *data = CMSG_DATA(cmsg);
1767         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1768 
1769         int len = tswapal(target_cmsg->cmsg_len)
1770             - sizeof(struct target_cmsghdr);
1771 
1772         space += CMSG_SPACE(len);
1773         if (space > msgh->msg_controllen) {
1774             space -= CMSG_SPACE(len);
1775             /* This is a QEMU bug, since we allocated the payload
1776              * area ourselves (unlike overflow in host-to-target
1777              * conversion, which is just the guest giving us a buffer
1778              * that's too small). It can't happen for the payload types
1779              * we currently support; if it becomes an issue in future
1780              * we would need to improve our allocation strategy to
1781              * something more intelligent than "twice the size of the
1782              * target buffer we're reading from".
1783              */
1784             qemu_log_mask(LOG_UNIMP,
1785                           ("Unsupported ancillary data %d/%d: "
1786                            "unhandled msg size\n"),
1787                           tswap32(target_cmsg->cmsg_level),
1788                           tswap32(target_cmsg->cmsg_type));
1789             break;
1790         }
1791 
1792         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1793             cmsg->cmsg_level = SOL_SOCKET;
1794         } else {
1795             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1796         }
1797         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1798         cmsg->cmsg_len = CMSG_LEN(len);
1799 
1800         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1801             int *fd = (int *)data;
1802             int *target_fd = (int *)target_data;
1803             int i, numfds = len / sizeof(int);
1804 
1805             for (i = 0; i < numfds; i++) {
1806                 __get_user(fd[i], target_fd + i);
1807             }
1808         } else if (cmsg->cmsg_level == SOL_SOCKET
1809                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1810             struct ucred *cred = (struct ucred *)data;
1811             struct target_ucred *target_cred =
1812                 (struct target_ucred *)target_data;
1813 
1814             __get_user(cred->pid, &target_cred->pid);
1815             __get_user(cred->uid, &target_cred->uid);
1816             __get_user(cred->gid, &target_cred->gid);
1817         } else {
1818             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1819                           cmsg->cmsg_level, cmsg->cmsg_type);
1820             memcpy(data, target_data, len);
1821         }
1822 
1823         cmsg = CMSG_NXTHDR(msgh, cmsg);
1824         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1825                                          target_cmsg_start);
1826     }
1827     unlock_user(target_cmsg, target_cmsg_addr, 0);
1828  the_end:
1829     msgh->msg_controllen = space;
1830     return 0;
1831 }
1832 
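/*
 * Convert host control messages back into the guest msghdr, adjusting
 * payload sizes where the target layout differs (e.g. SO_TIMESTAMP) and
 * setting MSG_CTRUNC when the guest buffer is too small.
 */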
1833 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1834                                            struct msghdr *msgh)
1835 {
1836     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1837     abi_long msg_controllen;
1838     abi_ulong target_cmsg_addr;
1839     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1840     socklen_t space = 0;
1841 
1842     msg_controllen = tswapal(target_msgh->msg_controllen);
1843     if (msg_controllen < sizeof (struct target_cmsghdr))
1844         goto the_end;
1845     target_cmsg_addr = tswapal(target_msgh->msg_control);
1846     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1847     target_cmsg_start = target_cmsg;
1848     if (!target_cmsg)
1849         return -TARGET_EFAULT;
1850 
1851     while (cmsg && target_cmsg) {
1852         void *data = CMSG_DATA(cmsg);
1853         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1854 
1855         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1856         int tgt_len, tgt_space;
1857 
1858         /* We never copy a half-header but may copy half-data;
1859          * this is Linux's behaviour in put_cmsg(). Note that
1860          * truncation here is a guest problem (which we report
1861          * to the guest via the CTRUNC bit), unlike truncation
1862          * in target_to_host_cmsg, which is a QEMU bug.
1863          */
1864         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1865             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1866             break;
1867         }
1868 
1869         if (cmsg->cmsg_level == SOL_SOCKET) {
1870             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1871         } else {
1872             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1873         }
1874         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1875 
1876         /* Payload types which need a different size of payload on
1877          * the target must adjust tgt_len here.
1878          */
1879         tgt_len = len;
1880         switch (cmsg->cmsg_level) {
1881         case SOL_SOCKET:
1882             switch (cmsg->cmsg_type) {
1883             case SO_TIMESTAMP:
1884                 tgt_len = sizeof(struct target_timeval);
1885                 break;
1886             default:
1887                 break;
1888             }
1889             break;
1890         default:
1891             break;
1892         }
1893 
1894         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1895             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1896             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1897         }
1898 
1899         /* We must now copy-and-convert len bytes of payload
1900          * into tgt_len bytes of destination space. Bear in mind
1901          * that in both source and destination we may be dealing
1902          * with a truncated value!
1903          */
1904         switch (cmsg->cmsg_level) {
1905         case SOL_SOCKET:
1906             switch (cmsg->cmsg_type) {
1907             case SCM_RIGHTS:
1908             {
1909                 int *fd = (int *)data;
1910                 int *target_fd = (int *)target_data;
1911                 int i, numfds = tgt_len / sizeof(int);
1912 
1913                 for (i = 0; i < numfds; i++) {
1914                     __put_user(fd[i], target_fd + i);
1915                 }
1916                 break;
1917             }
1918             case SO_TIMESTAMP:
1919             {
1920                 struct timeval *tv = (struct timeval *)data;
1921                 struct target_timeval *target_tv =
1922                     (struct target_timeval *)target_data;
1923 
1924                 if (len != sizeof(struct timeval) ||
1925                     tgt_len != sizeof(struct target_timeval)) {
1926                     goto unimplemented;
1927                 }
1928 
1929                 /* copy struct timeval to target */
1930                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1931                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1932                 break;
1933             }
1934             case SCM_CREDENTIALS:
1935             {
1936                 struct ucred *cred = (struct ucred *)data;
1937                 struct target_ucred *target_cred =
1938                     (struct target_ucred *)target_data;
1939 
1940                 __put_user(cred->pid, &target_cred->pid);
1941                 __put_user(cred->uid, &target_cred->uid);
1942                 __put_user(cred->gid, &target_cred->gid);
1943                 break;
1944             }
1945             default:
1946                 goto unimplemented;
1947             }
1948             break;
1949 
1950         case SOL_IP:
1951             switch (cmsg->cmsg_type) {
1952             case IP_TTL:
1953             {
1954                 uint32_t *v = (uint32_t *)data;
1955                 uint32_t *t_int = (uint32_t *)target_data;
1956 
1957                 if (len != sizeof(uint32_t) ||
1958                     tgt_len != sizeof(uint32_t)) {
1959                     goto unimplemented;
1960                 }
1961                 __put_user(*v, t_int);
1962                 break;
1963             }
1964             case IP_RECVERR:
1965             {
1966                 struct errhdr_t {
1967                    struct sock_extended_err ee;
1968                    struct sockaddr_in offender;
1969                 };
1970                 struct errhdr_t *errh = (struct errhdr_t *)data;
1971                 struct errhdr_t *target_errh =
1972                     (struct errhdr_t *)target_data;
1973 
1974                 if (len != sizeof(struct errhdr_t) ||
1975                     tgt_len != sizeof(struct errhdr_t)) {
1976                     goto unimplemented;
1977                 }
1978                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1979                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1980                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1981                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1982                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1983                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1984                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1985                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1986                     (void *) &errh->offender, sizeof(errh->offender));
1987                 break;
1988             }
1989             default:
1990                 goto unimplemented;
1991             }
1992             break;
1993 
1994         case SOL_IPV6:
1995             switch (cmsg->cmsg_type) {
1996             case IPV6_HOPLIMIT:
1997             {
1998                 uint32_t *v = (uint32_t *)data;
1999                 uint32_t *t_int = (uint32_t *)target_data;
2000 
2001                 if (len != sizeof(uint32_t) ||
2002                     tgt_len != sizeof(uint32_t)) {
2003                     goto unimplemented;
2004                 }
2005                 __put_user(*v, t_int);
2006                 break;
2007             }
2008             case IPV6_RECVERR:
2009             {
2010                 struct errhdr6_t {
2011                    struct sock_extended_err ee;
2012                    struct sockaddr_in6 offender;
2013                 };
2014                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2015                 struct errhdr6_t *target_errh =
2016                     (struct errhdr6_t *)target_data;
2017 
2018                 if (len != sizeof(struct errhdr6_t) ||
2019                     tgt_len != sizeof(struct errhdr6_t)) {
2020                     goto unimplemented;
2021                 }
2022                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2023                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2024                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2025                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2026                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2027                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2028                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2029                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2030                     (void *) &errh->offender, sizeof(errh->offender));
2031                 break;
2032             }
2033             default:
2034                 goto unimplemented;
2035             }
2036             break;
2037 
2038         default:
2039         unimplemented:
2040             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2041                           cmsg->cmsg_level, cmsg->cmsg_type);
2042             memcpy(target_data, data, MIN(len, tgt_len));
2043             if (tgt_len > len) {
2044                 memset(target_data + len, 0, tgt_len - len);
2045             }
2046         }
2047 
2048         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2049         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2050         if (msg_controllen < tgt_space) {
2051             tgt_space = msg_controllen;
2052         }
2053         msg_controllen -= tgt_space;
2054         space += tgt_space;
2055         cmsg = CMSG_NXTHDR(msgh, cmsg);
2056         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2057                                          target_cmsg_start);
2058     }
2059     unlock_user(target_cmsg, target_cmsg_addr, space);
2060  the_end:
2061     target_msgh->msg_controllen = tswapal(space);
2062     return 0;
2063 }
2064 
2065 /* do_setsockopt() must return target values and target errnos. */
2066 static abi_long do_setsockopt(int sockfd, int level, int optname,
2067                               abi_ulong optval_addr, socklen_t optlen)
2068 {
2069     abi_long ret;
2070     int val;
2071     struct ip_mreqn *ip_mreq;
2072     struct ip_mreq_source *ip_mreq_source;
2073 
2074     switch(level) {
2075     case SOL_TCP:
2076     case SOL_UDP:
2077         /* TCP and UDP options all take an 'int' value.  */
2078         if (optlen < sizeof(uint32_t))
2079             return -TARGET_EINVAL;
2080 
2081         if (get_user_u32(val, optval_addr))
2082             return -TARGET_EFAULT;
2083         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2084         break;
2085     case SOL_IP:
2086         switch(optname) {
2087         case IP_TOS:
2088         case IP_TTL:
2089         case IP_HDRINCL:
2090         case IP_ROUTER_ALERT:
2091         case IP_RECVOPTS:
2092         case IP_RETOPTS:
2093         case IP_PKTINFO:
2094         case IP_MTU_DISCOVER:
2095         case IP_RECVERR:
2096         case IP_RECVTTL:
2097         case IP_RECVTOS:
2098 #ifdef IP_FREEBIND
2099         case IP_FREEBIND:
2100 #endif
2101         case IP_MULTICAST_TTL:
2102         case IP_MULTICAST_LOOP:
2103             val = 0;
2104             if (optlen >= sizeof(uint32_t)) {
2105                 if (get_user_u32(val, optval_addr))
2106                     return -TARGET_EFAULT;
2107             } else if (optlen >= 1) {
2108                 if (get_user_u8(val, optval_addr))
2109                     return -TARGET_EFAULT;
2110             }
2111             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2112             break;
2113         case IP_ADD_MEMBERSHIP:
2114         case IP_DROP_MEMBERSHIP:
2115             if (optlen < sizeof (struct target_ip_mreq) ||
2116                 optlen > sizeof (struct target_ip_mreqn))
2117                 return -TARGET_EINVAL;
2118 
2119             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2120             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2121             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2122             break;
2123 
2124         case IP_BLOCK_SOURCE:
2125         case IP_UNBLOCK_SOURCE:
2126         case IP_ADD_SOURCE_MEMBERSHIP:
2127         case IP_DROP_SOURCE_MEMBERSHIP:
2128             if (optlen != sizeof (struct target_ip_mreq_source))
2129                 return -TARGET_EINVAL;
2130 
2131             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2132             if (!ip_mreq_source) {
2133                 return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2136             unlock_user(ip_mreq_source, optval_addr, 0);
2137             break;
2138 
2139         default:
2140             goto unimplemented;
2141         }
2142         break;
2143     case SOL_IPV6:
2144         switch (optname) {
2145         case IPV6_MTU_DISCOVER:
2146         case IPV6_MTU:
2147         case IPV6_V6ONLY:
2148         case IPV6_RECVPKTINFO:
2149         case IPV6_UNICAST_HOPS:
2150         case IPV6_MULTICAST_HOPS:
2151         case IPV6_MULTICAST_LOOP:
2152         case IPV6_RECVERR:
2153         case IPV6_RECVHOPLIMIT:
2154         case IPV6_2292HOPLIMIT:
2155         case IPV6_CHECKSUM:
2156         case IPV6_ADDRFORM:
2157         case IPV6_2292PKTINFO:
2158         case IPV6_RECVTCLASS:
2159         case IPV6_RECVRTHDR:
2160         case IPV6_2292RTHDR:
2161         case IPV6_RECVHOPOPTS:
2162         case IPV6_2292HOPOPTS:
2163         case IPV6_RECVDSTOPTS:
2164         case IPV6_2292DSTOPTS:
2165         case IPV6_TCLASS:
2166         case IPV6_ADDR_PREFERENCES:
2167 #ifdef IPV6_RECVPATHMTU
2168         case IPV6_RECVPATHMTU:
2169 #endif
2170 #ifdef IPV6_TRANSPARENT
2171         case IPV6_TRANSPARENT:
2172 #endif
2173 #ifdef IPV6_FREEBIND
2174         case IPV6_FREEBIND:
2175 #endif
2176 #ifdef IPV6_RECVORIGDSTADDR
2177         case IPV6_RECVORIGDSTADDR:
2178 #endif
2179             val = 0;
2180             if (optlen < sizeof(uint32_t)) {
2181                 return -TARGET_EINVAL;
2182             }
2183             if (get_user_u32(val, optval_addr)) {
2184                 return -TARGET_EFAULT;
2185             }
2186             ret = get_errno(setsockopt(sockfd, level, optname,
2187                                        &val, sizeof(val)));
2188             break;
2189         case IPV6_PKTINFO:
2190         {
2191             struct in6_pktinfo pki;
2192 
2193             if (optlen < sizeof(pki)) {
2194                 return -TARGET_EINVAL;
2195             }
2196 
2197             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2198                 return -TARGET_EFAULT;
2199             }
2200 
2201             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2202 
2203             ret = get_errno(setsockopt(sockfd, level, optname,
2204                                        &pki, sizeof(pki)));
2205             break;
2206         }
2207         case IPV6_ADD_MEMBERSHIP:
2208         case IPV6_DROP_MEMBERSHIP:
2209         {
2210             struct ipv6_mreq ipv6mreq;
2211 
2212             if (optlen < sizeof(ipv6mreq)) {
2213                 return -TARGET_EINVAL;
2214             }
2215 
2216             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2217                 return -TARGET_EFAULT;
2218             }
2219 
2220             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2221 
2222             ret = get_errno(setsockopt(sockfd, level, optname,
2223                                        &ipv6mreq, sizeof(ipv6mreq)));
2224             break;
2225         }
2226         default:
2227             goto unimplemented;
2228         }
2229         break;
2230     case SOL_ICMPV6:
2231         switch (optname) {
2232         case ICMPV6_FILTER:
2233         {
2234             struct icmp6_filter icmp6f;
2235 
2236             if (optlen > sizeof(icmp6f)) {
2237                 optlen = sizeof(icmp6f);
2238             }
2239 
2240             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2241                 return -TARGET_EFAULT;
2242             }
2243 
2244             for (val = 0; val < 8; val++) {
2245                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2246             }
2247 
2248             ret = get_errno(setsockopt(sockfd, level, optname,
2249                                        &icmp6f, optlen));
2250             break;
2251         }
2252         default:
2253             goto unimplemented;
2254         }
2255         break;
2256     case SOL_RAW:
2257         switch (optname) {
2258         case ICMP_FILTER:
2259         case IPV6_CHECKSUM:
2260             /* these take a u32 value */
2261             if (optlen < sizeof(uint32_t)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (get_user_u32(val, optval_addr)) {
2266                 return -TARGET_EFAULT;
2267             }
2268             ret = get_errno(setsockopt(sockfd, level, optname,
2269                                        &val, sizeof(val)));
2270             break;
2271 
2272         default:
2273             goto unimplemented;
2274         }
2275         break;
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2277     case SOL_ALG:
2278         switch (optname) {
2279         case ALG_SET_KEY:
2280         {
2281             char *alg_key = g_malloc(optlen);
2282 
2283             if (!alg_key) {
2284                 return -TARGET_ENOMEM;
2285             }
2286             if (copy_from_user(alg_key, optval_addr, optlen)) {
2287                 g_free(alg_key);
2288                 return -TARGET_EFAULT;
2289             }
2290             ret = get_errno(setsockopt(sockfd, level, optname,
2291                                        alg_key, optlen));
2292             g_free(alg_key);
2293             break;
2294         }
2295         case ALG_SET_AEAD_AUTHSIZE:
2296         {
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        NULL, optlen));
2299             break;
2300         }
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305 #endif
2306     case TARGET_SOL_SOCKET:
2307         switch (optname) {
2308         case TARGET_SO_RCVTIMEO:
2309         {
2310                 struct timeval tv;
2311 
2312                 optname = SO_RCVTIMEO;
2313 
2314 set_timeout:
2315                 if (optlen != sizeof(struct target_timeval)) {
2316                     return -TARGET_EINVAL;
2317                 }
2318 
2319                 if (copy_from_user_timeval(&tv, optval_addr)) {
2320                     return -TARGET_EFAULT;
2321                 }
2322 
2323                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2324                                 &tv, sizeof(tv)));
2325                 return ret;
2326         }
2327         case TARGET_SO_SNDTIMEO:
2328                 optname = SO_SNDTIMEO;
2329                 goto set_timeout;
2330         case TARGET_SO_ATTACH_FILTER:
2331         {
2332                 struct target_sock_fprog *tfprog;
2333                 struct target_sock_filter *tfilter;
2334                 struct sock_fprog fprog;
2335                 struct sock_filter *filter;
2336                 int i;
2337 
2338                 if (optlen != sizeof(*tfprog)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2342                     return -TARGET_EFAULT;
2343                 }
2344                 if (!lock_user_struct(VERIFY_READ, tfilter,
2345                                       tswapal(tfprog->filter), 0)) {
2346                     unlock_user_struct(tfprog, optval_addr, 1);
2347                     return -TARGET_EFAULT;
2348                 }
2349 
2350                 fprog.len = tswap16(tfprog->len);
2351                 filter = g_try_new(struct sock_filter, fprog.len);
2352                 if (filter == NULL) {
2353                     unlock_user_struct(tfilter, tfprog->filter, 1);
2354                     unlock_user_struct(tfprog, optval_addr, 1);
2355                     return -TARGET_ENOMEM;
2356                 }
2357                 for (i = 0; i < fprog.len; i++) {
2358                     filter[i].code = tswap16(tfilter[i].code);
2359                     filter[i].jt = tfilter[i].jt;
2360                     filter[i].jf = tfilter[i].jf;
2361                     filter[i].k = tswap32(tfilter[i].k);
2362                 }
2363                 fprog.filter = filter;
2364 
2365                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2366                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2367                 g_free(filter);
2368 
2369                 unlock_user_struct(tfilter, tfprog->filter, 1);
2370                 unlock_user_struct(tfprog, optval_addr, 1);
2371                 return ret;
2372         }
2373         case TARGET_SO_BINDTODEVICE:
2374         {
2375                 char *dev_ifname, *addr_ifname;
2376 
2377                 if (optlen > IFNAMSIZ - 1) {
2378                     optlen = IFNAMSIZ - 1;
2379                 }
2380                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2381                 if (!dev_ifname) {
2382                     return -TARGET_EFAULT;
2383                 }
2384                 optname = SO_BINDTODEVICE;
2385                 addr_ifname = alloca(IFNAMSIZ);
2386                 memcpy(addr_ifname, dev_ifname, optlen);
2387                 addr_ifname[optlen] = 0;
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2389                                            addr_ifname, optlen));
2390                 unlock_user(dev_ifname, optval_addr, 0);
2391                 return ret;
2392         }
2393         case TARGET_SO_LINGER:
2394         {
2395                 struct linger lg;
2396                 struct target_linger *tlg;
2397 
2398                 if (optlen != sizeof(struct target_linger)) {
2399                     return -TARGET_EINVAL;
2400                 }
2401                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2402                     return -TARGET_EFAULT;
2403                 }
2404                 __get_user(lg.l_onoff, &tlg->l_onoff);
2405                 __get_user(lg.l_linger, &tlg->l_linger);
2406                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2407                                 &lg, sizeof(lg)));
2408                 unlock_user_struct(tlg, optval_addr, 0);
2409                 return ret;
2410         }
2411             /* Options with 'int' argument.  */
2412         case TARGET_SO_DEBUG:
2413                 optname = SO_DEBUG;
2414                 break;
2415         case TARGET_SO_REUSEADDR:
2416                 optname = SO_REUSEADDR;
2417                 break;
2418 #ifdef SO_REUSEPORT
2419         case TARGET_SO_REUSEPORT:
2420                 optname = SO_REUSEPORT;
2421                 break;
2422 #endif
2423         case TARGET_SO_TYPE:
2424                 optname = SO_TYPE;
2425                 break;
2426         case TARGET_SO_ERROR:
2427                 optname = SO_ERROR;
2428                 break;
2429         case TARGET_SO_DONTROUTE:
2430                 optname = SO_DONTROUTE;
2431                 break;
2432         case TARGET_SO_BROADCAST:
2433                 optname = SO_BROADCAST;
2434                 break;
2435         case TARGET_SO_SNDBUF:
2436                 optname = SO_SNDBUF;
2437                 break;
2438         case TARGET_SO_SNDBUFFORCE:
2439                 optname = SO_SNDBUFFORCE;
2440                 break;
2441         case TARGET_SO_RCVBUF:
2442                 optname = SO_RCVBUF;
2443                 break;
2444         case TARGET_SO_RCVBUFFORCE:
2445                 optname = SO_RCVBUFFORCE;
2446                 break;
2447         case TARGET_SO_KEEPALIVE:
2448                 optname = SO_KEEPALIVE;
2449                 break;
2450         case TARGET_SO_OOBINLINE:
2451                 optname = SO_OOBINLINE;
2452                 break;
2453         case TARGET_SO_NO_CHECK:
2454                 optname = SO_NO_CHECK;
2455                 break;
2456         case TARGET_SO_PRIORITY:
2457                 optname = SO_PRIORITY;
2458                 break;
2459 #ifdef SO_BSDCOMPAT
2460         case TARGET_SO_BSDCOMPAT:
2461                 optname = SO_BSDCOMPAT;
2462                 break;
2463 #endif
2464         case TARGET_SO_PASSCRED:
2465                 optname = SO_PASSCRED;
2466                 break;
2467         case TARGET_SO_PASSSEC:
2468                 optname = SO_PASSSEC;
2469                 break;
2470         case TARGET_SO_TIMESTAMP:
2471                 optname = SO_TIMESTAMP;
2472                 break;
2473         case TARGET_SO_RCVLOWAT:
2474                 optname = SO_RCVLOWAT;
2475                 break;
2476         default:
2477             goto unimplemented;
2478         }
2479         if (optlen < sizeof(uint32_t))
2480             return -TARGET_EINVAL;
2481 
2482         if (get_user_u32(val, optval_addr))
2483             return -TARGET_EFAULT;
2484         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2485         break;
2486 #ifdef SOL_NETLINK
2487     case SOL_NETLINK:
2488         switch (optname) {
2489         case NETLINK_PKTINFO:
2490         case NETLINK_ADD_MEMBERSHIP:
2491         case NETLINK_DROP_MEMBERSHIP:
2492         case NETLINK_BROADCAST_ERROR:
2493         case NETLINK_NO_ENOBUFS:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495         case NETLINK_LISTEN_ALL_NSID:
2496         case NETLINK_CAP_ACK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499         case NETLINK_EXT_ACK:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502         case NETLINK_GET_STRICT_CHK:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2504             break;
2505         default:
2506             goto unimplemented;
2507         }
2508         val = 0;
2509         if (optlen < sizeof(uint32_t)) {
2510             return -TARGET_EINVAL;
2511         }
2512         if (get_user_u32(val, optval_addr)) {
2513             return -TARGET_EFAULT;
2514         }
2515         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2516                                    sizeof(val)));
2517         break;
2518 #endif /* SOL_NETLINK */
2519     default:
2520     unimplemented:
2521         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2522                       level, optname);
2523         ret = -TARGET_ENOPROTOOPT;
2524     }
2525     return ret;
2526 }
2527 
2528 /* do_getsockopt() must return target values and target errnos. */
2529 static abi_long do_getsockopt(int sockfd, int level, int optname,
2530                               abi_ulong optval_addr, abi_ulong optlen)
2531 {
2532     abi_long ret;
2533     int len, val;
2534     socklen_t lv;
2535 
2536     switch(level) {
2537     case TARGET_SOL_SOCKET:
2538         level = SOL_SOCKET;
2539         switch (optname) {
2540         /* These don't just return a single integer */
2541         case TARGET_SO_PEERNAME:
2542             goto unimplemented;
2543         case TARGET_SO_RCVTIMEO: {
2544             struct timeval tv;
2545             socklen_t tvlen;
2546 
2547             optname = SO_RCVTIMEO;
2548 
2549 get_timeout:
2550             if (get_user_u32(len, optlen)) {
2551                 return -TARGET_EFAULT;
2552             }
2553             if (len < 0) {
2554                 return -TARGET_EINVAL;
2555             }
2556 
2557             tvlen = sizeof(tv);
2558             ret = get_errno(getsockopt(sockfd, level, optname,
2559                                        &tv, &tvlen));
2560             if (ret < 0) {
2561                 return ret;
2562             }
2563             if (len > sizeof(struct target_timeval)) {
2564                 len = sizeof(struct target_timeval);
2565             }
2566             if (copy_to_user_timeval(optval_addr, &tv)) {
2567                 return -TARGET_EFAULT;
2568             }
2569             if (put_user_u32(len, optlen)) {
2570                 return -TARGET_EFAULT;
2571             }
2572             break;
2573         }
2574         case TARGET_SO_SNDTIMEO:
2575             optname = SO_SNDTIMEO;
2576             goto get_timeout;
2577         case TARGET_SO_PEERCRED: {
2578             struct ucred cr;
2579             socklen_t crlen;
2580             struct target_ucred *tcr;
2581 
2582             if (get_user_u32(len, optlen)) {
2583                 return -TARGET_EFAULT;
2584             }
2585             if (len < 0) {
2586                 return -TARGET_EINVAL;
2587             }
2588 
2589             crlen = sizeof(cr);
2590             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2591                                        &cr, &crlen));
2592             if (ret < 0) {
2593                 return ret;
2594             }
2595             if (len > crlen) {
2596                 len = crlen;
2597             }
2598             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2599                 return -TARGET_EFAULT;
2600             }
2601             __put_user(cr.pid, &tcr->pid);
2602             __put_user(cr.uid, &tcr->uid);
2603             __put_user(cr.gid, &tcr->gid);
2604             unlock_user_struct(tcr, optval_addr, 1);
2605             if (put_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             break;
2609         }
2610         case TARGET_SO_PEERSEC: {
2611             char *name;
2612 
2613             if (get_user_u32(len, optlen)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             if (len < 0) {
2617                 return -TARGET_EINVAL;
2618             }
2619             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2620             if (!name) {
2621                 return -TARGET_EFAULT;
2622             }
2623             lv = len;
2624             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2625                                        name, &lv));
2626             if (put_user_u32(lv, optlen)) {
2627                 ret = -TARGET_EFAULT;
2628             }
2629             unlock_user(name, optval_addr, lv);
2630             break;
2631         }
2632         case TARGET_SO_LINGER:
2633         {
2634             struct linger lg;
2635             socklen_t lglen;
2636             struct target_linger *tlg;
2637 
2638             if (get_user_u32(len, optlen)) {
2639                 return -TARGET_EFAULT;
2640             }
2641             if (len < 0) {
2642                 return -TARGET_EINVAL;
2643             }
2644 
2645             lglen = sizeof(lg);
2646             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2647                                        &lg, &lglen));
2648             if (ret < 0) {
2649                 return ret;
2650             }
2651             if (len > lglen) {
2652                 len = lglen;
2653             }
2654             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             __put_user(lg.l_onoff, &tlg->l_onoff);
2658             __put_user(lg.l_linger, &tlg->l_linger);
2659             unlock_user_struct(tlg, optval_addr, 1);
2660             if (put_user_u32(len, optlen)) {
2661                 return -TARGET_EFAULT;
2662             }
2663             break;
2664         }
2665         /* Options with 'int' argument.  */
2666         case TARGET_SO_DEBUG:
2667             optname = SO_DEBUG;
2668             goto int_case;
2669         case TARGET_SO_REUSEADDR:
2670             optname = SO_REUSEADDR;
2671             goto int_case;
2672 #ifdef SO_REUSEPORT
2673         case TARGET_SO_REUSEPORT:
2674             optname = SO_REUSEPORT;
2675             goto int_case;
2676 #endif
2677         case TARGET_SO_TYPE:
2678             optname = SO_TYPE;
2679             goto int_case;
2680         case TARGET_SO_ERROR:
2681             optname = SO_ERROR;
2682             goto int_case;
2683         case TARGET_SO_DONTROUTE:
2684             optname = SO_DONTROUTE;
2685             goto int_case;
2686         case TARGET_SO_BROADCAST:
2687             optname = SO_BROADCAST;
2688             goto int_case;
2689         case TARGET_SO_SNDBUF:
2690             optname = SO_SNDBUF;
2691             goto int_case;
2692         case TARGET_SO_RCVBUF:
2693             optname = SO_RCVBUF;
2694             goto int_case;
2695         case TARGET_SO_KEEPALIVE:
2696             optname = SO_KEEPALIVE;
2697             goto int_case;
2698         case TARGET_SO_OOBINLINE:
2699             optname = SO_OOBINLINE;
2700             goto int_case;
2701         case TARGET_SO_NO_CHECK:
2702             optname = SO_NO_CHECK;
2703             goto int_case;
2704         case TARGET_SO_PRIORITY:
2705             optname = SO_PRIORITY;
2706             goto int_case;
2707 #ifdef SO_BSDCOMPAT
2708         case TARGET_SO_BSDCOMPAT:
2709             optname = SO_BSDCOMPAT;
2710             goto int_case;
2711 #endif
2712         case TARGET_SO_PASSCRED:
2713             optname = SO_PASSCRED;
2714             goto int_case;
2715         case TARGET_SO_TIMESTAMP:
2716             optname = SO_TIMESTAMP;
2717             goto int_case;
2718         case TARGET_SO_RCVLOWAT:
2719             optname = SO_RCVLOWAT;
2720             goto int_case;
2721         case TARGET_SO_ACCEPTCONN:
2722             optname = SO_ACCEPTCONN;
2723             goto int_case;
2724         case TARGET_SO_PROTOCOL:
2725             optname = SO_PROTOCOL;
2726             goto int_case;
2727         case TARGET_SO_DOMAIN:
2728             optname = SO_DOMAIN;
2729             goto int_case;
2730         default:
2731             goto int_case;
2732         }
2733         break;
2734     case SOL_TCP:
2735     case SOL_UDP:
2736         /* TCP and UDP options all take an 'int' value.  */
2737     int_case:
2738         if (get_user_u32(len, optlen))
2739             return -TARGET_EFAULT;
2740         if (len < 0)
2741             return -TARGET_EINVAL;
2742         lv = sizeof(lv);
2743         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2744         if (ret < 0)
2745             return ret;
2746         if (optname == SO_TYPE) {
2747             val = host_to_target_sock_type(val);
2748         }
2749         if (len > lv)
2750             len = lv;
2751         if (len == 4) {
2752             if (put_user_u32(val, optval_addr))
2753                 return -TARGET_EFAULT;
2754         } else {
2755             if (put_user_u8(val, optval_addr))
2756                 return -TARGET_EFAULT;
2757         }
2758         if (put_user_u32(len, optlen))
2759             return -TARGET_EFAULT;
2760         break;
2761     case SOL_IP:
2762         switch(optname) {
2763         case IP_TOS:
2764         case IP_TTL:
2765         case IP_HDRINCL:
2766         case IP_ROUTER_ALERT:
2767         case IP_RECVOPTS:
2768         case IP_RETOPTS:
2769         case IP_PKTINFO:
2770         case IP_MTU_DISCOVER:
2771         case IP_RECVERR:
2772         case IP_RECVTOS:
2773 #ifdef IP_FREEBIND
2774         case IP_FREEBIND:
2775 #endif
2776         case IP_MULTICAST_TTL:
2777         case IP_MULTICAST_LOOP:
2778             if (get_user_u32(len, optlen))
2779                 return -TARGET_EFAULT;
2780             if (len < 0)
2781                 return -TARGET_EINVAL;
2782             lv = sizeof(lv);
2783             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2784             if (ret < 0)
2785                 return ret;
2786             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2787                 len = 1;
2788                 if (put_user_u32(len, optlen)
2789                     || put_user_u8(val, optval_addr))
2790                     return -TARGET_EFAULT;
2791             } else {
2792                 if (len > sizeof(int))
2793                     len = sizeof(int);
2794                 if (put_user_u32(len, optlen)
2795                     || put_user_u32(val, optval_addr))
2796                     return -TARGET_EFAULT;
2797             }
2798             break;
2799         default:
2800             ret = -TARGET_ENOPROTOOPT;
2801             break;
2802         }
2803         break;
2804     case SOL_IPV6:
2805         switch (optname) {
2806         case IPV6_MTU_DISCOVER:
2807         case IPV6_MTU:
2808         case IPV6_V6ONLY:
2809         case IPV6_RECVPKTINFO:
2810         case IPV6_UNICAST_HOPS:
2811         case IPV6_MULTICAST_HOPS:
2812         case IPV6_MULTICAST_LOOP:
2813         case IPV6_RECVERR:
2814         case IPV6_RECVHOPLIMIT:
2815         case IPV6_2292HOPLIMIT:
2816         case IPV6_CHECKSUM:
2817         case IPV6_ADDRFORM:
2818         case IPV6_2292PKTINFO:
2819         case IPV6_RECVTCLASS:
2820         case IPV6_RECVRTHDR:
2821         case IPV6_2292RTHDR:
2822         case IPV6_RECVHOPOPTS:
2823         case IPV6_2292HOPOPTS:
2824         case IPV6_RECVDSTOPTS:
2825         case IPV6_2292DSTOPTS:
2826         case IPV6_TCLASS:
2827         case IPV6_ADDR_PREFERENCES:
2828 #ifdef IPV6_RECVPATHMTU
2829         case IPV6_RECVPATHMTU:
2830 #endif
2831 #ifdef IPV6_TRANSPARENT
2832         case IPV6_TRANSPARENT:
2833 #endif
2834 #ifdef IPV6_FREEBIND
2835         case IPV6_FREEBIND:
2836 #endif
2837 #ifdef IPV6_RECVORIGDSTADDR
2838         case IPV6_RECVORIGDSTADDR:
2839 #endif
2840             if (get_user_u32(len, optlen))
2841                 return -TARGET_EFAULT;
2842             if (len < 0)
2843                 return -TARGET_EINVAL;
2844             lv = sizeof(lv);
2845             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2846             if (ret < 0)
2847                 return ret;
2848             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2849                 len = 1;
2850                 if (put_user_u32(len, optlen)
2851                     || put_user_u8(val, optval_addr))
2852                     return -TARGET_EFAULT;
2853             } else {
2854                 if (len > sizeof(int))
2855                     len = sizeof(int);
2856                 if (put_user_u32(len, optlen)
2857                     || put_user_u32(val, optval_addr))
2858                     return -TARGET_EFAULT;
2859             }
2860             break;
2861         default:
2862             ret = -TARGET_ENOPROTOOPT;
2863             break;
2864         }
2865         break;
2866 #ifdef SOL_NETLINK
2867     case SOL_NETLINK:
2868         switch (optname) {
2869         case NETLINK_PKTINFO:
2870         case NETLINK_BROADCAST_ERROR:
2871         case NETLINK_NO_ENOBUFS:
2872 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2873         case NETLINK_LISTEN_ALL_NSID:
2874         case NETLINK_CAP_ACK:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2877         case NETLINK_EXT_ACK:
2878 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2879 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2880         case NETLINK_GET_STRICT_CHK:
2881 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2882             if (get_user_u32(len, optlen)) {
2883                 return -TARGET_EFAULT;
2884             }
2885             if (len != sizeof(val)) {
2886                 return -TARGET_EINVAL;
2887             }
2888             lv = len;
2889             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2890             if (ret < 0) {
2891                 return ret;
2892             }
2893             if (put_user_u32(lv, optlen)
2894                 || put_user_u32(val, optval_addr)) {
2895                 return -TARGET_EFAULT;
2896             }
2897             break;
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2899         case NETLINK_LIST_MEMBERSHIPS:
2900         {
2901             uint32_t *results;
2902             int i;
2903             if (get_user_u32(len, optlen)) {
2904                 return -TARGET_EFAULT;
2905             }
2906             if (len < 0) {
2907                 return -TARGET_EINVAL;
2908             }
2909             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2910             if (!results && len > 0) {
2911                 return -TARGET_EFAULT;
2912             }
2913             lv = len;
2914             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2915             if (ret < 0) {
2916                 unlock_user(results, optval_addr, 0);
2917                 return ret;
2918             }
2919             /* Swap host endianness to target endianness. */
2920             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2921                 results[i] = tswap32(results[i]);
2922             }
2923             if (put_user_u32(lv, optlen)) {
2924                 return -TARGET_EFAULT;
2925             }
2926             unlock_user(results, optval_addr, 0);
2927             break;
2928         }
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2930         default:
2931             goto unimplemented;
2932         }
2933         break;
2934 #endif /* SOL_NETLINK */
2935     default:
2936     unimplemented:
2937         qemu_log_mask(LOG_UNIMP,
2938                       "getsockopt level=%d optname=%d not yet supported\n",
2939                       level, optname);
2940         ret = -TARGET_EOPNOTSUPP;
2941         break;
2942     }
2943     return ret;
2944 }
2945 
2946 /* Convert target low/high pair representing file offset into the host
2947  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2948  * as the kernel doesn't handle them either.
2949  */
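/*
 * For example, with a 32-bit target and a 64-bit host, tlow = 0x89abcdef
 * and thigh = 0x01234567 combine into 0x0123456789abcdef; *hlow then
 * holds the whole value and *hhigh ends up as 0.
 */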
2950 static void target_to_host_low_high(abi_ulong tlow,
2951                                     abi_ulong thigh,
2952                                     unsigned long *hlow,
2953                                     unsigned long *hhigh)
2954 {
2955     uint64_t off = tlow |
2956         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2957         TARGET_LONG_BITS / 2;
2958 
2959     *hlow = off;
2960     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2961 }
2962 
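/*
 * Lock a guest iovec array and build a host struct iovec array from it.
 * Returns NULL with errno set on failure; a zero count also returns NULL
 * but with errno cleared. After the first bad buffer pointer, remaining
 * entries get zero length so a partial transfer is still possible.
 */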
2963 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2964                                 abi_ulong count, int copy)
2965 {
2966     struct target_iovec *target_vec;
2967     struct iovec *vec;
2968     abi_ulong total_len, max_len;
2969     int i;
2970     int err = 0;
2971     bool bad_address = false;
2972 
2973     if (count == 0) {
2974         errno = 0;
2975         return NULL;
2976     }
2977     if (count > IOV_MAX) {
2978         errno = EINVAL;
2979         return NULL;
2980     }
2981 
2982     vec = g_try_new0(struct iovec, count);
2983     if (vec == NULL) {
2984         errno = ENOMEM;
2985         return NULL;
2986     }
2987 
2988     target_vec = lock_user(VERIFY_READ, target_addr,
2989                            count * sizeof(struct target_iovec), 1);
2990     if (target_vec == NULL) {
2991         err = EFAULT;
2992         goto fail2;
2993     }
2994 
2995     /* ??? If host page size > target page size, this will result in a
2996        value larger than what we can actually support.  */
2997     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2998     total_len = 0;
2999 
3000     for (i = 0; i < count; i++) {
3001         abi_ulong base = tswapal(target_vec[i].iov_base);
3002         abi_long len = tswapal(target_vec[i].iov_len);
3003 
3004         if (len < 0) {
3005             err = EINVAL;
3006             goto fail;
3007         } else if (len == 0) {
3008             /* Zero length pointer is ignored.  */
3009             vec[i].iov_base = 0;
3010         } else {
3011             vec[i].iov_base = lock_user(type, base, len, copy);
3012             /* If the first buffer pointer is bad, this is a fault.  But
3013              * subsequent bad buffers will result in a partial write; this
3014              * is realized by filling the vector with null pointers and
3015              * zero lengths. */
3016             if (!vec[i].iov_base) {
3017                 if (i == 0) {
3018                     err = EFAULT;
3019                     goto fail;
3020                 } else {
3021                     bad_address = true;
3022                 }
3023             }
3024             if (bad_address) {
3025                 len = 0;
3026             }
3027             if (len > max_len - total_len) {
3028                 len = max_len - total_len;
3029             }
3030         }
3031         vec[i].iov_len = len;
3032         total_len += len;
3033     }
3034 
3035     unlock_user(target_vec, target_addr, 0);
3036     return vec;
3037 
3038  fail:
3039     while (--i >= 0) {
3040         if (tswapal(target_vec[i].iov_len) > 0) {
3041             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3042         }
3043     }
3044     unlock_user(target_vec, target_addr, 0);
3045  fail2:
3046     g_free(vec);
3047     errno = err;
3048     return NULL;
3049 }
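/*
 * Usage sketch (hypothetical caller, not part of the build): a readv/writev
 * style emulation would pair lock_iovec() with the matching unlock_iovec()
 * defined below, roughly:
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_vec, count, 1);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(writev(fd, vec, count));
 *     unlock_iovec(vec, target_vec, count, 0);
 *
 * Note that a bad buffer after the first one is not an error: lock_iovec()
 * clamps its length to zero so the host call performs a partial transfer,
 * as described in the comment above.
 */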
3050 
3051 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3052                          abi_ulong count, int copy)
3053 {
3054     struct target_iovec *target_vec;
3055     int i;
3056 
3057     target_vec = lock_user(VERIFY_READ, target_addr,
3058                            count * sizeof(struct target_iovec), 1);
3059     if (target_vec) {
3060         for (i = 0; i < count; i++) {
3061             abi_ulong base = tswapal(target_vec[i].iov_base);
3062             abi_long len = tswapal(target_vec[i].iov_len);
3063             if (len < 0) {
3064                 break;
3065             }
3066             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3067         }
3068         unlock_user(target_vec, target_addr, 0);
3069     }
3070 
3071     g_free(vec);
3072 }
3073 
3074 static inline int target_to_host_sock_type(int *type)
3075 {
3076     int host_type = 0;
3077     int target_type = *type;
3078 
3079     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3080     case TARGET_SOCK_DGRAM:
3081         host_type = SOCK_DGRAM;
3082         break;
3083     case TARGET_SOCK_STREAM:
3084         host_type = SOCK_STREAM;
3085         break;
3086     default:
3087         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3088         break;
3089     }
3090     if (target_type & TARGET_SOCK_CLOEXEC) {
3091 #if defined(SOCK_CLOEXEC)
3092         host_type |= SOCK_CLOEXEC;
3093 #else
3094         return -TARGET_EINVAL;
3095 #endif
3096     }
3097     if (target_type & TARGET_SOCK_NONBLOCK) {
3098 #if defined(SOCK_NONBLOCK)
3099         host_type |= SOCK_NONBLOCK;
3100 #elif !defined(O_NONBLOCK)
3101         return -TARGET_EINVAL;
3102 #endif
3103     }
3104     *type = host_type;
3105     return 0;
3106 }
3107 
3108 /* Try to emulate socket type flags after socket creation.  */
3109 static int sock_flags_fixup(int fd, int target_type)
3110 {
3111 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3112     if (target_type & TARGET_SOCK_NONBLOCK) {
3113         int flags = fcntl(fd, F_GETFL);
3114         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3115             close(fd);
3116             return -TARGET_EINVAL;
3117         }
3118     }
3119 #endif
3120     return fd;
3121 }
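/*
 * Illustrative example (hypothetical values): a guest socket(AF_INET,
 * SOCK_STREAM | SOCK_NONBLOCK, 0) call on a host whose headers lack
 * SOCK_NONBLOCK still yields a non-blocking socket; do_socket() below
 * creates the fd first and sock_flags_fixup() then applies O_NONBLOCK via
 * fcntl(F_SETFL), closing the fd if that fails.
 */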
3122 
3123 /* do_socket() Must return target values and target errnos. */
3124 static abi_long do_socket(int domain, int type, int protocol)
3125 {
3126     int target_type = type;
3127     int ret;
3128 
3129     ret = target_to_host_sock_type(&type);
3130     if (ret) {
3131         return ret;
3132     }
3133 
3134     if (domain == PF_NETLINK && !(
3135 #ifdef CONFIG_RTNETLINK
3136          protocol == NETLINK_ROUTE ||
3137 #endif
3138          protocol == NETLINK_KOBJECT_UEVENT ||
3139          protocol == NETLINK_AUDIT)) {
3140         return -TARGET_EPROTONOSUPPORT;
3141     }
3142 
3143     if (domain == AF_PACKET ||
3144         (domain == AF_INET && type == SOCK_PACKET)) {
3145         protocol = tswap16(protocol);
3146     }
3147 
3148     ret = get_errno(socket(domain, type, protocol));
3149     if (ret >= 0) {
3150         ret = sock_flags_fixup(ret, target_type);
3151         if (type == SOCK_PACKET) {
3152             /* Handle the obsolete SOCK_PACKET case:
3153              * such sockets are bound by device name.
3154              */
3155             fd_trans_register(ret, &target_packet_trans);
3156         } else if (domain == PF_NETLINK) {
3157             switch (protocol) {
3158 #ifdef CONFIG_RTNETLINK
3159             case NETLINK_ROUTE:
3160                 fd_trans_register(ret, &target_netlink_route_trans);
3161                 break;
3162 #endif
3163             case NETLINK_KOBJECT_UEVENT:
3164                 /* nothing to do: messages are strings */
3165                 break;
3166             case NETLINK_AUDIT:
3167                 fd_trans_register(ret, &target_netlink_audit_trans);
3168                 break;
3169             default:
3170                 g_assert_not_reached();
3171             }
3172         }
3173     }
3174     return ret;
3175 }
3176 
3177 /* do_bind() Must return target values and target errnos. */
3178 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3179                         socklen_t addrlen)
3180 {
3181     void *addr;
3182     abi_long ret;
3183 
3184     if ((int)addrlen < 0) {
3185         return -TARGET_EINVAL;
3186     }
3187 
3188     addr = alloca(addrlen+1);
3189 
3190     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3191     if (ret)
3192         return ret;
3193 
3194     return get_errno(bind(sockfd, addr, addrlen));
3195 }
3196 
3197 /* do_connect() Must return target values and target errnos. */
3198 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3199                            socklen_t addrlen)
3200 {
3201     void *addr;
3202     abi_long ret;
3203 
3204     if ((int)addrlen < 0) {
3205         return -TARGET_EINVAL;
3206     }
3207 
3208     addr = alloca(addrlen+1);
3209 
3210     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3211     if (ret)
3212         return ret;
3213 
3214     return get_errno(safe_connect(sockfd, addr, addrlen));
3215 }
3216 
3217 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3218 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3219                                       int flags, int send)
3220 {
3221     abi_long ret, len;
3222     struct msghdr msg;
3223     abi_ulong count;
3224     struct iovec *vec;
3225     abi_ulong target_vec;
3226 
3227     if (msgp->msg_name) {
3228         msg.msg_namelen = tswap32(msgp->msg_namelen);
3229         msg.msg_name = alloca(msg.msg_namelen+1);
3230         ret = target_to_host_sockaddr(fd, msg.msg_name,
3231                                       tswapal(msgp->msg_name),
3232                                       msg.msg_namelen);
3233         if (ret == -TARGET_EFAULT) {
3234             /* For connected sockets msg_name and msg_namelen must
3235              * be ignored, so returning EFAULT immediately is wrong.
3236              * Instead, pass a bad msg_name to the host kernel, and
3237              * let it decide whether to return EFAULT or not.
3238              */
3239             msg.msg_name = (void *)-1;
3240         } else if (ret) {
3241             goto out2;
3242         }
3243     } else {
3244         msg.msg_name = NULL;
3245         msg.msg_namelen = 0;
3246     }
3247     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3248     msg.msg_control = alloca(msg.msg_controllen);
3249     memset(msg.msg_control, 0, msg.msg_controllen);
3250 
3251     msg.msg_flags = tswap32(msgp->msg_flags);
3252 
3253     count = tswapal(msgp->msg_iovlen);
3254     target_vec = tswapal(msgp->msg_iov);
3255 
3256     if (count > IOV_MAX) {
3257         /* sendmsg/recvmsg return a different errno for this condition than
3258          * readv/writev, so we must catch it here before lock_iovec() does.
3259          */
3260         ret = -TARGET_EMSGSIZE;
3261         goto out2;
3262     }
3263 
3264     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3265                      target_vec, count, send);
3266     if (vec == NULL) {
3267         ret = -host_to_target_errno(errno);
3268         goto out2;
3269     }
3270     msg.msg_iovlen = count;
3271     msg.msg_iov = vec;
3272 
3273     if (send) {
3274         if (fd_trans_target_to_host_data(fd)) {
3275             void *host_msg;
3276 
3277             host_msg = g_malloc(msg.msg_iov->iov_len);
3278             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3279             ret = fd_trans_target_to_host_data(fd)(host_msg,
3280                                                    msg.msg_iov->iov_len);
3281             if (ret >= 0) {
3282                 msg.msg_iov->iov_base = host_msg;
3283                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3284             }
3285             g_free(host_msg);
3286         } else {
3287             ret = target_to_host_cmsg(&msg, msgp);
3288             if (ret == 0) {
3289                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3290             }
3291         }
3292     } else {
3293         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3294         if (!is_error(ret)) {
3295             len = ret;
3296             if (fd_trans_host_to_target_data(fd)) {
3297                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3298                                                MIN(msg.msg_iov->iov_len, len));
3299             } else {
3300                 ret = host_to_target_cmsg(msgp, &msg);
3301             }
3302             if (!is_error(ret)) {
3303                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3304                 msgp->msg_flags = tswap32(msg.msg_flags);
3305                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3306                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3307                                     msg.msg_name, msg.msg_namelen);
3308                     if (ret) {
3309                         goto out;
3310                     }
3311                 }
3312 
3313                 ret = len;
3314             }
3315         }
3316     }
3317 
3318 out:
3319     unlock_iovec(vec, target_vec, count, !send);
3320 out2:
3321     return ret;
3322 }
3323 
3324 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3325                                int flags, int send)
3326 {
3327     abi_long ret;
3328     struct target_msghdr *msgp;
3329 
3330     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3331                           msgp,
3332                           target_msg,
3333                           send ? 1 : 0)) {
3334         return -TARGET_EFAULT;
3335     }
3336     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3337     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3338     return ret;
3339 }
3340 
3341 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3342  * so it might not have this *mmsg-specific flag either.
3343  */
3344 #ifndef MSG_WAITFORONE
3345 #define MSG_WAITFORONE 0x10000
3346 #endif
3347 
3348 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3349                                 unsigned int vlen, unsigned int flags,
3350                                 int send)
3351 {
3352     struct target_mmsghdr *mmsgp;
3353     abi_long ret = 0;
3354     int i;
3355 
3356     if (vlen > UIO_MAXIOV) {
3357         vlen = UIO_MAXIOV;
3358     }
3359 
3360     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3361     if (!mmsgp) {
3362         return -TARGET_EFAULT;
3363     }
3364 
3365     for (i = 0; i < vlen; i++) {
3366         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3367         if (is_error(ret)) {
3368             break;
3369         }
3370         mmsgp[i].msg_len = tswap32(ret);
3371         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3372         if (flags & MSG_WAITFORONE) {
3373             flags |= MSG_DONTWAIT;
3374         }
3375     }
3376 
3377     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3378 
3379     /* Return number of datagrams sent if we sent any at all;
3380      * otherwise return the error.
3381      */
3382     if (i) {
3383         return i;
3384     }
3385     return ret;
3386 }
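/*
 * Worked example (hypothetical values): a guest recvmmsg() with vlen == 3
 * and MSG_WAITFORONE waits only for the first datagram; once mmsgp[0] is
 * filled in, MSG_DONTWAIT is or'ed into flags, so the remaining iterations
 * fail with EAGAIN if nothing else is queued.  The loop then stops with
 * i == 1 and the call returns 1 rather than the error, matching the
 * "return the count if any datagram was transferred" rule noted above.
 */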
3387 
3388 /* do_accept4() Must return target values and target errnos. */
3389 static abi_long do_accept4(int fd, abi_ulong target_addr,
3390                            abi_ulong target_addrlen_addr, int flags)
3391 {
3392     socklen_t addrlen, ret_addrlen;
3393     void *addr;
3394     abi_long ret;
3395     int host_flags;
3396 
3397     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3398 
3399     if (target_addr == 0) {
3400         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3401     }
3402 
3403     /* Linux returns EFAULT if the addrlen pointer is invalid */
3404     if (get_user_u32(addrlen, target_addrlen_addr))
3405         return -TARGET_EFAULT;
3406 
3407     if ((int)addrlen < 0) {
3408         return -TARGET_EINVAL;
3409     }
3410 
3411     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3412         return -TARGET_EFAULT;
3413     }
3414 
3415     addr = alloca(addrlen);
3416 
3417     ret_addrlen = addrlen;
3418     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3419     if (!is_error(ret)) {
3420         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3421         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3422             ret = -TARGET_EFAULT;
3423         }
3424     }
3425     return ret;
3426 }
3427 
3428 /* do_getpeername() Must return target values and target errnos. */
3429 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3430                                abi_ulong target_addrlen_addr)
3431 {
3432     socklen_t addrlen, ret_addrlen;
3433     void *addr;
3434     abi_long ret;
3435 
3436     if (get_user_u32(addrlen, target_addrlen_addr))
3437         return -TARGET_EFAULT;
3438 
3439     if ((int)addrlen < 0) {
3440         return -TARGET_EINVAL;
3441     }
3442 
3443     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3444         return -TARGET_EFAULT;
3445     }
3446 
3447     addr = alloca(addrlen);
3448 
3449     ret_addrlen = addrlen;
3450     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3451     if (!is_error(ret)) {
3452         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3453         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3454             ret = -TARGET_EFAULT;
3455         }
3456     }
3457     return ret;
3458 }
3459 
3460 /* do_getsockname() Must return target values and target errnos. */
3461 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3462                                abi_ulong target_addrlen_addr)
3463 {
3464     socklen_t addrlen, ret_addrlen;
3465     void *addr;
3466     abi_long ret;
3467 
3468     if (get_user_u32(addrlen, target_addrlen_addr))
3469         return -TARGET_EFAULT;
3470 
3471     if ((int)addrlen < 0) {
3472         return -TARGET_EINVAL;
3473     }
3474 
3475     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3476         return -TARGET_EFAULT;
3477     }
3478 
3479     addr = alloca(addrlen);
3480 
3481     ret_addrlen = addrlen;
3482     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3483     if (!is_error(ret)) {
3484         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3485         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3486             ret = -TARGET_EFAULT;
3487         }
3488     }
3489     return ret;
3490 }
3491 
3492 /* do_socketpair() Must return target values and target errnos. */
3493 static abi_long do_socketpair(int domain, int type, int protocol,
3494                               abi_ulong target_tab_addr)
3495 {
3496     int tab[2];
3497     abi_long ret;
3498 
3499     target_to_host_sock_type(&type);
3500 
3501     ret = get_errno(socketpair(domain, type, protocol, tab));
3502     if (!is_error(ret)) {
3503         if (put_user_s32(tab[0], target_tab_addr)
3504             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3505             ret = -TARGET_EFAULT;
3506     }
3507     return ret;
3508 }
3509 
3510 /* do_sendto() Must return target values and target errnos. */
3511 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3512                           abi_ulong target_addr, socklen_t addrlen)
3513 {
3514     void *addr;
3515     void *host_msg;
3516     void *copy_msg = NULL;
3517     abi_long ret;
3518 
3519     if ((int)addrlen < 0) {
3520         return -TARGET_EINVAL;
3521     }
3522 
3523     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3524     if (!host_msg)
3525         return -TARGET_EFAULT;
3526     if (fd_trans_target_to_host_data(fd)) {
3527         copy_msg = host_msg;
3528         host_msg = g_malloc(len);
3529         memcpy(host_msg, copy_msg, len);
3530         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3531         if (ret < 0) {
3532             goto fail;
3533         }
3534     }
3535     if (target_addr) {
3536         addr = alloca(addrlen+1);
3537         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3538         if (ret) {
3539             goto fail;
3540         }
3541         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3542     } else {
3543         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3544     }
3545 fail:
3546     if (copy_msg) {
3547         g_free(host_msg);
3548         host_msg = copy_msg;
3549     }
3550     unlock_user(host_msg, msg, 0);
3551     return ret;
3552 }
3553 
3554 /* do_recvfrom() Must return target values and target errnos. */
3555 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3556                             abi_ulong target_addr,
3557                             abi_ulong target_addrlen)
3558 {
3559     socklen_t addrlen, ret_addrlen;
3560     void *addr;
3561     void *host_msg;
3562     abi_long ret;
3563 
3564     if (!msg) {
3565         host_msg = NULL;
3566     } else {
3567         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3568         if (!host_msg) {
3569             return -TARGET_EFAULT;
3570         }
3571     }
3572     if (target_addr) {
3573         if (get_user_u32(addrlen, target_addrlen)) {
3574             ret = -TARGET_EFAULT;
3575             goto fail;
3576         }
3577         if ((int)addrlen < 0) {
3578             ret = -TARGET_EINVAL;
3579             goto fail;
3580         }
3581         addr = alloca(addrlen);
3582         ret_addrlen = addrlen;
3583         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3584                                       addr, &ret_addrlen));
3585     } else {
3586         addr = NULL; /* To keep compiler quiet.  */
3587         addrlen = 0; /* To keep compiler quiet.  */
3588         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3589     }
3590     if (!is_error(ret)) {
3591         if (fd_trans_host_to_target_data(fd)) {
3592             abi_long trans;
3593             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3594             if (is_error(trans)) {
3595                 ret = trans;
3596                 goto fail;
3597             }
3598         }
3599         if (target_addr) {
3600             host_to_target_sockaddr(target_addr, addr,
3601                                     MIN(addrlen, ret_addrlen));
3602             if (put_user_u32(ret_addrlen, target_addrlen)) {
3603                 ret = -TARGET_EFAULT;
3604                 goto fail;
3605             }
3606         }
3607         unlock_user(host_msg, msg, len);
3608     } else {
3609 fail:
3610         unlock_user(host_msg, msg, 0);
3611     }
3612     return ret;
3613 }
3614 
3615 #ifdef TARGET_NR_socketcall
3616 /* do_socketcall() must return target values and target errnos. */
3617 static abi_long do_socketcall(int num, abi_ulong vptr)
3618 {
3619     static const unsigned nargs[] = { /* number of arguments per operation */
3620         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3621         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3622         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3623         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3624         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3625         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3626         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3627         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3628         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3629         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3630         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3631         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3632         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3633         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3634         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3635         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3636         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3637         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3638         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3639         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3640     };
3641     abi_long a[6]; /* max 6 args */
3642     unsigned i;
3643 
3644     /* check the range of the first argument num */
3645     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3646     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3647         return -TARGET_EINVAL;
3648     }
3649     /* ensure we have space for args */
3650     if (nargs[num] > ARRAY_SIZE(a)) {
3651         return -TARGET_EINVAL;
3652     }
3653     /* collect the arguments in a[] according to nargs[] */
3654     for (i = 0; i < nargs[num]; ++i) {
3655         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3656             return -TARGET_EFAULT;
3657         }
3658     }
3659     /* now when we have the args, invoke the appropriate underlying function */
3660     switch (num) {
3661     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3662         return do_socket(a[0], a[1], a[2]);
3663     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3664         return do_bind(a[0], a[1], a[2]);
3665     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3666         return do_connect(a[0], a[1], a[2]);
3667     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3668         return get_errno(listen(a[0], a[1]));
3669     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3670         return do_accept4(a[0], a[1], a[2], 0);
3671     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3672         return do_getsockname(a[0], a[1], a[2]);
3673     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3674         return do_getpeername(a[0], a[1], a[2]);
3675     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3676         return do_socketpair(a[0], a[1], a[2], a[3]);
3677     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3678         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3679     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3680         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3681     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3682         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3683     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3684         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3685     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3686         return get_errno(shutdown(a[0], a[1]));
3687     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3688         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3689     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3690         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3691     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3692         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3693     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3694         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3695     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3696         return do_accept4(a[0], a[1], a[2], a[3]);
3697     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3698         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3699     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3700         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3701     default:
3702         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3703         return -TARGET_EINVAL;
3704     }
3705 }
3706 #endif
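/*
 * Illustrative example (hypothetical guest call): socketcall(TARGET_SYS_CONNECT,
 * vptr) with vptr pointing at { fd, addr, addrlen } reads
 * nargs[TARGET_SYS_CONNECT] == 3 abi_longs from guest memory into a[] and
 * then dispatches to do_connect(a[0], a[1], a[2]), exactly as if the guest
 * had invoked the separate connect() syscall.
 */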
3707 
3708 #define N_SHM_REGIONS	32
3709 
3710 static struct shm_region {
3711     abi_ulong start;
3712     abi_ulong size;
3713     bool in_use;
3714 } shm_regions[N_SHM_REGIONS];
3715 
3716 #ifndef TARGET_SEMID64_DS
3717 /* asm-generic version of this struct */
3718 struct target_semid64_ds
3719 {
3720   struct target_ipc_perm sem_perm;
3721   abi_ulong sem_otime;
3722 #if TARGET_ABI_BITS == 32
3723   abi_ulong __unused1;
3724 #endif
3725   abi_ulong sem_ctime;
3726 #if TARGET_ABI_BITS == 32
3727   abi_ulong __unused2;
3728 #endif
3729   abi_ulong sem_nsems;
3730   abi_ulong __unused3;
3731   abi_ulong __unused4;
3732 };
3733 #endif
3734 
3735 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3736                                                abi_ulong target_addr)
3737 {
3738     struct target_ipc_perm *target_ip;
3739     struct target_semid64_ds *target_sd;
3740 
3741     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3742         return -TARGET_EFAULT;
3743     target_ip = &(target_sd->sem_perm);
3744     host_ip->__key = tswap32(target_ip->__key);
3745     host_ip->uid = tswap32(target_ip->uid);
3746     host_ip->gid = tswap32(target_ip->gid);
3747     host_ip->cuid = tswap32(target_ip->cuid);
3748     host_ip->cgid = tswap32(target_ip->cgid);
3749 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3750     host_ip->mode = tswap32(target_ip->mode);
3751 #else
3752     host_ip->mode = tswap16(target_ip->mode);
3753 #endif
3754 #if defined(TARGET_PPC)
3755     host_ip->__seq = tswap32(target_ip->__seq);
3756 #else
3757     host_ip->__seq = tswap16(target_ip->__seq);
3758 #endif
3759     unlock_user_struct(target_sd, target_addr, 0);
3760     return 0;
3761 }
3762 
3763 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3764                                                struct ipc_perm *host_ip)
3765 {
3766     struct target_ipc_perm *target_ip;
3767     struct target_semid64_ds *target_sd;
3768 
3769     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3770         return -TARGET_EFAULT;
3771     target_ip = &(target_sd->sem_perm);
3772     target_ip->__key = tswap32(host_ip->__key);
3773     target_ip->uid = tswap32(host_ip->uid);
3774     target_ip->gid = tswap32(host_ip->gid);
3775     target_ip->cuid = tswap32(host_ip->cuid);
3776     target_ip->cgid = tswap32(host_ip->cgid);
3777 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3778     target_ip->mode = tswap32(host_ip->mode);
3779 #else
3780     target_ip->mode = tswap16(host_ip->mode);
3781 #endif
3782 #if defined(TARGET_PPC)
3783     target_ip->__seq = tswap32(host_ip->__seq);
3784 #else
3785     target_ip->__seq = tswap16(host_ip->__seq);
3786 #endif
3787     unlock_user_struct(target_sd, target_addr, 1);
3788     return 0;
3789 }
3790 
3791 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3792                                                abi_ulong target_addr)
3793 {
3794     struct target_semid64_ds *target_sd;
3795 
3796     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3797         return -TARGET_EFAULT;
3798     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3799         return -TARGET_EFAULT;
3800     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3801     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3802     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3803     unlock_user_struct(target_sd, target_addr, 0);
3804     return 0;
3805 }
3806 
3807 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3808                                                struct semid_ds *host_sd)
3809 {
3810     struct target_semid64_ds *target_sd;
3811 
3812     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3813         return -TARGET_EFAULT;
3814     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3815         return -TARGET_EFAULT;
3816     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3817     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3818     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3819     unlock_user_struct(target_sd, target_addr, 1);
3820     return 0;
3821 }
3822 
3823 struct target_seminfo {
3824     int semmap;
3825     int semmni;
3826     int semmns;
3827     int semmnu;
3828     int semmsl;
3829     int semopm;
3830     int semume;
3831     int semusz;
3832     int semvmx;
3833     int semaem;
3834 };
3835 
3836 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3837                                               struct seminfo *host_seminfo)
3838 {
3839     struct target_seminfo *target_seminfo;
3840     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3841         return -TARGET_EFAULT;
3842     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3843     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3844     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3845     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3846     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3847     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3848     __put_user(host_seminfo->semume, &target_seminfo->semume);
3849     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3850     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3851     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3852     unlock_user_struct(target_seminfo, target_addr, 1);
3853     return 0;
3854 }
3855 
3856 union semun {
3857 	int val;
3858 	struct semid_ds *buf;
3859 	unsigned short *array;
3860 	struct seminfo *__buf;
3861 };
3862 
3863 union target_semun {
3864 	int val;
3865 	abi_ulong buf;
3866 	abi_ulong array;
3867 	abi_ulong __buf;
3868 };
3869 
3870 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3871                                                abi_ulong target_addr)
3872 {
3873     int nsems;
3874     unsigned short *array;
3875     union semun semun;
3876     struct semid_ds semid_ds;
3877     int i, ret;
3878 
3879     semun.buf = &semid_ds;
3880 
3881     ret = semctl(semid, 0, IPC_STAT, semun);
3882     if (ret == -1)
3883         return get_errno(ret);
3884 
3885     nsems = semid_ds.sem_nsems;
3886 
3887     *host_array = g_try_new(unsigned short, nsems);
3888     if (!*host_array) {
3889         return -TARGET_ENOMEM;
3890     }
3891     array = lock_user(VERIFY_READ, target_addr,
3892                       nsems*sizeof(unsigned short), 1);
3893     if (!array) {
3894         g_free(*host_array);
3895         return -TARGET_EFAULT;
3896     }
3897 
3898     for(i=0; i<nsems; i++) {
3899         __get_user((*host_array)[i], &array[i]);
3900     }
3901     unlock_user(array, target_addr, 0);
3902 
3903     return 0;
3904 }
3905 
3906 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3907                                                unsigned short **host_array)
3908 {
3909     int nsems;
3910     unsigned short *array;
3911     union semun semun;
3912     struct semid_ds semid_ds;
3913     int i, ret;
3914 
3915     semun.buf = &semid_ds;
3916 
3917     ret = semctl(semid, 0, IPC_STAT, semun);
3918     if (ret == -1)
3919         return get_errno(ret);
3920 
3921     nsems = semid_ds.sem_nsems;
3922 
3923     array = lock_user(VERIFY_WRITE, target_addr,
3924                       nsems*sizeof(unsigned short), 0);
3925     if (!array)
3926         return -TARGET_EFAULT;
3927 
3928     for(i=0; i<nsems; i++) {
3929         __put_user((*host_array)[i], &array[i]);
3930     }
3931     g_free(*host_array);
3932     unlock_user(array, target_addr, 1);
3933 
3934     return 0;
3935 }
3936 
3937 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3938                                  abi_ulong target_arg)
3939 {
3940     union target_semun target_su = { .buf = target_arg };
3941     union semun arg;
3942     struct semid_ds dsarg;
3943     unsigned short *array = NULL;
3944     struct seminfo seminfo;
3945     abi_long ret = -TARGET_EINVAL;
3946     abi_long err;
3947     cmd &= 0xff;
3948 
3949     switch( cmd ) {
3950 	case GETVAL:
3951 	case SETVAL:
3952             /* In 64 bit cross-endian situations, we will erroneously pick up
3953              * the wrong half of the union for the "val" element.  To rectify
3954              * this, the entire 8-byte structure is byteswapped, followed by
3955 	     * a swap of the 4 byte val field. In other cases, the data is
3956 	     * already in proper host byte order. */
3957 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3958 		target_su.buf = tswapal(target_su.buf);
3959 		arg.val = tswap32(target_su.val);
3960 	    } else {
3961 		arg.val = target_su.val;
3962 	    }
3963             ret = get_errno(semctl(semid, semnum, cmd, arg));
3964             break;
3965 	case GETALL:
3966 	case SETALL:
3967             err = target_to_host_semarray(semid, &array, target_su.array);
3968             if (err)
3969                 return err;
3970             arg.array = array;
3971             ret = get_errno(semctl(semid, semnum, cmd, arg));
3972             err = host_to_target_semarray(semid, target_su.array, &array);
3973             if (err)
3974                 return err;
3975             break;
3976 	case IPC_STAT:
3977 	case IPC_SET:
3978 	case SEM_STAT:
3979             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3980             if (err)
3981                 return err;
3982             arg.buf = &dsarg;
3983             ret = get_errno(semctl(semid, semnum, cmd, arg));
3984             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3985             if (err)
3986                 return err;
3987             break;
3988 	case IPC_INFO:
3989 	case SEM_INFO:
3990             arg.__buf = &seminfo;
3991             ret = get_errno(semctl(semid, semnum, cmd, arg));
3992             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3993             if (err)
3994                 return err;
3995             break;
3996 	case IPC_RMID:
3997 	case GETPID:
3998 	case GETNCNT:
3999 	case GETZCNT:
4000             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4001             break;
4002     }
4003 
4004     return ret;
4005 }
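/*
 * Worked example for the SETVAL byteswapping above (hypothetical values,
 * assuming the unused half of the register is zero): on a 64-bit big-endian
 * guest running on a little-endian host, a guest semctl(id, 0, SETVAL, 5)
 * arrives with the 32-bit val in the upper half of the 64-bit register,
 * i.e. target_arg == 0x0000000500000000.  Reading target_su.val directly
 * would therefore yield 0; swapping the whole 8-byte buf with tswapal() and
 * then the 4-byte val field with tswap32() recovers arg.val == 5.
 */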
4006 
4007 struct target_sembuf {
4008     unsigned short sem_num;
4009     short sem_op;
4010     short sem_flg;
4011 };
4012 
4013 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4014                                              abi_ulong target_addr,
4015                                              unsigned nsops)
4016 {
4017     struct target_sembuf *target_sembuf;
4018     int i;
4019 
4020     target_sembuf = lock_user(VERIFY_READ, target_addr,
4021                               nsops*sizeof(struct target_sembuf), 1);
4022     if (!target_sembuf)
4023         return -TARGET_EFAULT;
4024 
4025     for(i=0; i<nsops; i++) {
4026         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4027         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4028         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4029     }
4030 
4031     unlock_user(target_sembuf, target_addr, 0);
4032 
4033     return 0;
4034 }
4035 
4036 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4037     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4038 
4039 /*
4040  * This macro is required to handle the s390 variants, which pass the
4041  * arguments in a different order from the default.
4042  */
4043 #ifdef __s390x__
4044 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4045   (__nsops), (__timeout), (__sops)
4046 #else
4047 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4048   (__nsops), 0, (__sops), (__timeout)
4049 #endif
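/*
 * Expansion sketch: with the generic layout the fallback call below becomes
 * safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts), while on
 * s390x it becomes safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops),
 * reflecting that architecture's sys_ipc argument order.
 */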
4050 
4051 static inline abi_long do_semtimedop(int semid,
4052                                      abi_long ptr,
4053                                      unsigned nsops,
4054                                      abi_long timeout, bool time64)
4055 {
4056     struct sembuf *sops;
4057     struct timespec ts, *pts = NULL;
4058     abi_long ret;
4059 
4060     if (timeout) {
4061         pts = &ts;
4062         if (time64) {
4063             if (target_to_host_timespec64(pts, timeout)) {
4064                 return -TARGET_EFAULT;
4065             }
4066         } else {
4067             if (target_to_host_timespec(pts, timeout)) {
4068                 return -TARGET_EFAULT;
4069             }
4070         }
4071     }
4072 
4073     if (nsops > TARGET_SEMOPM) {
4074         return -TARGET_E2BIG;
4075     }
4076 
4077     sops = g_new(struct sembuf, nsops);
4078 
4079     if (target_to_host_sembuf(sops, ptr, nsops)) {
4080         g_free(sops);
4081         return -TARGET_EFAULT;
4082     }
4083 
4084     ret = -TARGET_ENOSYS;
4085 #ifdef __NR_semtimedop
4086     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4087 #endif
4088 #ifdef __NR_ipc
4089     if (ret == -TARGET_ENOSYS) {
4090         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4091                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4092     }
4093 #endif
4094     g_free(sops);
4095     return ret;
4096 }
4097 #endif
4098 
4099 struct target_msqid_ds
4100 {
4101     struct target_ipc_perm msg_perm;
4102     abi_ulong msg_stime;
4103 #if TARGET_ABI_BITS == 32
4104     abi_ulong __unused1;
4105 #endif
4106     abi_ulong msg_rtime;
4107 #if TARGET_ABI_BITS == 32
4108     abi_ulong __unused2;
4109 #endif
4110     abi_ulong msg_ctime;
4111 #if TARGET_ABI_BITS == 32
4112     abi_ulong __unused3;
4113 #endif
4114     abi_ulong __msg_cbytes;
4115     abi_ulong msg_qnum;
4116     abi_ulong msg_qbytes;
4117     abi_ulong msg_lspid;
4118     abi_ulong msg_lrpid;
4119     abi_ulong __unused4;
4120     abi_ulong __unused5;
4121 };
4122 
4123 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4124                                                abi_ulong target_addr)
4125 {
4126     struct target_msqid_ds *target_md;
4127 
4128     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4129         return -TARGET_EFAULT;
4130     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4131         return -TARGET_EFAULT;
4132     host_md->msg_stime = tswapal(target_md->msg_stime);
4133     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4134     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4135     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4136     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4137     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4138     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4139     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4140     unlock_user_struct(target_md, target_addr, 0);
4141     return 0;
4142 }
4143 
4144 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4145                                                struct msqid_ds *host_md)
4146 {
4147     struct target_msqid_ds *target_md;
4148 
4149     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4150         return -TARGET_EFAULT;
4151     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4152         return -TARGET_EFAULT;
4153     target_md->msg_stime = tswapal(host_md->msg_stime);
4154     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4155     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4156     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4157     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4158     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4159     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4160     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4161     unlock_user_struct(target_md, target_addr, 1);
4162     return 0;
4163 }
4164 
4165 struct target_msginfo {
4166     int msgpool;
4167     int msgmap;
4168     int msgmax;
4169     int msgmnb;
4170     int msgmni;
4171     int msgssz;
4172     int msgtql;
4173     unsigned short int msgseg;
4174 };
4175 
4176 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4177                                               struct msginfo *host_msginfo)
4178 {
4179     struct target_msginfo *target_msginfo;
4180     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4181         return -TARGET_EFAULT;
4182     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4183     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4184     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4185     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4186     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4187     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4188     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4189     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4190     unlock_user_struct(target_msginfo, target_addr, 1);
4191     return 0;
4192 }
4193 
4194 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4195 {
4196     struct msqid_ds dsarg;
4197     struct msginfo msginfo;
4198     abi_long ret = -TARGET_EINVAL;
4199 
4200     cmd &= 0xff;
4201 
4202     switch (cmd) {
4203     case IPC_STAT:
4204     case IPC_SET:
4205     case MSG_STAT:
4206         if (target_to_host_msqid_ds(&dsarg,ptr))
4207             return -TARGET_EFAULT;
4208         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4209         if (host_to_target_msqid_ds(ptr,&dsarg))
4210             return -TARGET_EFAULT;
4211         break;
4212     case IPC_RMID:
4213         ret = get_errno(msgctl(msgid, cmd, NULL));
4214         break;
4215     case IPC_INFO:
4216     case MSG_INFO:
4217         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4218         if (host_to_target_msginfo(ptr, &msginfo))
4219             return -TARGET_EFAULT;
4220         break;
4221     }
4222 
4223     return ret;
4224 }
4225 
4226 struct target_msgbuf {
4227     abi_long mtype;
4228     char	mtext[1];
4229 };
4230 
4231 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4232                                  ssize_t msgsz, int msgflg)
4233 {
4234     struct target_msgbuf *target_mb;
4235     struct msgbuf *host_mb;
4236     abi_long ret = 0;
4237 
4238     if (msgsz < 0) {
4239         return -TARGET_EINVAL;
4240     }
4241 
4242     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4243         return -TARGET_EFAULT;
4244     host_mb = g_try_malloc(msgsz + sizeof(long));
4245     if (!host_mb) {
4246         unlock_user_struct(target_mb, msgp, 0);
4247         return -TARGET_ENOMEM;
4248     }
4249     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4250     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4251     ret = -TARGET_ENOSYS;
4252 #ifdef __NR_msgsnd
4253     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4254 #endif
4255 #ifdef __NR_ipc
4256     if (ret == -TARGET_ENOSYS) {
4257 #ifdef __s390x__
4258         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4259                                  host_mb));
4260 #else
4261         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4262                                  host_mb, 0));
4263 #endif
4264     }
4265 #endif
4266     g_free(host_mb);
4267     unlock_user_struct(target_mb, msgp, 0);
4268 
4269     return ret;
4270 }
4271 
4272 #ifdef __NR_ipc
4273 #if defined(__sparc__)
4274 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4275 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4276 #elif defined(__s390x__)
4277 /* The s390 sys_ipc variant has only five parameters.  */
4278 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4279     ((long int[]){(long int)__msgp, __msgtyp})
4280 #else
4281 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4282     ((long int[]){(long int)__msgp, __msgtyp}), 0
4283 #endif
4284 #endif
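/*
 * Expansion sketch: in do_msgrcv() below the sys_ipc fallback passes the
 * message buffer and type either directly (SPARC), packed into a two-element
 * long array (s390x), or as that array followed by a trailing 0 argument
 * (all other hosts), matching the kernel's historical msgrcv calling
 * conventions.
 */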
4285 
4286 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4287                                  ssize_t msgsz, abi_long msgtyp,
4288                                  int msgflg)
4289 {
4290     struct target_msgbuf *target_mb;
4291     char *target_mtext;
4292     struct msgbuf *host_mb;
4293     abi_long ret = 0;
4294 
4295     if (msgsz < 0) {
4296         return -TARGET_EINVAL;
4297     }
4298 
4299     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4300         return -TARGET_EFAULT;
4301 
4302     host_mb = g_try_malloc(msgsz + sizeof(long));
4303     if (!host_mb) {
4304         ret = -TARGET_ENOMEM;
4305         goto end;
4306     }
4307     ret = -TARGET_ENOSYS;
4308 #ifdef __NR_msgrcv
4309     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4310 #endif
4311 #ifdef __NR_ipc
4312     if (ret == -TARGET_ENOSYS) {
4313         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4314                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4315     }
4316 #endif
4317 
4318     if (ret > 0) {
4319         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4320         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4321         if (!target_mtext) {
4322             ret = -TARGET_EFAULT;
4323             goto end;
4324         }
4325         memcpy(target_mb->mtext, host_mb->mtext, ret);
4326         unlock_user(target_mtext, target_mtext_addr, ret);
4327     }
4328 
4329     target_mb->mtype = tswapal(host_mb->mtype);
4330 
4331 end:
4332     if (target_mb)
4333         unlock_user_struct(target_mb, msgp, 1);
4334     g_free(host_mb);
4335     return ret;
4336 }
4337 
4338 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4339                                                abi_ulong target_addr)
4340 {
4341     struct target_shmid_ds *target_sd;
4342 
4343     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4344         return -TARGET_EFAULT;
4345     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4346         return -TARGET_EFAULT;
4347     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4348     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4349     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4350     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4351     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4352     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4353     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4354     unlock_user_struct(target_sd, target_addr, 0);
4355     return 0;
4356 }
4357 
4358 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4359                                                struct shmid_ds *host_sd)
4360 {
4361     struct target_shmid_ds *target_sd;
4362 
4363     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4364         return -TARGET_EFAULT;
4365     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4366         return -TARGET_EFAULT;
4367     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4368     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4369     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4370     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4371     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4372     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4373     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4374     unlock_user_struct(target_sd, target_addr, 1);
4375     return 0;
4376 }
4377 
4378 struct  target_shminfo {
4379     abi_ulong shmmax;
4380     abi_ulong shmmin;
4381     abi_ulong shmmni;
4382     abi_ulong shmseg;
4383     abi_ulong shmall;
4384 };
4385 
4386 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4387                                               struct shminfo *host_shminfo)
4388 {
4389     struct target_shminfo *target_shminfo;
4390     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4391         return -TARGET_EFAULT;
4392     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4393     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4394     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4395     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4396     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4397     unlock_user_struct(target_shminfo, target_addr, 1);
4398     return 0;
4399 }
4400 
4401 struct target_shm_info {
4402     int used_ids;
4403     abi_ulong shm_tot;
4404     abi_ulong shm_rss;
4405     abi_ulong shm_swp;
4406     abi_ulong swap_attempts;
4407     abi_ulong swap_successes;
4408 };
4409 
4410 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4411                                                struct shm_info *host_shm_info)
4412 {
4413     struct target_shm_info *target_shm_info;
4414     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4415         return -TARGET_EFAULT;
4416     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4417     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4418     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4419     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4420     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4421     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4422     unlock_user_struct(target_shm_info, target_addr, 1);
4423     return 0;
4424 }
4425 
4426 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4427 {
4428     struct shmid_ds dsarg;
4429     struct shminfo shminfo;
4430     struct shm_info shm_info;
4431     abi_long ret = -TARGET_EINVAL;
4432 
4433     cmd &= 0xff;
4434 
4435     switch(cmd) {
4436     case IPC_STAT:
4437     case IPC_SET:
4438     case SHM_STAT:
4439         if (target_to_host_shmid_ds(&dsarg, buf))
4440             return -TARGET_EFAULT;
4441         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4442         if (host_to_target_shmid_ds(buf, &dsarg))
4443             return -TARGET_EFAULT;
4444         break;
4445     case IPC_INFO:
4446         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4447         if (host_to_target_shminfo(buf, &shminfo))
4448             return -TARGET_EFAULT;
4449         break;
4450     case SHM_INFO:
4451         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4452         if (host_to_target_shm_info(buf, &shm_info))
4453             return -TARGET_EFAULT;
4454         break;
4455     case IPC_RMID:
4456     case SHM_LOCK:
4457     case SHM_UNLOCK:
4458         ret = get_errno(shmctl(shmid, cmd, NULL));
4459         break;
4460     }
4461 
4462     return ret;
4463 }
4464 
4465 #ifndef TARGET_FORCE_SHMLBA
4466 /* For most architectures, SHMLBA is the same as the page size;
4467  * some architectures have larger values, in which case they should
4468  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4469  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4470  * and defining its own value for SHMLBA.
4471  *
4472  * The kernel also permits SHMLBA to be set by the architecture to a
4473  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4474  * this means that addresses are rounded to the large size if
4475  * SHM_RND is set but addresses not aligned to that size are not rejected
4476  * as long as they are at least page-aligned. Since the only architecture
4477  * which uses this is ia64 this code doesn't provide for that oddity.
4478  */
4479 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4480 {
4481     return TARGET_PAGE_SIZE;
4482 }
4483 #endif
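
/*
 * Hedged sketch (not copied from any particular target): an architecture
 * whose SHMLBA is larger than its page size would define TARGET_FORCE_SHMLBA
 * in its target headers and provide its own target_shmlba(), roughly like:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x40000;   // hypothetical 256 KiB cache-aliasing granule
 *     }
 *
 * The value and the unused CPUArchState argument are illustrative only.
 */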
4484 
4485 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4486                                  int shmid, abi_ulong shmaddr, int shmflg)
4487 {
4488     CPUState *cpu = env_cpu(cpu_env);
4489     abi_long raddr;
4490     void *host_raddr;
4491     struct shmid_ds shm_info;
4492     int i, ret;
4493     abi_ulong shmlba;
4494 
4495     /* shmat pointers are always untagged */
4496 
4497     /* find out the length of the shared memory segment */
4498     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4499     if (is_error(ret)) {
4500         /* can't get length, bail out */
4501         return ret;
4502     }
4503 
4504     shmlba = target_shmlba(cpu_env);
4505 
4506     if (shmaddr & (shmlba - 1)) {
4507         if (shmflg & SHM_RND) {
4508             shmaddr &= ~(shmlba - 1);
4509         } else {
4510             return -TARGET_EINVAL;
4511         }
4512     }
4513     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4514         return -TARGET_EINVAL;
4515     }
4516 
4517     mmap_lock();
4518 
4519     /*
4520      * We're mapping shared memory, so ensure we generate code for parallel
4521      * execution and flush old translations.  This will work up to the level
4522      * supported by the host -- anything that requires EXCP_ATOMIC will not
4523      * be atomic with respect to an external process.
4524      */
4525     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4526         cpu->tcg_cflags |= CF_PARALLEL;
4527         tb_flush(cpu);
4528     }
4529 
4530     if (shmaddr)
4531         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4532     else {
4533         abi_ulong mmap_start;
4534 
4535         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4536         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4537 
4538         if (mmap_start == -1) {
4539             errno = ENOMEM;
4540             host_raddr = (void *)-1;
4541         } else
4542             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4543                                shmflg | SHM_REMAP);
4544     }
4545 
4546     if (host_raddr == (void *)-1) {
4547         mmap_unlock();
4548         return get_errno((long)host_raddr);
4549     }
4550     raddr = h2g((unsigned long)host_raddr);
4551 
4552     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4553                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4554                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4555 
4556     for (i = 0; i < N_SHM_REGIONS; i++) {
4557         if (!shm_regions[i].in_use) {
4558             shm_regions[i].in_use = true;
4559             shm_regions[i].start = raddr;
4560             shm_regions[i].size = shm_info.shm_segsz;
4561             break;
4562         }
4563     }
4564 
4565     mmap_unlock();
4566     return raddr;
4567 
4568 }
4569 
4570 static inline abi_long do_shmdt(abi_ulong shmaddr)
4571 {
4572     int i;
4573     abi_long rv;
4574 
4575     /* shmdt pointers are always untagged */
4576 
4577     mmap_lock();
4578 
4579     for (i = 0; i < N_SHM_REGIONS; ++i) {
4580         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4581             shm_regions[i].in_use = false;
4582             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4583             break;
4584         }
4585     }
4586     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4587 
4588     mmap_unlock();
4589 
4590     return rv;
4591 }
4592 
4593 #ifdef TARGET_NR_ipc
4594 /* ??? This only works with linear mappings.  */
4595 /* do_ipc() must return target values and target errnos. */
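/*
 * Illustrative sketch of how a guest libc typically drives this multiplexer
 * (an assumption for exposition, not taken from any specific libc): the
 * IPCOP_* opcode sits in the low 16 bits of "call" and an ABI version in the
 * upper 16 bits, so a semop() wrapper might issue something like
 *
 *     syscall(__NR_ipc, IPCOP_semop, semid, nsops, 0, sops);
 *
 * which arrives here with first=semid, second=nsops and ptr=sops.
 */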
4596 static abi_long do_ipc(CPUArchState *cpu_env,
4597                        unsigned int call, abi_long first,
4598                        abi_long second, abi_long third,
4599                        abi_long ptr, abi_long fifth)
4600 {
4601     int version;
4602     abi_long ret = 0;
4603 
4604     version = call >> 16;
4605     call &= 0xffff;
4606 
4607     switch (call) {
4608     case IPCOP_semop:
4609         ret = do_semtimedop(first, ptr, second, 0, false);
4610         break;
4611     case IPCOP_semtimedop:
4612     /*
4613      * The s390 sys_ipc variant has only five parameters instead of six
4614      * (as for default variant) and the only difference is the handling of
4615      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4616      * to a struct timespec where the generic variant uses fifth parameter.
4617      */
4618 #if defined(TARGET_S390X)
4619         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4620 #else
4621         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4622 #endif
4623         break;
4624 
4625     case IPCOP_semget:
4626         ret = get_errno(semget(first, second, third));
4627         break;
4628 
4629     case IPCOP_semctl: {
4630         /* The semun argument to semctl is passed by value, so dereference the
4631          * ptr argument. */
4632         abi_ulong atptr;
4633         get_user_ual(atptr, ptr);
4634         ret = do_semctl(first, second, third, atptr);
4635         break;
4636     }
4637 
4638     case IPCOP_msgget:
4639         ret = get_errno(msgget(first, second));
4640         break;
4641 
4642     case IPCOP_msgsnd:
4643         ret = do_msgsnd(first, ptr, second, third);
4644         break;
4645 
4646     case IPCOP_msgctl:
4647         ret = do_msgctl(first, second, ptr);
4648         break;
4649 
4650     case IPCOP_msgrcv:
4651         switch (version) {
4652         case 0:
4653             {
4654                 struct target_ipc_kludge {
4655                     abi_long msgp;
4656                     abi_long msgtyp;
4657                 } *tmp;
4658 
4659                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4660                     ret = -TARGET_EFAULT;
4661                     break;
4662                 }
4663 
4664                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4665 
4666                 unlock_user_struct(tmp, ptr, 0);
4667                 break;
4668             }
4669         default:
4670             ret = do_msgrcv(first, ptr, second, fifth, third);
4671         }
4672         break;
4673 
4674     case IPCOP_shmat:
4675         switch (version) {
4676         default:
4677         {
4678             abi_ulong raddr;
4679             raddr = do_shmat(cpu_env, first, ptr, second);
4680             if (is_error(raddr))
4681                 return get_errno(raddr);
4682             if (put_user_ual(raddr, third))
4683                 return -TARGET_EFAULT;
4684             break;
4685         }
4686         case 1:
4687             ret = -TARGET_EINVAL;
4688             break;
4689         }
4690         break;
4691     case IPCOP_shmdt:
4692         ret = do_shmdt(ptr);
4693         break;
4694 
4695     case IPCOP_shmget:
4696         /* IPC_* flag values are the same on all linux platforms */
4697         ret = get_errno(shmget(first, second, third));
4698         break;
4699 
4700     /* IPC_* and SHM_* command values are the same on all linux platforms */
4701     case IPCOP_shmctl:
4702         ret = do_shmctl(first, second, ptr);
4703         break;
4704     default:
4705         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4706                       call, version);
4707         ret = -TARGET_ENOSYS;
4708         break;
4709     }
4710     return ret;
4711 }
4712 #endif
4713 
4714 /* kernel structure types definitions */
4715 
4716 #define STRUCT(name, ...) STRUCT_ ## name,
4717 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4718 enum {
4719 #include "syscall_types.h"
4720 STRUCT_MAX
4721 };
4722 #undef STRUCT
4723 #undef STRUCT_SPECIAL
4724 
4725 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4726 #define STRUCT_SPECIAL(name)
4727 #include "syscall_types.h"
4728 #undef STRUCT
4729 #undef STRUCT_SPECIAL
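
/*
 * Sketch of the X-macro expansion above (the sample entry is illustrative;
 * see syscall_types.h for the real list): an entry such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * contributes STRUCT_winsize to the enum on the first inclusion and, on the
 * second inclusion, becomes
 *
 *     static const argtype struct_winsize_def[] = {
 *         TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
 *     };
 *
 * which the thunk code uses to convert the struct between guest and host.
 */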
4730 
4731 #define MAX_STRUCT_SIZE 4096
4732 
4733 #ifdef CONFIG_FIEMAP
4734 /* So fiemap access checks don't overflow on 32 bit systems.
4735  * This is very slightly smaller than the limit imposed by
4736  * the underlying kernel.
4737  */
4738 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4739                             / sizeof(struct fiemap_extent))
4740 
4741 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4742                                        int fd, int cmd, abi_long arg)
4743 {
4744     /* The parameter for this ioctl is a struct fiemap followed
4745      * by an array of struct fiemap_extent whose size is set
4746      * in fiemap->fm_extent_count. The array is filled in by the
4747      * ioctl.
4748      */
4749     int target_size_in, target_size_out;
4750     struct fiemap *fm;
4751     const argtype *arg_type = ie->arg_type;
4752     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4753     void *argptr, *p;
4754     abi_long ret;
4755     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4756     uint32_t outbufsz;
4757     int free_fm = 0;
4758 
4759     assert(arg_type[0] == TYPE_PTR);
4760     assert(ie->access == IOC_RW);
4761     arg_type++;
4762     target_size_in = thunk_type_size(arg_type, 0);
4763     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4764     if (!argptr) {
4765         return -TARGET_EFAULT;
4766     }
4767     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4768     unlock_user(argptr, arg, 0);
4769     fm = (struct fiemap *)buf_temp;
4770     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4771         return -TARGET_EINVAL;
4772     }
4773 
4774     outbufsz = sizeof (*fm) +
4775         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4776 
4777     if (outbufsz > MAX_STRUCT_SIZE) {
4778         /* We can't fit all the extents into the fixed size buffer.
4779          * Allocate one that is large enough and use it instead.
4780          */
4781         fm = g_try_malloc(outbufsz);
4782         if (!fm) {
4783             return -TARGET_ENOMEM;
4784         }
4785         memcpy(fm, buf_temp, sizeof(struct fiemap));
4786         free_fm = 1;
4787     }
4788     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4789     if (!is_error(ret)) {
4790         target_size_out = target_size_in;
4791         /* An extent_count of 0 means we were only counting the extents
4792          * so there are no structs to copy
4793          */
4794         if (fm->fm_extent_count != 0) {
4795             target_size_out += fm->fm_mapped_extents * extent_size;
4796         }
4797         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4798         if (!argptr) {
4799             ret = -TARGET_EFAULT;
4800         } else {
4801             /* Convert the struct fiemap */
4802             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4803             if (fm->fm_extent_count != 0) {
4804                 p = argptr + target_size_in;
4805                 /* ...and then all the struct fiemap_extents */
4806                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4807                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4808                                   THUNK_TARGET);
4809                     p += extent_size;
4810                 }
4811             }
4812             unlock_user(argptr, arg, target_size_out);
4813         }
4814     }
4815     if (free_fm) {
4816         g_free(fm);
4817     }
4818     return ret;
4819 }
4820 #endif
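
/*
 * For context, a hedged sketch of how a guest program would exercise the
 * FS_IOC_FIEMAP path handled above (standard <linux/fiemap.h> usage; the
 * extent count of 32 is an arbitrary example):
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                32 * sizeof(struct fiemap_extent));
 *     fm->fm_start = 0;
 *     fm->fm_length = FIEMAP_MAX_OFFSET;
 *     fm->fm_extent_count = 32;
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * The handler copies the header in, re-runs the ioctl on the host, and then
 * writes back fm_mapped_extents extent records after the header.
 */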
4821 
4822 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4823                                 int fd, int cmd, abi_long arg)
4824 {
4825     const argtype *arg_type = ie->arg_type;
4826     int target_size;
4827     void *argptr;
4828     int ret;
4829     struct ifconf *host_ifconf;
4830     uint32_t outbufsz;
4831     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4832     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4833     int target_ifreq_size;
4834     int nb_ifreq;
4835     int free_buf = 0;
4836     int i;
4837     int target_ifc_len;
4838     abi_long target_ifc_buf;
4839     int host_ifc_len;
4840     char *host_ifc_buf;
4841 
4842     assert(arg_type[0] == TYPE_PTR);
4843     assert(ie->access == IOC_RW);
4844 
4845     arg_type++;
4846     target_size = thunk_type_size(arg_type, 0);
4847 
4848     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4849     if (!argptr)
4850         return -TARGET_EFAULT;
4851     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4852     unlock_user(argptr, arg, 0);
4853 
4854     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4855     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4856     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4857 
4858     if (target_ifc_buf != 0) {
4859         target_ifc_len = host_ifconf->ifc_len;
4860         nb_ifreq = target_ifc_len / target_ifreq_size;
4861         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4862 
4863         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4864         if (outbufsz > MAX_STRUCT_SIZE) {
4865             /*
4866              * We can't fit all the requested ifreq entries into the fixed size buffer.
4867              * Allocate one that is large enough and use it instead.
4868              */
4869             host_ifconf = malloc(outbufsz);
4870             if (!host_ifconf) {
4871                 return -TARGET_ENOMEM;
4872             }
4873             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4874             free_buf = 1;
4875         }
4876         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4877 
4878         host_ifconf->ifc_len = host_ifc_len;
4879     } else {
4880       host_ifc_buf = NULL;
4881     }
4882     host_ifconf->ifc_buf = host_ifc_buf;
4883 
4884     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4885     if (!is_error(ret)) {
4886         /* convert host ifc_len to target ifc_len */
4887 
4888         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4889         target_ifc_len = nb_ifreq * target_ifreq_size;
4890         host_ifconf->ifc_len = target_ifc_len;
4891 
4892         /* restore target ifc_buf */
4893 
4894         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4895 
4896         /* copy struct ifconf to target user */
4897 
4898         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4899         if (!argptr)
4900             return -TARGET_EFAULT;
4901         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4902         unlock_user(argptr, arg, target_size);
4903 
4904         if (target_ifc_buf != 0) {
4905             /* copy ifreq[] to target user */
4906             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4907             for (i = 0; i < nb_ifreq ; i++) {
4908                 thunk_convert(argptr + i * target_ifreq_size,
4909                               host_ifc_buf + i * sizeof(struct ifreq),
4910                               ifreq_arg_type, THUNK_TARGET);
4911             }
4912             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4913         }
4914     }
4915 
4916     if (free_buf) {
4917         free(host_ifconf);
4918     }
4919 
4920     return ret;
4921 }
4922 
4923 #if defined(CONFIG_USBFS)
4924 #if HOST_LONG_BITS > 64
4925 #error USBDEVFS thunks do not support >64 bit hosts yet.
4926 #endif
4927 struct live_urb {
4928     uint64_t target_urb_adr;
4929     uint64_t target_buf_adr;
4930     char *target_buf_ptr;
4931     struct usbdevfs_urb host_urb;
4932 };
4933 
4934 static GHashTable *usbdevfs_urb_hashtable(void)
4935 {
4936     static GHashTable *urb_hashtable;
4937 
4938     if (!urb_hashtable) {
4939         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4940     }
4941     return urb_hashtable;
4942 }
4943 
4944 static void urb_hashtable_insert(struct live_urb *urb)
4945 {
4946     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4947     g_hash_table_insert(urb_hashtable, urb, urb);
4948 }
4949 
4950 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4951 {
4952     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4953     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4954 }
4955 
4956 static void urb_hashtable_remove(struct live_urb *urb)
4957 {
4958     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4959     g_hash_table_remove(urb_hashtable, urb);
4960 }
4961 
4962 static abi_long
4963 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4964                           int fd, int cmd, abi_long arg)
4965 {
4966     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4967     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4968     struct live_urb *lurb;
4969     void *argptr;
4970     uint64_t hurb;
4971     int target_size;
4972     uintptr_t target_urb_adr;
4973     abi_long ret;
4974 
4975     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4976 
4977     memset(buf_temp, 0, sizeof(uint64_t));
4978     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4979     if (is_error(ret)) {
4980         return ret;
4981     }
4982 
4983     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4984     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4985     if (!lurb->target_urb_adr) {
4986         return -TARGET_EFAULT;
4987     }
4988     urb_hashtable_remove(lurb);
4989     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4990         lurb->host_urb.buffer_length);
4991     lurb->target_buf_ptr = NULL;
4992 
4993     /* restore the guest buffer pointer */
4994     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4995 
4996     /* update the guest urb struct */
4997     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4998     if (!argptr) {
4999         g_free(lurb);
5000         return -TARGET_EFAULT;
5001     }
5002     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5003     unlock_user(argptr, lurb->target_urb_adr, target_size);
5004 
5005     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5006     /* write back the urb handle */
5007     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5008     if (!argptr) {
5009         g_free(lurb);
5010         return -TARGET_EFAULT;
5011     }
5012 
5013     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5014     target_urb_adr = lurb->target_urb_adr;
5015     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5016     unlock_user(argptr, arg, target_size);
5017 
5018     g_free(lurb);
5019     return ret;
5020 }
5021 
5022 static abi_long
5023 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5024                              uint8_t *buf_temp __attribute__((unused)),
5025                              int fd, int cmd, abi_long arg)
5026 {
5027     struct live_urb *lurb;
5028 
5029     /* map target address back to host URB with metadata. */
5030     lurb = urb_hashtable_lookup(arg);
5031     if (!lurb) {
5032         return -TARGET_EFAULT;
5033     }
5034     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5035 }
5036 
5037 static abi_long
5038 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5039                             int fd, int cmd, abi_long arg)
5040 {
5041     const argtype *arg_type = ie->arg_type;
5042     int target_size;
5043     abi_long ret;
5044     void *argptr;
5045     int rw_dir;
5046     struct live_urb *lurb;
5047 
5048     /*
5049      * Each submitted URB needs to map to a unique ID for the
5050      * kernel, and that unique ID needs to be a pointer to
5051      * host memory.  Hence, we need to malloc for each URB.
5052      * Isochronous transfers have a variable length struct.
5053      */
5054     arg_type++;
5055     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5056 
5057     /* construct host copy of urb and metadata */
5058     lurb = g_try_malloc0(sizeof(struct live_urb));
5059     if (!lurb) {
5060         return -TARGET_ENOMEM;
5061     }
5062 
5063     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5064     if (!argptr) {
5065         g_free(lurb);
5066         return -TARGET_EFAULT;
5067     }
5068     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5069     unlock_user(argptr, arg, 0);
5070 
5071     lurb->target_urb_adr = arg;
5072     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5073 
5074     /* buffer space used depends on endpoint type so lock the entire buffer */
5075     /* control type urbs should check the buffer contents for true direction */
5076     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5077     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5078         lurb->host_urb.buffer_length, 1);
5079     if (lurb->target_buf_ptr == NULL) {
5080         g_free(lurb);
5081         return -TARGET_EFAULT;
5082     }
5083 
5084     /* update buffer pointer in host copy */
5085     lurb->host_urb.buffer = lurb->target_buf_ptr;
5086 
5087     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5088     if (is_error(ret)) {
5089         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5090         g_free(lurb);
5091     } else {
5092         urb_hashtable_insert(lurb);
5093     }
5094 
5095     return ret;
5096 }
5097 #endif /* CONFIG_USBFS */
5098 
5099 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5100                             int cmd, abi_long arg)
5101 {
5102     void *argptr;
5103     struct dm_ioctl *host_dm;
5104     abi_long guest_data;
5105     uint32_t guest_data_size;
5106     int target_size;
5107     const argtype *arg_type = ie->arg_type;
5108     abi_long ret;
5109     void *big_buf = NULL;
5110     char *host_data;
5111 
5112     arg_type++;
5113     target_size = thunk_type_size(arg_type, 0);
5114     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5115     if (!argptr) {
5116         ret = -TARGET_EFAULT;
5117         goto out;
5118     }
5119     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5120     unlock_user(argptr, arg, 0);
5121 
5122     /* buf_temp is too small, so fetch things into a bigger buffer */
5123     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5124     memcpy(big_buf, buf_temp, target_size);
5125     buf_temp = big_buf;
5126     host_dm = big_buf;
5127 
5128     guest_data = arg + host_dm->data_start;
5129     if ((guest_data - arg) < 0) {
5130         ret = -TARGET_EINVAL;
5131         goto out;
5132     }
5133     guest_data_size = host_dm->data_size - host_dm->data_start;
5134     host_data = (char*)host_dm + host_dm->data_start;
5135 
5136     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5137     if (!argptr) {
5138         ret = -TARGET_EFAULT;
5139         goto out;
5140     }
5141 
5142     switch (ie->host_cmd) {
5143     case DM_REMOVE_ALL:
5144     case DM_LIST_DEVICES:
5145     case DM_DEV_CREATE:
5146     case DM_DEV_REMOVE:
5147     case DM_DEV_SUSPEND:
5148     case DM_DEV_STATUS:
5149     case DM_DEV_WAIT:
5150     case DM_TABLE_STATUS:
5151     case DM_TABLE_CLEAR:
5152     case DM_TABLE_DEPS:
5153     case DM_LIST_VERSIONS:
5154         /* no input data */
5155         break;
5156     case DM_DEV_RENAME:
5157     case DM_DEV_SET_GEOMETRY:
5158         /* data contains only strings */
5159         memcpy(host_data, argptr, guest_data_size);
5160         break;
5161     case DM_TARGET_MSG:
5162         memcpy(host_data, argptr, guest_data_size);
5163         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5164         break;
5165     case DM_TABLE_LOAD:
5166     {
5167         void *gspec = argptr;
5168         void *cur_data = host_data;
5169         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5170         int spec_size = thunk_type_size(arg_type, 0);
5171         int i;
5172 
5173         for (i = 0; i < host_dm->target_count; i++) {
5174             struct dm_target_spec *spec = cur_data;
5175             uint32_t next;
5176             int slen;
5177 
5178             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5179             slen = strlen((char*)gspec + spec_size) + 1;
5180             next = spec->next;
5181             spec->next = sizeof(*spec) + slen;
5182             strcpy((char*)&spec[1], gspec + spec_size);
5183             gspec += next;
5184             cur_data += spec->next;
5185         }
5186         break;
5187     }
5188     default:
5189         ret = -TARGET_EINVAL;
5190         unlock_user(argptr, guest_data, 0);
5191         goto out;
5192     }
5193     unlock_user(argptr, guest_data, 0);
5194 
5195     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5196     if (!is_error(ret)) {
5197         guest_data = arg + host_dm->data_start;
5198         guest_data_size = host_dm->data_size - host_dm->data_start;
5199         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5200         switch (ie->host_cmd) {
5201         case DM_REMOVE_ALL:
5202         case DM_DEV_CREATE:
5203         case DM_DEV_REMOVE:
5204         case DM_DEV_RENAME:
5205         case DM_DEV_SUSPEND:
5206         case DM_DEV_STATUS:
5207         case DM_TABLE_LOAD:
5208         case DM_TABLE_CLEAR:
5209         case DM_TARGET_MSG:
5210         case DM_DEV_SET_GEOMETRY:
5211             /* no return data */
5212             break;
5213         case DM_LIST_DEVICES:
5214         {
5215             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5216             uint32_t remaining_data = guest_data_size;
5217             void *cur_data = argptr;
5218             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5219             int nl_size = 12; /* can't use thunk_size due to alignment */
5220 
5221             while (1) {
5222                 uint32_t next = nl->next;
5223                 if (next) {
5224                     nl->next = nl_size + (strlen(nl->name) + 1);
5225                 }
5226                 if (remaining_data < nl->next) {
5227                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5228                     break;
5229                 }
5230                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5231                 strcpy(cur_data + nl_size, nl->name);
5232                 cur_data += nl->next;
5233                 remaining_data -= nl->next;
5234                 if (!next) {
5235                     break;
5236                 }
5237                 nl = (void*)nl + next;
5238             }
5239             break;
5240         }
5241         case DM_DEV_WAIT:
5242         case DM_TABLE_STATUS:
5243         {
5244             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5245             void *cur_data = argptr;
5246             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5247             int spec_size = thunk_type_size(arg_type, 0);
5248             int i;
5249 
5250             for (i = 0; i < host_dm->target_count; i++) {
5251                 uint32_t next = spec->next;
5252                 int slen = strlen((char*)&spec[1]) + 1;
5253                 spec->next = (cur_data - argptr) + spec_size + slen;
5254                 if (guest_data_size < spec->next) {
5255                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5256                     break;
5257                 }
5258                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5259                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5260                 cur_data = argptr + spec->next;
5261                 spec = (void*)host_dm + host_dm->data_start + next;
5262             }
5263             break;
5264         }
5265         case DM_TABLE_DEPS:
5266         {
5267             void *hdata = (void*)host_dm + host_dm->data_start;
5268             int count = *(uint32_t*)hdata;
5269             uint64_t *hdev = hdata + 8;
5270             uint64_t *gdev = argptr + 8;
5271             int i;
5272 
5273             *(uint32_t*)argptr = tswap32(count);
5274             for (i = 0; i < count; i++) {
5275                 *gdev = tswap64(*hdev);
5276                 gdev++;
5277                 hdev++;
5278             }
5279             break;
5280         }
5281         case DM_LIST_VERSIONS:
5282         {
5283             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5284             uint32_t remaining_data = guest_data_size;
5285             void *cur_data = argptr;
5286             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5287             int vers_size = thunk_type_size(arg_type, 0);
5288 
5289             while (1) {
5290                 uint32_t next = vers->next;
5291                 if (next) {
5292                     vers->next = vers_size + (strlen(vers->name) + 1);
5293                 }
5294                 if (remaining_data < vers->next) {
5295                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5296                     break;
5297                 }
5298                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5299                 strcpy(cur_data + vers_size, vers->name);
5300                 cur_data += vers->next;
5301                 remaining_data -= vers->next;
5302                 if (!next) {
5303                     break;
5304                 }
5305                 vers = (void*)vers + next;
5306             }
5307             break;
5308         }
5309         default:
5310             unlock_user(argptr, guest_data, 0);
5311             ret = -TARGET_EINVAL;
5312             goto out;
5313         }
5314         unlock_user(argptr, guest_data, guest_data_size);
5315 
5316         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5317         if (!argptr) {
5318             ret = -TARGET_EFAULT;
5319             goto out;
5320         }
5321         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5322         unlock_user(argptr, arg, target_size);
5323     }
5324 out:
5325     g_free(big_buf);
5326     return ret;
5327 }
5328 
5329 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5330                                int cmd, abi_long arg)
5331 {
5332     void *argptr;
5333     int target_size;
5334     const argtype *arg_type = ie->arg_type;
5335     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5336     abi_long ret;
5337 
5338     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5339     struct blkpg_partition host_part;
5340 
5341     /* Read and convert blkpg */
5342     arg_type++;
5343     target_size = thunk_type_size(arg_type, 0);
5344     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5345     if (!argptr) {
5346         ret = -TARGET_EFAULT;
5347         goto out;
5348     }
5349     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5350     unlock_user(argptr, arg, 0);
5351 
5352     switch (host_blkpg->op) {
5353     case BLKPG_ADD_PARTITION:
5354     case BLKPG_DEL_PARTITION:
5355         /* payload is struct blkpg_partition */
5356         break;
5357     default:
5358         /* Unknown opcode */
5359         ret = -TARGET_EINVAL;
5360         goto out;
5361     }
5362 
5363     /* Read and convert blkpg->data */
5364     arg = (abi_long)(uintptr_t)host_blkpg->data;
5365     target_size = thunk_type_size(part_arg_type, 0);
5366     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5367     if (!argptr) {
5368         ret = -TARGET_EFAULT;
5369         goto out;
5370     }
5371     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5372     unlock_user(argptr, arg, 0);
5373 
5374     /* Swizzle the data pointer to our local copy and call! */
5375     host_blkpg->data = &host_part;
5376     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5377 
5378 out:
5379     return ret;
5380 }
5381 
5382 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5383                                 int fd, int cmd, abi_long arg)
5384 {
5385     const argtype *arg_type = ie->arg_type;
5386     const StructEntry *se;
5387     const argtype *field_types;
5388     const int *dst_offsets, *src_offsets;
5389     int target_size;
5390     void *argptr;
5391     abi_ulong *target_rt_dev_ptr = NULL;
5392     unsigned long *host_rt_dev_ptr = NULL;
5393     abi_long ret;
5394     int i;
5395 
5396     assert(ie->access == IOC_W);
5397     assert(*arg_type == TYPE_PTR);
5398     arg_type++;
5399     assert(*arg_type == TYPE_STRUCT);
5400     target_size = thunk_type_size(arg_type, 0);
5401     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402     if (!argptr) {
5403         return -TARGET_EFAULT;
5404     }
5405     arg_type++;
5406     assert(*arg_type == (int)STRUCT_rtentry);
5407     se = struct_entries + *arg_type++;
5408     assert(se->convert[0] == NULL);
5409     /* convert struct here to be able to catch rt_dev string */
5410     field_types = se->field_types;
5411     dst_offsets = se->field_offsets[THUNK_HOST];
5412     src_offsets = se->field_offsets[THUNK_TARGET];
5413     for (i = 0; i < se->nb_fields; i++) {
5414         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5415             assert(*field_types == TYPE_PTRVOID);
5416             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5417             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5418             if (*target_rt_dev_ptr != 0) {
5419                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5420                                                   tswapal(*target_rt_dev_ptr));
5421                 if (!*host_rt_dev_ptr) {
5422                     unlock_user(argptr, arg, 0);
5423                     return -TARGET_EFAULT;
5424                 }
5425             } else {
5426                 *host_rt_dev_ptr = 0;
5427             }
5428             field_types++;
5429             continue;
5430         }
5431         field_types = thunk_convert(buf_temp + dst_offsets[i],
5432                                     argptr + src_offsets[i],
5433                                     field_types, THUNK_HOST);
5434     }
5435     unlock_user(argptr, arg, 0);
5436 
5437     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5438 
5439     assert(host_rt_dev_ptr != NULL);
5440     assert(target_rt_dev_ptr != NULL);
5441     if (*host_rt_dev_ptr != 0) {
5442         unlock_user((void *)*host_rt_dev_ptr,
5443                     *target_rt_dev_ptr, 0);
5444     }
5445     return ret;
5446 }
5447 
5448 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5449                                      int fd, int cmd, abi_long arg)
5450 {
5451     int sig = target_to_host_signal(arg);
5452     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5453 }
5454 
5455 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5456                                     int fd, int cmd, abi_long arg)
5457 {
5458     struct timeval tv;
5459     abi_long ret;
5460 
5461     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5462     if (is_error(ret)) {
5463         return ret;
5464     }
5465 
5466     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5467         if (copy_to_user_timeval(arg, &tv)) {
5468             return -TARGET_EFAULT;
5469         }
5470     } else {
5471         if (copy_to_user_timeval64(arg, &tv)) {
5472             return -TARGET_EFAULT;
5473         }
5474     }
5475 
5476     return ret;
5477 }
5478 
5479 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5480                                       int fd, int cmd, abi_long arg)
5481 {
5482     struct timespec ts;
5483     abi_long ret;
5484 
5485     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5486     if (is_error(ret)) {
5487         return ret;
5488     }
5489 
5490     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5491         if (host_to_target_timespec(arg, &ts)) {
5492             return -TARGET_EFAULT;
5493         }
5494     } else {
5495         if (host_to_target_timespec64(arg, &ts)) {
5496             return -TARGET_EFAULT;
5497         }
5498     }
5499 
5500     return ret;
5501 }
5502 
5503 #ifdef TIOCGPTPEER
5504 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5505                                      int fd, int cmd, abi_long arg)
5506 {
5507     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5508     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5509 }
5510 #endif
5511 
5512 #ifdef HAVE_DRM_H
5513 
5514 static void unlock_drm_version(struct drm_version *host_ver,
5515                                struct target_drm_version *target_ver,
5516                                bool copy)
5517 {
5518     unlock_user(host_ver->name, target_ver->name,
5519                                 copy ? host_ver->name_len : 0);
5520     unlock_user(host_ver->date, target_ver->date,
5521                                 copy ? host_ver->date_len : 0);
5522     unlock_user(host_ver->desc, target_ver->desc,
5523                                 copy ? host_ver->desc_len : 0);
5524 }
5525 
5526 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5527                                           struct target_drm_version *target_ver)
5528 {
5529     memset(host_ver, 0, sizeof(*host_ver));
5530 
5531     __get_user(host_ver->name_len, &target_ver->name_len);
5532     if (host_ver->name_len) {
5533         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5534                                    target_ver->name_len, 0);
5535         if (!host_ver->name) {
5536             return -EFAULT;
5537         }
5538     }
5539 
5540     __get_user(host_ver->date_len, &target_ver->date_len);
5541     if (host_ver->date_len) {
5542         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5543                                    target_ver->date_len, 0);
5544         if (!host_ver->date) {
5545             goto err;
5546         }
5547     }
5548 
5549     __get_user(host_ver->desc_len, &target_ver->desc_len);
5550     if (host_ver->desc_len) {
5551         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5552                                    target_ver->desc_len, 0);
5553         if (!host_ver->desc) {
5554             goto err;
5555         }
5556     }
5557 
5558     return 0;
5559 err:
5560     unlock_drm_version(host_ver, target_ver, false);
5561     return -EFAULT;
5562 }
5563 
5564 static inline void host_to_target_drmversion(
5565                                           struct target_drm_version *target_ver,
5566                                           struct drm_version *host_ver)
5567 {
5568     __put_user(host_ver->version_major, &target_ver->version_major);
5569     __put_user(host_ver->version_minor, &target_ver->version_minor);
5570     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5571     __put_user(host_ver->name_len, &target_ver->name_len);
5572     __put_user(host_ver->date_len, &target_ver->date_len);
5573     __put_user(host_ver->desc_len, &target_ver->desc_len);
5574     unlock_drm_version(host_ver, target_ver, true);
5575 }
5576 
5577 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5578                              int fd, int cmd, abi_long arg)
5579 {
5580     struct drm_version *ver;
5581     struct target_drm_version *target_ver;
5582     abi_long ret;
5583 
5584     switch (ie->host_cmd) {
5585     case DRM_IOCTL_VERSION:
5586         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5587             return -TARGET_EFAULT;
5588         }
5589         ver = (struct drm_version *)buf_temp;
5590         ret = target_to_host_drmversion(ver, target_ver);
5591         if (!is_error(ret)) {
5592             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5593             if (is_error(ret)) {
5594                 unlock_drm_version(ver, target_ver, false);
5595             } else {
5596                 host_to_target_drmversion(target_ver, ver);
5597             }
5598         }
5599         unlock_user_struct(target_ver, arg, 0);
5600         return ret;
5601     }
5602     return -TARGET_ENOSYS;
5603 }
5604 
5605 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5606                                            struct drm_i915_getparam *gparam,
5607                                            int fd, abi_long arg)
5608 {
5609     abi_long ret;
5610     int value;
5611     struct target_drm_i915_getparam *target_gparam;
5612 
5613     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5614         return -TARGET_EFAULT;
5615     }
5616 
5617     __get_user(gparam->param, &target_gparam->param);
5618     gparam->value = &value;
5619     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5620     put_user_s32(value, target_gparam->value);
5621 
5622     unlock_user_struct(target_gparam, arg, 0);
5623     return ret;
5624 }
5625 
5626 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5627                                   int fd, int cmd, abi_long arg)
5628 {
5629     switch (ie->host_cmd) {
5630     case DRM_IOCTL_I915_GETPARAM:
5631         return do_ioctl_drm_i915_getparam(ie,
5632                                           (struct drm_i915_getparam *)buf_temp,
5633                                           fd, arg);
5634     default:
5635         return -TARGET_ENOSYS;
5636     }
5637 }
5638 
5639 #endif
5640 
5641 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5642                                         int fd, int cmd, abi_long arg)
5643 {
5644     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5645     struct tun_filter *target_filter;
5646     char *target_addr;
5647 
5648     assert(ie->access == IOC_W);
5649 
5650     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5651     if (!target_filter) {
5652         return -TARGET_EFAULT;
5653     }
5654     filter->flags = tswap16(target_filter->flags);
5655     filter->count = tswap16(target_filter->count);
5656     unlock_user(target_filter, arg, 0);
5657 
5658     if (filter->count) {
5659         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5660             MAX_STRUCT_SIZE) {
5661             return -TARGET_EFAULT;
5662         }
5663 
5664         target_addr = lock_user(VERIFY_READ,
5665                                 arg + offsetof(struct tun_filter, addr),
5666                                 filter->count * ETH_ALEN, 1);
5667         if (!target_addr) {
5668             return -TARGET_EFAULT;
5669         }
5670         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5671         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5672     }
5673 
5674     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5675 }
5676 
5677 IOCTLEntry ioctl_entries[] = {
5678 #define IOCTL(cmd, access, ...) \
5679     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5680 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5681     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5682 #define IOCTL_IGNORE(cmd) \
5683     { TARGET_ ## cmd, 0, #cmd },
5684 #include "ioctls.h"
5685     { 0, 0, },
5686 };
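
/*
 * Hedged illustration of what the generated entries look like (the specific
 * ioctl shown is an example of the pattern, not a guaranteed line from
 * ioctls.h): an entry written as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * expands to
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * while IOCTL_SPECIAL entries additionally carry a do_ioctl_*() callback and
 * IOCTL_IGNORE entries leave host_cmd as 0 so do_ioctl() returns
 * -TARGET_ENOSYS for them.
 */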
5687 
5688 /* ??? Implement proper locking for ioctls.  */
5689 /* do_ioctl() must return target values and target errnos. */
5690 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5691 {
5692     const IOCTLEntry *ie;
5693     const argtype *arg_type;
5694     abi_long ret;
5695     uint8_t buf_temp[MAX_STRUCT_SIZE];
5696     int target_size;
5697     void *argptr;
5698 
5699     ie = ioctl_entries;
5700     for(;;) {
5701         if (ie->target_cmd == 0) {
5702             qemu_log_mask(
5703                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5704             return -TARGET_ENOSYS;
5705         }
5706         if (ie->target_cmd == cmd)
5707             break;
5708         ie++;
5709     }
5710     arg_type = ie->arg_type;
5711     if (ie->do_ioctl) {
5712         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5713     } else if (!ie->host_cmd) {
5714         /* Some architectures define BSD ioctls in their headers
5715            that are not implemented in Linux.  */
5716         return -TARGET_ENOSYS;
5717     }
5718 
5719     switch(arg_type[0]) {
5720     case TYPE_NULL:
5721         /* no argument */
5722         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5723         break;
5724     case TYPE_PTRVOID:
5725     case TYPE_INT:
5726     case TYPE_LONG:
5727     case TYPE_ULONG:
5728         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5729         break;
5730     case TYPE_PTR:
5731         arg_type++;
5732         target_size = thunk_type_size(arg_type, 0);
5733         switch(ie->access) {
5734         case IOC_R:
5735             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5736             if (!is_error(ret)) {
5737                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5738                 if (!argptr)
5739                     return -TARGET_EFAULT;
5740                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5741                 unlock_user(argptr, arg, target_size);
5742             }
5743             break;
5744         case IOC_W:
5745             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5746             if (!argptr)
5747                 return -TARGET_EFAULT;
5748             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5749             unlock_user(argptr, arg, 0);
5750             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5751             break;
5752         default:
5753         case IOC_RW:
5754             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5755             if (!argptr)
5756                 return -TARGET_EFAULT;
5757             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5758             unlock_user(argptr, arg, 0);
5759             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5760             if (!is_error(ret)) {
5761                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5762                 if (!argptr)
5763                     return -TARGET_EFAULT;
5764                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5765                 unlock_user(argptr, arg, target_size);
5766             }
5767             break;
5768         }
5769         break;
5770     default:
5771         qemu_log_mask(LOG_UNIMP,
5772                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5773                       (long)cmd, arg_type[0]);
5774         ret = -TARGET_ENOSYS;
5775         break;
5776     }
5777     return ret;
5778 }
5779 
5780 static const bitmask_transtbl iflag_tbl[] = {
5781         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5782         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5783         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5784         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5785         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5786         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5787         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5788         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5789         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5790         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5791         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5792         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5793         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5794         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5795         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5796         { 0, 0, 0, 0 }
5797 };
5798 
5799 static const bitmask_transtbl oflag_tbl[] = {
5800 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5801 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5802 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5803 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5804 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5805 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5806 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5807 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5808 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5809 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5810 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5811 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5812 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5813 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5814 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5815 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5816 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5817 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5818 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5819 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5820 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5821 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5822 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5823 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5824 	{ 0, 0, 0, 0 }
5825 };
5826 
5827 static const bitmask_transtbl cflag_tbl[] = {
5828 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5829 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5830 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5831 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5832 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5833 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5834 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5835 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5836 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5837 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5838 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5839 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5840 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5841 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5842 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5843 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5844 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5845 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5846 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5847 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5848 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5849 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5850 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5851 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5852 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5853 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5854 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5855 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5856 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5857 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5858 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5859 	{ 0, 0, 0, 0 }
5860 };
5861 
5862 static const bitmask_transtbl lflag_tbl[] = {
5863   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5864   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5865   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5866   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5867   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5868   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5869   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5870   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5871   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5872   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5873   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5874   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5875   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5876   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5877   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5878   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5879   { 0, 0, 0, 0 }
5880 };
5881 
5882 static void target_to_host_termios(void *dst, const void *src)
5883 {
5884     struct host_termios *host = dst;
5885     const struct target_termios *target = src;
5886 
5887     host->c_iflag =
5888         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5889     host->c_oflag =
5890         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5891     host->c_cflag =
5892         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5893     host->c_lflag =
5894         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5895     host->c_line = target->c_line;
5896 
5897     memset(host->c_cc, 0, sizeof(host->c_cc));
5898     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5899     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5900     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5901     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5902     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5903     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5904     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5905     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5906     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5907     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5908     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5909     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5910     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5911     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5912     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5913     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5914     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5915 }
5916 
5917 static void host_to_target_termios(void *dst, const void *src)
5918 {
5919     struct target_termios *target = dst;
5920     const struct host_termios *host = src;
5921 
5922     target->c_iflag =
5923         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5924     target->c_oflag =
5925         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5926     target->c_cflag =
5927         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5928     target->c_lflag =
5929         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5930     target->c_line = host->c_line;
5931 
5932     memset(target->c_cc, 0, sizeof(target->c_cc));
5933     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5934     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5935     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5936     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5937     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5938     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5939     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5940     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5941     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5942     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5943     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5944     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5945     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5946     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5947     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5948     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5949     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5950 }
5951 
5952 static const StructEntry struct_termios_def = {
5953     .convert = { host_to_target_termios, target_to_host_termios },
5954     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5955     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5956     .print = print_termios,
5957 };
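/*
 * This descriptor is consumed by the struct-registration machinery in
 * syscall_init() below, letting the generic ioctl thunking code convert
 * struct termios arguments in either direction with the helpers above.
 */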
5958 
5959 static const bitmask_transtbl mmap_flags_tbl[] = {
5960     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5961     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5962     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5963     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5964       MAP_ANONYMOUS, MAP_ANONYMOUS },
5965     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5966       MAP_GROWSDOWN, MAP_GROWSDOWN },
5967     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5968       MAP_DENYWRITE, MAP_DENYWRITE },
5969     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5970       MAP_EXECUTABLE, MAP_EXECUTABLE },
5971     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5972     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5973       MAP_NORESERVE, MAP_NORESERVE },
5974     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5975     /* MAP_STACK has been ignored by the kernel for quite some time.
5976        Recognize it for the target so that we do not pass it through
5977        to the host.  */
5978     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5979     { 0, 0, 0, 0 }
5980 };
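/*
 * Each bitmask_transtbl entry pairs a target-side (mask, bits) with the
 * equivalent host-side (mask, bits); the bitmask conversion helpers walk
 * the table and translate every flag whose masked value matches.  An
 * all-zero entry terminates the table.
 */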
5981 
5982 /*
5983  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5984  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5985  */
5986 #if defined(TARGET_I386)
5987 
5988 /* NOTE: there is really only one LDT shared by all threads */
5989 static uint8_t *ldt_table;
5990 
5991 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5992 {
5993     int size;
5994     void *p;
5995 
5996     if (!ldt_table)
5997         return 0;
5998     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5999     if (size > bytecount)
6000         size = bytecount;
6001     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6002     if (!p)
6003         return -TARGET_EFAULT;
6004     /* ??? Should this be byteswapped?  */
6005     memcpy(p, ldt_table, size);
6006     unlock_user(p, ptr, size);
6007     return size;
6008 }
6009 
6010 /* XXX: add locking support */
6011 static abi_long write_ldt(CPUX86State *env,
6012                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6013 {
6014     struct target_modify_ldt_ldt_s ldt_info;
6015     struct target_modify_ldt_ldt_s *target_ldt_info;
6016     int seg_32bit, contents, read_exec_only, limit_in_pages;
6017     int seg_not_present, useable, lm;
6018     uint32_t *lp, entry_1, entry_2;
6019 
6020     if (bytecount != sizeof(ldt_info))
6021         return -TARGET_EINVAL;
6022     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6023         return -TARGET_EFAULT;
6024     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6025     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6026     ldt_info.limit = tswap32(target_ldt_info->limit);
6027     ldt_info.flags = tswap32(target_ldt_info->flags);
6028     unlock_user_struct(target_ldt_info, ptr, 0);
6029 
6030     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6031         return -TARGET_EINVAL;
6032     seg_32bit = ldt_info.flags & 1;
6033     contents = (ldt_info.flags >> 1) & 3;
6034     read_exec_only = (ldt_info.flags >> 3) & 1;
6035     limit_in_pages = (ldt_info.flags >> 4) & 1;
6036     seg_not_present = (ldt_info.flags >> 5) & 1;
6037     useable = (ldt_info.flags >> 6) & 1;
6038 #ifdef TARGET_ABI32
6039     lm = 0;
6040 #else
6041     lm = (ldt_info.flags >> 7) & 1;
6042 #endif
6043     if (contents == 3) {
6044         if (oldmode)
6045             return -TARGET_EINVAL;
6046         if (seg_not_present == 0)
6047             return -TARGET_EINVAL;
6048     }
6049     /* allocate the LDT */
6050     if (!ldt_table) {
6051         env->ldt.base = target_mmap(0,
6052                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6053                                     PROT_READ|PROT_WRITE,
6054                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6055         if (env->ldt.base == -1)
6056             return -TARGET_ENOMEM;
6057         memset(g2h_untagged(env->ldt.base), 0,
6058                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6059         env->ldt.limit = 0xffff;
6060         ldt_table = g2h_untagged(env->ldt.base);
6061     }
6062 
6063     /* NOTE: same code as Linux kernel */
6064     /* Allow LDTs to be cleared by the user. */
6065     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6066         if (oldmode ||
6067             (contents == 0		&&
6068              read_exec_only == 1	&&
6069              seg_32bit == 0		&&
6070              limit_in_pages == 0	&&
6071              seg_not_present == 1	&&
6072              useable == 0 )) {
6073             entry_1 = 0;
6074             entry_2 = 0;
6075             goto install;
6076         }
6077     }
6078 
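    /*
     * Pack the fields into the two 32-bit halves of an x86 descriptor:
     * entry_1 holds base[15:0] and limit[15:0]; entry_2 holds the
     * remaining base and limit bits, the access/flag bits computed
     * above, and the constant 0x7000 (S=1, DPL=3).
     */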
6079     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6080         (ldt_info.limit & 0x0ffff);
6081     entry_2 = (ldt_info.base_addr & 0xff000000) |
6082         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6083         (ldt_info.limit & 0xf0000) |
6084         ((read_exec_only ^ 1) << 9) |
6085         (contents << 10) |
6086         ((seg_not_present ^ 1) << 15) |
6087         (seg_32bit << 22) |
6088         (limit_in_pages << 23) |
6089         (lm << 21) |
6090         0x7000;
6091     if (!oldmode)
6092         entry_2 |= (useable << 20);
6093 
6094     /* Install the new entry ...  */
6095 install:
6096     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6097     lp[0] = tswap32(entry_1);
6098     lp[1] = tswap32(entry_2);
6099     return 0;
6100 }
6101 
6102 /* specific and weird i386 syscalls */
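/*
 * func selects the operation, mirroring the Linux modify_ldt() ABI:
 * 0 reads the LDT, 1 writes an entry in the legacy format, and 0x11
 * writes an entry in the current format.
 */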
6103 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6104                               unsigned long bytecount)
6105 {
6106     abi_long ret;
6107 
6108     switch (func) {
6109     case 0:
6110         ret = read_ldt(ptr, bytecount);
6111         break;
6112     case 1:
6113         ret = write_ldt(env, ptr, bytecount, 1);
6114         break;
6115     case 0x11:
6116         ret = write_ldt(env, ptr, bytecount, 0);
6117         break;
6118     default:
6119         ret = -TARGET_ENOSYS;
6120         break;
6121     }
6122     return ret;
6123 }
6124 
6125 #if defined(TARGET_ABI32)
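/*
 * set_thread_area(): if the guest passes entry_number == -1 we search
 * the TLS range of the GDT for a free slot and report the chosen index
 * back through the user-supplied descriptor, as the Linux kernel does.
 */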
6126 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6127 {
6128     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6129     struct target_modify_ldt_ldt_s ldt_info;
6130     struct target_modify_ldt_ldt_s *target_ldt_info;
6131     int seg_32bit, contents, read_exec_only, limit_in_pages;
6132     int seg_not_present, useable, lm;
6133     uint32_t *lp, entry_1, entry_2;
6134     int i;
6135 
6136     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6137     if (!target_ldt_info)
6138         return -TARGET_EFAULT;
6139     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6140     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6141     ldt_info.limit = tswap32(target_ldt_info->limit);
6142     ldt_info.flags = tswap32(target_ldt_info->flags);
6143     if (ldt_info.entry_number == -1) {
6144         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6145             if (gdt_table[i] == 0) {
6146                 ldt_info.entry_number = i;
6147                 target_ldt_info->entry_number = tswap32(i);
6148                 break;
6149             }
6150         }
6151     }
6152     unlock_user_struct(target_ldt_info, ptr, 1);
6153 
6154     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6155         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6156            return -TARGET_EINVAL;
6157     seg_32bit = ldt_info.flags & 1;
6158     contents = (ldt_info.flags >> 1) & 3;
6159     read_exec_only = (ldt_info.flags >> 3) & 1;
6160     limit_in_pages = (ldt_info.flags >> 4) & 1;
6161     seg_not_present = (ldt_info.flags >> 5) & 1;
6162     useable = (ldt_info.flags >> 6) & 1;
6163 #ifdef TARGET_ABI32
6164     lm = 0;
6165 #else
6166     lm = (ldt_info.flags >> 7) & 1;
6167 #endif
6168 
6169     if (contents == 3) {
6170         if (seg_not_present == 0)
6171             return -TARGET_EINVAL;
6172     }
6173 
6174     /* NOTE: same code as Linux kernel */
6175     /* Allow LDTs to be cleared by the user. */
6176     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6177         if ((contents == 0             &&
6178              read_exec_only == 1       &&
6179              seg_32bit == 0            &&
6180              limit_in_pages == 0       &&
6181              seg_not_present == 1      &&
6182              useable == 0 )) {
6183             entry_1 = 0;
6184             entry_2 = 0;
6185             goto install;
6186         }
6187     }
6188 
6189     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6190         (ldt_info.limit & 0x0ffff);
6191     entry_2 = (ldt_info.base_addr & 0xff000000) |
6192         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6193         (ldt_info.limit & 0xf0000) |
6194         ((read_exec_only ^ 1) << 9) |
6195         (contents << 10) |
6196         ((seg_not_present ^ 1) << 15) |
6197         (seg_32bit << 22) |
6198         (limit_in_pages << 23) |
6199         (useable << 20) |
6200         (lm << 21) |
6201         0x7000;
6202 
6203     /* Install the new entry ...  */
6204 install:
6205     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6206     lp[0] = tswap32(entry_1);
6207     lp[1] = tswap32(entry_2);
6208     return 0;
6209 }
6210 
6211 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6212 {
6213     struct target_modify_ldt_ldt_s *target_ldt_info;
6214     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6215     uint32_t base_addr, limit, flags;
6216     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6217     int seg_not_present, useable, lm;
6218     uint32_t *lp, entry_1, entry_2;
6219 
6220     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6221     if (!target_ldt_info)
6222         return -TARGET_EFAULT;
6223     idx = tswap32(target_ldt_info->entry_number);
6224     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6225         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6226         unlock_user_struct(target_ldt_info, ptr, 1);
6227         return -TARGET_EINVAL;
6228     }
6229     lp = (uint32_t *)(gdt_table + idx);
6230     entry_1 = tswap32(lp[0]);
6231     entry_2 = tswap32(lp[1]);
6232 
6233     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6234     contents = (entry_2 >> 10) & 3;
6235     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6236     seg_32bit = (entry_2 >> 22) & 1;
6237     limit_in_pages = (entry_2 >> 23) & 1;
6238     useable = (entry_2 >> 20) & 1;
6239 #ifdef TARGET_ABI32
6240     lm = 0;
6241 #else
6242     lm = (entry_2 >> 21) & 1;
6243 #endif
6244     flags = (seg_32bit << 0) | (contents << 1) |
6245         (read_exec_only << 3) | (limit_in_pages << 4) |
6246         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6247     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6248     base_addr = (entry_1 >> 16) |
6249         (entry_2 & 0xff000000) |
6250         ((entry_2 & 0xff) << 16);
6251     target_ldt_info->base_addr = tswapal(base_addr);
6252     target_ldt_info->limit = tswap32(limit);
6253     target_ldt_info->flags = tswap32(flags);
6254     unlock_user_struct(target_ldt_info, ptr, 1);
6255     return 0;
6256 }
6257 
6258 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6259 {
6260     return -TARGET_ENOSYS;
6261 }
6262 #else
6263 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6264 {
6265     abi_long ret = 0;
6266     abi_ulong val;
6267     int idx;
6268 
6269     switch(code) {
6270     case TARGET_ARCH_SET_GS:
6271     case TARGET_ARCH_SET_FS:
6272         if (code == TARGET_ARCH_SET_GS)
6273             idx = R_GS;
6274         else
6275             idx = R_FS;
6276         cpu_x86_load_seg(env, idx, 0);
6277         env->segs[idx].base = addr;
6278         break;
6279     case TARGET_ARCH_GET_GS:
6280     case TARGET_ARCH_GET_FS:
6281         if (code == TARGET_ARCH_GET_GS)
6282             idx = R_GS;
6283         else
6284             idx = R_FS;
6285         val = env->segs[idx].base;
6286         if (put_user(val, addr, abi_ulong))
6287             ret = -TARGET_EFAULT;
6288         break;
6289     default:
6290         ret = -TARGET_EINVAL;
6291         break;
6292     }
6293     return ret;
6294 }
6295 #endif /* defined(TARGET_ABI32) */
6296 
6297 #endif /* defined(TARGET_I386) */
6298 
6299 #define NEW_STACK_SIZE 0x40000
6300 
6301 
6302 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6303 typedef struct {
6304     CPUArchState *env;
6305     pthread_mutex_t mutex;
6306     pthread_cond_t cond;
6307     pthread_t thread;
6308     uint32_t tid;
6309     abi_ulong child_tidptr;
6310     abi_ulong parent_tidptr;
6311     sigset_t sigmask;
6312 } new_thread_info;
6313 
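/*
 * Entry point of a new guest thread created by do_fork() below: register
 * with RCU/TCG, publish the TID, restore the signal mask, signal the
 * parent that we are ready, then wait on clone_lock so the parent can
 * finish TLS setup before cpu_loop() starts executing guest code.
 */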
6314 static void *clone_func(void *arg)
6315 {
6316     new_thread_info *info = arg;
6317     CPUArchState *env;
6318     CPUState *cpu;
6319     TaskState *ts;
6320 
6321     rcu_register_thread();
6322     tcg_register_thread();
6323     env = info->env;
6324     cpu = env_cpu(env);
6325     thread_cpu = cpu;
6326     ts = (TaskState *)cpu->opaque;
6327     info->tid = sys_gettid();
6328     task_settid(ts);
6329     if (info->child_tidptr)
6330         put_user_u32(info->tid, info->child_tidptr);
6331     if (info->parent_tidptr)
6332         put_user_u32(info->tid, info->parent_tidptr);
6333     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6334     /* Enable signals.  */
6335     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6336     /* Signal to the parent that we're ready.  */
6337     pthread_mutex_lock(&info->mutex);
6338     pthread_cond_broadcast(&info->cond);
6339     pthread_mutex_unlock(&info->mutex);
6340     /* Wait until the parent has finished initializing the tls state.  */
6341     pthread_mutex_lock(&clone_lock);
6342     pthread_mutex_unlock(&clone_lock);
6343     cpu_loop(env);
6344     /* never exits */
6345     return NULL;
6346 }
6347 
6348 /* do_fork() must return host values and target errnos (unlike most
6349    other do_*() functions). */
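/*
 * Requests with CLONE_VM are implemented as a new host thread in this
 * process; everything else (including vfork) is emulated with a plain
 * host fork().
 */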
6350 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6351                    abi_ulong parent_tidptr, target_ulong newtls,
6352                    abi_ulong child_tidptr)
6353 {
6354     CPUState *cpu = env_cpu(env);
6355     int ret;
6356     TaskState *ts;
6357     CPUState *new_cpu;
6358     CPUArchState *new_env;
6359     sigset_t sigmask;
6360 
6361     flags &= ~CLONE_IGNORED_FLAGS;
6362 
6363     /* Emulate vfork() with fork() */
6364     if (flags & CLONE_VFORK)
6365         flags &= ~(CLONE_VFORK | CLONE_VM);
6366 
6367     if (flags & CLONE_VM) {
6368         TaskState *parent_ts = (TaskState *)cpu->opaque;
6369         new_thread_info info;
6370         pthread_attr_t attr;
6371 
6372         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6373             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6374             return -TARGET_EINVAL;
6375         }
6376 
6377         ts = g_new0(TaskState, 1);
6378         init_task_state(ts);
6379 
6380         /* Grab a mutex so that thread setup appears atomic.  */
6381         pthread_mutex_lock(&clone_lock);
6382 
6383         /*
6384          * If this is our first additional thread, we need to ensure we
6385          * generate code for parallel execution and flush old translations.
6386          * Do this now so that the copy gets CF_PARALLEL too.
6387          */
6388         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6389             cpu->tcg_cflags |= CF_PARALLEL;
6390             tb_flush(cpu);
6391         }
6392 
6393         /* we create a new CPU instance. */
6394         new_env = cpu_copy(env);
6395         /* Init regs that differ from the parent.  */
6396         cpu_clone_regs_child(new_env, newsp, flags);
6397         cpu_clone_regs_parent(env, flags);
6398         new_cpu = env_cpu(new_env);
6399         new_cpu->opaque = ts;
6400         ts->bprm = parent_ts->bprm;
6401         ts->info = parent_ts->info;
6402         ts->signal_mask = parent_ts->signal_mask;
6403 
6404         if (flags & CLONE_CHILD_CLEARTID) {
6405             ts->child_tidptr = child_tidptr;
6406         }
6407 
6408         if (flags & CLONE_SETTLS) {
6409             cpu_set_tls (new_env, newtls);
6410         }
6411 
6412         memset(&info, 0, sizeof(info));
6413         pthread_mutex_init(&info.mutex, NULL);
6414         pthread_mutex_lock(&info.mutex);
6415         pthread_cond_init(&info.cond, NULL);
6416         info.env = new_env;
6417         if (flags & CLONE_CHILD_SETTID) {
6418             info.child_tidptr = child_tidptr;
6419         }
6420         if (flags & CLONE_PARENT_SETTID) {
6421             info.parent_tidptr = parent_tidptr;
6422         }
6423 
6424         ret = pthread_attr_init(&attr);
6425         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6426         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6427         /* It is not safe to deliver signals until the child has finished
6428            initializing, so temporarily block all signals.  */
6429         sigfillset(&sigmask);
6430         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6431         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6432 
6433         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6434         /* TODO: Free new CPU state if thread creation failed.  */
6435 
6436         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6437         pthread_attr_destroy(&attr);
6438         if (ret == 0) {
6439             /* Wait for the child to initialize.  */
6440             pthread_cond_wait(&info.cond, &info.mutex);
6441             ret = info.tid;
6442         } else {
6443             ret = -1;
6444         }
6445         pthread_mutex_unlock(&info.mutex);
6446         pthread_cond_destroy(&info.cond);
6447         pthread_mutex_destroy(&info.mutex);
6448         pthread_mutex_unlock(&clone_lock);
6449     } else {
6450         /* if CLONE_VM is not set, we treat this as a fork */
6451         if (flags & CLONE_INVALID_FORK_FLAGS) {
6452             return -TARGET_EINVAL;
6453         }
6454 
6455         /* We can't support custom termination signals */
6456         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6457             return -TARGET_EINVAL;
6458         }
6459 
6460         if (block_signals()) {
6461             return -TARGET_ERESTARTSYS;
6462         }
6463 
6464         fork_start();
6465         ret = fork();
6466         if (ret == 0) {
6467             /* Child Process.  */
6468             cpu_clone_regs_child(env, newsp, flags);
6469             fork_end(1);
6470             /* There is a race condition here.  The parent process could
6471                theoretically read the TID in the child process before the child
6472                tid is set.  This would require using either ptrace
6473                (not implemented) or having *_tidptr point at a shared memory
6474                mapping.  We can't repeat the spinlock hack used above because
6475                the child process gets its own copy of the lock.  */
6476             if (flags & CLONE_CHILD_SETTID)
6477                 put_user_u32(sys_gettid(), child_tidptr);
6478             if (flags & CLONE_PARENT_SETTID)
6479                 put_user_u32(sys_gettid(), parent_tidptr);
6480             ts = (TaskState *)cpu->opaque;
6481             if (flags & CLONE_SETTLS)
6482                 cpu_set_tls (env, newtls);
6483             if (flags & CLONE_CHILD_CLEARTID)
6484                 ts->child_tidptr = child_tidptr;
6485         } else {
6486             cpu_clone_regs_parent(env, flags);
6487             fork_end(0);
6488         }
6489     }
6490     return ret;
6491 }
6492 
6493 /* warning: does not handle Linux-specific flags... */
6494 static int target_to_host_fcntl_cmd(int cmd)
6495 {
6496     int ret;
6497 
6498     switch(cmd) {
6499     case TARGET_F_DUPFD:
6500     case TARGET_F_GETFD:
6501     case TARGET_F_SETFD:
6502     case TARGET_F_GETFL:
6503     case TARGET_F_SETFL:
6504     case TARGET_F_OFD_GETLK:
6505     case TARGET_F_OFD_SETLK:
6506     case TARGET_F_OFD_SETLKW:
6507         ret = cmd;
6508         break;
6509     case TARGET_F_GETLK:
6510         ret = F_GETLK64;
6511         break;
6512     case TARGET_F_SETLK:
6513         ret = F_SETLK64;
6514         break;
6515     case TARGET_F_SETLKW:
6516         ret = F_SETLKW64;
6517         break;
6518     case TARGET_F_GETOWN:
6519         ret = F_GETOWN;
6520         break;
6521     case TARGET_F_SETOWN:
6522         ret = F_SETOWN;
6523         break;
6524     case TARGET_F_GETSIG:
6525         ret = F_GETSIG;
6526         break;
6527     case TARGET_F_SETSIG:
6528         ret = F_SETSIG;
6529         break;
6530 #if TARGET_ABI_BITS == 32
6531     case TARGET_F_GETLK64:
6532         ret = F_GETLK64;
6533         break;
6534     case TARGET_F_SETLK64:
6535         ret = F_SETLK64;
6536         break;
6537     case TARGET_F_SETLKW64:
6538         ret = F_SETLKW64;
6539         break;
6540 #endif
6541     case TARGET_F_SETLEASE:
6542         ret = F_SETLEASE;
6543         break;
6544     case TARGET_F_GETLEASE:
6545         ret = F_GETLEASE;
6546         break;
6547 #ifdef F_DUPFD_CLOEXEC
6548     case TARGET_F_DUPFD_CLOEXEC:
6549         ret = F_DUPFD_CLOEXEC;
6550         break;
6551 #endif
6552     case TARGET_F_NOTIFY:
6553         ret = F_NOTIFY;
6554         break;
6555 #ifdef F_GETOWN_EX
6556     case TARGET_F_GETOWN_EX:
6557         ret = F_GETOWN_EX;
6558         break;
6559 #endif
6560 #ifdef F_SETOWN_EX
6561     case TARGET_F_SETOWN_EX:
6562         ret = F_SETOWN_EX;
6563         break;
6564 #endif
6565 #ifdef F_SETPIPE_SZ
6566     case TARGET_F_SETPIPE_SZ:
6567         ret = F_SETPIPE_SZ;
6568         break;
6569     case TARGET_F_GETPIPE_SZ:
6570         ret = F_GETPIPE_SZ;
6571         break;
6572 #endif
6573 #ifdef F_ADD_SEALS
6574     case TARGET_F_ADD_SEALS:
6575         ret = F_ADD_SEALS;
6576         break;
6577     case TARGET_F_GET_SEALS:
6578         ret = F_GET_SEALS;
6579         break;
6580 #endif
6581     default:
6582         ret = -TARGET_EINVAL;
6583         break;
6584     }
6585 
6586 #if defined(__powerpc64__)
6587     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6588      * that the kernel does not support. The glibc fcntl call actually
6589      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6590      * the syscall directly, adjust to what the kernel supports.
6591      */
6592     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6593         ret -= F_GETLK64 - 5;
6594     }
6595 #endif
6596 
6597     return ret;
6598 }
6599 
6600 #define FLOCK_TRANSTBL \
6601     switch (type) { \
6602     TRANSTBL_CONVERT(F_RDLCK); \
6603     TRANSTBL_CONVERT(F_WRLCK); \
6604     TRANSTBL_CONVERT(F_UNLCK); \
6605     }
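/*
 * FLOCK_TRANSTBL expands to the body of a switch statement; each helper
 * below redefines TRANSTBL_CONVERT to select the direction of the
 * TARGET_F_* <-> F_* lock-type mapping before instantiating it.
 */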
6606 
6607 static int target_to_host_flock(int type)
6608 {
6609 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6610     FLOCK_TRANSTBL
6611 #undef  TRANSTBL_CONVERT
6612     return -TARGET_EINVAL;
6613 }
6614 
6615 static int host_to_target_flock(int type)
6616 {
6617 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6618     FLOCK_TRANSTBL
6619 #undef  TRANSTBL_CONVERT
6620     /* if we don't know how to convert the value coming
6621      * from the host, we copy it to the target field as-is
6622      */
6623     return type;
6624 }
6625 
6626 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6627                                             abi_ulong target_flock_addr)
6628 {
6629     struct target_flock *target_fl;
6630     int l_type;
6631 
6632     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6633         return -TARGET_EFAULT;
6634     }
6635 
6636     __get_user(l_type, &target_fl->l_type);
6637     l_type = target_to_host_flock(l_type);
6638     if (l_type < 0) {
6639         return l_type;
6640     }
6641     fl->l_type = l_type;
6642     __get_user(fl->l_whence, &target_fl->l_whence);
6643     __get_user(fl->l_start, &target_fl->l_start);
6644     __get_user(fl->l_len, &target_fl->l_len);
6645     __get_user(fl->l_pid, &target_fl->l_pid);
6646     unlock_user_struct(target_fl, target_flock_addr, 0);
6647     return 0;
6648 }
6649 
6650 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6651                                           const struct flock64 *fl)
6652 {
6653     struct target_flock *target_fl;
6654     short l_type;
6655 
6656     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6657         return -TARGET_EFAULT;
6658     }
6659 
6660     l_type = host_to_target_flock(fl->l_type);
6661     __put_user(l_type, &target_fl->l_type);
6662     __put_user(fl->l_whence, &target_fl->l_whence);
6663     __put_user(fl->l_start, &target_fl->l_start);
6664     __put_user(fl->l_len, &target_fl->l_len);
6665     __put_user(fl->l_pid, &target_fl->l_pid);
6666     unlock_user_struct(target_fl, target_flock_addr, 1);
6667     return 0;
6668 }
6669 
6670 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6671 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6672 
6673 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6674 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6675                                                    abi_ulong target_flock_addr)
6676 {
6677     struct target_oabi_flock64 *target_fl;
6678     int l_type;
6679 
6680     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6681         return -TARGET_EFAULT;
6682     }
6683 
6684     __get_user(l_type, &target_fl->l_type);
6685     l_type = target_to_host_flock(l_type);
6686     if (l_type < 0) {
6687         return l_type;
6688     }
6689     fl->l_type = l_type;
6690     __get_user(fl->l_whence, &target_fl->l_whence);
6691     __get_user(fl->l_start, &target_fl->l_start);
6692     __get_user(fl->l_len, &target_fl->l_len);
6693     __get_user(fl->l_pid, &target_fl->l_pid);
6694     unlock_user_struct(target_fl, target_flock_addr, 0);
6695     return 0;
6696 }
6697 
6698 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6699                                                  const struct flock64 *fl)
6700 {
6701     struct target_oabi_flock64 *target_fl;
6702     short l_type;
6703 
6704     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6705         return -TARGET_EFAULT;
6706     }
6707 
6708     l_type = host_to_target_flock(fl->l_type);
6709     __put_user(l_type, &target_fl->l_type);
6710     __put_user(fl->l_whence, &target_fl->l_whence);
6711     __put_user(fl->l_start, &target_fl->l_start);
6712     __put_user(fl->l_len, &target_fl->l_len);
6713     __put_user(fl->l_pid, &target_fl->l_pid);
6714     unlock_user_struct(target_fl, target_flock_addr, 1);
6715     return 0;
6716 }
6717 #endif
6718 
6719 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6720                                               abi_ulong target_flock_addr)
6721 {
6722     struct target_flock64 *target_fl;
6723     int l_type;
6724 
6725     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6726         return -TARGET_EFAULT;
6727     }
6728 
6729     __get_user(l_type, &target_fl->l_type);
6730     l_type = target_to_host_flock(l_type);
6731     if (l_type < 0) {
6732         return l_type;
6733     }
6734     fl->l_type = l_type;
6735     __get_user(fl->l_whence, &target_fl->l_whence);
6736     __get_user(fl->l_start, &target_fl->l_start);
6737     __get_user(fl->l_len, &target_fl->l_len);
6738     __get_user(fl->l_pid, &target_fl->l_pid);
6739     unlock_user_struct(target_fl, target_flock_addr, 0);
6740     return 0;
6741 }
6742 
6743 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6744                                             const struct flock64 *fl)
6745 {
6746     struct target_flock64 *target_fl;
6747     short l_type;
6748 
6749     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6750         return -TARGET_EFAULT;
6751     }
6752 
6753     l_type = host_to_target_flock(fl->l_type);
6754     __put_user(l_type, &target_fl->l_type);
6755     __put_user(fl->l_whence, &target_fl->l_whence);
6756     __put_user(fl->l_start, &target_fl->l_start);
6757     __put_user(fl->l_len, &target_fl->l_len);
6758     __put_user(fl->l_pid, &target_fl->l_pid);
6759     unlock_user_struct(target_fl, target_flock_addr, 1);
6760     return 0;
6761 }
6762 
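/*
 * do_fcntl() uses struct flock64 as the common host representation for
 * all record-locking commands; target_to_host_fcntl_cmd() has already
 * mapped the lock commands onto their 64-bit host equivalents.
 */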
6763 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6764 {
6765     struct flock64 fl64;
6766 #ifdef F_GETOWN_EX
6767     struct f_owner_ex fox;
6768     struct target_f_owner_ex *target_fox;
6769 #endif
6770     abi_long ret;
6771     int host_cmd = target_to_host_fcntl_cmd(cmd);
6772 
6773     if (host_cmd == -TARGET_EINVAL)
6774         return host_cmd;
6775 
6776     switch(cmd) {
6777     case TARGET_F_GETLK:
6778         ret = copy_from_user_flock(&fl64, arg);
6779         if (ret) {
6780             return ret;
6781         }
6782         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6783         if (ret == 0) {
6784             ret = copy_to_user_flock(arg, &fl64);
6785         }
6786         break;
6787 
6788     case TARGET_F_SETLK:
6789     case TARGET_F_SETLKW:
6790         ret = copy_from_user_flock(&fl64, arg);
6791         if (ret) {
6792             return ret;
6793         }
6794         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6795         break;
6796 
6797     case TARGET_F_GETLK64:
6798     case TARGET_F_OFD_GETLK:
6799         ret = copy_from_user_flock64(&fl64, arg);
6800         if (ret) {
6801             return ret;
6802         }
6803         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6804         if (ret == 0) {
6805             ret = copy_to_user_flock64(arg, &fl64);
6806         }
6807         break;
6808     case TARGET_F_SETLK64:
6809     case TARGET_F_SETLKW64:
6810     case TARGET_F_OFD_SETLK:
6811     case TARGET_F_OFD_SETLKW:
6812         ret = copy_from_user_flock64(&fl64, arg);
6813         if (ret) {
6814             return ret;
6815         }
6816         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6817         break;
6818 
6819     case TARGET_F_GETFL:
6820         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6821         if (ret >= 0) {
6822             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6823         }
6824         break;
6825 
6826     case TARGET_F_SETFL:
6827         ret = get_errno(safe_fcntl(fd, host_cmd,
6828                                    target_to_host_bitmask(arg,
6829                                                           fcntl_flags_tbl)));
6830         break;
6831 
6832 #ifdef F_GETOWN_EX
6833     case TARGET_F_GETOWN_EX:
6834         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6835         if (ret >= 0) {
6836             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6837                 return -TARGET_EFAULT;
6838             target_fox->type = tswap32(fox.type);
6839             target_fox->pid = tswap32(fox.pid);
6840             unlock_user_struct(target_fox, arg, 1);
6841         }
6842         break;
6843 #endif
6844 
6845 #ifdef F_SETOWN_EX
6846     case TARGET_F_SETOWN_EX:
6847         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6848             return -TARGET_EFAULT;
6849         fox.type = tswap32(target_fox->type);
6850         fox.pid = tswap32(target_fox->pid);
6851         unlock_user_struct(target_fox, arg, 0);
6852         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6853         break;
6854 #endif
6855 
6856     case TARGET_F_SETSIG:
6857         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6858         break;
6859 
6860     case TARGET_F_GETSIG:
6861         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6862         break;
6863 
6864     case TARGET_F_SETOWN:
6865     case TARGET_F_GETOWN:
6866     case TARGET_F_SETLEASE:
6867     case TARGET_F_GETLEASE:
6868     case TARGET_F_SETPIPE_SZ:
6869     case TARGET_F_GETPIPE_SZ:
6870     case TARGET_F_ADD_SEALS:
6871     case TARGET_F_GET_SEALS:
6872         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6873         break;
6874 
6875     default:
6876         ret = get_errno(safe_fcntl(fd, cmd, arg));
6877         break;
6878     }
6879     return ret;
6880 }
6881 
6882 #ifdef USE_UID16
6883 
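/*
 * Helpers for targets with 16-bit uid_t/gid_t: host IDs above 65535 are
 * squashed to 65534 (the traditional "overflow" UID/GID) on the way to
 * the guest, and a 16-bit -1 from the guest is widened to a full -1 so
 * that "leave unchanged" markers keep their meaning.
 */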
6884 static inline int high2lowuid(int uid)
6885 {
6886     if (uid > 65535)
6887         return 65534;
6888     else
6889         return uid;
6890 }
6891 
6892 static inline int high2lowgid(int gid)
6893 {
6894     if (gid > 65535)
6895         return 65534;
6896     else
6897         return gid;
6898 }
6899 
6900 static inline int low2highuid(int uid)
6901 {
6902     if ((int16_t)uid == -1)
6903         return -1;
6904     else
6905         return uid;
6906 }
6907 
6908 static inline int low2highgid(int gid)
6909 {
6910     if ((int16_t)gid == -1)
6911         return -1;
6912     else
6913         return gid;
6914 }
6915 static inline int tswapid(int id)
6916 {
6917     return tswap16(id);
6918 }
6919 
6920 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6921 
6922 #else /* !USE_UID16 */
6923 static inline int high2lowuid(int uid)
6924 {
6925     return uid;
6926 }
6927 static inline int high2lowgid(int gid)
6928 {
6929     return gid;
6930 }
6931 static inline int low2highuid(int uid)
6932 {
6933     return uid;
6934 }
6935 static inline int low2highgid(int gid)
6936 {
6937     return gid;
6938 }
6939 static inline int tswapid(int id)
6940 {
6941     return tswap32(id);
6942 }
6943 
6944 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6945 
6946 #endif /* USE_UID16 */
6947 
6948 /* We must do direct syscalls for setting UID/GID, because we want to
6949  * implement the Linux system call semantics of "change only for this thread",
6950  * not the libc/POSIX semantics of "change for all threads in process".
6951  * (See http://ewontfix.com/17/ for more details.)
6952  * We use the 32-bit version of the syscalls if present; if it is not
6953  * then either the host architecture supports 32-bit UIDs natively with
6954  * the standard syscall, or the 16-bit UID is the best we can do.
6955  */
6956 #ifdef __NR_setuid32
6957 #define __NR_sys_setuid __NR_setuid32
6958 #else
6959 #define __NR_sys_setuid __NR_setuid
6960 #endif
6961 #ifdef __NR_setgid32
6962 #define __NR_sys_setgid __NR_setgid32
6963 #else
6964 #define __NR_sys_setgid __NR_setgid
6965 #endif
6966 #ifdef __NR_setresuid32
6967 #define __NR_sys_setresuid __NR_setresuid32
6968 #else
6969 #define __NR_sys_setresuid __NR_setresuid
6970 #endif
6971 #ifdef __NR_setresgid32
6972 #define __NR_sys_setresgid __NR_setresgid32
6973 #else
6974 #define __NR_sys_setresgid __NR_setresgid
6975 #endif
6976 
6977 _syscall1(int, sys_setuid, uid_t, uid)
6978 _syscall1(int, sys_setgid, gid_t, gid)
6979 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6980 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6981 
6982 void syscall_init(void)
6983 {
6984     IOCTLEntry *ie;
6985     const argtype *arg_type;
6986     int size;
6987 
6988     thunk_init(STRUCT_MAX);
6989 
6990 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6991 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6992 #include "syscall_types.h"
6993 #undef STRUCT
6994 #undef STRUCT_SPECIAL
6995 
6996     /* We patch the ioctl size if necessary. We rely on the fact that
6997        no ioctl has all bits set to '1' in its size field */
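    /* The loop below rewrites ie->target_cmd in place, substituting the
       argument size computed by the thunk layer for the all-ones
       placeholder. */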
6998     ie = ioctl_entries;
6999     while (ie->target_cmd != 0) {
7000         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7001             TARGET_IOC_SIZEMASK) {
7002             arg_type = ie->arg_type;
7003             if (arg_type[0] != TYPE_PTR) {
7004                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7005                         ie->target_cmd);
7006                 exit(1);
7007             }
7008             arg_type++;
7009             size = thunk_type_size(arg_type, 0);
7010             ie->target_cmd = (ie->target_cmd &
7011                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7012                 (size << TARGET_IOC_SIZESHIFT);
7013         }
7014 
7015         /* automatic consistency check if same arch */
7016 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7017     (defined(__x86_64__) && defined(TARGET_X86_64))
7018         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7019             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7020                     ie->name, ie->target_cmd, ie->host_cmd);
7021         }
7022 #endif
7023         ie++;
7024     }
7025 }
7026 
7027 #ifdef TARGET_NR_truncate64
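/*
 * On 32-bit ABIs where 64-bit syscall arguments must start in an aligned
 * register pair (regpairs_aligned()), a padding argument shifts the two
 * halves of the offset by one slot, so they are taken from arg3/arg4
 * here and in target_ftruncate64() below.
 */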
7028 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7029                                          abi_long arg2,
7030                                          abi_long arg3,
7031                                          abi_long arg4)
7032 {
7033     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7034         arg2 = arg3;
7035         arg3 = arg4;
7036     }
7037     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7038 }
7039 #endif
7040 
7041 #ifdef TARGET_NR_ftruncate64
7042 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7043                                           abi_long arg2,
7044                                           abi_long arg3,
7045                                           abi_long arg4)
7046 {
7047     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7048         arg2 = arg3;
7049         arg3 = arg4;
7050     }
7051     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7052 }
7053 #endif
7054 
7055 #if defined(TARGET_NR_timer_settime) || \
7056     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7057 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7058                                                  abi_ulong target_addr)
7059 {
7060     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7061                                 offsetof(struct target_itimerspec,
7062                                          it_interval)) ||
7063         target_to_host_timespec(&host_its->it_value, target_addr +
7064                                 offsetof(struct target_itimerspec,
7065                                          it_value))) {
7066         return -TARGET_EFAULT;
7067     }
7068 
7069     return 0;
7070 }
7071 #endif
7072 
7073 #if defined(TARGET_NR_timer_settime64) || \
7074     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7075 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7076                                                    abi_ulong target_addr)
7077 {
7078     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7079                                   offsetof(struct target__kernel_itimerspec,
7080                                            it_interval)) ||
7081         target_to_host_timespec64(&host_its->it_value, target_addr +
7082                                   offsetof(struct target__kernel_itimerspec,
7083                                            it_value))) {
7084         return -TARGET_EFAULT;
7085     }
7086 
7087     return 0;
7088 }
7089 #endif
7090 
7091 #if ((defined(TARGET_NR_timerfd_gettime) || \
7092       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7093       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7094 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7095                                                  struct itimerspec *host_its)
7096 {
7097     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7098                                                        it_interval),
7099                                 &host_its->it_interval) ||
7100         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7101                                                        it_value),
7102                                 &host_its->it_value)) {
7103         return -TARGET_EFAULT;
7104     }
7105     return 0;
7106 }
7107 #endif
7108 
7109 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7110       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7111       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7112 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7113                                                    struct itimerspec *host_its)
7114 {
7115     if (host_to_target_timespec64(target_addr +
7116                                   offsetof(struct target__kernel_itimerspec,
7117                                            it_interval),
7118                                   &host_its->it_interval) ||
7119         host_to_target_timespec64(target_addr +
7120                                   offsetof(struct target__kernel_itimerspec,
7121                                            it_value),
7122                                   &host_its->it_value)) {
7123         return -TARGET_EFAULT;
7124     }
7125     return 0;
7126 }
7127 #endif
7128 
7129 #if defined(TARGET_NR_adjtimex) || \
7130     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7131 static inline abi_long target_to_host_timex(struct timex *host_tx,
7132                                             abi_long target_addr)
7133 {
7134     struct target_timex *target_tx;
7135 
7136     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7137         return -TARGET_EFAULT;
7138     }
7139 
7140     __get_user(host_tx->modes, &target_tx->modes);
7141     __get_user(host_tx->offset, &target_tx->offset);
7142     __get_user(host_tx->freq, &target_tx->freq);
7143     __get_user(host_tx->maxerror, &target_tx->maxerror);
7144     __get_user(host_tx->esterror, &target_tx->esterror);
7145     __get_user(host_tx->status, &target_tx->status);
7146     __get_user(host_tx->constant, &target_tx->constant);
7147     __get_user(host_tx->precision, &target_tx->precision);
7148     __get_user(host_tx->tolerance, &target_tx->tolerance);
7149     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7150     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7151     __get_user(host_tx->tick, &target_tx->tick);
7152     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7153     __get_user(host_tx->jitter, &target_tx->jitter);
7154     __get_user(host_tx->shift, &target_tx->shift);
7155     __get_user(host_tx->stabil, &target_tx->stabil);
7156     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7157     __get_user(host_tx->calcnt, &target_tx->calcnt);
7158     __get_user(host_tx->errcnt, &target_tx->errcnt);
7159     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7160     __get_user(host_tx->tai, &target_tx->tai);
7161 
7162     unlock_user_struct(target_tx, target_addr, 0);
7163     return 0;
7164 }
7165 
7166 static inline abi_long host_to_target_timex(abi_long target_addr,
7167                                             struct timex *host_tx)
7168 {
7169     struct target_timex *target_tx;
7170 
7171     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7172         return -TARGET_EFAULT;
7173     }
7174 
7175     __put_user(host_tx->modes, &target_tx->modes);
7176     __put_user(host_tx->offset, &target_tx->offset);
7177     __put_user(host_tx->freq, &target_tx->freq);
7178     __put_user(host_tx->maxerror, &target_tx->maxerror);
7179     __put_user(host_tx->esterror, &target_tx->esterror);
7180     __put_user(host_tx->status, &target_tx->status);
7181     __put_user(host_tx->constant, &target_tx->constant);
7182     __put_user(host_tx->precision, &target_tx->precision);
7183     __put_user(host_tx->tolerance, &target_tx->tolerance);
7184     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7185     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7186     __put_user(host_tx->tick, &target_tx->tick);
7187     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7188     __put_user(host_tx->jitter, &target_tx->jitter);
7189     __put_user(host_tx->shift, &target_tx->shift);
7190     __put_user(host_tx->stabil, &target_tx->stabil);
7191     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7192     __put_user(host_tx->calcnt, &target_tx->calcnt);
7193     __put_user(host_tx->errcnt, &target_tx->errcnt);
7194     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7195     __put_user(host_tx->tai, &target_tx->tai);
7196 
7197     unlock_user_struct(target_tx, target_addr, 1);
7198     return 0;
7199 }
7200 #endif
7201 
7202 
7203 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7204 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7205                                               abi_long target_addr)
7206 {
7207     struct target__kernel_timex *target_tx;
7208 
7209     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7210                                  offsetof(struct target__kernel_timex,
7211                                           time))) {
7212         return -TARGET_EFAULT;
7213     }
7214 
7215     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7216         return -TARGET_EFAULT;
7217     }
7218 
7219     __get_user(host_tx->modes, &target_tx->modes);
7220     __get_user(host_tx->offset, &target_tx->offset);
7221     __get_user(host_tx->freq, &target_tx->freq);
7222     __get_user(host_tx->maxerror, &target_tx->maxerror);
7223     __get_user(host_tx->esterror, &target_tx->esterror);
7224     __get_user(host_tx->status, &target_tx->status);
7225     __get_user(host_tx->constant, &target_tx->constant);
7226     __get_user(host_tx->precision, &target_tx->precision);
7227     __get_user(host_tx->tolerance, &target_tx->tolerance);
7228     __get_user(host_tx->tick, &target_tx->tick);
7229     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7230     __get_user(host_tx->jitter, &target_tx->jitter);
7231     __get_user(host_tx->shift, &target_tx->shift);
7232     __get_user(host_tx->stabil, &target_tx->stabil);
7233     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7234     __get_user(host_tx->calcnt, &target_tx->calcnt);
7235     __get_user(host_tx->errcnt, &target_tx->errcnt);
7236     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7237     __get_user(host_tx->tai, &target_tx->tai);
7238 
7239     unlock_user_struct(target_tx, target_addr, 0);
7240     return 0;
7241 }
7242 
7243 static inline abi_long host_to_target_timex64(abi_long target_addr,
7244                                               struct timex *host_tx)
7245 {
7246     struct target__kernel_timex *target_tx;
7247 
7248    if (copy_to_user_timeval64(target_addr +
7249                               offsetof(struct target__kernel_timex, time),
7250                               &host_tx->time)) {
7251         return -TARGET_EFAULT;
7252     }
7253 
7254     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7255         return -TARGET_EFAULT;
7256     }
7257 
7258     __put_user(host_tx->modes, &target_tx->modes);
7259     __put_user(host_tx->offset, &target_tx->offset);
7260     __put_user(host_tx->freq, &target_tx->freq);
7261     __put_user(host_tx->maxerror, &target_tx->maxerror);
7262     __put_user(host_tx->esterror, &target_tx->esterror);
7263     __put_user(host_tx->status, &target_tx->status);
7264     __put_user(host_tx->constant, &target_tx->constant);
7265     __put_user(host_tx->precision, &target_tx->precision);
7266     __put_user(host_tx->tolerance, &target_tx->tolerance);
7267     __put_user(host_tx->tick, &target_tx->tick);
7268     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7269     __put_user(host_tx->jitter, &target_tx->jitter);
7270     __put_user(host_tx->shift, &target_tx->shift);
7271     __put_user(host_tx->stabil, &target_tx->stabil);
7272     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7273     __put_user(host_tx->calcnt, &target_tx->calcnt);
7274     __put_user(host_tx->errcnt, &target_tx->errcnt);
7275     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7276     __put_user(host_tx->tai, &target_tx->tai);
7277 
7278     unlock_user_struct(target_tx, target_addr, 1);
7279     return 0;
7280 }
7281 #endif
7282 
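/*
 * Some libc headers do not expose sigev_notify_thread_id; fall back to
 * the union member that the Linux ABI uses for it.
 */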
7283 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7284 #define sigev_notify_thread_id _sigev_un._tid
7285 #endif
7286 
7287 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7288                                                abi_ulong target_addr)
7289 {
7290     struct target_sigevent *target_sevp;
7291 
7292     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7293         return -TARGET_EFAULT;
7294     }
7295 
7296     /* This union is awkward on 64 bit systems because it has a 32 bit
7297      * integer and a pointer in it; we follow the conversion approach
7298      * used for handling sigval types in signal.c so the guest should get
7299      * the correct value back even if we did a 64 bit byteswap and it's
7300      * using the 32 bit integer.
7301      */
7302     host_sevp->sigev_value.sival_ptr =
7303         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7304     host_sevp->sigev_signo =
7305         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7306     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7307     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7308 
7309     unlock_user_struct(target_sevp, target_addr, 1);
7310     return 0;
7311 }
7312 
7313 #if defined(TARGET_NR_mlockall)
7314 static inline int target_to_host_mlockall_arg(int arg)
7315 {
7316     int result = 0;
7317 
7318     if (arg & TARGET_MCL_CURRENT) {
7319         result |= MCL_CURRENT;
7320     }
7321     if (arg & TARGET_MCL_FUTURE) {
7322         result |= MCL_FUTURE;
7323     }
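    /* MCL_ONFAULT may be absent from older host headers; in that case the
       target flag is silently dropped. */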
7324 #ifdef MCL_ONFAULT
7325     if (arg & TARGET_MCL_ONFAULT) {
7326         result |= MCL_ONFAULT;
7327     }
7328 #endif
7329 
7330     return result;
7331 }
7332 #endif
7333 
7334 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7335      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7336      defined(TARGET_NR_newfstatat))
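/*
 * For 32-bit Arm guests using EABI, struct stat64 has 64-bit members
 * aligned to 8 bytes, which differs from the OABI layout, so it is
 * converted through target_eabi_stat64 below.
 */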
7337 static inline abi_long host_to_target_stat64(void *cpu_env,
7338                                              abi_ulong target_addr,
7339                                              struct stat *host_st)
7340 {
7341 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7342     if (((CPUARMState *)cpu_env)->eabi) {
7343         struct target_eabi_stat64 *target_st;
7344 
7345         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7346             return -TARGET_EFAULT;
7347         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7348         __put_user(host_st->st_dev, &target_st->st_dev);
7349         __put_user(host_st->st_ino, &target_st->st_ino);
7350 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7351         __put_user(host_st->st_ino, &target_st->__st_ino);
7352 #endif
7353         __put_user(host_st->st_mode, &target_st->st_mode);
7354         __put_user(host_st->st_nlink, &target_st->st_nlink);
7355         __put_user(host_st->st_uid, &target_st->st_uid);
7356         __put_user(host_st->st_gid, &target_st->st_gid);
7357         __put_user(host_st->st_rdev, &target_st->st_rdev);
7358         __put_user(host_st->st_size, &target_st->st_size);
7359         __put_user(host_st->st_blksize, &target_st->st_blksize);
7360         __put_user(host_st->st_blocks, &target_st->st_blocks);
7361         __put_user(host_st->st_atime, &target_st->target_st_atime);
7362         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7363         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7364 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7365         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7366         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7367         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7368 #endif
7369         unlock_user_struct(target_st, target_addr, 1);
7370     } else
7371 #endif
7372     {
7373 #if defined(TARGET_HAS_STRUCT_STAT64)
7374         struct target_stat64 *target_st;
7375 #else
7376         struct target_stat *target_st;
7377 #endif
7378 
7379         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7380             return -TARGET_EFAULT;
7381         memset(target_st, 0, sizeof(*target_st));
7382         __put_user(host_st->st_dev, &target_st->st_dev);
7383         __put_user(host_st->st_ino, &target_st->st_ino);
7384 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7385         __put_user(host_st->st_ino, &target_st->__st_ino);
7386 #endif
7387         __put_user(host_st->st_mode, &target_st->st_mode);
7388         __put_user(host_st->st_nlink, &target_st->st_nlink);
7389         __put_user(host_st->st_uid, &target_st->st_uid);
7390         __put_user(host_st->st_gid, &target_st->st_gid);
7391         __put_user(host_st->st_rdev, &target_st->st_rdev);
7392         /* XXX: better use of kernel struct */
7393         __put_user(host_st->st_size, &target_st->st_size);
7394         __put_user(host_st->st_blksize, &target_st->st_blksize);
7395         __put_user(host_st->st_blocks, &target_st->st_blocks);
7396         __put_user(host_st->st_atime, &target_st->target_st_atime);
7397         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7398         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7399 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7400         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7401         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7402         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7403 #endif
7404         unlock_user_struct(target_st, target_addr, 1);
7405     }
7406 
7407     return 0;
7408 }
7409 #endif
7410 
7411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
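/*
 * Copy an already host-filled target_statx structure out to guest memory,
 * swapping each field individually so the guest sees its own byte order.
 */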
7412 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7413                                             abi_ulong target_addr)
7414 {
7415     struct target_statx *target_stx;
7416 
7417     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7418         return -TARGET_EFAULT;
7419     }
7420     memset(target_stx, 0, sizeof(*target_stx));
7421 
7422     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7423     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7424     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7425     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7426     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7427     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7428     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7429     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7430     __put_user(host_stx->stx_size, &target_stx->stx_size);
7431     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7432     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7433     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7434     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7435     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7436     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7437     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7438     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7439     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7440     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7441     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7442     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7443     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7444     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7445 
7446     unlock_user_struct(target_stx, target_addr, 1);
7447 
7448     return 0;
7449 }
7450 #endif
7451 
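/*
 * Pick the right host futex syscall.  On 64-bit hosts time_t is already
 * 64-bit, so plain __NR_futex is used.  On 32-bit hosts, __NR_futex_time64
 * is preferred when the host timespec carries a 64-bit tv_sec; otherwise
 * we fall back to the old __NR_futex.
 */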
7452 static int do_sys_futex(int *uaddr, int op, int val,
7453                          const struct timespec *timeout, int *uaddr2,
7454                          int val3)
7455 {
7456 #if HOST_LONG_BITS == 64
7457 #if defined(__NR_futex)
7458     /* The host time_t is always 64-bit here, so no _time64 variant exists. */
7459     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7460 
7461 #endif
7462 #else /* HOST_LONG_BITS == 64 */
7463 #if defined(__NR_futex_time64)
7464     if (sizeof(timeout->tv_sec) == 8) {
7465         /* _time64 function on 32bit arch */
7466         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7467     }
7468 #endif
7469 #if defined(__NR_futex)
7470     /* old function on 32bit arch */
7471     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7472 #endif
7473 #endif /* HOST_LONG_BITS == 64 */
7474     g_assert_not_reached();
7475 }
7476 
7477 static int do_safe_futex(int *uaddr, int op, int val,
7478                          const struct timespec *timeout, int *uaddr2,
7479                          int val3)
7480 {
7481 #if HOST_LONG_BITS == 64
7482 #if defined(__NR_futex)
7483     /* The host time_t is always 64-bit here, so no _time64 variant exists. */
7484     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7485 #endif
7486 #else /* HOST_LONG_BITS == 64 */
7487 #if defined(__NR_futex_time64)
7488     if (sizeof(timeout->tv_sec) == 8) {
7489         /* _time64 function on 32bit arch */
7490         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7491                                            val3));
7492     }
7493 #endif
7494 #if defined(__NR_futex)
7495     /* old function on 32bit arch */
7496     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7497 #endif
7498 #endif /* HOST_LONG_BITS == 64 */
7499     return -TARGET_ENOSYS;
7500 }
7501 
7502 /* ??? Using host futex calls even when target atomic operations
7503    are not really atomic probably breaks things.  However, implementing
7504    futexes locally would make futexes shared between multiple processes
7505    tricky.  In any case they're probably useless, because guest atomic
7506    operations won't work either.  */
7507 #if defined(TARGET_NR_futex)
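/*
 * Emulate the guest futex() syscall.  For FUTEX_WAIT/FUTEX_WAIT_BITSET the
 * guest timespec is converted and 'val' is byte-swapped, because the kernel
 * compares it against the guest's futex word in memory.  For the requeue and
 * wake-op variants the timeout argument is really an integer, so it is
 * passed through unchanged (see the comment in the switch below).
 */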
7508 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7509                     target_ulong timeout, target_ulong uaddr2, int val3)
7510 {
7511     struct timespec ts, *pts;
7512     int base_op;
7513 
7514     /* ??? We assume FUTEX_* constants are the same on both host
7515        and target.  */
7516 #ifdef FUTEX_CMD_MASK
7517     base_op = op & FUTEX_CMD_MASK;
7518 #else
7519     base_op = op;
7520 #endif
7521     switch (base_op) {
7522     case FUTEX_WAIT:
7523     case FUTEX_WAIT_BITSET:
7524         if (timeout) {
7525             pts = &ts;
7526             target_to_host_timespec(pts, timeout);
7527         } else {
7528             pts = NULL;
7529         }
7530         return do_safe_futex(g2h(cpu, uaddr),
7531                              op, tswap32(val), pts, NULL, val3);
7532     case FUTEX_WAKE:
7533         return do_safe_futex(g2h(cpu, uaddr),
7534                              op, val, NULL, NULL, 0);
7535     case FUTEX_FD:
7536         return do_safe_futex(g2h(cpu, uaddr),
7537                              op, val, NULL, NULL, 0);
7538     case FUTEX_REQUEUE:
7539     case FUTEX_CMP_REQUEUE:
7540     case FUTEX_WAKE_OP:
7541         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7542            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7543            But the prototype takes a `struct timespec *'; insert casts
7544            to satisfy the compiler.  We do not need to tswap TIMEOUT
7545            since it's not compared to guest memory.  */
7546         pts = (struct timespec *)(uintptr_t) timeout;
7547         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7548                              (base_op == FUTEX_CMP_REQUEUE
7549                               ? tswap32(val3) : val3));
7550     default:
7551         return -TARGET_ENOSYS;
7552     }
7553 }
7554 #endif
7555 
7556 #if defined(TARGET_NR_futex_time64)
7557 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7558                            int val, target_ulong timeout,
7559                            target_ulong uaddr2, int val3)
7560 {
7561     struct timespec ts, *pts;
7562     int base_op;
7563 
7564     /* ??? We assume FUTEX_* constants are the same on both host
7565        and target.  */
7566 #ifdef FUTEX_CMD_MASK
7567     base_op = op & FUTEX_CMD_MASK;
7568 #else
7569     base_op = op;
7570 #endif
7571     switch (base_op) {
7572     case FUTEX_WAIT:
7573     case FUTEX_WAIT_BITSET:
7574         if (timeout) {
7575             pts = &ts;
7576             if (target_to_host_timespec64(pts, timeout)) {
7577                 return -TARGET_EFAULT;
7578             }
7579         } else {
7580             pts = NULL;
7581         }
7582         return do_safe_futex(g2h(cpu, uaddr), op,
7583                              tswap32(val), pts, NULL, val3);
7584     case FUTEX_WAKE:
7585         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7586     case FUTEX_FD:
7587         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7588     case FUTEX_REQUEUE:
7589     case FUTEX_CMP_REQUEUE:
7590     case FUTEX_WAKE_OP:
7591         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7592            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7593            But the prototype takes a `struct timespec *'; insert casts
7594            to satisfy the compiler.  We do not need to tswap TIMEOUT
7595            since it's not compared to guest memory.  */
7596         pts = (struct timespec *)(uintptr_t) timeout;
7597         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7598                              (base_op == FUTEX_CMP_REQUEUE
7599                               ? tswap32(val3) : val3));
7600     default:
7601         return -TARGET_ENOSYS;
7602     }
7603 }
7604 #endif
7605 
7606 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
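/*
 * name_to_handle_at(2) emulation: read handle_bytes from the guest handle,
 * let the host kernel fill a scratch file_handle, then copy the opaque
 * handle back, byte-swapping only handle_bytes and handle_type, and store
 * the mount id through the guest pointer.
 */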
7607 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7608                                      abi_long handle, abi_long mount_id,
7609                                      abi_long flags)
7610 {
7611     struct file_handle *target_fh;
7612     struct file_handle *fh;
7613     int mid = 0;
7614     abi_long ret;
7615     char *name;
7616     unsigned int size, total_size;
7617 
7618     if (get_user_s32(size, handle)) {
7619         return -TARGET_EFAULT;
7620     }
7621 
7622     name = lock_user_string(pathname);
7623     if (!name) {
7624         return -TARGET_EFAULT;
7625     }
7626 
7627     total_size = sizeof(struct file_handle) + size;
7628     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7629     if (!target_fh) {
7630         unlock_user(name, pathname, 0);
7631         return -TARGET_EFAULT;
7632     }
7633 
7634     fh = g_malloc0(total_size);
7635     fh->handle_bytes = size;
7636 
7637     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7638     unlock_user(name, pathname, 0);
7639 
7640     /* man name_to_handle_at(2):
7641      * Other than the use of the handle_bytes field, the caller should treat
7642      * the file_handle structure as an opaque data type
7643      */
7644 
7645     memcpy(target_fh, fh, total_size);
7646     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7647     target_fh->handle_type = tswap32(fh->handle_type);
7648     g_free(fh);
7649     unlock_user(target_fh, handle, total_size);
7650 
7651     if (put_user_s32(mid, mount_id)) {
7652         return -TARGET_EFAULT;
7653     }
7654 
7655     return ret;
7656 
7657 }
7658 #endif
7659 
7660 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7661 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7662                                      abi_long flags)
7663 {
7664     struct file_handle *target_fh;
7665     struct file_handle *fh;
7666     unsigned int size, total_size;
7667     abi_long ret;
7668 
7669     if (get_user_s32(size, handle)) {
7670         return -TARGET_EFAULT;
7671     }
7672 
7673     total_size = sizeof(struct file_handle) + size;
7674     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7675     if (!target_fh) {
7676         return -TARGET_EFAULT;
7677     }
7678 
7679     fh = g_memdup(target_fh, total_size);
7680     fh->handle_bytes = size;
7681     fh->handle_type = tswap32(target_fh->handle_type);
7682 
7683     ret = get_errno(open_by_handle_at(mount_fd, fh,
7684                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7685 
7686     g_free(fh);
7687 
7688     unlock_user(target_fh, handle, total_size);
7689 
7690     return ret;
7691 }
7692 #endif
7693 
7694 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7695 
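/*
 * signalfd4 emulation: only TARGET_O_NONBLOCK/TARGET_O_CLOEXEC are accepted,
 * the guest sigset is converted to a host sigset_t, and the resulting fd is
 * registered with the signalfd translator so data read back from it can be
 * converted for the guest.
 */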
7696 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7697 {
7698     int host_flags;
7699     target_sigset_t *target_mask;
7700     sigset_t host_mask;
7701     abi_long ret;
7702 
7703     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7704         return -TARGET_EINVAL;
7705     }
7706     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7707         return -TARGET_EFAULT;
7708     }
7709 
7710     target_to_host_sigset(&host_mask, target_mask);
7711 
7712     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7713 
7714     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7715     if (ret >= 0) {
7716         fd_trans_register(ret, &target_signalfd_trans);
7717     }
7718 
7719     unlock_user_struct(target_mask, mask, 0);
7720 
7721     return ret;
7722 }
7723 #endif
7724 
7725 /* Map host to target signal numbers for the wait family of syscalls.
7726    Assume all other status bits are the same.  */
7727 int host_to_target_waitstatus(int status)
7728 {
7729     if (WIFSIGNALED(status)) {
7730         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7731     }
7732     if (WIFSTOPPED(status)) {
7733         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7734                | (status & 0xff);
7735     }
7736     return status;
7737 }
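/*
 * Example of the mapping above: a child stopped by a host signal reports
 * (host_to_target_signal(sig) << 8) | 0x7f, i.e. only the signal number in
 * bits 8-15 is rewritten while the 0x7f "stopped" marker is preserved.
 */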
7738 
7739 static int open_self_cmdline(void *cpu_env, int fd)
7740 {
7741     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7742     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7743     int i;
7744 
7745     for (i = 0; i < bprm->argc; i++) {
7746         size_t len = strlen(bprm->argv[i]) + 1;
7747 
7748         if (write(fd, bprm->argv[i], len) != len) {
7749             return -1;
7750         }
7751     }
7752 
7753     return 0;
7754 }
7755 
7756 static int open_self_maps(void *cpu_env, int fd)
7757 {
7758     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7759     TaskState *ts = cpu->opaque;
7760     GSList *map_info = read_self_maps();
7761     GSList *s;
7762     int count;
7763 
7764     for (s = map_info; s; s = g_slist_next(s)) {
7765         MapInfo *e = (MapInfo *) s->data;
7766 
7767         if (h2g_valid(e->start)) {
7768             unsigned long min = e->start;
7769             unsigned long max = e->end;
7770             int flags = page_get_flags(h2g(min));
7771             const char *path;
7772 
7773             max = h2g_valid(max - 1) ?
7774                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7775 
7776             if (page_check_range(h2g(min), max - min, flags) == -1) {
7777                 continue;
7778             }
7779 
7780             if (h2g(min) == ts->info->stack_limit) {
7781                 path = "[stack]";
7782             } else {
7783                 path = e->path;
7784             }
7785 
7786             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7787                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7788                             h2g(min), h2g(max - 1) + 1,
7789                             (flags & PAGE_READ) ? 'r' : '-',
7790                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7791                             (flags & PAGE_EXEC) ? 'x' : '-',
7792                             e->is_priv ? 'p' : '-',
7793                             (uint64_t) e->offset, e->dev, e->inode);
7794             if (path) {
7795                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7796             } else {
7797                 dprintf(fd, "\n");
7798             }
7799         }
7800     }
7801 
7802     free_self_maps(map_info);
7803 
7804 #ifdef TARGET_VSYSCALL_PAGE
7805     /*
7806      * We only support execution from the vsyscall page.
7807      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7808      */
7809     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7810                     " --xp 00000000 00:00 0",
7811                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7812     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7813 #endif
7814 
7815     return 0;
7816 }
7817 
7818 static int open_self_stat(void *cpu_env, int fd)
7819 {
7820     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7821     TaskState *ts = cpu->opaque;
7822     g_autoptr(GString) buf = g_string_new(NULL);
7823     int i;
7824 
7825     for (i = 0; i < 44; i++) {
7826         if (i == 0) {
7827             /* pid */
7828             g_string_printf(buf, FMT_pid " ", getpid());
7829         } else if (i == 1) {
7830             /* app name */
7831             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7832             bin = bin ? bin + 1 : ts->bprm->argv[0];
7833             g_string_printf(buf, "(%.15s) ", bin);
7834         } else if (i == 3) {
7835             /* ppid */
7836             g_string_printf(buf, FMT_pid " ", getppid());
7837         } else if (i == 27) {
7838             /* stack bottom */
7839             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7840         } else {
7841             /* for the rest, there is MasterCard */
7842             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7843         }
7844 
7845         if (write(fd, buf->str, buf->len) != buf->len) {
7846             return -1;
7847         }
7848     }
7849 
7850     return 0;
7851 }
7852 
7853 static int open_self_auxv(void *cpu_env, int fd)
7854 {
7855     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7856     TaskState *ts = cpu->opaque;
7857     abi_ulong auxv = ts->info->saved_auxv;
7858     abi_ulong len = ts->info->auxv_len;
7859     char *ptr;
7860 
7861     /*
7862      * The auxiliary vector is stored on the target process's stack.
7863      * Read the whole auxv vector and copy it to the file.
7864      */
7865     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7866     if (ptr != NULL) {
7867         while (len > 0) {
7868             ssize_t r;
7869             r = write(fd, ptr, len);
7870             if (r <= 0) {
7871                 break;
7872             }
7873             len -= r;
7874             ptr += r;
7875         }
7876         lseek(fd, 0, SEEK_SET);
7877         unlock_user(ptr, auxv, len);
7878     }
7879 
7880     return 0;
7881 }
7882 
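/*
 * Return 1 if 'filename' names the given /proc entry for this process.
 * For instance, with entry "maps" both "/proc/self/maps" and
 * "/proc/<our pid>/maps" match, while another process's pid does not.
 */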
7883 static int is_proc_myself(const char *filename, const char *entry)
7884 {
7885     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7886         filename += strlen("/proc/");
7887         if (!strncmp(filename, "self/", strlen("self/"))) {
7888             filename += strlen("self/");
7889         } else if (*filename >= '1' && *filename <= '9') {
7890             char myself[80];
7891             snprintf(myself, sizeof(myself), "%d/", getpid());
7892             if (!strncmp(filename, myself, strlen(myself))) {
7893                 filename += strlen(myself);
7894             } else {
7895                 return 0;
7896             }
7897         } else {
7898             return 0;
7899         }
7900         if (!strcmp(filename, entry)) {
7901             return 1;
7902         }
7903     }
7904     return 0;
7905 }
7906 
7907 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7908     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7909 static int is_proc(const char *filename, const char *entry)
7910 {
7911     return strcmp(filename, entry) == 0;
7912 }
7913 #endif
7914 
7915 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7916 static int open_net_route(void *cpu_env, int fd)
7917 {
7918     FILE *fp;
7919     char *line = NULL;
7920     size_t len = 0;
7921     ssize_t read;
7922 
7923     fp = fopen("/proc/net/route", "r");
7924     if (fp == NULL) {
7925         return -1;
7926     }
7927 
7928     /* read header */
7929 
7930     read = getline(&line, &len, fp);
7931     dprintf(fd, "%s", line);
7932 
7933     /* read routes */
7934 
7935     while ((read = getline(&line, &len, fp)) != -1) {
7936         char iface[16];
7937         uint32_t dest, gw, mask;
7938         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7939         int fields;
7940 
7941         fields = sscanf(line,
7942                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7943                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7944                         &mask, &mtu, &window, &irtt);
7945         if (fields != 11) {
7946             continue;
7947         }
7948         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7949                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7950                 metric, tswap32(mask), mtu, window, irtt);
7951     }
7952 
7953     free(line);
7954     fclose(fp);
7955 
7956     return 0;
7957 }
7958 #endif
7959 
7960 #if defined(TARGET_SPARC)
7961 static int open_cpuinfo(void *cpu_env, int fd)
7962 {
7963     dprintf(fd, "type\t\t: sun4u\n");
7964     return 0;
7965 }
7966 #endif
7967 
7968 #if defined(TARGET_HPPA)
7969 static int open_cpuinfo(void *cpu_env, int fd)
7970 {
7971     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7972     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7973     dprintf(fd, "capabilities\t: os32\n");
7974     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7975     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7976     return 0;
7977 }
7978 #endif
7979 
7980 #if defined(TARGET_M68K)
7981 static int open_hardware(void *cpu_env, int fd)
7982 {
7983     dprintf(fd, "Model:\t\tqemu-m68k\n");
7984     return 0;
7985 }
7986 #endif
7987 
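/*
 * openat() emulation with faked /proc entries: opens of the guest's own
 * /proc/self/{maps,stat,auxv,cmdline} (plus a few host/target-dependent
 * files) are redirected to an unlinked temporary file filled with
 * synthesized contents; everything else goes to the host via safe_openat().
 */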
7988 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7989 {
7990     struct fake_open {
7991         const char *filename;
7992         int (*fill)(void *cpu_env, int fd);
7993         int (*cmp)(const char *s1, const char *s2);
7994     };
7995     const struct fake_open *fake_open;
7996     static const struct fake_open fakes[] = {
7997         { "maps", open_self_maps, is_proc_myself },
7998         { "stat", open_self_stat, is_proc_myself },
7999         { "auxv", open_self_auxv, is_proc_myself },
8000         { "cmdline", open_self_cmdline, is_proc_myself },
8001 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8002         { "/proc/net/route", open_net_route, is_proc },
8003 #endif
8004 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8005         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8006 #endif
8007 #if defined(TARGET_M68K)
8008         { "/proc/hardware", open_hardware, is_proc },
8009 #endif
8010         { NULL, NULL, NULL }
8011     };
8012 
8013     if (is_proc_myself(pathname, "exe")) {
8014         int execfd = qemu_getauxval(AT_EXECFD);
8015         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8016     }
8017 
8018     for (fake_open = fakes; fake_open->filename; fake_open++) {
8019         if (fake_open->cmp(pathname, fake_open->filename)) {
8020             break;
8021         }
8022     }
8023 
8024     if (fake_open->filename) {
8025         const char *tmpdir;
8026         char filename[PATH_MAX];
8027         int fd, r;
8028 
8029         /* create a temporary file to hold the synthesized contents */
8030         tmpdir = getenv("TMPDIR");
8031         if (!tmpdir)
8032             tmpdir = "/tmp";
8033         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8034         fd = mkstemp(filename);
8035         if (fd < 0) {
8036             return fd;
8037         }
8038         unlink(filename);
8039 
8040         if ((r = fake_open->fill(cpu_env, fd))) {
8041             int e = errno;
8042             close(fd);
8043             errno = e;
8044             return r;
8045         }
8046         lseek(fd, 0, SEEK_SET);
8047 
8048         return fd;
8049     }
8050 
8051     return safe_openat(dirfd, path(pathname), flags, mode);
8052 }
8053 
8054 #define TIMER_MAGIC 0x0caf0000
8055 #define TIMER_MAGIC_MASK 0xffff0000
8056 
8057 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8058 static target_timer_t get_timer_id(abi_long arg)
8059 {
8060     target_timer_t timerid = arg;
8061 
8062     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8063         return -TARGET_EINVAL;
8064     }
8065 
8066     timerid &= 0xffff;
8067 
8068     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8069         return -TARGET_EINVAL;
8070     }
8071 
8072     return timerid;
8073 }
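/*
 * Example: an id of (TIMER_MAGIC | 3) yields index 3; any value without the
 * 0x0caf magic in its top 16 bits is rejected with -TARGET_EINVAL, as is an
 * index beyond the g_posix_timers array.
 */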
8074 
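/*
 * Expand a guest CPU-affinity mask (an array of abi_ulong) into a host mask
 * bit by bit.  When the guest word is narrower than the host word (e.g. a
 * 32-bit guest on a 64-bit host), two guest words pack into one host word.
 */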
8075 static int target_to_host_cpu_mask(unsigned long *host_mask,
8076                                    size_t host_size,
8077                                    abi_ulong target_addr,
8078                                    size_t target_size)
8079 {
8080     unsigned target_bits = sizeof(abi_ulong) * 8;
8081     unsigned host_bits = sizeof(*host_mask) * 8;
8082     abi_ulong *target_mask;
8083     unsigned i, j;
8084 
8085     assert(host_size >= target_size);
8086 
8087     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8088     if (!target_mask) {
8089         return -TARGET_EFAULT;
8090     }
8091     memset(host_mask, 0, host_size);
8092 
8093     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8094         unsigned bit = i * target_bits;
8095         abi_ulong val;
8096 
8097         __get_user(val, &target_mask[i]);
8098         for (j = 0; j < target_bits; j++, bit++) {
8099             if (val & (1UL << j)) {
8100                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8101             }
8102         }
8103     }
8104 
8105     unlock_user(target_mask, target_addr, 0);
8106     return 0;
8107 }
8108 
8109 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8110                                    size_t host_size,
8111                                    abi_ulong target_addr,
8112                                    size_t target_size)
8113 {
8114     unsigned target_bits = sizeof(abi_ulong) * 8;
8115     unsigned host_bits = sizeof(*host_mask) * 8;
8116     abi_ulong *target_mask;
8117     unsigned i, j;
8118 
8119     assert(host_size >= target_size);
8120 
8121     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8122     if (!target_mask) {
8123         return -TARGET_EFAULT;
8124     }
8125 
8126     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8127         unsigned bit = i * target_bits;
8128         abi_ulong val = 0;
8129 
8130         for (j = 0; j < target_bits; j++, bit++) {
8131             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8132                 val |= 1UL << j;
8133             }
8134         }
8135         __put_user(val, &target_mask[i]);
8136     }
8137 
8138     unlock_user(target_mask, target_addr, target_size);
8139     return 0;
8140 }
8141 
8142 #ifdef TARGET_NR_getdents
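/*
 * getdents emulation: read host dirents into a bounce buffer, then repack
 * each record into the guest's target_dirent layout, swapping d_ino/d_off/
 * d_reclen and storing d_type in the record's final byte.  If the guest
 * buffer fills up early, the directory offset is rewound to the first
 * record that was not returned.
 */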
8143 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8144 {
8145     g_autofree void *hdirp = NULL;
8146     void *tdirp;
8147     int hlen, hoff, toff;
8148     int hreclen, treclen;
8149     off64_t prev_diroff = 0;
8150 
8151     hdirp = g_try_malloc(count);
8152     if (!hdirp) {
8153         return -TARGET_ENOMEM;
8154     }
8155 
8156 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8157     hlen = sys_getdents(dirfd, hdirp, count);
8158 #else
8159     hlen = sys_getdents64(dirfd, hdirp, count);
8160 #endif
8161 
8162     hlen = get_errno(hlen);
8163     if (is_error(hlen)) {
8164         return hlen;
8165     }
8166 
8167     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8168     if (!tdirp) {
8169         return -TARGET_EFAULT;
8170     }
8171 
8172     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8173 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8174         struct linux_dirent *hde = hdirp + hoff;
8175 #else
8176         struct linux_dirent64 *hde = hdirp + hoff;
8177 #endif
8178         struct target_dirent *tde = tdirp + toff;
8179         int namelen;
8180         uint8_t type;
8181 
8182         namelen = strlen(hde->d_name);
8183         hreclen = hde->d_reclen;
8184         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8185         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8186 
8187         if (toff + treclen > count) {
8188             /*
8189              * If the host struct is smaller than the target struct, or
8190              * requires less alignment and thus packs into less space,
8191              * then the host can return more entries than we can pass
8192              * on to the guest.
8193              */
8194             if (toff == 0) {
8195                 toff = -TARGET_EINVAL; /* result buffer is too small */
8196                 break;
8197             }
8198             /*
8199              * Return what we have, resetting the file pointer to the
8200              * location of the first record not returned.
8201              */
8202             lseek64(dirfd, prev_diroff, SEEK_SET);
8203             break;
8204         }
8205 
8206         prev_diroff = hde->d_off;
8207         tde->d_ino = tswapal(hde->d_ino);
8208         tde->d_off = tswapal(hde->d_off);
8209         tde->d_reclen = tswap16(treclen);
8210         memcpy(tde->d_name, hde->d_name, namelen + 1);
8211 
8212         /*
8213          * The getdents type is in what was formerly a padding byte at the
8214          * end of the structure.
8215          */
8216 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8217         type = *((uint8_t *)hde + hreclen - 1);
8218 #else
8219         type = hde->d_type;
8220 #endif
8221         *((uint8_t *)tde + treclen - 1) = type;
8222     }
8223 
8224     unlock_user(tdirp, arg2, toff);
8225     return toff;
8226 }
8227 #endif /* TARGET_NR_getdents */
8228 
8229 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8230 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8231 {
8232     g_autofree void *hdirp = NULL;
8233     void *tdirp;
8234     int hlen, hoff, toff;
8235     int hreclen, treclen;
8236     off64_t prev_diroff = 0;
8237 
8238     hdirp = g_try_malloc(count);
8239     if (!hdirp) {
8240         return -TARGET_ENOMEM;
8241     }
8242 
8243     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8244     if (is_error(hlen)) {
8245         return hlen;
8246     }
8247 
8248     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8249     if (!tdirp) {
8250         return -TARGET_EFAULT;
8251     }
8252 
8253     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8254         struct linux_dirent64 *hde = hdirp + hoff;
8255         struct target_dirent64 *tde = tdirp + toff;
8256         int namelen;
8257 
8258         namelen = strlen(hde->d_name) + 1;
8259         hreclen = hde->d_reclen;
8260         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8261         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8262 
8263         if (toff + treclen > count) {
8264             /*
8265              * If the host struct is smaller than the target struct, or
8266              * requires less alignment and thus packs into less space,
8267              * then the host can return more entries than we can pass
8268              * on to the guest.
8269              */
8270             if (toff == 0) {
8271                 toff = -TARGET_EINVAL; /* result buffer is too small */
8272                 break;
8273             }
8274             /*
8275              * Return what we have, resetting the file pointer to the
8276              * location of the first record not returned.
8277              */
8278             lseek64(dirfd, prev_diroff, SEEK_SET);
8279             break;
8280         }
8281 
8282         prev_diroff = hde->d_off;
8283         tde->d_ino = tswap64(hde->d_ino);
8284         tde->d_off = tswap64(hde->d_off);
8285         tde->d_reclen = tswap16(treclen);
8286         tde->d_type = hde->d_type;
8287         memcpy(tde->d_name, hde->d_name, namelen);
8288     }
8289 
8290     unlock_user(tdirp, arg2, toff);
8291     return toff;
8292 }
8293 #endif /* TARGET_NR_getdents64 */
8294 
8295 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8296 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8297 #endif
8298 
8299 /* This is an internal helper for do_syscall so that it is easier
8300  * to have a single return point, allowing actions such as logging
8301  * of syscall results to be performed in one place.
8302  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8303  */
8304 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8305                             abi_long arg2, abi_long arg3, abi_long arg4,
8306                             abi_long arg5, abi_long arg6, abi_long arg7,
8307                             abi_long arg8)
8308 {
8309     CPUState *cpu = env_cpu(cpu_env);
8310     abi_long ret;
8311 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8312     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8313     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8314     || defined(TARGET_NR_statx)
8315     struct stat st;
8316 #endif
8317 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8318     || defined(TARGET_NR_fstatfs)
8319     struct statfs stfs;
8320 #endif
8321     void *p;
8322 
8323     switch(num) {
8324     case TARGET_NR_exit:
8325         /* In old applications this may be used to implement _exit(2).
8326            However, in threaded applications it is used for thread termination,
8327            and _exit_group is used for application termination.
8328            Do thread termination if we have more than one thread.  */
8329 
8330         if (block_signals()) {
8331             return -TARGET_ERESTARTSYS;
8332         }
8333 
8334         pthread_mutex_lock(&clone_lock);
8335 
8336         if (CPU_NEXT(first_cpu)) {
8337             TaskState *ts = cpu->opaque;
8338 
8339             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8340             object_unref(OBJECT(cpu));
8341             /*
8342              * At this point the CPU should be unrealized and removed
8343              * from cpu lists. We can clean-up the rest of the thread
8344              * data without the lock held.
8345              */
8346 
8347             pthread_mutex_unlock(&clone_lock);
8348 
8349             if (ts->child_tidptr) {
8350                 put_user_u32(0, ts->child_tidptr);
8351                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8352                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8353             }
8354             thread_cpu = NULL;
8355             g_free(ts);
8356             rcu_unregister_thread();
8357             pthread_exit(NULL);
8358         }
8359 
8360         pthread_mutex_unlock(&clone_lock);
8361         preexit_cleanup(cpu_env, arg1);
8362         _exit(arg1);
8363         return 0; /* avoid warning */
8364     case TARGET_NR_read:
8365         if (arg2 == 0 && arg3 == 0) {
8366             return get_errno(safe_read(arg1, 0, 0));
8367         } else {
8368             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8369                 return -TARGET_EFAULT;
8370             ret = get_errno(safe_read(arg1, p, arg3));
8371             if (ret >= 0 &&
8372                 fd_trans_host_to_target_data(arg1)) {
8373                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8374             }
8375             unlock_user(p, arg2, ret);
8376         }
8377         return ret;
8378     case TARGET_NR_write:
8379         if (arg2 == 0 && arg3 == 0) {
8380             return get_errno(safe_write(arg1, 0, 0));
8381         }
8382         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8383             return -TARGET_EFAULT;
8384         if (fd_trans_target_to_host_data(arg1)) {
8385             void *copy = g_malloc(arg3);
8386             memcpy(copy, p, arg3);
8387             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8388             if (ret >= 0) {
8389                 ret = get_errno(safe_write(arg1, copy, ret));
8390             }
8391             g_free(copy);
8392         } else {
8393             ret = get_errno(safe_write(arg1, p, arg3));
8394         }
8395         unlock_user(p, arg2, 0);
8396         return ret;
8397 
8398 #ifdef TARGET_NR_open
8399     case TARGET_NR_open:
8400         if (!(p = lock_user_string(arg1)))
8401             return -TARGET_EFAULT;
8402         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8403                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8404                                   arg3));
8405         fd_trans_unregister(ret);
8406         unlock_user(p, arg1, 0);
8407         return ret;
8408 #endif
8409     case TARGET_NR_openat:
8410         if (!(p = lock_user_string(arg2)))
8411             return -TARGET_EFAULT;
8412         ret = get_errno(do_openat(cpu_env, arg1, p,
8413                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8414                                   arg4));
8415         fd_trans_unregister(ret);
8416         unlock_user(p, arg2, 0);
8417         return ret;
8418 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8419     case TARGET_NR_name_to_handle_at:
8420         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8421         return ret;
8422 #endif
8423 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8424     case TARGET_NR_open_by_handle_at:
8425         ret = do_open_by_handle_at(arg1, arg2, arg3);
8426         fd_trans_unregister(ret);
8427         return ret;
8428 #endif
8429     case TARGET_NR_close:
8430         fd_trans_unregister(arg1);
8431         return get_errno(close(arg1));
8432 
8433     case TARGET_NR_brk:
8434         return do_brk(arg1);
8435 #ifdef TARGET_NR_fork
8436     case TARGET_NR_fork:
8437         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8438 #endif
8439 #ifdef TARGET_NR_waitpid
8440     case TARGET_NR_waitpid:
8441         {
8442             int status;
8443             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8444             if (!is_error(ret) && arg2 && ret
8445                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8446                 return -TARGET_EFAULT;
8447         }
8448         return ret;
8449 #endif
8450 #ifdef TARGET_NR_waitid
8451     case TARGET_NR_waitid:
8452         {
8453             siginfo_t info;
8454             info.si_pid = 0;
8455             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8456             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8457                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8458                     return -TARGET_EFAULT;
8459                 host_to_target_siginfo(p, &info);
8460                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8461             }
8462         }
8463         return ret;
8464 #endif
8465 #ifdef TARGET_NR_creat /* not on alpha */
8466     case TARGET_NR_creat:
8467         if (!(p = lock_user_string(arg1)))
8468             return -TARGET_EFAULT;
8469         ret = get_errno(creat(p, arg2));
8470         fd_trans_unregister(ret);
8471         unlock_user(p, arg1, 0);
8472         return ret;
8473 #endif
8474 #ifdef TARGET_NR_link
8475     case TARGET_NR_link:
8476         {
8477             void * p2;
8478             p = lock_user_string(arg1);
8479             p2 = lock_user_string(arg2);
8480             if (!p || !p2)
8481                 ret = -TARGET_EFAULT;
8482             else
8483                 ret = get_errno(link(p, p2));
8484             unlock_user(p2, arg2, 0);
8485             unlock_user(p, arg1, 0);
8486         }
8487         return ret;
8488 #endif
8489 #if defined(TARGET_NR_linkat)
8490     case TARGET_NR_linkat:
8491         {
8492             void * p2 = NULL;
8493             if (!arg2 || !arg4)
8494                 return -TARGET_EFAULT;
8495             p  = lock_user_string(arg2);
8496             p2 = lock_user_string(arg4);
8497             if (!p || !p2)
8498                 ret = -TARGET_EFAULT;
8499             else
8500                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8501             unlock_user(p, arg2, 0);
8502             unlock_user(p2, arg4, 0);
8503         }
8504         return ret;
8505 #endif
8506 #ifdef TARGET_NR_unlink
8507     case TARGET_NR_unlink:
8508         if (!(p = lock_user_string(arg1)))
8509             return -TARGET_EFAULT;
8510         ret = get_errno(unlink(p));
8511         unlock_user(p, arg1, 0);
8512         return ret;
8513 #endif
8514 #if defined(TARGET_NR_unlinkat)
8515     case TARGET_NR_unlinkat:
8516         if (!(p = lock_user_string(arg2)))
8517             return -TARGET_EFAULT;
8518         ret = get_errno(unlinkat(arg1, p, arg3));
8519         unlock_user(p, arg2, 0);
8520         return ret;
8521 #endif
8522     case TARGET_NR_execve:
8523         {
8524             char **argp, **envp;
8525             int argc, envc;
8526             abi_ulong gp;
8527             abi_ulong guest_argp;
8528             abi_ulong guest_envp;
8529             abi_ulong addr;
8530             char **q;
8531 
8532             argc = 0;
8533             guest_argp = arg2;
8534             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8535                 if (get_user_ual(addr, gp))
8536                     return -TARGET_EFAULT;
8537                 if (!addr)
8538                     break;
8539                 argc++;
8540             }
8541             envc = 0;
8542             guest_envp = arg3;
8543             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8544                 if (get_user_ual(addr, gp))
8545                     return -TARGET_EFAULT;
8546                 if (!addr)
8547                     break;
8548                 envc++;
8549             }
8550 
8551             argp = g_new0(char *, argc + 1);
8552             envp = g_new0(char *, envc + 1);
8553 
8554             for (gp = guest_argp, q = argp; gp;
8555                   gp += sizeof(abi_ulong), q++) {
8556                 if (get_user_ual(addr, gp))
8557                     goto execve_efault;
8558                 if (!addr)
8559                     break;
8560                 if (!(*q = lock_user_string(addr)))
8561                     goto execve_efault;
8562             }
8563             *q = NULL;
8564 
8565             for (gp = guest_envp, q = envp; gp;
8566                   gp += sizeof(abi_ulong), q++) {
8567                 if (get_user_ual(addr, gp))
8568                     goto execve_efault;
8569                 if (!addr)
8570                     break;
8571                 if (!(*q = lock_user_string(addr)))
8572                     goto execve_efault;
8573             }
8574             *q = NULL;
8575 
8576             if (!(p = lock_user_string(arg1)))
8577                 goto execve_efault;
8578             /* Although execve() is not an interruptible syscall it is
8579              * a special case where we must use the safe_syscall wrapper:
8580              * if we allow a signal to happen before we make the host
8581              * syscall then we will 'lose' it, because at the point of
8582              * execve the process leaves QEMU's control. So we use the
8583              * safe syscall wrapper to ensure that we either take the
8584              * signal as a guest signal, or else it does not happen
8585              * before the execve completes and makes it the other
8586              * program's problem.
8587              */
8588             ret = get_errno(safe_execve(p, argp, envp));
8589             unlock_user(p, arg1, 0);
8590 
8591             goto execve_end;
8592 
8593         execve_efault:
8594             ret = -TARGET_EFAULT;
8595 
8596         execve_end:
8597             for (gp = guest_argp, q = argp; *q;
8598                   gp += sizeof(abi_ulong), q++) {
8599                 if (get_user_ual(addr, gp)
8600                     || !addr)
8601                     break;
8602                 unlock_user(*q, addr, 0);
8603             }
8604             for (gp = guest_envp, q = envp; *q;
8605                   gp += sizeof(abi_ulong), q++) {
8606                 if (get_user_ual(addr, gp)
8607                     || !addr)
8608                     break;
8609                 unlock_user(*q, addr, 0);
8610             }
8611 
8612             g_free(argp);
8613             g_free(envp);
8614         }
8615         return ret;
8616     case TARGET_NR_chdir:
8617         if (!(p = lock_user_string(arg1)))
8618             return -TARGET_EFAULT;
8619         ret = get_errno(chdir(p));
8620         unlock_user(p, arg1, 0);
8621         return ret;
8622 #ifdef TARGET_NR_time
8623     case TARGET_NR_time:
8624         {
8625             time_t host_time;
8626             ret = get_errno(time(&host_time));
8627             if (!is_error(ret)
8628                 && arg1
8629                 && put_user_sal(host_time, arg1))
8630                 return -TARGET_EFAULT;
8631         }
8632         return ret;
8633 #endif
8634 #ifdef TARGET_NR_mknod
8635     case TARGET_NR_mknod:
8636         if (!(p = lock_user_string(arg1)))
8637             return -TARGET_EFAULT;
8638         ret = get_errno(mknod(p, arg2, arg3));
8639         unlock_user(p, arg1, 0);
8640         return ret;
8641 #endif
8642 #if defined(TARGET_NR_mknodat)
8643     case TARGET_NR_mknodat:
8644         if (!(p = lock_user_string(arg2)))
8645             return -TARGET_EFAULT;
8646         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8647         unlock_user(p, arg2, 0);
8648         return ret;
8649 #endif
8650 #ifdef TARGET_NR_chmod
8651     case TARGET_NR_chmod:
8652         if (!(p = lock_user_string(arg1)))
8653             return -TARGET_EFAULT;
8654         ret = get_errno(chmod(p, arg2));
8655         unlock_user(p, arg1, 0);
8656         return ret;
8657 #endif
8658 #ifdef TARGET_NR_lseek
8659     case TARGET_NR_lseek:
8660         return get_errno(lseek(arg1, arg2, arg3));
8661 #endif
8662 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8663     /* Alpha specific */
8664     case TARGET_NR_getxpid:
8665         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8666         return get_errno(getpid());
8667 #endif
8668 #ifdef TARGET_NR_getpid
8669     case TARGET_NR_getpid:
8670         return get_errno(getpid());
8671 #endif
8672     case TARGET_NR_mount:
8673         {
8674             /* need to look at the data field */
8675             void *p2, *p3;
8676 
8677             if (arg1) {
8678                 p = lock_user_string(arg1);
8679                 if (!p) {
8680                     return -TARGET_EFAULT;
8681                 }
8682             } else {
8683                 p = NULL;
8684             }
8685 
8686             p2 = lock_user_string(arg2);
8687             if (!p2) {
8688                 if (arg1) {
8689                     unlock_user(p, arg1, 0);
8690                 }
8691                 return -TARGET_EFAULT;
8692             }
8693 
8694             if (arg3) {
8695                 p3 = lock_user_string(arg3);
8696                 if (!p3) {
8697                     if (arg1) {
8698                         unlock_user(p, arg1, 0);
8699                     }
8700                     unlock_user(p2, arg2, 0);
8701                     return -TARGET_EFAULT;
8702                 }
8703             } else {
8704                 p3 = NULL;
8705             }
8706 
8707             /* FIXME - arg5 should be locked, but it isn't clear how to
8708              * do that since it's not guaranteed to be a NULL-terminated
8709              * string.
8710              */
8711             if (!arg5) {
8712                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8713             } else {
8714                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8715             }
8716             ret = get_errno(ret);
8717 
8718             if (arg1) {
8719                 unlock_user(p, arg1, 0);
8720             }
8721             unlock_user(p2, arg2, 0);
8722             if (arg3) {
8723                 unlock_user(p3, arg3, 0);
8724             }
8725         }
8726         return ret;
8727 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8728 #if defined(TARGET_NR_umount)
8729     case TARGET_NR_umount:
8730 #endif
8731 #if defined(TARGET_NR_oldumount)
8732     case TARGET_NR_oldumount:
8733 #endif
8734         if (!(p = lock_user_string(arg1)))
8735             return -TARGET_EFAULT;
8736         ret = get_errno(umount(p));
8737         unlock_user(p, arg1, 0);
8738         return ret;
8739 #endif
8740 #ifdef TARGET_NR_stime /* not on alpha */
8741     case TARGET_NR_stime:
8742         {
8743             struct timespec ts;
8744             ts.tv_nsec = 0;
8745             if (get_user_sal(ts.tv_sec, arg1)) {
8746                 return -TARGET_EFAULT;
8747             }
8748             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8749         }
8750 #endif
8751 #ifdef TARGET_NR_alarm /* not on alpha */
8752     case TARGET_NR_alarm:
8753         return alarm(arg1);
8754 #endif
8755 #ifdef TARGET_NR_pause /* not on alpha */
8756     case TARGET_NR_pause:
8757         if (!block_signals()) {
8758             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8759         }
8760         return -TARGET_EINTR;
8761 #endif
8762 #ifdef TARGET_NR_utime
8763     case TARGET_NR_utime:
8764         {
8765             struct utimbuf tbuf, *host_tbuf;
8766             struct target_utimbuf *target_tbuf;
8767             if (arg2) {
8768                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8769                     return -TARGET_EFAULT;
8770                 tbuf.actime = tswapal(target_tbuf->actime);
8771                 tbuf.modtime = tswapal(target_tbuf->modtime);
8772                 unlock_user_struct(target_tbuf, arg2, 0);
8773                 host_tbuf = &tbuf;
8774             } else {
8775                 host_tbuf = NULL;
8776             }
8777             if (!(p = lock_user_string(arg1)))
8778                 return -TARGET_EFAULT;
8779             ret = get_errno(utime(p, host_tbuf));
8780             unlock_user(p, arg1, 0);
8781         }
8782         return ret;
8783 #endif
8784 #ifdef TARGET_NR_utimes
8785     case TARGET_NR_utimes:
8786         {
8787             struct timeval *tvp, tv[2];
8788             if (arg2) {
8789                 if (copy_from_user_timeval(&tv[0], arg2)
8790                     || copy_from_user_timeval(&tv[1],
8791                                               arg2 + sizeof(struct target_timeval)))
8792                     return -TARGET_EFAULT;
8793                 tvp = tv;
8794             } else {
8795                 tvp = NULL;
8796             }
8797             if (!(p = lock_user_string(arg1)))
8798                 return -TARGET_EFAULT;
8799             ret = get_errno(utimes(p, tvp));
8800             unlock_user(p, arg1, 0);
8801         }
8802         return ret;
8803 #endif
8804 #if defined(TARGET_NR_futimesat)
8805     case TARGET_NR_futimesat:
8806         {
8807             struct timeval *tvp, tv[2];
8808             if (arg3) {
8809                 if (copy_from_user_timeval(&tv[0], arg3)
8810                     || copy_from_user_timeval(&tv[1],
8811                                               arg3 + sizeof(struct target_timeval)))
8812                     return -TARGET_EFAULT;
8813                 tvp = tv;
8814             } else {
8815                 tvp = NULL;
8816             }
8817             if (!(p = lock_user_string(arg2))) {
8818                 return -TARGET_EFAULT;
8819             }
8820             ret = get_errno(futimesat(arg1, path(p), tvp));
8821             unlock_user(p, arg2, 0);
8822         }
8823         return ret;
8824 #endif
8825 #ifdef TARGET_NR_access
8826     case TARGET_NR_access:
8827         if (!(p = lock_user_string(arg1))) {
8828             return -TARGET_EFAULT;
8829         }
8830         ret = get_errno(access(path(p), arg2));
8831         unlock_user(p, arg1, 0);
8832         return ret;
8833 #endif
8834 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8835     case TARGET_NR_faccessat:
8836         if (!(p = lock_user_string(arg2))) {
8837             return -TARGET_EFAULT;
8838         }
8839         ret = get_errno(faccessat(arg1, p, arg3, 0));
8840         unlock_user(p, arg2, 0);
8841         return ret;
8842 #endif
8843 #ifdef TARGET_NR_nice /* not on alpha */
8844     case TARGET_NR_nice:
8845         return get_errno(nice(arg1));
8846 #endif
8847     case TARGET_NR_sync:
8848         sync();
8849         return 0;
8850 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8851     case TARGET_NR_syncfs:
8852         return get_errno(syncfs(arg1));
8853 #endif
8854     case TARGET_NR_kill:
8855         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8856 #ifdef TARGET_NR_rename
8857     case TARGET_NR_rename:
8858         {
8859             void *p2;
8860             p = lock_user_string(arg1);
8861             p2 = lock_user_string(arg2);
8862             if (!p || !p2)
8863                 ret = -TARGET_EFAULT;
8864             else
8865                 ret = get_errno(rename(p, p2));
8866             unlock_user(p2, arg2, 0);
8867             unlock_user(p, arg1, 0);
8868         }
8869         return ret;
8870 #endif
8871 #if defined(TARGET_NR_renameat)
8872     case TARGET_NR_renameat:
8873         {
8874             void *p2;
8875             p  = lock_user_string(arg2);
8876             p2 = lock_user_string(arg4);
8877             if (!p || !p2)
8878                 ret = -TARGET_EFAULT;
8879             else
8880                 ret = get_errno(renameat(arg1, p, arg3, p2));
8881             unlock_user(p2, arg4, 0);
8882             unlock_user(p, arg2, 0);
8883         }
8884         return ret;
8885 #endif
8886 #if defined(TARGET_NR_renameat2)
8887     case TARGET_NR_renameat2:
8888         {
8889             void *p2;
8890             p  = lock_user_string(arg2);
8891             p2 = lock_user_string(arg4);
8892             if (!p || !p2) {
8893                 ret = -TARGET_EFAULT;
8894             } else {
8895                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8896             }
8897             unlock_user(p2, arg4, 0);
8898             unlock_user(p, arg2, 0);
8899         }
8900         return ret;
8901 #endif
8902 #ifdef TARGET_NR_mkdir
8903     case TARGET_NR_mkdir:
8904         if (!(p = lock_user_string(arg1)))
8905             return -TARGET_EFAULT;
8906         ret = get_errno(mkdir(p, arg2));
8907         unlock_user(p, arg1, 0);
8908         return ret;
8909 #endif
8910 #if defined(TARGET_NR_mkdirat)
8911     case TARGET_NR_mkdirat:
8912         if (!(p = lock_user_string(arg2)))
8913             return -TARGET_EFAULT;
8914         ret = get_errno(mkdirat(arg1, p, arg3));
8915         unlock_user(p, arg2, 0);
8916         return ret;
8917 #endif
8918 #ifdef TARGET_NR_rmdir
8919     case TARGET_NR_rmdir:
8920         if (!(p = lock_user_string(arg1)))
8921             return -TARGET_EFAULT;
8922         ret = get_errno(rmdir(p));
8923         unlock_user(p, arg1, 0);
8924         return ret;
8925 #endif
8926     case TARGET_NR_dup:
8927         ret = get_errno(dup(arg1));
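        /* Carry over any fd translation handler (e.g. for signalfd or
         * netlink fds) so the duplicate keeps the same data conversion. */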
8928         if (ret >= 0) {
8929             fd_trans_dup(arg1, ret);
8930         }
8931         return ret;
8932 #ifdef TARGET_NR_pipe
8933     case TARGET_NR_pipe:
8934         return do_pipe(cpu_env, arg1, 0, 0);
8935 #endif
8936 #ifdef TARGET_NR_pipe2
8937     case TARGET_NR_pipe2:
8938         return do_pipe(cpu_env, arg1,
8939                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8940 #endif
8941     case TARGET_NR_times:
8942         {
8943             struct target_tms *tmsp;
8944             struct tms tms;
8945             ret = get_errno(times(&tms));
8946             if (arg1) {
8947                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8948                 if (!tmsp)
8949                     return -TARGET_EFAULT;
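                /* Convert the host clock_t fields to target width and byte order. */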
8950                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8951                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8952                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8953                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8954             }
8955             if (!is_error(ret))
8956                 ret = host_to_target_clock_t(ret);
8957         }
8958         return ret;
8959     case TARGET_NR_acct:
8960         if (arg1 == 0) {
8961             ret = get_errno(acct(NULL));
8962         } else {
8963             if (!(p = lock_user_string(arg1))) {
8964                 return -TARGET_EFAULT;
8965             }
8966             ret = get_errno(acct(path(p)));
8967             unlock_user(p, arg1, 0);
8968         }
8969         return ret;
8970 #ifdef TARGET_NR_umount2
8971     case TARGET_NR_umount2:
8972         if (!(p = lock_user_string(arg1)))
8973             return -TARGET_EFAULT;
8974         ret = get_errno(umount2(p, arg2));
8975         unlock_user(p, arg1, 0);
8976         return ret;
8977 #endif
8978     case TARGET_NR_ioctl:
8979         return do_ioctl(arg1, arg2, arg3);
8980 #ifdef TARGET_NR_fcntl
8981     case TARGET_NR_fcntl:
8982         return do_fcntl(arg1, arg2, arg3);
8983 #endif
8984     case TARGET_NR_setpgid:
8985         return get_errno(setpgid(arg1, arg2));
8986     case TARGET_NR_umask:
8987         return get_errno(umask(arg1));
8988     case TARGET_NR_chroot:
8989         if (!(p = lock_user_string(arg1)))
8990             return -TARGET_EFAULT;
8991         ret = get_errno(chroot(p));
8992         unlock_user(p, arg1, 0);
8993         return ret;
8994 #ifdef TARGET_NR_dup2
8995     case TARGET_NR_dup2:
8996         ret = get_errno(dup2(arg1, arg2));
8997         if (ret >= 0) {
8998             fd_trans_dup(arg1, arg2);
8999         }
9000         return ret;
9001 #endif
9002 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9003     case TARGET_NR_dup3:
9004     {
9005         int host_flags;
9006 
        /* Like the kernel, accept no dup3 flags other than O_CLOEXEC. */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
9010         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9011         ret = get_errno(dup3(arg1, arg2, host_flags));
9012         if (ret >= 0) {
9013             fd_trans_dup(arg1, arg2);
9014         }
9015         return ret;
9016     }
9017 #endif
9018 #ifdef TARGET_NR_getppid /* not on alpha */
9019     case TARGET_NR_getppid:
9020         return get_errno(getppid());
9021 #endif
9022 #ifdef TARGET_NR_getpgrp
9023     case TARGET_NR_getpgrp:
9024         return get_errno(getpgrp());
9025 #endif
9026     case TARGET_NR_setsid:
9027         return get_errno(setsid());
9028 #ifdef TARGET_NR_sigaction
9029     case TARGET_NR_sigaction:
9030         {
9031 #if defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
9059 #else
9060             struct target_old_sigaction *old_act;
9061             struct target_sigaction act, oact, *pact;
9062             if (arg2) {
9063                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9064                     return -TARGET_EFAULT;
9065                 act._sa_handler = old_act->_sa_handler;
9066                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9067                 act.sa_flags = old_act->sa_flags;
9068 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9069                 act.sa_restorer = old_act->sa_restorer;
9070 #endif
9071                 unlock_user_struct(old_act, arg2, 0);
9072                 pact = &act;
9073             } else {
9074                 pact = NULL;
9075             }
9076             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9077             if (!is_error(ret) && arg3) {
9078                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9079                     return -TARGET_EFAULT;
9080                 old_act->_sa_handler = oact._sa_handler;
9081                 old_act->sa_mask = oact.sa_mask.sig[0];
9082                 old_act->sa_flags = oact.sa_flags;
9083 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9084                 old_act->sa_restorer = oact.sa_restorer;
9085 #endif
9086                 unlock_user_struct(old_act, arg3, 1);
9087             }
9088 #endif
9089         }
9090         return ret;
9091 #endif
9092     case TARGET_NR_rt_sigaction:
9093         {
9094             /*
9095              * For Alpha and SPARC this is a 5 argument syscall, with
9096              * a 'restorer' parameter which must be copied into the
9097              * sa_restorer field of the sigaction struct.
9098              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9099              * and arg5 is the sigsetsize.
9100              */
9101 #if defined(TARGET_ALPHA)
9102             target_ulong sigsetsize = arg4;
9103             target_ulong restorer = arg5;
9104 #elif defined(TARGET_SPARC)
9105             target_ulong restorer = arg4;
9106             target_ulong sigsetsize = arg5;
9107 #else
9108             target_ulong sigsetsize = arg4;
9109             target_ulong restorer = 0;
9110 #endif
9111             struct target_sigaction *act = NULL;
9112             struct target_sigaction *oact = NULL;
9113 
9114             if (sigsetsize != sizeof(target_sigset_t)) {
9115                 return -TARGET_EINVAL;
9116             }
9117             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9118                 return -TARGET_EFAULT;
9119             }
9120             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9121                 ret = -TARGET_EFAULT;
9122             } else {
9123                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9124                 if (oact) {
9125                     unlock_user_struct(oact, arg3, 1);
9126                 }
9127             }
9128             if (act) {
9129                 unlock_user_struct(act, arg2, 0);
9130             }
9131         }
9132         return ret;
9133 #ifdef TARGET_NR_sgetmask /* not on alpha */
9134     case TARGET_NR_sgetmask:
9135         {
9136             sigset_t cur_set;
9137             abi_ulong target_set;
9138             ret = do_sigprocmask(0, NULL, &cur_set);
9139             if (!ret) {
9140                 host_to_target_old_sigset(&target_set, &cur_set);
9141                 ret = target_set;
9142             }
9143         }
9144         return ret;
9145 #endif
9146 #ifdef TARGET_NR_ssetmask /* not on alpha */
9147     case TARGET_NR_ssetmask:
9148         {
9149             sigset_t set, oset;
9150             abi_ulong target_set = arg1;
9151             target_to_host_old_sigset(&set, &target_set);
9152             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9153             if (!ret) {
9154                 host_to_target_old_sigset(&target_set, &oset);
9155                 ret = target_set;
9156             }
9157         }
9158         return ret;
9159 #endif
9160 #ifdef TARGET_NR_sigprocmask
9161     case TARGET_NR_sigprocmask:
9162         {
9163 #if defined(TARGET_ALPHA)
9164             sigset_t set, oldset;
9165             abi_ulong mask;
9166             int how;
9167 
9168             switch (arg1) {
9169             case TARGET_SIG_BLOCK:
9170                 how = SIG_BLOCK;
9171                 break;
9172             case TARGET_SIG_UNBLOCK:
9173                 how = SIG_UNBLOCK;
9174                 break;
9175             case TARGET_SIG_SETMASK:
9176                 how = SIG_SETMASK;
9177                 break;
9178             default:
9179                 return -TARGET_EINVAL;
9180             }
9181             mask = arg2;
9182             target_to_host_old_sigset(&set, &mask);
9183 
9184             ret = do_sigprocmask(how, &set, &oldset);
9185             if (!is_error(ret)) {
9186                 host_to_target_old_sigset(&mask, &oldset);
9187                 ret = mask;
9188                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9189             }
9190 #else
9191             sigset_t set, oldset, *set_ptr;
9192             int how;
9193 
9194             if (arg2) {
9195                 switch (arg1) {
9196                 case TARGET_SIG_BLOCK:
9197                     how = SIG_BLOCK;
9198                     break;
9199                 case TARGET_SIG_UNBLOCK:
9200                     how = SIG_UNBLOCK;
9201                     break;
9202                 case TARGET_SIG_SETMASK:
9203                     how = SIG_SETMASK;
9204                     break;
9205                 default:
9206                     return -TARGET_EINVAL;
9207                 }
9208                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9209                     return -TARGET_EFAULT;
9210                 target_to_host_old_sigset(&set, p);
9211                 unlock_user(p, arg2, 0);
9212                 set_ptr = &set;
9213             } else {
9214                 how = 0;
9215                 set_ptr = NULL;
9216             }
9217             ret = do_sigprocmask(how, set_ptr, &oldset);
9218             if (!is_error(ret) && arg3) {
9219                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9220                     return -TARGET_EFAULT;
9221                 host_to_target_old_sigset(p, &oldset);
9222                 unlock_user(p, arg3, sizeof(target_sigset_t));
9223             }
9224 #endif
9225         }
9226         return ret;
9227 #endif
9228     case TARGET_NR_rt_sigprocmask:
9229         {
9230             int how = arg1;
9231             sigset_t set, oldset, *set_ptr;
9232 
9233             if (arg4 != sizeof(target_sigset_t)) {
9234                 return -TARGET_EINVAL;
9235             }
9236 
9237             if (arg2) {
9238                 switch(how) {
9239                 case TARGET_SIG_BLOCK:
9240                     how = SIG_BLOCK;
9241                     break;
9242                 case TARGET_SIG_UNBLOCK:
9243                     how = SIG_UNBLOCK;
9244                     break;
9245                 case TARGET_SIG_SETMASK:
9246                     how = SIG_SETMASK;
9247                     break;
9248                 default:
9249                     return -TARGET_EINVAL;
9250                 }
9251                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9252                     return -TARGET_EFAULT;
9253                 target_to_host_sigset(&set, p);
9254                 unlock_user(p, arg2, 0);
9255                 set_ptr = &set;
9256             } else {
9257                 how = 0;
9258                 set_ptr = NULL;
9259             }
9260             ret = do_sigprocmask(how, set_ptr, &oldset);
9261             if (!is_error(ret) && arg3) {
9262                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9263                     return -TARGET_EFAULT;
9264                 host_to_target_sigset(p, &oldset);
9265                 unlock_user(p, arg3, sizeof(target_sigset_t));
9266             }
9267         }
9268         return ret;
9269 #ifdef TARGET_NR_sigpending
9270     case TARGET_NR_sigpending:
9271         {
9272             sigset_t set;
9273             ret = get_errno(sigpending(&set));
9274             if (!is_error(ret)) {
9275                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9276                     return -TARGET_EFAULT;
9277                 host_to_target_old_sigset(p, &set);
9278                 unlock_user(p, arg1, sizeof(target_sigset_t));
9279             }
9280         }
9281         return ret;
9282 #endif
9283     case TARGET_NR_rt_sigpending:
9284         {
9285             sigset_t set;
9286 
            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic here: NR_sigpending is implemented through the same code
             * path, and in that case the old_sigset_t is smaller in size.
             */
9292             if (arg2 > sizeof(target_sigset_t)) {
9293                 return -TARGET_EINVAL;
9294             }
9295 
9296             ret = get_errno(sigpending(&set));
9297             if (!is_error(ret)) {
9298                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9299                     return -TARGET_EFAULT;
9300                 host_to_target_sigset(p, &set);
9301                 unlock_user(p, arg1, sizeof(target_sigset_t));
9302             }
9303         }
9304         return ret;
9305 #ifdef TARGET_NR_sigsuspend
9306     case TARGET_NR_sigsuspend:
9307         {
9308             TaskState *ts = cpu->opaque;
9309 #if defined(TARGET_ALPHA)
9310             abi_ulong mask = arg1;
9311             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9312 #else
9313             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9314                 return -TARGET_EFAULT;
9315             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9316             unlock_user(p, arg1, 0);
9317 #endif
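            /* Block with the temporary mask; the signal delivery code uses
             * in_sigsuspend and sigsuspend_mask to restore the caller's mask
             * once a signal has been taken. */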
9318             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9319                                                SIGSET_T_SIZE));
9320             if (ret != -TARGET_ERESTARTSYS) {
9321                 ts->in_sigsuspend = 1;
9322             }
9323         }
9324         return ret;
9325 #endif
9326     case TARGET_NR_rt_sigsuspend:
9327         {
9328             TaskState *ts = cpu->opaque;
9329 
9330             if (arg2 != sizeof(target_sigset_t)) {
9331                 return -TARGET_EINVAL;
9332             }
9333             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9334                 return -TARGET_EFAULT;
9335             target_to_host_sigset(&ts->sigsuspend_mask, p);
9336             unlock_user(p, arg1, 0);
9337             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9338                                                SIGSET_T_SIZE));
9339             if (ret != -TARGET_ERESTARTSYS) {
9340                 ts->in_sigsuspend = 1;
9341             }
9342         }
9343         return ret;
9344 #ifdef TARGET_NR_rt_sigtimedwait
9345     case TARGET_NR_rt_sigtimedwait:
9346         {
9347             sigset_t set;
9348             struct timespec uts, *puts;
9349             siginfo_t uinfo;
9350 
9351             if (arg4 != sizeof(target_sigset_t)) {
9352                 return -TARGET_EINVAL;
9353             }
9354 
9355             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9356                 return -TARGET_EFAULT;
9357             target_to_host_sigset(&set, p);
9358             unlock_user(p, arg1, 0);
9359             if (arg3) {
9360                 puts = &uts;
9361                 if (target_to_host_timespec(puts, arg3)) {
9362                     return -TARGET_EFAULT;
9363                 }
9364             } else {
9365                 puts = NULL;
9366             }
9367             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9368                                                  SIGSET_T_SIZE));
9369             if (!is_error(ret)) {
9370                 if (arg2) {
9371                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9372                                   0);
9373                     if (!p) {
9374                         return -TARGET_EFAULT;
9375                     }
9376                     host_to_target_siginfo(p, &uinfo);
9377                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9378                 }
9379                 ret = host_to_target_signal(ret);
9380             }
9381         }
9382         return ret;
9383 #endif
9384 #ifdef TARGET_NR_rt_sigtimedwait_time64
9385     case TARGET_NR_rt_sigtimedwait_time64:
9386         {
9387             sigset_t set;
9388             struct timespec uts, *puts;
9389             siginfo_t uinfo;
9390 
9391             if (arg4 != sizeof(target_sigset_t)) {
9392                 return -TARGET_EINVAL;
9393             }
9394 
9395             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9396             if (!p) {
9397                 return -TARGET_EFAULT;
9398             }
9399             target_to_host_sigset(&set, p);
9400             unlock_user(p, arg1, 0);
9401             if (arg3) {
9402                 puts = &uts;
9403                 if (target_to_host_timespec64(puts, arg3)) {
9404                     return -TARGET_EFAULT;
9405                 }
9406             } else {
9407                 puts = NULL;
9408             }
9409             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9410                                                  SIGSET_T_SIZE));
9411             if (!is_error(ret)) {
9412                 if (arg2) {
9413                     p = lock_user(VERIFY_WRITE, arg2,
9414                                   sizeof(target_siginfo_t), 0);
9415                     if (!p) {
9416                         return -TARGET_EFAULT;
9417                     }
9418                     host_to_target_siginfo(p, &uinfo);
9419                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9420                 }
9421                 ret = host_to_target_signal(ret);
9422             }
9423         }
9424         return ret;
9425 #endif
9426     case TARGET_NR_rt_sigqueueinfo:
9427         {
9428             siginfo_t uinfo;
9429 
9430             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9431             if (!p) {
9432                 return -TARGET_EFAULT;
9433             }
9434             target_to_host_siginfo(&uinfo, p);
9435             unlock_user(p, arg3, 0);
9436             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9437         }
9438         return ret;
9439     case TARGET_NR_rt_tgsigqueueinfo:
9440         {
9441             siginfo_t uinfo;
9442 
9443             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9444             if (!p) {
9445                 return -TARGET_EFAULT;
9446             }
9447             target_to_host_siginfo(&uinfo, p);
9448             unlock_user(p, arg4, 0);
9449             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9450         }
9451         return ret;
9452 #ifdef TARGET_NR_sigreturn
9453     case TARGET_NR_sigreturn:
9454         if (block_signals()) {
9455             return -TARGET_ERESTARTSYS;
9456         }
9457         return do_sigreturn(cpu_env);
9458 #endif
9459     case TARGET_NR_rt_sigreturn:
9460         if (block_signals()) {
9461             return -TARGET_ERESTARTSYS;
9462         }
9463         return do_rt_sigreturn(cpu_env);
9464     case TARGET_NR_sethostname:
9465         if (!(p = lock_user_string(arg1)))
9466             return -TARGET_EFAULT;
9467         ret = get_errno(sethostname(p, arg2));
9468         unlock_user(p, arg1, 0);
9469         return ret;
9470 #ifdef TARGET_NR_setrlimit
9471     case TARGET_NR_setrlimit:
9472         {
9473             int resource = target_to_host_resource(arg1);
9474             struct target_rlimit *target_rlim;
9475             struct rlimit rlim;
9476             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9477                 return -TARGET_EFAULT;
9478             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9479             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9480             unlock_user_struct(target_rlim, arg2, 0);
9481             /*
9482              * If we just passed through resource limit settings for memory then
9483              * they would also apply to QEMU's own allocations, and QEMU will
9484              * crash or hang or die if its allocations fail. Ideally we would
9485              * track the guest allocations in QEMU and apply the limits ourselves.
9486              * For now, just tell the guest the call succeeded but don't actually
9487              * limit anything.
9488              */
9489             if (resource != RLIMIT_AS &&
9490                 resource != RLIMIT_DATA &&
9491                 resource != RLIMIT_STACK) {
9492                 return get_errno(setrlimit(resource, &rlim));
9493             } else {
9494                 return 0;
9495             }
9496         }
9497 #endif
9498 #ifdef TARGET_NR_getrlimit
9499     case TARGET_NR_getrlimit:
9500         {
9501             int resource = target_to_host_resource(arg1);
9502             struct target_rlimit *target_rlim;
9503             struct rlimit rlim;
9504 
9505             ret = get_errno(getrlimit(resource, &rlim));
9506             if (!is_error(ret)) {
9507                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9508                     return -TARGET_EFAULT;
9509                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9510                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9511                 unlock_user_struct(target_rlim, arg2, 1);
9512             }
9513         }
9514         return ret;
9515 #endif
9516     case TARGET_NR_getrusage:
9517         {
9518             struct rusage rusage;
9519             ret = get_errno(getrusage(arg1, &rusage));
9520             if (!is_error(ret)) {
9521                 ret = host_to_target_rusage(arg2, &rusage);
9522             }
9523         }
9524         return ret;
9525 #if defined(TARGET_NR_gettimeofday)
9526     case TARGET_NR_gettimeofday:
9527         {
9528             struct timeval tv;
9529             struct timezone tz;
9530 
9531             ret = get_errno(gettimeofday(&tv, &tz));
9532             if (!is_error(ret)) {
9533                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9534                     return -TARGET_EFAULT;
9535                 }
9536                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9537                     return -TARGET_EFAULT;
9538                 }
9539             }
9540         }
9541         return ret;
9542 #endif
9543 #if defined(TARGET_NR_settimeofday)
9544     case TARGET_NR_settimeofday:
9545         {
9546             struct timeval tv, *ptv = NULL;
9547             struct timezone tz, *ptz = NULL;
9548 
9549             if (arg1) {
9550                 if (copy_from_user_timeval(&tv, arg1)) {
9551                     return -TARGET_EFAULT;
9552                 }
9553                 ptv = &tv;
9554             }
9555 
9556             if (arg2) {
9557                 if (copy_from_user_timezone(&tz, arg2)) {
9558                     return -TARGET_EFAULT;
9559                 }
9560                 ptz = &tz;
9561             }
9562 
9563             return get_errno(settimeofday(ptv, ptz));
9564         }
9565 #endif
9566 #if defined(TARGET_NR_select)
9567     case TARGET_NR_select:
9568 #if defined(TARGET_WANT_NI_OLD_SELECT)
        /* Some architectures used to implement old_select here,
         * but now return ENOSYS for it.
         */
9572         ret = -TARGET_ENOSYS;
9573 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9574         ret = do_old_select(arg1);
9575 #else
9576         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9577 #endif
9578         return ret;
9579 #endif
9580 #ifdef TARGET_NR_pselect6
9581     case TARGET_NR_pselect6:
9582         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9583 #endif
9584 #ifdef TARGET_NR_pselect6_time64
9585     case TARGET_NR_pselect6_time64:
9586         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9587 #endif
9588 #ifdef TARGET_NR_symlink
9589     case TARGET_NR_symlink:
9590         {
9591             void *p2;
9592             p = lock_user_string(arg1);
9593             p2 = lock_user_string(arg2);
9594             if (!p || !p2)
9595                 ret = -TARGET_EFAULT;
9596             else
9597                 ret = get_errno(symlink(p, p2));
9598             unlock_user(p2, arg2, 0);
9599             unlock_user(p, arg1, 0);
9600         }
9601         return ret;
9602 #endif
9603 #if defined(TARGET_NR_symlinkat)
9604     case TARGET_NR_symlinkat:
9605         {
9606             void *p2;
9607             p  = lock_user_string(arg1);
9608             p2 = lock_user_string(arg3);
9609             if (!p || !p2)
9610                 ret = -TARGET_EFAULT;
9611             else
9612                 ret = get_errno(symlinkat(p, arg2, p2));
9613             unlock_user(p2, arg3, 0);
9614             unlock_user(p, arg1, 0);
9615         }
9616         return ret;
9617 #endif
9618 #ifdef TARGET_NR_readlink
9619     case TARGET_NR_readlink:
9620         {
9621             void *p2;
9622             p = lock_user_string(arg1);
9623             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9624             if (!p || !p2) {
9625                 ret = -TARGET_EFAULT;
9626             } else if (!arg3) {
9627                 /* Short circuit this for the magic exe check. */
9628                 ret = -TARGET_EINVAL;
9629             } else if (is_proc_myself((const char *)p, "exe")) {
9630                 char real[PATH_MAX], *temp;
9631                 temp = realpath(exec_path, real);
9632                 /* Return value is # of bytes that we wrote to the buffer. */
9633                 if (temp == NULL) {
9634                     ret = get_errno(-1);
9635                 } else {
9636                     /* Don't worry about sign mismatch as earlier mapping
9637                      * logic would have thrown a bad address error. */
9638                     ret = MIN(strlen(real), arg3);
9639                     /* We cannot NUL terminate the string. */
9640                     memcpy(p2, real, ret);
9641                 }
9642             } else {
9643                 ret = get_errno(readlink(path(p), p2, arg3));
9644             }
9645             unlock_user(p2, arg2, ret);
9646             unlock_user(p, arg1, 0);
9647         }
9648         return ret;
9649 #endif
9650 #if defined(TARGET_NR_readlinkat)
9651     case TARGET_NR_readlinkat:
9652         {
9653             void *p2;
9654             p  = lock_user_string(arg2);
9655             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9656             if (!p || !p2) {
9657                 ret = -TARGET_EFAULT;
9658             } else if (is_proc_myself((const char *)p, "exe")) {
9659                 char real[PATH_MAX], *temp;
9660                 temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
9662                 snprintf((char *)p2, arg4, "%s", real);
9663             } else {
9664                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9665             }
9666             unlock_user(p2, arg3, ret);
9667             unlock_user(p, arg2, 0);
9668         }
9669         return ret;
9670 #endif
9671 #ifdef TARGET_NR_swapon
9672     case TARGET_NR_swapon:
9673         if (!(p = lock_user_string(arg1)))
9674             return -TARGET_EFAULT;
9675         ret = get_errno(swapon(p, arg2));
9676         unlock_user(p, arg1, 0);
9677         return ret;
9678 #endif
9679     case TARGET_NR_reboot:
9680         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 (the command string) is only used by RESTART2 and
             * must be ignored in all other cases.
             */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9690         }
9691         return ret;
9692 #ifdef TARGET_NR_mmap
9693     case TARGET_NR_mmap:
9694 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9695     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9696     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9697     || defined(TARGET_S390X)
9698         {
9699             abi_ulong *v;
9700             abi_ulong v1, v2, v3, v4, v5, v6;
9701             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9702                 return -TARGET_EFAULT;
9703             v1 = tswapal(v[0]);
9704             v2 = tswapal(v[1]);
9705             v3 = tswapal(v[2]);
9706             v4 = tswapal(v[3]);
9707             v5 = tswapal(v[4]);
9708             v6 = tswapal(v[5]);
9709             unlock_user(v, arg1, 0);
9710             ret = get_errno(target_mmap(v1, v2, v3,
9711                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9712                                         v5, v6));
9713         }
9714 #else
9715         /* mmap pointers are always untagged */
9716         ret = get_errno(target_mmap(arg1, arg2, arg3,
9717                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9718                                     arg5,
9719                                     arg6));
9720 #endif
9721         return ret;
9722 #endif
9723 #ifdef TARGET_NR_mmap2
9724     case TARGET_NR_mmap2:
9725 #ifndef MMAP_SHIFT
9726 #define MMAP_SHIFT 12
9727 #endif
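        /* mmap2 passes the file offset in units of 1 << MMAP_SHIFT
         * (4096 bytes by default), so scale it to a byte offset. */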
9728         ret = target_mmap(arg1, arg2, arg3,
9729                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9730                           arg5, arg6 << MMAP_SHIFT);
9731         return get_errno(ret);
9732 #endif
9733     case TARGET_NR_munmap:
9734         arg1 = cpu_untagged_addr(cpu, arg1);
9735         return get_errno(target_munmap(arg1, arg2));
9736     case TARGET_NR_mprotect:
9737         arg1 = cpu_untagged_addr(cpu, arg1);
9738         {
9739             TaskState *ts = cpu->opaque;
9740             /* Special hack to detect libc making the stack executable.  */
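            /* If so, drop the flag and apply the change to the whole guest
             * stack area down to the recorded stack limit instead. */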
9741             if ((arg3 & PROT_GROWSDOWN)
9742                 && arg1 >= ts->info->stack_limit
9743                 && arg1 <= ts->info->start_stack) {
9744                 arg3 &= ~PROT_GROWSDOWN;
9745                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9746                 arg1 = ts->info->stack_limit;
9747             }
9748         }
9749         return get_errno(target_mprotect(arg1, arg2, arg3));
9750 #ifdef TARGET_NR_mremap
9751     case TARGET_NR_mremap:
9752         arg1 = cpu_untagged_addr(cpu, arg1);
9753         /* mremap new_addr (arg5) is always untagged */
9754         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9755 #endif
9756         /* ??? msync/mlock/munlock are broken for softmmu.  */
9757 #ifdef TARGET_NR_msync
9758     case TARGET_NR_msync:
9759         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9760 #endif
9761 #ifdef TARGET_NR_mlock
9762     case TARGET_NR_mlock:
9763         return get_errno(mlock(g2h(cpu, arg1), arg2));
9764 #endif
9765 #ifdef TARGET_NR_munlock
9766     case TARGET_NR_munlock:
9767         return get_errno(munlock(g2h(cpu, arg1), arg2));
9768 #endif
9769 #ifdef TARGET_NR_mlockall
9770     case TARGET_NR_mlockall:
9771         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9772 #endif
9773 #ifdef TARGET_NR_munlockall
9774     case TARGET_NR_munlockall:
9775         return get_errno(munlockall());
9776 #endif
9777 #ifdef TARGET_NR_truncate
9778     case TARGET_NR_truncate:
9779         if (!(p = lock_user_string(arg1)))
9780             return -TARGET_EFAULT;
9781         ret = get_errno(truncate(p, arg2));
9782         unlock_user(p, arg1, 0);
9783         return ret;
9784 #endif
9785 #ifdef TARGET_NR_ftruncate
9786     case TARGET_NR_ftruncate:
9787         return get_errno(ftruncate(arg1, arg2));
9788 #endif
9789     case TARGET_NR_fchmod:
9790         return get_errno(fchmod(arg1, arg2));
9791 #if defined(TARGET_NR_fchmodat)
9792     case TARGET_NR_fchmodat:
9793         if (!(p = lock_user_string(arg2)))
9794             return -TARGET_EFAULT;
9795         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9796         unlock_user(p, arg2, 0);
9797         return ret;
9798 #endif
9799     case TARGET_NR_getpriority:
9800         /* Note that negative values are valid for getpriority, so we must
9801            differentiate based on errno settings.  */
9802         errno = 0;
9803         ret = getpriority(arg1, arg2);
9804         if (ret == -1 && errno != 0) {
9805             return -host_to_target_errno(errno);
9806         }
9807 #ifdef TARGET_ALPHA
9808         /* Return value is the unbiased priority.  Signal no error.  */
9809         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9810 #else
9811         /* Return value is a biased priority to avoid negative numbers.  */
9812         ret = 20 - ret;
9813 #endif
9814         return ret;
9815     case TARGET_NR_setpriority:
9816         return get_errno(setpriority(arg1, arg2, arg3));
9817 #ifdef TARGET_NR_statfs
9818     case TARGET_NR_statfs:
9819         if (!(p = lock_user_string(arg1))) {
9820             return -TARGET_EFAULT;
9821         }
9822         ret = get_errno(statfs(path(p), &stfs));
9823         unlock_user(p, arg1, 0);
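        /* fstatfs joins here to share the conversion to the target statfs. */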
9824     convert_statfs:
9825         if (!is_error(ret)) {
9826             struct target_statfs *target_stfs;
9827 
9828             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9829                 return -TARGET_EFAULT;
9830             __put_user(stfs.f_type, &target_stfs->f_type);
9831             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9832             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9833             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9834             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9835             __put_user(stfs.f_files, &target_stfs->f_files);
9836             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9837             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9838             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9839             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9840             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9841 #ifdef _STATFS_F_FLAGS
9842             __put_user(stfs.f_flags, &target_stfs->f_flags);
9843 #else
9844             __put_user(0, &target_stfs->f_flags);
9845 #endif
9846             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9847             unlock_user_struct(target_stfs, arg2, 1);
9848         }
9849         return ret;
9850 #endif
9851 #ifdef TARGET_NR_fstatfs
9852     case TARGET_NR_fstatfs:
9853         ret = get_errno(fstatfs(arg1, &stfs));
9854         goto convert_statfs;
9855 #endif
9856 #ifdef TARGET_NR_statfs64
9857     case TARGET_NR_statfs64:
9858         if (!(p = lock_user_string(arg1))) {
9859             return -TARGET_EFAULT;
9860         }
9861         ret = get_errno(statfs(path(p), &stfs));
9862         unlock_user(p, arg1, 0);
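        /* fstatfs64 joins here to share the conversion. */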
9863     convert_statfs64:
9864         if (!is_error(ret)) {
9865             struct target_statfs64 *target_stfs;
9866 
9867             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9868                 return -TARGET_EFAULT;
9869             __put_user(stfs.f_type, &target_stfs->f_type);
9870             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9871             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9872             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9873             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9874             __put_user(stfs.f_files, &target_stfs->f_files);
9875             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9876             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9877             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9878             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9879             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9880 #ifdef _STATFS_F_FLAGS
9881             __put_user(stfs.f_flags, &target_stfs->f_flags);
9882 #else
9883             __put_user(0, &target_stfs->f_flags);
9884 #endif
9885             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9886             unlock_user_struct(target_stfs, arg3, 1);
9887         }
9888         return ret;
9889     case TARGET_NR_fstatfs64:
9890         ret = get_errno(fstatfs(arg1, &stfs));
9891         goto convert_statfs64;
9892 #endif
9893 #ifdef TARGET_NR_socketcall
9894     case TARGET_NR_socketcall:
9895         return do_socketcall(arg1, arg2);
9896 #endif
9897 #ifdef TARGET_NR_accept
9898     case TARGET_NR_accept:
9899         return do_accept4(arg1, arg2, arg3, 0);
9900 #endif
9901 #ifdef TARGET_NR_accept4
9902     case TARGET_NR_accept4:
9903         return do_accept4(arg1, arg2, arg3, arg4);
9904 #endif
9905 #ifdef TARGET_NR_bind
9906     case TARGET_NR_bind:
9907         return do_bind(arg1, arg2, arg3);
9908 #endif
9909 #ifdef TARGET_NR_connect
9910     case TARGET_NR_connect:
9911         return do_connect(arg1, arg2, arg3);
9912 #endif
9913 #ifdef TARGET_NR_getpeername
9914     case TARGET_NR_getpeername:
9915         return do_getpeername(arg1, arg2, arg3);
9916 #endif
9917 #ifdef TARGET_NR_getsockname
9918     case TARGET_NR_getsockname:
9919         return do_getsockname(arg1, arg2, arg3);
9920 #endif
9921 #ifdef TARGET_NR_getsockopt
9922     case TARGET_NR_getsockopt:
9923         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9924 #endif
9925 #ifdef TARGET_NR_listen
9926     case TARGET_NR_listen:
9927         return get_errno(listen(arg1, arg2));
9928 #endif
9929 #ifdef TARGET_NR_recv
9930     case TARGET_NR_recv:
9931         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9932 #endif
9933 #ifdef TARGET_NR_recvfrom
9934     case TARGET_NR_recvfrom:
9935         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9936 #endif
9937 #ifdef TARGET_NR_recvmsg
9938     case TARGET_NR_recvmsg:
9939         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9940 #endif
9941 #ifdef TARGET_NR_send
9942     case TARGET_NR_send:
9943         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9944 #endif
9945 #ifdef TARGET_NR_sendmsg
9946     case TARGET_NR_sendmsg:
9947         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9948 #endif
9949 #ifdef TARGET_NR_sendmmsg
9950     case TARGET_NR_sendmmsg:
9951         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9952 #endif
9953 #ifdef TARGET_NR_recvmmsg
9954     case TARGET_NR_recvmmsg:
9955         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9956 #endif
9957 #ifdef TARGET_NR_sendto
9958     case TARGET_NR_sendto:
9959         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9960 #endif
9961 #ifdef TARGET_NR_shutdown
9962     case TARGET_NR_shutdown:
9963         return get_errno(shutdown(arg1, arg2));
9964 #endif
9965 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9966     case TARGET_NR_getrandom:
9967         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9968         if (!p) {
9969             return -TARGET_EFAULT;
9970         }
9971         ret = get_errno(getrandom(p, arg2, arg3));
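        /* Copy back only as many bytes as getrandom actually produced. */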
9972         unlock_user(p, arg1, ret);
9973         return ret;
9974 #endif
9975 #ifdef TARGET_NR_socket
9976     case TARGET_NR_socket:
9977         return do_socket(arg1, arg2, arg3);
9978 #endif
9979 #ifdef TARGET_NR_socketpair
9980     case TARGET_NR_socketpair:
9981         return do_socketpair(arg1, arg2, arg3, arg4);
9982 #endif
9983 #ifdef TARGET_NR_setsockopt
9984     case TARGET_NR_setsockopt:
9985         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9986 #endif
9987 #if defined(TARGET_NR_syslog)
9988     case TARGET_NR_syslog:
9989         {
9990             int len = arg2;
9991 
9992             switch (arg1) {
9993             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9994             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9995             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9996             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9997             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9998             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9999             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10000             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10001                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10002             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10003             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10004             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10005                 {
10006                     if (len < 0) {
10007                         return -TARGET_EINVAL;
10008                     }
10009                     if (len == 0) {
10010                         return 0;
10011                     }
10012                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10013                     if (!p) {
10014                         return -TARGET_EFAULT;
10015                     }
10016                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10017                     unlock_user(p, arg2, arg3);
10018                 }
10019                 return ret;
10020             default:
10021                 return -TARGET_EINVAL;
10022             }
10023         }
10024         break;
10025 #endif
10026     case TARGET_NR_setitimer:
10027         {
10028             struct itimerval value, ovalue, *pvalue;
10029 
10030             if (arg2) {
10031                 pvalue = &value;
10032                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10033                     || copy_from_user_timeval(&pvalue->it_value,
10034                                               arg2 + sizeof(struct target_timeval)))
10035                     return -TARGET_EFAULT;
10036             } else {
10037                 pvalue = NULL;
10038             }
10039             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10040             if (!is_error(ret) && arg3) {
10041                 if (copy_to_user_timeval(arg3,
10042                                          &ovalue.it_interval)
10043                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10044                                             &ovalue.it_value))
10045                     return -TARGET_EFAULT;
10046             }
10047         }
10048         return ret;
10049     case TARGET_NR_getitimer:
10050         {
10051             struct itimerval value;
10052 
10053             ret = get_errno(getitimer(arg1, &value));
10054             if (!is_error(ret) && arg2) {
10055                 if (copy_to_user_timeval(arg2,
10056                                          &value.it_interval)
10057                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10058                                             &value.it_value))
10059                     return -TARGET_EFAULT;
10060             }
10061         }
10062         return ret;
10063 #ifdef TARGET_NR_stat
10064     case TARGET_NR_stat:
10065         if (!(p = lock_user_string(arg1))) {
10066             return -TARGET_EFAULT;
10067         }
10068         ret = get_errno(stat(path(p), &st));
10069         unlock_user(p, arg1, 0);
10070         goto do_stat;
10071 #endif
10072 #ifdef TARGET_NR_lstat
10073     case TARGET_NR_lstat:
10074         if (!(p = lock_user_string(arg1))) {
10075             return -TARGET_EFAULT;
10076         }
10077         ret = get_errno(lstat(path(p), &st));
10078         unlock_user(p, arg1, 0);
10079         goto do_stat;
10080 #endif
10081 #ifdef TARGET_NR_fstat
10082     case TARGET_NR_fstat:
10083         {
10084             ret = get_errno(fstat(arg1, &st));
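            /* stat and lstat jump here to share the conversion below. */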
10085 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10086         do_stat:
10087 #endif
10088             if (!is_error(ret)) {
10089                 struct target_stat *target_st;
10090 
10091                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10092                     return -TARGET_EFAULT;
10093                 memset(target_st, 0, sizeof(*target_st));
10094                 __put_user(st.st_dev, &target_st->st_dev);
10095                 __put_user(st.st_ino, &target_st->st_ino);
10096                 __put_user(st.st_mode, &target_st->st_mode);
10097                 __put_user(st.st_uid, &target_st->st_uid);
10098                 __put_user(st.st_gid, &target_st->st_gid);
10099                 __put_user(st.st_nlink, &target_st->st_nlink);
10100                 __put_user(st.st_rdev, &target_st->st_rdev);
10101                 __put_user(st.st_size, &target_st->st_size);
10102                 __put_user(st.st_blksize, &target_st->st_blksize);
10103                 __put_user(st.st_blocks, &target_st->st_blocks);
10104                 __put_user(st.st_atime, &target_st->target_st_atime);
10105                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10106                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10107 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10108                 __put_user(st.st_atim.tv_nsec,
10109                            &target_st->target_st_atime_nsec);
10110                 __put_user(st.st_mtim.tv_nsec,
10111                            &target_st->target_st_mtime_nsec);
10112                 __put_user(st.st_ctim.tv_nsec,
10113                            &target_st->target_st_ctime_nsec);
10114 #endif
10115                 unlock_user_struct(target_st, arg2, 1);
10116             }
10117         }
10118         return ret;
10119 #endif
10120     case TARGET_NR_vhangup:
10121         return get_errno(vhangup());
10122 #ifdef TARGET_NR_syscall
10123     case TARGET_NR_syscall:
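        /* Indirect syscall: arg1 holds the real syscall number and the
         * remaining arguments shift down by one. */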
10124         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10125                           arg6, arg7, arg8, 0);
10126 #endif
10127 #if defined(TARGET_NR_wait4)
10128     case TARGET_NR_wait4:
10129         {
10130             int status;
10131             abi_long status_ptr = arg2;
10132             struct rusage rusage, *rusage_ptr;
10133             abi_ulong target_rusage = arg4;
10134             abi_long rusage_err;
10135             if (target_rusage)
10136                 rusage_ptr = &rusage;
10137             else
10138                 rusage_ptr = NULL;
10139             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10140             if (!is_error(ret)) {
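                /* wait4 can legitimately return 0 (e.g. WNOHANG with nothing
                 * to report), in which case the status word is not written. */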
10141                 if (status_ptr && ret) {
10142                     status = host_to_target_waitstatus(status);
10143                     if (put_user_s32(status, status_ptr))
10144                         return -TARGET_EFAULT;
10145                 }
10146                 if (target_rusage) {
10147                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10148                     if (rusage_err) {
10149                         ret = rusage_err;
10150                     }
10151                 }
10152             }
10153         }
10154         return ret;
10155 #endif
10156 #ifdef TARGET_NR_swapoff
10157     case TARGET_NR_swapoff:
10158         if (!(p = lock_user_string(arg1)))
10159             return -TARGET_EFAULT;
10160         ret = get_errno(swapoff(p));
10161         unlock_user(p, arg1, 0);
10162         return ret;
10163 #endif
10164     case TARGET_NR_sysinfo:
10165         {
10166             struct target_sysinfo *target_value;
10167             struct sysinfo value;
10168             ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1) {
10171                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10172                     return -TARGET_EFAULT;
10173                 __put_user(value.uptime, &target_value->uptime);
10174                 __put_user(value.loads[0], &target_value->loads[0]);
10175                 __put_user(value.loads[1], &target_value->loads[1]);
10176                 __put_user(value.loads[2], &target_value->loads[2]);
10177                 __put_user(value.totalram, &target_value->totalram);
10178                 __put_user(value.freeram, &target_value->freeram);
10179                 __put_user(value.sharedram, &target_value->sharedram);
10180                 __put_user(value.bufferram, &target_value->bufferram);
10181                 __put_user(value.totalswap, &target_value->totalswap);
10182                 __put_user(value.freeswap, &target_value->freeswap);
10183                 __put_user(value.procs, &target_value->procs);
10184                 __put_user(value.totalhigh, &target_value->totalhigh);
10185                 __put_user(value.freehigh, &target_value->freehigh);
10186                 __put_user(value.mem_unit, &target_value->mem_unit);
10187                 unlock_user_struct(target_value, arg1, 1);
10188             }
10189         }
10190         return ret;
10191 #ifdef TARGET_NR_ipc
10192     case TARGET_NR_ipc:
10193         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10194 #endif
10195 #ifdef TARGET_NR_semget
10196     case TARGET_NR_semget:
10197         return get_errno(semget(arg1, arg2, arg3));
10198 #endif
10199 #ifdef TARGET_NR_semop
10200     case TARGET_NR_semop:
10201         return do_semtimedop(arg1, arg2, arg3, 0, false);
10202 #endif
10203 #ifdef TARGET_NR_semtimedop
10204     case TARGET_NR_semtimedop:
10205         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10206 #endif
10207 #ifdef TARGET_NR_semtimedop_time64
10208     case TARGET_NR_semtimedop_time64:
10209         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10210 #endif
10211 #ifdef TARGET_NR_semctl
10212     case TARGET_NR_semctl:
10213         return do_semctl(arg1, arg2, arg3, arg4);
10214 #endif
10215 #ifdef TARGET_NR_msgctl
10216     case TARGET_NR_msgctl:
10217         return do_msgctl(arg1, arg2, arg3);
10218 #endif
10219 #ifdef TARGET_NR_msgget
10220     case TARGET_NR_msgget:
10221         return get_errno(msgget(arg1, arg2));
10222 #endif
10223 #ifdef TARGET_NR_msgrcv
10224     case TARGET_NR_msgrcv:
10225         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10226 #endif
10227 #ifdef TARGET_NR_msgsnd
10228     case TARGET_NR_msgsnd:
10229         return do_msgsnd(arg1, arg2, arg3, arg4);
10230 #endif
10231 #ifdef TARGET_NR_shmget
10232     case TARGET_NR_shmget:
10233         return get_errno(shmget(arg1, arg2, arg3));
10234 #endif
10235 #ifdef TARGET_NR_shmctl
10236     case TARGET_NR_shmctl:
10237         return do_shmctl(arg1, arg2, arg3);
10238 #endif
10239 #ifdef TARGET_NR_shmat
10240     case TARGET_NR_shmat:
10241         return do_shmat(cpu_env, arg1, arg2, arg3);
10242 #endif
10243 #ifdef TARGET_NR_shmdt
10244     case TARGET_NR_shmdt:
10245         return do_shmdt(arg1);
10246 #endif
10247     case TARGET_NR_fsync:
10248         return get_errno(fsync(arg1));
10249     case TARGET_NR_clone:
10250         /* Linux manages to have three different orderings for its
10251          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10252          * match the kernel's CONFIG_CLONE_* settings.
10253          * Microblaze is further special in that it uses a sixth
10254          * implicit argument to clone for the TLS pointer.
10255          */
10256 #if defined(TARGET_MICROBLAZE)
10257         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10258 #elif defined(TARGET_CLONE_BACKWARDS)
10259         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10260 #elif defined(TARGET_CLONE_BACKWARDS2)
10261         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10262 #else
10263         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10264 #endif
10265         return ret;
10266 #ifdef __NR_exit_group
10267         /* new thread calls */
10268     case TARGET_NR_exit_group:
10269         preexit_cleanup(cpu_env, arg1);
10270         return get_errno(exit_group(arg1));
10271 #endif
10272     case TARGET_NR_setdomainname:
10273         if (!(p = lock_user_string(arg1)))
10274             return -TARGET_EFAULT;
10275         ret = get_errno(setdomainname(p, arg2));
10276         unlock_user(p, arg1, 0);
10277         return ret;
10278     case TARGET_NR_uname:
        /* No need to transcode because we use the Linux syscall. */
10280         {
10281             struct new_utsname * buf;
10282 
10283             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10284                 return -TARGET_EFAULT;
10285             ret = get_errno(sys_uname(buf));
10286             if (!is_error(ret)) {
10287                 /* Overwrite the native machine name with whatever is being
10288                    emulated. */
10289                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10290                           sizeof(buf->machine));
10291                 /* Allow the user to override the reported release.  */
10292                 if (qemu_uname_release && *qemu_uname_release) {
10293                     g_strlcpy(buf->release, qemu_uname_release,
10294                               sizeof(buf->release));
10295                 }
10296             }
10297             unlock_user_struct(buf, arg1, 1);
10298         }
10299         return ret;
10300 #ifdef TARGET_I386
10301     case TARGET_NR_modify_ldt:
10302         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10303 #if !defined(TARGET_X86_64)
10304     case TARGET_NR_vm86:
10305         return do_vm86(cpu_env, arg1, arg2);
10306 #endif
10307 #endif
10308 #if defined(TARGET_NR_adjtimex)
10309     case TARGET_NR_adjtimex:
10310         {
10311             struct timex host_buf;
10312 
10313             if (target_to_host_timex(&host_buf, arg1) != 0) {
10314                 return -TARGET_EFAULT;
10315             }
10316             ret = get_errno(adjtimex(&host_buf));
10317             if (!is_error(ret)) {
10318                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10319                     return -TARGET_EFAULT;
10320                 }
10321             }
10322         }
10323         return ret;
10324 #endif
10325 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10326     case TARGET_NR_clock_adjtime:
10327         {
10328             struct timex htx, *phtx = &htx;
10329 
10330             if (target_to_host_timex(phtx, arg2) != 0) {
10331                 return -TARGET_EFAULT;
10332             }
10333             ret = get_errno(clock_adjtime(arg1, phtx));
10334             if (!is_error(ret) && phtx) {
10335                 if (host_to_target_timex(arg2, phtx) != 0) {
10336                     return -TARGET_EFAULT;
10337                 }
10338             }
10339         }
10340         return ret;
10341 #endif
10342 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10343     case TARGET_NR_clock_adjtime64:
10344         {
10345             struct timex htx;
10346 
10347             if (target_to_host_timex64(&htx, arg2) != 0) {
10348                 return -TARGET_EFAULT;
10349             }
10350             ret = get_errno(clock_adjtime(arg1, &htx));
10351             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
10353             }
10354         }
10355         return ret;
10356 #endif
10357     case TARGET_NR_getpgid:
10358         return get_errno(getpgid(arg1));
10359     case TARGET_NR_fchdir:
10360         return get_errno(fchdir(arg1));
10361     case TARGET_NR_personality:
10362         return get_errno(personality(arg1));
10363 #ifdef TARGET_NR__llseek /* Not on alpha */
10364     case TARGET_NR__llseek:
10365         {
10366             int64_t res;
10367 #if !defined(__NR_llseek)
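            /*
             * A host without __NR_llseek is a 64-bit host, where a plain
             * lseek() already takes the combined 64-bit offset.
             */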
10368             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10369             if (res == -1) {
10370                 ret = get_errno(res);
10371             } else {
10372                 ret = 0;
10373             }
10374 #else
10375             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10376 #endif
10377             if ((ret == 0) && put_user_s64(res, arg4)) {
10378                 return -TARGET_EFAULT;
10379             }
10380         }
10381         return ret;
10382 #endif
10383 #ifdef TARGET_NR_getdents
10384     case TARGET_NR_getdents:
10385         return do_getdents(arg1, arg2, arg3);
10386 #endif /* TARGET_NR_getdents */
10387 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10388     case TARGET_NR_getdents64:
10389         return do_getdents64(arg1, arg2, arg3);
10390 #endif /* TARGET_NR_getdents64 */
10391 #if defined(TARGET_NR__newselect)
10392     case TARGET_NR__newselect:
10393         return do_select(arg1, arg2, arg3, arg4, arg5);
10394 #endif
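    /*
     * poll, ppoll and ppoll_time64 all go through do_ppoll(); the first
     * flag selects ppoll semantics (timespec timeout plus signal mask),
     * the second selects the 64-bit timespec layout.
     */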
10395 #ifdef TARGET_NR_poll
10396     case TARGET_NR_poll:
10397         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10398 #endif
10399 #ifdef TARGET_NR_ppoll
10400     case TARGET_NR_ppoll:
10401         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10402 #endif
10403 #ifdef TARGET_NR_ppoll_time64
10404     case TARGET_NR_ppoll_time64:
10405         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10406 #endif
10407     case TARGET_NR_flock:
10408         /* NOTE: the flock constant seems to be the same for every
10409            Linux platform */
10410         return get_errno(safe_flock(arg1, arg2));
10411     case TARGET_NR_readv:
10412         {
10413             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10414             if (vec != NULL) {
10415                 ret = get_errno(safe_readv(arg1, vec, arg3));
10416                 unlock_iovec(vec, arg2, arg3, 1);
10417             } else {
10418                 ret = -host_to_target_errno(errno);
10419             }
10420         }
10421         return ret;
10422     case TARGET_NR_writev:
10423         {
10424             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10425             if (vec != NULL) {
10426                 ret = get_errno(safe_writev(arg1, vec, arg3));
10427                 unlock_iovec(vec, arg2, arg3, 0);
10428             } else {
10429                 ret = -host_to_target_errno(errno);
10430             }
10431         }
10432         return ret;
10433 #if defined(TARGET_NR_preadv)
10434     case TARGET_NR_preadv:
10435         {
10436             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10437             if (vec != NULL) {
10438                 unsigned long low, high;
10439 
10440                 target_to_host_low_high(arg4, arg5, &low, &high);
10441                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10442                 unlock_iovec(vec, arg2, arg3, 1);
10443             } else {
10444                 ret = -host_to_target_errno(errno);
            }
10446         }
10447         return ret;
10448 #endif
10449 #if defined(TARGET_NR_pwritev)
10450     case TARGET_NR_pwritev:
10451         {
10452             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10453             if (vec != NULL) {
10454                 unsigned long low, high;
10455 
10456                 target_to_host_low_high(arg4, arg5, &low, &high);
10457                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10458                 unlock_iovec(vec, arg2, arg3, 0);
10459             } else {
10460                 ret = -host_to_target_errno(errno);
            }
10462         }
10463         return ret;
10464 #endif
10465     case TARGET_NR_getsid:
10466         return get_errno(getsid(arg1));
10467 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10468     case TARGET_NR_fdatasync:
10469         return get_errno(fdatasync(arg1));
10470 #endif
10471     case TARGET_NR_sched_getaffinity:
10472         {
10473             unsigned int mask_size;
10474             unsigned long *mask;
10475 
10476             /*
10477              * sched_getaffinity needs multiples of ulong, so need to take
10478              * care of mismatches between target ulong and host ulong sizes.
10479              */
10480             if (arg2 & (sizeof(abi_ulong) - 1)) {
10481                 return -TARGET_EINVAL;
10482             }
10483             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10484 
10485             mask = alloca(mask_size);
10486             memset(mask, 0, mask_size);
10487             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10488 
10489             if (!is_error(ret)) {
10490                 if (ret > arg2) {
10491                     /* More data returned than the caller's buffer will fit.
10492                      * This only happens if sizeof(abi_long) < sizeof(long)
10493                      * and the caller passed us a buffer holding an odd number
10494                      * of abi_longs. If the host kernel is actually using the
10495                      * extra 4 bytes then fail EINVAL; otherwise we can just
10496                      * ignore them and only copy the interesting part.
10497                      */
10498                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10499                     if (numcpus > arg2 * 8) {
10500                         return -TARGET_EINVAL;
10501                     }
10502                     ret = arg2;
10503                 }
10504 
10505                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10506                     return -TARGET_EFAULT;
10507                 }
10508             }
10509         }
10510         return ret;
10511     case TARGET_NR_sched_setaffinity:
10512         {
10513             unsigned int mask_size;
10514             unsigned long *mask;
10515 
10516             /*
10517              * sched_setaffinity needs multiples of ulong, so need to take
10518              * care of mismatches between target ulong and host ulong sizes.
10519              */
10520             if (arg2 & (sizeof(abi_ulong) - 1)) {
10521                 return -TARGET_EINVAL;
10522             }
10523             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10524             mask = alloca(mask_size);
10525 
10526             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10527             if (ret) {
10528                 return ret;
10529             }
10530 
10531             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10532         }
10533     case TARGET_NR_getcpu:
10534         {
10535             unsigned cpu, node;
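            /*
             * The third (tcache) argument has been ignored by the kernel
             * since Linux 2.6.24, so always pass NULL.
             */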
10536             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10537                                        arg2 ? &node : NULL,
10538                                        NULL));
10539             if (is_error(ret)) {
10540                 return ret;
10541             }
10542             if (arg1 && put_user_u32(cpu, arg1)) {
10543                 return -TARGET_EFAULT;
10544             }
10545             if (arg2 && put_user_u32(node, arg2)) {
10546                 return -TARGET_EFAULT;
10547             }
10548         }
10549         return ret;
10550     case TARGET_NR_sched_setparam:
10551         {
10552             struct sched_param *target_schp;
10553             struct sched_param schp;
10554 
10555             if (arg2 == 0) {
10556                 return -TARGET_EINVAL;
10557             }
10558             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10559                 return -TARGET_EFAULT;
10560             schp.sched_priority = tswap32(target_schp->sched_priority);
10561             unlock_user_struct(target_schp, arg2, 0);
10562             return get_errno(sched_setparam(arg1, &schp));
10563         }
10564     case TARGET_NR_sched_getparam:
10565         {
10566             struct sched_param *target_schp;
10567             struct sched_param schp;
10568 
10569             if (arg2 == 0) {
10570                 return -TARGET_EINVAL;
10571             }
10572             ret = get_errno(sched_getparam(arg1, &schp));
10573             if (!is_error(ret)) {
10574                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10575                     return -TARGET_EFAULT;
10576                 target_schp->sched_priority = tswap32(schp.sched_priority);
10577                 unlock_user_struct(target_schp, arg2, 1);
10578             }
10579         }
10580         return ret;
10581     case TARGET_NR_sched_setscheduler:
10582         {
10583             struct sched_param *target_schp;
10584             struct sched_param schp;
10585             if (arg3 == 0) {
10586                 return -TARGET_EINVAL;
10587             }
10588             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10589                 return -TARGET_EFAULT;
10590             schp.sched_priority = tswap32(target_schp->sched_priority);
10591             unlock_user_struct(target_schp, arg3, 0);
10592             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10593         }
10594     case TARGET_NR_sched_getscheduler:
10595         return get_errno(sched_getscheduler(arg1));
10596     case TARGET_NR_sched_yield:
10597         return get_errno(sched_yield());
10598     case TARGET_NR_sched_get_priority_max:
10599         return get_errno(sched_get_priority_max(arg1));
10600     case TARGET_NR_sched_get_priority_min:
10601         return get_errno(sched_get_priority_min(arg1));
10602 #ifdef TARGET_NR_sched_rr_get_interval
10603     case TARGET_NR_sched_rr_get_interval:
10604         {
10605             struct timespec ts;
10606             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10607             if (!is_error(ret)) {
10608                 ret = host_to_target_timespec(arg2, &ts);
10609             }
10610         }
10611         return ret;
10612 #endif
10613 #ifdef TARGET_NR_sched_rr_get_interval_time64
10614     case TARGET_NR_sched_rr_get_interval_time64:
10615         {
10616             struct timespec ts;
10617             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10618             if (!is_error(ret)) {
10619                 ret = host_to_target_timespec64(arg2, &ts);
10620             }
10621         }
10622         return ret;
10623 #endif
10624 #if defined(TARGET_NR_nanosleep)
10625     case TARGET_NR_nanosleep:
10626         {
10627             struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
10629             ret = get_errno(safe_nanosleep(&req, &rem));
10630             if (is_error(ret) && arg2) {
10631                 host_to_target_timespec(arg2, &rem);
10632             }
10633         }
10634         return ret;
10635 #endif
10636     case TARGET_NR_prctl:
10637         switch (arg1) {
10638         case PR_GET_PDEATHSIG:
10639         {
10640             int deathsig;
10641             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10642             if (!is_error(ret) && arg2
10643                 && put_user_s32(deathsig, arg2)) {
10644                 return -TARGET_EFAULT;
10645             }
10646             return ret;
10647         }
10648 #ifdef PR_GET_NAME
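        /*
         * The thread name is a fixed 16-byte buffer (TASK_COMM_LEN),
         * including the trailing NUL, so copy exactly 16 bytes.
         */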
10649         case PR_GET_NAME:
10650         {
10651             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10652             if (!name) {
10653                 return -TARGET_EFAULT;
10654             }
10655             ret = get_errno(prctl(arg1, (unsigned long)name,
10656                                   arg3, arg4, arg5));
10657             unlock_user(name, arg2, 16);
10658             return ret;
10659         }
10660         case PR_SET_NAME:
10661         {
10662             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10663             if (!name) {
10664                 return -TARGET_EFAULT;
10665             }
10666             ret = get_errno(prctl(arg1, (unsigned long)name,
10667                                   arg3, arg4, arg5));
10668             unlock_user(name, arg2, 0);
10669             return ret;
10670         }
10671 #endif
10672 #ifdef TARGET_MIPS
10673         case TARGET_PR_GET_FP_MODE:
10674         {
10675             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10676             ret = 0;
10677             if (env->CP0_Status & (1 << CP0St_FR)) {
10678                 ret |= TARGET_PR_FP_MODE_FR;
10679             }
10680             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10681                 ret |= TARGET_PR_FP_MODE_FRE;
10682             }
10683             return ret;
10684         }
10685         case TARGET_PR_SET_FP_MODE:
10686         {
10687             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10688             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10689             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10690             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10691             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10692 
10693             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10694                                             TARGET_PR_FP_MODE_FRE;
10695 
10696             /* If nothing to change, return right away, successfully.  */
10697             if (old_fr == new_fr && old_fre == new_fre) {
10698                 return 0;
10699             }
10700             /* Check the value is valid */
10701             if (arg2 & ~known_bits) {
10702                 return -TARGET_EOPNOTSUPP;
10703             }
10704             /* Setting FRE without FR is not supported.  */
10705             if (new_fre && !new_fr) {
10706                 return -TARGET_EOPNOTSUPP;
10707             }
10708             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10709                 /* FR1 is not supported */
10710                 return -TARGET_EOPNOTSUPP;
10711             }
10712             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10713                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10714                 /* cannot set FR=0 */
10715                 return -TARGET_EOPNOTSUPP;
10716             }
10717             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10718                 /* Cannot set FRE=1 */
10719                 return -TARGET_EOPNOTSUPP;
10720             }
10721 
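            /*
             * Changing FR changes how the FP registers are viewed: with
             * FR=0 each even/odd pair of 32-bit registers maps onto one
             * 64-bit register.  Move the upper halves so the register
             * contents are preserved across the mode switch.
             */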
10722             int i;
10723             fpr_t *fpr = env->active_fpu.fpr;
10724             for (i = 0; i < 32 ; i += 2) {
10725                 if (!old_fr && new_fr) {
10726                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10727                 } else if (old_fr && !new_fr) {
10728                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10729                 }
10730             }
10731 
10732             if (new_fr) {
10733                 env->CP0_Status |= (1 << CP0St_FR);
10734                 env->hflags |= MIPS_HFLAG_F64;
10735             } else {
10736                 env->CP0_Status &= ~(1 << CP0St_FR);
10737                 env->hflags &= ~MIPS_HFLAG_F64;
10738             }
10739             if (new_fre) {
10740                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10741                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10742                     env->hflags |= MIPS_HFLAG_FRE;
10743                 }
10744             } else {
10745                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10746                 env->hflags &= ~MIPS_HFLAG_FRE;
10747             }
10748 
10749             return 0;
10750         }
10751 #endif /* MIPS */
10752 #ifdef TARGET_AARCH64
10753         case TARGET_PR_SVE_SET_VL:
10754             /*
10755              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10756              * PR_SVE_VL_INHERIT.  Note the kernel definition
10757              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10758              * even though the current architectural maximum is VQ=16.
10759              */
10760             ret = -TARGET_EINVAL;
10761             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10762                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10763                 CPUARMState *env = cpu_env;
10764                 ARMCPU *cpu = env_archcpu(env);
10765                 uint32_t vq, old_vq;
10766 
10767                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10768                 vq = MAX(arg2 / 16, 1);
10769                 vq = MIN(vq, cpu->sve_max_vq);
10770 
10771                 if (vq < old_vq) {
10772                     aarch64_sve_narrow_vq(env, vq);
10773                 }
10774                 env->vfp.zcr_el[1] = vq - 1;
10775                 arm_rebuild_hflags(env);
10776                 ret = vq * 16;
10777             }
10778             return ret;
10779         case TARGET_PR_SVE_GET_VL:
10780             ret = -TARGET_EINVAL;
10781             {
10782                 ARMCPU *cpu = env_archcpu(cpu_env);
10783                 if (cpu_isar_feature(aa64_sve, cpu)) {
10784                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10785                 }
10786             }
10787             return ret;
10788         case TARGET_PR_PAC_RESET_KEYS:
10789             {
10790                 CPUARMState *env = cpu_env;
10791                 ARMCPU *cpu = env_archcpu(env);
10792 
10793                 if (arg3 || arg4 || arg5) {
10794                     return -TARGET_EINVAL;
10795                 }
10796                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10797                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10798                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10799                                TARGET_PR_PAC_APGAKEY);
10800                     int ret = 0;
10801                     Error *err = NULL;
10802 
10803                     if (arg2 == 0) {
10804                         arg2 = all;
10805                     } else if (arg2 & ~all) {
10806                         return -TARGET_EINVAL;
10807                     }
10808                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10809                         ret |= qemu_guest_getrandom(&env->keys.apia,
10810                                                     sizeof(ARMPACKey), &err);
10811                     }
10812                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10813                         ret |= qemu_guest_getrandom(&env->keys.apib,
10814                                                     sizeof(ARMPACKey), &err);
10815                     }
10816                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10817                         ret |= qemu_guest_getrandom(&env->keys.apda,
10818                                                     sizeof(ARMPACKey), &err);
10819                     }
10820                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10821                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10822                                                     sizeof(ARMPACKey), &err);
10823                     }
10824                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10825                         ret |= qemu_guest_getrandom(&env->keys.apga,
10826                                                     sizeof(ARMPACKey), &err);
10827                     }
10828                     if (ret != 0) {
10829                         /*
10830                          * Some unknown failure in the crypto.  The best
10831                          * we can do is log it and fail the syscall.
10832                          * The real syscall cannot fail this way.
10833                          */
10834                         qemu_log_mask(LOG_UNIMP,
10835                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10836                                       error_get_pretty(err));
10837                         error_free(err);
10838                         return -TARGET_EIO;
10839                     }
10840                     return 0;
10841                 }
10842             }
10843             return -TARGET_EINVAL;
10844         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10845             {
10846                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10847                 CPUARMState *env = cpu_env;
10848                 ARMCPU *cpu = env_archcpu(env);
10849 
10850                 if (cpu_isar_feature(aa64_mte, cpu)) {
10851                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10852                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10853                 }
10854 
10855                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10856                     return -TARGET_EINVAL;
10857                 }
10858                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10859 
10860                 if (cpu_isar_feature(aa64_mte, cpu)) {
10861                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10862                     case TARGET_PR_MTE_TCF_NONE:
10863                     case TARGET_PR_MTE_TCF_SYNC:
10864                     case TARGET_PR_MTE_TCF_ASYNC:
10865                         break;
10866                     default:
                        return -TARGET_EINVAL;
10868                     }
10869 
10870                     /*
10871                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10872                      * Note that the syscall values are consistent with hw.
10873                      */
10874                     env->cp15.sctlr_el[1] =
10875                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10876                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10877 
10878                     /*
10879                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10880                      * Note that the syscall uses an include mask,
10881                      * and hardware uses an exclude mask -- invert.
10882                      */
10883                     env->cp15.gcr_el1 =
10884                         deposit64(env->cp15.gcr_el1, 0, 16,
10885                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10886                     arm_rebuild_hflags(env);
10887                 }
10888                 return 0;
10889             }
10890         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10891             {
10892                 abi_long ret = 0;
10893                 CPUARMState *env = cpu_env;
10894                 ARMCPU *cpu = env_archcpu(env);
10895 
10896                 if (arg2 || arg3 || arg4 || arg5) {
10897                     return -TARGET_EINVAL;
10898                 }
10899                 if (env->tagged_addr_enable) {
10900                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10901                 }
10902                 if (cpu_isar_feature(aa64_mte, cpu)) {
10903                     /* See above. */
10904                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10905                             << TARGET_PR_MTE_TCF_SHIFT);
10906                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10907                                     ~env->cp15.gcr_el1);
10908                 }
10909                 return ret;
10910             }
10911 #endif /* AARCH64 */
10912         case PR_GET_SECCOMP:
10913         case PR_SET_SECCOMP:
10914             /* Disable seccomp to prevent the target disabling syscalls we
10915              * need. */
10916             return -TARGET_EINVAL;
10917         default:
10918             /* Most prctl options have no pointer arguments */
10919             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10920         }
10921         break;
10922 #ifdef TARGET_NR_arch_prctl
10923     case TARGET_NR_arch_prctl:
10924         return do_arch_prctl(cpu_env, arg1, arg2);
10925 #endif
10926 #ifdef TARGET_NR_pread64
10927     case TARGET_NR_pread64:
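        /*
         * On ABIs that pass 64-bit syscall arguments in aligned register
         * pairs, a padding argument precedes the offset; shift the offset
         * halves down when regpairs_aligned() says so.
         */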
10928         if (regpairs_aligned(cpu_env, num)) {
10929             arg4 = arg5;
10930             arg5 = arg6;
10931         }
10932         if (arg2 == 0 && arg3 == 0) {
10933             /* Special-case NULL buffer and zero length, which should succeed */
10934             p = 0;
10935         } else {
10936             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10937             if (!p) {
10938                 return -TARGET_EFAULT;
10939             }
10940         }
10941         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10942         unlock_user(p, arg2, ret);
10943         return ret;
10944     case TARGET_NR_pwrite64:
10945         if (regpairs_aligned(cpu_env, num)) {
10946             arg4 = arg5;
10947             arg5 = arg6;
10948         }
10949         if (arg2 == 0 && arg3 == 0) {
10950             /* Special-case NULL buffer and zero length, which should succeed */
10951             p = 0;
10952         } else {
10953             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10954             if (!p) {
10955                 return -TARGET_EFAULT;
10956             }
10957         }
10958         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10959         unlock_user(p, arg2, 0);
10960         return ret;
10961 #endif
10962     case TARGET_NR_getcwd:
10963         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10964             return -TARGET_EFAULT;
10965         ret = get_errno(sys_getcwd1(p, arg2));
10966         unlock_user(p, arg1, ret);
10967         return ret;
10968     case TARGET_NR_capget:
10969     case TARGET_NR_capset:
10970     {
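        /*
         * capget and capset share the same argument layout; only the
         * direction of the data copy differs, so both are handled here
         * and told apart by 'num'.
         */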
10971         struct target_user_cap_header *target_header;
10972         struct target_user_cap_data *target_data = NULL;
10973         struct __user_cap_header_struct header;
10974         struct __user_cap_data_struct data[2];
10975         struct __user_cap_data_struct *dataptr = NULL;
10976         int i, target_datalen;
10977         int data_items = 1;
10978 
10979         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10980             return -TARGET_EFAULT;
10981         }
10982         header.version = tswap32(target_header->version);
10983         header.pid = tswap32(target_header->pid);
10984 
10985         if (header.version != _LINUX_CAPABILITY_VERSION) {
10986             /* Version 2 and up takes pointer to two user_data structs */
10987             data_items = 2;
10988         }
10989 
10990         target_datalen = sizeof(*target_data) * data_items;
10991 
10992         if (arg2) {
10993             if (num == TARGET_NR_capget) {
10994                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10995             } else {
10996                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10997             }
10998             if (!target_data) {
10999                 unlock_user_struct(target_header, arg1, 0);
11000                 return -TARGET_EFAULT;
11001             }
11002 
11003             if (num == TARGET_NR_capset) {
11004                 for (i = 0; i < data_items; i++) {
11005                     data[i].effective = tswap32(target_data[i].effective);
11006                     data[i].permitted = tswap32(target_data[i].permitted);
11007                     data[i].inheritable = tswap32(target_data[i].inheritable);
11008                 }
11009             }
11010 
11011             dataptr = data;
11012         }
11013 
11014         if (num == TARGET_NR_capget) {
11015             ret = get_errno(capget(&header, dataptr));
11016         } else {
11017             ret = get_errno(capset(&header, dataptr));
11018         }
11019 
11020         /* The kernel always updates version for both capget and capset */
11021         target_header->version = tswap32(header.version);
11022         unlock_user_struct(target_header, arg1, 1);
11023 
11024         if (arg2) {
11025             if (num == TARGET_NR_capget) {
11026                 for (i = 0; i < data_items; i++) {
11027                     target_data[i].effective = tswap32(data[i].effective);
11028                     target_data[i].permitted = tswap32(data[i].permitted);
11029                     target_data[i].inheritable = tswap32(data[i].inheritable);
11030                 }
11031                 unlock_user(target_data, arg2, target_datalen);
11032             } else {
11033                 unlock_user(target_data, arg2, 0);
11034             }
11035         }
11036         return ret;
11037     }
11038     case TARGET_NR_sigaltstack:
11039         return do_sigaltstack(arg1, arg2, cpu_env);
11040 
11041 #ifdef CONFIG_SENDFILE
11042 #ifdef TARGET_NR_sendfile
11043     case TARGET_NR_sendfile:
11044     {
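        /*
         * sendfile updates the offset in place: read it from guest memory
         * before the call and write the new value back on success.
         * sendfile64 below differs only in the width of the offset.
         */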
11045         off_t *offp = NULL;
11046         off_t off;
11047         if (arg3) {
11048             ret = get_user_sal(off, arg3);
11049             if (is_error(ret)) {
11050                 return ret;
11051             }
11052             offp = &off;
11053         }
11054         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11055         if (!is_error(ret) && arg3) {
11056             abi_long ret2 = put_user_sal(off, arg3);
11057             if (is_error(ret2)) {
11058                 ret = ret2;
11059             }
11060         }
11061         return ret;
11062     }
11063 #endif
11064 #ifdef TARGET_NR_sendfile64
11065     case TARGET_NR_sendfile64:
11066     {
11067         off_t *offp = NULL;
11068         off_t off;
11069         if (arg3) {
11070             ret = get_user_s64(off, arg3);
11071             if (is_error(ret)) {
11072                 return ret;
11073             }
11074             offp = &off;
11075         }
11076         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11077         if (!is_error(ret) && arg3) {
11078             abi_long ret2 = put_user_s64(off, arg3);
11079             if (is_error(ret2)) {
11080                 ret = ret2;
11081             }
11082         }
11083         return ret;
11084     }
11085 #endif
11086 #endif
11087 #ifdef TARGET_NR_vfork
11088     case TARGET_NR_vfork:
11089         return get_errno(do_fork(cpu_env,
11090                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11091                          0, 0, 0, 0));
11092 #endif
11093 #ifdef TARGET_NR_ugetrlimit
11094     case TARGET_NR_ugetrlimit:
11095     {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
11107         return ret;
11108     }
11109 #endif
11110 #ifdef TARGET_NR_truncate64
11111     case TARGET_NR_truncate64:
11112         if (!(p = lock_user_string(arg1)))
11113             return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11115         unlock_user(p, arg1, 0);
11116         return ret;
11117 #endif
11118 #ifdef TARGET_NR_ftruncate64
11119     case TARGET_NR_ftruncate64:
11120         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11121 #endif
11122 #ifdef TARGET_NR_stat64
11123     case TARGET_NR_stat64:
11124         if (!(p = lock_user_string(arg1))) {
11125             return -TARGET_EFAULT;
11126         }
11127         ret = get_errno(stat(path(p), &st));
11128         unlock_user(p, arg1, 0);
11129         if (!is_error(ret))
11130             ret = host_to_target_stat64(cpu_env, arg2, &st);
11131         return ret;
11132 #endif
11133 #ifdef TARGET_NR_lstat64
11134     case TARGET_NR_lstat64:
11135         if (!(p = lock_user_string(arg1))) {
11136             return -TARGET_EFAULT;
11137         }
11138         ret = get_errno(lstat(path(p), &st));
11139         unlock_user(p, arg1, 0);
11140         if (!is_error(ret))
11141             ret = host_to_target_stat64(cpu_env, arg2, &st);
11142         return ret;
11143 #endif
11144 #ifdef TARGET_NR_fstat64
11145     case TARGET_NR_fstat64:
11146         ret = get_errno(fstat(arg1, &st));
11147         if (!is_error(ret))
11148             ret = host_to_target_stat64(cpu_env, arg2, &st);
11149         return ret;
11150 #endif
11151 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11152 #ifdef TARGET_NR_fstatat64
11153     case TARGET_NR_fstatat64:
11154 #endif
11155 #ifdef TARGET_NR_newfstatat
11156     case TARGET_NR_newfstatat:
11157 #endif
11158         if (!(p = lock_user_string(arg2))) {
11159             return -TARGET_EFAULT;
11160         }
11161         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11162         unlock_user(p, arg2, 0);
11163         if (!is_error(ret))
11164             ret = host_to_target_stat64(cpu_env, arg3, &st);
11165         return ret;
11166 #endif
11167 #if defined(TARGET_NR_statx)
11168     case TARGET_NR_statx:
11169         {
11170             struct target_statx *target_stx;
11171             int dirfd = arg1;
11172             int flags = arg3;
11173 
11174             p = lock_user_string(arg2);
11175             if (p == NULL) {
11176                 return -TARGET_EFAULT;
11177             }
11178 #if defined(__NR_statx)
11179             {
11180                 /*
11181                  * It is assumed that struct statx is architecture independent.
11182                  */
11183                 struct target_statx host_stx;
11184                 int mask = arg4;
11185 
11186                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11187                 if (!is_error(ret)) {
11188                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11189                         unlock_user(p, arg2, 0);
11190                         return -TARGET_EFAULT;
11191                     }
11192                 }
11193 
11194                 if (ret != -TARGET_ENOSYS) {
11195                     unlock_user(p, arg2, 0);
11196                     return ret;
11197                 }
11198             }
11199 #endif
11200             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11201             unlock_user(p, arg2, 0);
11202 
11203             if (!is_error(ret)) {
11204                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11205                     return -TARGET_EFAULT;
11206                 }
11207                 memset(target_stx, 0, sizeof(*target_stx));
11208                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11209                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11210                 __put_user(st.st_ino, &target_stx->stx_ino);
11211                 __put_user(st.st_mode, &target_stx->stx_mode);
11212                 __put_user(st.st_uid, &target_stx->stx_uid);
11213                 __put_user(st.st_gid, &target_stx->stx_gid);
11214                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11215                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11216                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11217                 __put_user(st.st_size, &target_stx->stx_size);
11218                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11219                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11220                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11221                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11222                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11223                 unlock_user_struct(target_stx, arg5, 1);
11224             }
11225         }
11226         return ret;
11227 #endif
11228 #ifdef TARGET_NR_lchown
11229     case TARGET_NR_lchown:
11230         if (!(p = lock_user_string(arg1)))
11231             return -TARGET_EFAULT;
11232         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11233         unlock_user(p, arg1, 0);
11234         return ret;
11235 #endif
11236 #ifdef TARGET_NR_getuid
11237     case TARGET_NR_getuid:
11238         return get_errno(high2lowuid(getuid()));
11239 #endif
11240 #ifdef TARGET_NR_getgid
11241     case TARGET_NR_getgid:
11242         return get_errno(high2lowgid(getgid()));
11243 #endif
11244 #ifdef TARGET_NR_geteuid
11245     case TARGET_NR_geteuid:
11246         return get_errno(high2lowuid(geteuid()));
11247 #endif
11248 #ifdef TARGET_NR_getegid
11249     case TARGET_NR_getegid:
11250         return get_errno(high2lowgid(getegid()));
11251 #endif
11252     case TARGET_NR_setreuid:
11253         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11254     case TARGET_NR_setregid:
11255         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11256     case TARGET_NR_getgroups:
11257         {
11258             int gidsetsize = arg1;
11259             target_id *target_grouplist;
11260             gid_t *grouplist;
11261             int i;
11262 
11263             grouplist = alloca(gidsetsize * sizeof(gid_t));
11264             ret = get_errno(getgroups(gidsetsize, grouplist));
11265             if (gidsetsize == 0)
11266                 return ret;
11267             if (!is_error(ret)) {
11268                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11269                 if (!target_grouplist)
11270                     return -TARGET_EFAULT;
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
11273                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11274             }
11275         }
11276         return ret;
11277     case TARGET_NR_setgroups:
11278         {
11279             int gidsetsize = arg1;
11280             target_id *target_grouplist;
11281             gid_t *grouplist = NULL;
11282             int i;
11283             if (gidsetsize) {
11284                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11285                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11286                 if (!target_grouplist) {
11287                     return -TARGET_EFAULT;
11288                 }
11289                 for (i = 0; i < gidsetsize; i++) {
11290                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11291                 }
11292                 unlock_user(target_grouplist, arg2, 0);
11293             }
11294             return get_errno(setgroups(gidsetsize, grouplist));
11295         }
11296     case TARGET_NR_fchown:
11297         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11298 #if defined(TARGET_NR_fchownat)
11299     case TARGET_NR_fchownat:
11300         if (!(p = lock_user_string(arg2)))
11301             return -TARGET_EFAULT;
11302         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11303                                  low2highgid(arg4), arg5));
11304         unlock_user(p, arg2, 0);
11305         return ret;
11306 #endif
11307 #ifdef TARGET_NR_setresuid
11308     case TARGET_NR_setresuid:
11309         return get_errno(sys_setresuid(low2highuid(arg1),
11310                                        low2highuid(arg2),
11311                                        low2highuid(arg3)));
11312 #endif
11313 #ifdef TARGET_NR_getresuid
11314     case TARGET_NR_getresuid:
11315         {
11316             uid_t ruid, euid, suid;
11317             ret = get_errno(getresuid(&ruid, &euid, &suid));
11318             if (!is_error(ret)) {
11319                 if (put_user_id(high2lowuid(ruid), arg1)
11320                     || put_user_id(high2lowuid(euid), arg2)
11321                     || put_user_id(high2lowuid(suid), arg3))
11322                     return -TARGET_EFAULT;
11323             }
11324         }
11325         return ret;
11326 #endif
#ifdef TARGET_NR_setresgid
11328     case TARGET_NR_setresgid:
11329         return get_errno(sys_setresgid(low2highgid(arg1),
11330                                        low2highgid(arg2),
11331                                        low2highgid(arg3)));
11332 #endif
11333 #ifdef TARGET_NR_getresgid
11334     case TARGET_NR_getresgid:
11335         {
11336             gid_t rgid, egid, sgid;
11337             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11338             if (!is_error(ret)) {
11339                 if (put_user_id(high2lowgid(rgid), arg1)
11340                     || put_user_id(high2lowgid(egid), arg2)
11341                     || put_user_id(high2lowgid(sgid), arg3))
11342                     return -TARGET_EFAULT;
11343             }
11344         }
11345         return ret;
11346 #endif
11347 #ifdef TARGET_NR_chown
11348     case TARGET_NR_chown:
11349         if (!(p = lock_user_string(arg1)))
11350             return -TARGET_EFAULT;
11351         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11352         unlock_user(p, arg1, 0);
11353         return ret;
11354 #endif
11355     case TARGET_NR_setuid:
11356         return get_errno(sys_setuid(low2highuid(arg1)));
11357     case TARGET_NR_setgid:
11358         return get_errno(sys_setgid(low2highgid(arg1)));
11359     case TARGET_NR_setfsuid:
11360         return get_errno(setfsuid(arg1));
11361     case TARGET_NR_setfsgid:
11362         return get_errno(setfsgid(arg1));
11363 
11364 #ifdef TARGET_NR_lchown32
11365     case TARGET_NR_lchown32:
11366         if (!(p = lock_user_string(arg1)))
11367             return -TARGET_EFAULT;
11368         ret = get_errno(lchown(p, arg2, arg3));
11369         unlock_user(p, arg1, 0);
11370         return ret;
11371 #endif
11372 #ifdef TARGET_NR_getuid32
11373     case TARGET_NR_getuid32:
11374         return get_errno(getuid());
11375 #endif
11376 
11377 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11378    /* Alpha specific */
11379     case TARGET_NR_getxuid:
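        /*
         * Alpha getxuid returns two values: the real uid as the normal
         * result and the effective uid in register a4.  getxgid below
         * does the same for the group ids.
         */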
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
11385         return get_errno(getuid());
11386 #endif
11387 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11388    /* Alpha specific */
11389     case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
11395         return get_errno(getgid());
11396 #endif
11397 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11398     /* Alpha specific */
11399     case TARGET_NR_osf_getsysinfo:
11400         ret = -TARGET_EOPNOTSUPP;
11401         switch (arg1) {
11402           case TARGET_GSI_IEEE_FP_CONTROL:
11403             {
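                /*
                 * The software completion word holds the trap-enable and
                 * mapping bits; the accumulated exception status is kept
                 * in the FPCR, so merge it in before returning the value.
                 */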
11404                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11405                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11406 
11407                 swcr &= ~SWCR_STATUS_MASK;
11408                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11409 
                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
11412                 ret = 0;
11413             }
11414             break;
11415 
11416           /* case GSI_IEEE_STATE_AT_SIGNAL:
11417              -- Not implemented in linux kernel.
11418              case GSI_UACPROC:
11419              -- Retrieves current unaligned access state; not much used.
11420              case GSI_PROC_TYPE:
11421              -- Retrieves implver information; surely not used.
11422              case GSI_GET_HWRPB:
11423              -- Grabs a copy of the HWRPB; surely not used.
11424           */
11425         }
11426         return ret;
11427 #endif
11428 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11429     /* Alpha specific */
11430     case TARGET_NR_osf_setsysinfo:
11431         ret = -TARGET_EOPNOTSUPP;
11432         switch (arg1) {
11433           case TARGET_SSI_IEEE_FP_CONTROL:
11434             {
11435                 uint64_t swcr, fpcr;
11436 
                if (get_user_u64(swcr, arg2)) {
11438                     return -TARGET_EFAULT;
11439                 }
11440 
11441                 /*
11442                  * The kernel calls swcr_update_status to update the
11443                  * status bits from the fpcr at every point that it
11444                  * could be queried.  Therefore, we store the status
11445                  * bits only in FPCR.
11446                  */
11447                 ((CPUAlphaState *)cpu_env)->swcr
11448                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11449 
11450                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11451                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11452                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11453                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11454                 ret = 0;
11455             }
11456             break;
11457 
11458           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11459             {
11460                 uint64_t exc, fpcr, fex;
11461 
11462                 if (get_user_u64(exc, arg2)) {
11463                     return -TARGET_EFAULT;
11464                 }
11465                 exc &= SWCR_STATUS_MASK;
11466                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11467 
11468                 /* Old exceptions are not signaled.  */
11469                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11470                 fex = exc & ~fex;
11471                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11472                 fex &= ((CPUArchState *)cpu_env)->swcr;
11473 
11474                 /* Update the hardware fpcr.  */
11475                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11476                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11477 
11478                 if (fex) {
11479                     int si_code = TARGET_FPE_FLTUNK;
11480                     target_siginfo_t info;
11481 
11482                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11483                         si_code = TARGET_FPE_FLTUND;
11484                     }
11485                     if (fex & SWCR_TRAP_ENABLE_INE) {
11486                         si_code = TARGET_FPE_FLTRES;
11487                     }
11488                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11489                         si_code = TARGET_FPE_FLTUND;
11490                     }
11491                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11492                         si_code = TARGET_FPE_FLTOVF;
11493                     }
11494                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11495                         si_code = TARGET_FPE_FLTDIV;
11496                     }
11497                     if (fex & SWCR_TRAP_ENABLE_INV) {
11498                         si_code = TARGET_FPE_FLTINV;
11499                     }
11500 
11501                     info.si_signo = SIGFPE;
11502                     info.si_errno = 0;
11503                     info.si_code = si_code;
11504                     info._sifields._sigfault._addr
11505                         = ((CPUArchState *)cpu_env)->pc;
11506                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11507                                  QEMU_SI_FAULT, &info);
11508                 }
11509                 ret = 0;
11510             }
11511             break;
11512 
11513           /* case SSI_NVPAIRS:
11514              -- Used with SSIN_UACPROC to enable unaligned accesses.
11515              case SSI_IEEE_STATE_AT_SIGNAL:
11516              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11517              -- Not implemented in linux kernel
11518           */
11519         }
11520         return ret;
11521 #endif
11522 #ifdef TARGET_NR_osf_sigprocmask
11523     /* Alpha specific.  */
11524     case TARGET_NR_osf_sigprocmask:
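        /*
         * Unlike sigprocmask(2), this returns the previous signal mask
         * as the syscall result rather than through a pointer.
         */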
11525         {
11526             abi_ulong mask;
11527             int how;
11528             sigset_t set, oldset;
11529 
            switch (arg1) {
11531             case TARGET_SIG_BLOCK:
11532                 how = SIG_BLOCK;
11533                 break;
11534             case TARGET_SIG_UNBLOCK:
11535                 how = SIG_UNBLOCK;
11536                 break;
11537             case TARGET_SIG_SETMASK:
11538                 how = SIG_SETMASK;
11539                 break;
11540             default:
11541                 return -TARGET_EINVAL;
11542             }
11543             mask = arg2;
11544             target_to_host_old_sigset(&set, &mask);
11545             ret = do_sigprocmask(how, &set, &oldset);
11546             if (!ret) {
11547                 host_to_target_old_sigset(&mask, &oldset);
11548                 ret = mask;
11549             }
11550         }
11551         return ret;
11552 #endif
11553 
11554 #ifdef TARGET_NR_getgid32
11555     case TARGET_NR_getgid32:
11556         return get_errno(getgid());
11557 #endif
11558 #ifdef TARGET_NR_geteuid32
11559     case TARGET_NR_geteuid32:
11560         return get_errno(geteuid());
11561 #endif
11562 #ifdef TARGET_NR_getegid32
11563     case TARGET_NR_getegid32:
11564         return get_errno(getegid());
11565 #endif
11566 #ifdef TARGET_NR_setreuid32
11567     case TARGET_NR_setreuid32:
11568         return get_errno(setreuid(arg1, arg2));
11569 #endif
11570 #ifdef TARGET_NR_setregid32
11571     case TARGET_NR_setregid32:
11572         return get_errno(setregid(arg1, arg2));
11573 #endif
11574 #ifdef TARGET_NR_getgroups32
11575     case TARGET_NR_getgroups32:
11576         {
11577             int gidsetsize = arg1;
11578             uint32_t *target_grouplist;
11579             gid_t *grouplist;
11580             int i;
11581 
11582             grouplist = alloca(gidsetsize * sizeof(gid_t));
11583             ret = get_errno(getgroups(gidsetsize, grouplist));
11584             if (gidsetsize == 0)
11585                 return ret;
11586             if (!is_error(ret)) {
11587                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11588                 if (!target_grouplist) {
11589                     return -TARGET_EFAULT;
11590                 }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
11593                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11594             }
11595         }
11596         return ret;
11597 #endif
11598 #ifdef TARGET_NR_setgroups32
11599     case TARGET_NR_setgroups32:
11600         {
11601             int gidsetsize = arg1;
11602             uint32_t *target_grouplist;
11603             gid_t *grouplist;
11604             int i;
11605 
11606             grouplist = alloca(gidsetsize * sizeof(gid_t));
11607             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11608             if (!target_grouplist) {
11609                 return -TARGET_EFAULT;
11610             }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
11613             unlock_user(target_grouplist, arg2, 0);
11614             return get_errno(setgroups(gidsetsize, grouplist));
11615         }
11616 #endif
11617 #ifdef TARGET_NR_fchown32
11618     case TARGET_NR_fchown32:
11619         return get_errno(fchown(arg1, arg2, arg3));
11620 #endif
11621 #ifdef TARGET_NR_setresuid32
11622     case TARGET_NR_setresuid32:
11623         return get_errno(sys_setresuid(arg1, arg2, arg3));
11624 #endif
11625 #ifdef TARGET_NR_getresuid32
11626     case TARGET_NR_getresuid32:
11627         {
11628             uid_t ruid, euid, suid;
11629             ret = get_errno(getresuid(&ruid, &euid, &suid));
11630             if (!is_error(ret)) {
11631                 if (put_user_u32(ruid, arg1)
11632                     || put_user_u32(euid, arg2)
11633                     || put_user_u32(suid, arg3))
11634                     return -TARGET_EFAULT;
11635             }
11636         }
11637         return ret;
11638 #endif
11639 #ifdef TARGET_NR_setresgid32
11640     case TARGET_NR_setresgid32:
11641         return get_errno(sys_setresgid(arg1, arg2, arg3));
11642 #endif
11643 #ifdef TARGET_NR_getresgid32
11644     case TARGET_NR_getresgid32:
11645         {
11646             gid_t rgid, egid, sgid;
11647             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11648             if (!is_error(ret)) {
11649                 if (put_user_u32(rgid, arg1)
11650                     || put_user_u32(egid, arg2)
11651                     || put_user_u32(sgid, arg3))
11652                     return -TARGET_EFAULT;
11653             }
11654         }
11655         return ret;
11656 #endif
11657 #ifdef TARGET_NR_chown32
11658     case TARGET_NR_chown32:
11659         if (!(p = lock_user_string(arg1)))
11660             return -TARGET_EFAULT;
11661         ret = get_errno(chown(p, arg2, arg3));
11662         unlock_user(p, arg1, 0);
11663         return ret;
11664 #endif
11665 #ifdef TARGET_NR_setuid32
11666     case TARGET_NR_setuid32:
11667         return get_errno(sys_setuid(arg1));
11668 #endif
11669 #ifdef TARGET_NR_setgid32
11670     case TARGET_NR_setgid32:
11671         return get_errno(sys_setgid(arg1));
11672 #endif
11673 #ifdef TARGET_NR_setfsuid32
11674     case TARGET_NR_setfsuid32:
11675         return get_errno(setfsuid(arg1));
11676 #endif
11677 #ifdef TARGET_NR_setfsgid32
11678     case TARGET_NR_setfsgid32:
11679         return get_errno(setfsgid(arg1));
11680 #endif
11681 #ifdef TARGET_NR_mincore
11682     case TARGET_NR_mincore:
11683         {
11684             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11685             if (!a) {
11686                 return -TARGET_ENOMEM;
11687             }
11688             p = lock_user_string(arg3);
11689             if (!p) {
11690                 ret = -TARGET_EFAULT;
11691             } else {
11692                 ret = get_errno(mincore(a, arg2, p));
11693                 unlock_user(p, arg3, ret);
11694             }
11695             unlock_user(a, arg1, 0);
11696         }
11697         return ret;
11698 #endif
11699 #ifdef TARGET_NR_arm_fadvise64_64
11700     case TARGET_NR_arm_fadvise64_64:
11701         /* arm_fadvise64_64 looks like fadvise64_64 but
11702          * with different argument order: fd, advice, offset, len
11703          * rather than the usual fd, offset, len, advice.
11704          * Note that offset and len are both 64-bit so appear as
11705          * pairs of 32-bit registers.
11706          */
11707         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11708                             target_offset64(arg5, arg6), arg2);
11709         return -host_to_target_errno(ret);
11710 #endif
11711 
11712 #if TARGET_ABI_BITS == 32
11713 
11714 #ifdef TARGET_NR_fadvise64_64
11715     case TARGET_NR_fadvise64_64:
11716 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11717         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11718         ret = arg2;
11719         arg2 = arg3;
11720         arg3 = arg4;
11721         arg4 = arg5;
11722         arg5 = arg6;
11723         arg6 = ret;
11724 #else
11725         /* 6 args: fd, offset (high, low), len (high, low), advice */
11726         if (regpairs_aligned(cpu_env, num)) {
11727             /* offset is in (3,4), len in (5,6) and advice in 7 */
11728             arg2 = arg3;
11729             arg3 = arg4;
11730             arg4 = arg5;
11731             arg5 = arg6;
11732             arg6 = arg7;
11733         }
11734 #endif
11735         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11736                             target_offset64(arg4, arg5), arg6);
11737         return -host_to_target_errno(ret);
11738 #endif
11739 
11740 #ifdef TARGET_NR_fadvise64
11741     case TARGET_NR_fadvise64:
11742         /* 5 args: fd, offset (high, low), len, advice */
11743         if (regpairs_aligned(cpu_env, num)) {
11744             /* offset is in (3,4), len in 5 and advice in 6 */
11745             arg2 = arg3;
11746             arg3 = arg4;
11747             arg4 = arg5;
11748             arg5 = arg6;
11749         }
11750         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11751         return -host_to_target_errno(ret);
11752 #endif
11753 
11754 #else /* not a 32-bit ABI */
11755 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11756 #ifdef TARGET_NR_fadvise64_64
11757     case TARGET_NR_fadvise64_64:
11758 #endif
11759 #ifdef TARGET_NR_fadvise64
11760     case TARGET_NR_fadvise64:
11761 #endif
11762 #ifdef TARGET_S390X
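        /*
         * s390x defines POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE as 6 and
         * 7, so remap those guest advice values; 4 and 5 are deliberately
         * turned into invalid host values.
         */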
11763         switch (arg4) {
11764         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11765         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11766         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11767         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11768         default: break;
11769         }
11770 #endif
11771         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11772 #endif
11773 #endif /* end of 64-bit ABI fadvise handling */
11774 
11775 #ifdef TARGET_NR_madvise
11776     case TARGET_NR_madvise:
11777         /* A straight passthrough may not be safe because qemu sometimes
11778            turns private file-backed mappings into anonymous mappings.
11779            This will break MADV_DONTNEED.
11780            This is a hint, so ignoring and returning success is ok.  */
11781         return 0;
11782 #endif
11783 #ifdef TARGET_NR_fcntl64
11784     case TARGET_NR_fcntl64:
11785     {
11786         int cmd;
11787         struct flock64 fl;
11788         from_flock64_fn *copyfrom = copy_from_user_flock64;
11789         to_flock64_fn *copyto = copy_to_user_flock64;
11790 
11791 #ifdef TARGET_ARM
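        /*
         * Old-ABI ARM lays out struct flock64 with different padding, so use
         * the OABI copy helpers when the CPU is not running in EABI mode.
         */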
11792         if (!((CPUARMState *)cpu_env)->eabi) {
11793             copyfrom = copy_from_user_oabi_flock64;
11794             copyto = copy_to_user_oabi_flock64;
11795         }
11796 #endif
11797 
11798         cmd = target_to_host_fcntl_cmd(arg2);
11799         if (cmd == -TARGET_EINVAL) {
11800             return cmd;
11801         }
11802 
11803         switch(arg2) {
11804         case TARGET_F_GETLK64:
11805             ret = copyfrom(&fl, arg3);
11806             if (ret) {
11807                 break;
11808             }
11809             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11810             if (ret == 0) {
11811                 ret = copyto(arg3, &fl);
11812             }
11813             break;
11814 
11815         case TARGET_F_SETLK64:
11816         case TARGET_F_SETLKW64:
11817             ret = copyfrom(&fl, arg3);
11818             if (ret) {
11819                 break;
11820             }
11821             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11822             break;
11823         default:
11824             ret = do_fcntl(arg1, arg2, arg3);
11825             break;
11826         }
11827         return ret;
11828     }
11829 #endif
11830 #ifdef TARGET_NR_cacheflush
11831     case TARGET_NR_cacheflush:
11832         /* self-modifying code is handled automatically, so nothing needed */
11833         return 0;
11834 #endif
11835 #ifdef TARGET_NR_getpagesize
11836     case TARGET_NR_getpagesize:
11837         return TARGET_PAGE_SIZE;
11838 #endif
11839     case TARGET_NR_gettid:
11840         return get_errno(sys_gettid());
11841 #ifdef TARGET_NR_readahead
11842     case TARGET_NR_readahead:
11843 #if TARGET_ABI_BITS == 32
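        /*
         * On 32-bit ABIs the 64-bit offset is split across a register pair;
         * targets that require an aligned pair shift the arguments up by one.
         */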
11844         if (regpairs_aligned(cpu_env, num)) {
11845             arg2 = arg3;
11846             arg3 = arg4;
11847             arg4 = arg5;
11848         }
11849         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11850 #else
11851         ret = get_errno(readahead(arg1, arg2, arg3));
11852 #endif
11853         return ret;
11854 #endif
11855 #ifdef CONFIG_ATTR
11856 #ifdef TARGET_NR_setxattr
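    /*
     * The extended-attribute syscalls share one pattern: lock the guest
     * name/value buffers, call the corresponding host syscall, then unlock.
     * Null buffers are passed through so size-query calls keep working.
     */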
11857     case TARGET_NR_listxattr:
11858     case TARGET_NR_llistxattr:
11859     {
11860         void *p, *b = 0;
11861         if (arg2) {
11862             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11863             if (!b) {
11864                 return -TARGET_EFAULT;
11865             }
11866         }
11867         p = lock_user_string(arg1);
11868         if (p) {
11869             if (num == TARGET_NR_listxattr) {
11870                 ret = get_errno(listxattr(p, b, arg3));
11871             } else {
11872                 ret = get_errno(llistxattr(p, b, arg3));
11873             }
11874         } else {
11875             ret = -TARGET_EFAULT;
11876         }
11877         unlock_user(p, arg1, 0);
11878         unlock_user(b, arg2, arg3);
11879         return ret;
11880     }
11881     case TARGET_NR_flistxattr:
11882     {
11883         void *b = 0;
11884         if (arg2) {
11885             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11886             if (!b) {
11887                 return -TARGET_EFAULT;
11888             }
11889         }
11890         ret = get_errno(flistxattr(arg1, b, arg3));
11891         unlock_user(b, arg2, arg3);
11892         return ret;
11893     }
11894     case TARGET_NR_setxattr:
11895     case TARGET_NR_lsetxattr:
11896         {
11897             void *p, *n, *v = 0;
11898             if (arg3) {
11899                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11900                 if (!v) {
11901                     return -TARGET_EFAULT;
11902                 }
11903             }
11904             p = lock_user_string(arg1);
11905             n = lock_user_string(arg2);
11906             if (p && n) {
11907                 if (num == TARGET_NR_setxattr) {
11908                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11909                 } else {
11910                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11911                 }
11912             } else {
11913                 ret = -TARGET_EFAULT;
11914             }
11915             unlock_user(p, arg1, 0);
11916             unlock_user(n, arg2, 0);
11917             unlock_user(v, arg3, 0);
11918         }
11919         return ret;
11920     case TARGET_NR_fsetxattr:
11921         {
11922             void *n, *v = 0;
11923             if (arg3) {
11924                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11925                 if (!v) {
11926                     return -TARGET_EFAULT;
11927                 }
11928             }
11929             n = lock_user_string(arg2);
11930             if (n) {
11931                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11932             } else {
11933                 ret = -TARGET_EFAULT;
11934             }
11935             unlock_user(n, arg2, 0);
11936             unlock_user(v, arg3, 0);
11937         }
11938         return ret;
11939     case TARGET_NR_getxattr:
11940     case TARGET_NR_lgetxattr:
11941         {
11942             void *p, *n, *v = 0;
11943             if (arg3) {
11944                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11945                 if (!v) {
11946                     return -TARGET_EFAULT;
11947                 }
11948             }
11949             p = lock_user_string(arg1);
11950             n = lock_user_string(arg2);
11951             if (p && n) {
11952                 if (num == TARGET_NR_getxattr) {
11953                     ret = get_errno(getxattr(p, n, v, arg4));
11954                 } else {
11955                     ret = get_errno(lgetxattr(p, n, v, arg4));
11956                 }
11957             } else {
11958                 ret = -TARGET_EFAULT;
11959             }
11960             unlock_user(p, arg1, 0);
11961             unlock_user(n, arg2, 0);
11962             unlock_user(v, arg3, arg4);
11963         }
11964         return ret;
11965     case TARGET_NR_fgetxattr:
11966         {
11967             void *n, *v = 0;
11968             if (arg3) {
11969                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11970                 if (!v) {
11971                     return -TARGET_EFAULT;
11972                 }
11973             }
11974             n = lock_user_string(arg2);
11975             if (n) {
11976                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11977             } else {
11978                 ret = -TARGET_EFAULT;
11979             }
11980             unlock_user(n, arg2, 0);
11981             unlock_user(v, arg3, arg4);
11982         }
11983         return ret;
11984     case TARGET_NR_removexattr:
11985     case TARGET_NR_lremovexattr:
11986         {
11987             void *p, *n;
11988             p = lock_user_string(arg1);
11989             n = lock_user_string(arg2);
11990             if (p && n) {
11991                 if (num == TARGET_NR_removexattr) {
11992                     ret = get_errno(removexattr(p, n));
11993                 } else {
11994                     ret = get_errno(lremovexattr(p, n));
11995                 }
11996             } else {
11997                 ret = -TARGET_EFAULT;
11998             }
11999             unlock_user(p, arg1, 0);
12000             unlock_user(n, arg2, 0);
12001         }
12002         return ret;
12003     case TARGET_NR_fremovexattr:
12004         {
12005             void *n;
12006             n = lock_user_string(arg2);
12007             if (n) {
12008                 ret = get_errno(fremovexattr(arg1, n));
12009             } else {
12010                 ret = -TARGET_EFAULT;
12011             }
12012             unlock_user(n, arg2, 0);
12013         }
12014         return ret;
12015 #endif
12016 #endif /* CONFIG_ATTR */
12017 #ifdef TARGET_NR_set_thread_area
12018     case TARGET_NR_set_thread_area:
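        /*
         * set_thread_area is architecture-specific: MIPS and CRIS store the
         * TLS pointer in a CPU register, 32-bit x86 installs a descriptor via
         * do_set_thread_area(), and m68k keeps the value in the TaskState.
         */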
12019 #if defined(TARGET_MIPS)
12020       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12021       return 0;
12022 #elif defined(TARGET_CRIS)
12023       if (arg1 & 0xff)
12024           ret = -TARGET_EINVAL;
12025       else {
12026           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12027           ret = 0;
12028       }
12029       return ret;
12030 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12031       return do_set_thread_area(cpu_env, arg1);
12032 #elif defined(TARGET_M68K)
12033       {
12034           TaskState *ts = cpu->opaque;
12035           ts->tp_value = arg1;
12036           return 0;
12037       }
12038 #else
12039       return -TARGET_ENOSYS;
12040 #endif
12041 #endif
12042 #ifdef TARGET_NR_get_thread_area
12043     case TARGET_NR_get_thread_area:
12044 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12045         return do_get_thread_area(cpu_env, arg1);
12046 #elif defined(TARGET_M68K)
12047         {
12048             TaskState *ts = cpu->opaque;
12049             return ts->tp_value;
12050         }
12051 #else
12052         return -TARGET_ENOSYS;
12053 #endif
12054 #endif
12055 #ifdef TARGET_NR_getdomainname
12056     case TARGET_NR_getdomainname:
12057         return -TARGET_ENOSYS;
12058 #endif
12059 
12060 #ifdef TARGET_NR_clock_settime
12061     case TARGET_NR_clock_settime:
12062     {
12063         struct timespec ts;
12064 
12065         ret = target_to_host_timespec(&ts, arg2);
12066         if (!is_error(ret)) {
12067             ret = get_errno(clock_settime(arg1, &ts));
12068         }
12069         return ret;
12070     }
12071 #endif
12072 #ifdef TARGET_NR_clock_settime64
12073     case TARGET_NR_clock_settime64:
12074     {
12075         struct timespec ts;
12076 
12077         ret = target_to_host_timespec64(&ts, arg2);
12078         if (!is_error(ret)) {
12079             ret = get_errno(clock_settime(arg1, &ts));
12080         }
12081         return ret;
12082     }
12083 #endif
12084 #ifdef TARGET_NR_clock_gettime
12085     case TARGET_NR_clock_gettime:
12086     {
12087         struct timespec ts;
12088         ret = get_errno(clock_gettime(arg1, &ts));
12089         if (!is_error(ret)) {
12090             ret = host_to_target_timespec(arg2, &ts);
12091         }
12092         return ret;
12093     }
12094 #endif
12095 #ifdef TARGET_NR_clock_gettime64
12096     case TARGET_NR_clock_gettime64:
12097     {
12098         struct timespec ts;
12099         ret = get_errno(clock_gettime(arg1, &ts));
12100         if (!is_error(ret)) {
12101             ret = host_to_target_timespec64(arg2, &ts);
12102         }
12103         return ret;
12104     }
12105 #endif
12106 #ifdef TARGET_NR_clock_getres
12107     case TARGET_NR_clock_getres:
12108     {
12109         struct timespec ts;
12110         ret = get_errno(clock_getres(arg1, &ts));
12111         if (!is_error(ret)) {
12112             if (arg2 && host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
12113         }
12114         return ret;
12115     }
12116 #endif
12117 #ifdef TARGET_NR_clock_getres_time64
12118     case TARGET_NR_clock_getres_time64:
12119     {
12120         struct timespec ts;
12121         ret = get_errno(clock_getres(arg1, &ts));
12122         if (!is_error(ret)) {
12123             if (arg2 && host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
12124         }
12125         return ret;
12126     }
12127 #endif
12128 #ifdef TARGET_NR_clock_nanosleep
12129     case TARGET_NR_clock_nanosleep:
12130     {
12131         struct timespec ts;
12132         if (target_to_host_timespec(&ts, arg3)) {
12133             return -TARGET_EFAULT;
12134         }
12135         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12136                                              &ts, arg4 ? &ts : NULL));
12137         /*
12138          * If the call is interrupted by a signal handler it fails with
12139          * -TARGET_EINTR; when arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12140          * the remaining unslept time is written back to arg4.
12141          */
12142         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12143             host_to_target_timespec(arg4, &ts)) {
12144               return -TARGET_EFAULT;
12145         }
12146 
12147         return ret;
12148     }
12149 #endif
12150 #ifdef TARGET_NR_clock_nanosleep_time64
12151     case TARGET_NR_clock_nanosleep_time64:
12152     {
12153         struct timespec ts;
12154 
12155         if (target_to_host_timespec64(&ts, arg3)) {
12156             return -TARGET_EFAULT;
12157         }
12158 
12159         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12160                                              &ts, arg4 ? &ts : NULL));
12161 
12162         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12163             host_to_target_timespec64(arg4, &ts)) {
12164             return -TARGET_EFAULT;
12165         }
12166         return ret;
12167     }
12168 #endif
12169 
12170 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12171     case TARGET_NR_set_tid_address:
12172         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12173 #endif
12174 
12175     case TARGET_NR_tkill:
12176         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12177 
12178     case TARGET_NR_tgkill:
12179         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12180                          target_to_host_signal(arg3)));
12181 
12182 #ifdef TARGET_NR_set_robust_list
12183     case TARGET_NR_set_robust_list:
12184     case TARGET_NR_get_robust_list:
12185         /* The ABI for supporting robust futexes has userspace pass
12186          * the kernel a pointer to a linked list which is updated by
12187          * userspace after the syscall; the list is walked by the kernel
12188          * when the thread exits. Since the linked list in QEMU guest
12189          * memory isn't a valid linked list for the host and we have
12190          * no way to reliably intercept the thread-death event, we can't
12191          * support these. Silently return ENOSYS so that guest userspace
12192          * falls back to a non-robust futex implementation (which should
12193          * be OK except in the corner case of the guest crashing while
12194          * holding a mutex that is shared with another process via
12195          * shared memory).
12196          */
12197         return -TARGET_ENOSYS;
12198 #endif
12199 
12200 #if defined(TARGET_NR_utimensat)
12201     case TARGET_NR_utimensat:
12202         {
12203             struct timespec *tsp, ts[2];
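            /*
             * A null times pointer (arg3 == 0) means "set both timestamps to
             * the current time", so only convert the array when it is given.
             */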
12204             if (!arg3) {
12205                 tsp = NULL;
12206             } else {
12207                 if (target_to_host_timespec(ts, arg3)) {
12208                     return -TARGET_EFAULT;
12209                 }
12210                 if (target_to_host_timespec(ts + 1, arg3 +
12211                                             sizeof(struct target_timespec))) {
12212                     return -TARGET_EFAULT;
12213                 }
12214                 tsp = ts;
12215             }
12216             if (!arg2)
12217                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12218             else {
12219                 if (!(p = lock_user_string(arg2))) {
12220                     return -TARGET_EFAULT;
12221                 }
12222                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12223                 unlock_user(p, arg2, 0);
12224             }
12225         }
12226         return ret;
12227 #endif
12228 #ifdef TARGET_NR_utimensat_time64
12229     case TARGET_NR_utimensat_time64:
12230         {
12231             struct timespec *tsp, ts[2];
12232             if (!arg3) {
12233                 tsp = NULL;
12234             } else {
12235                 if (target_to_host_timespec64(ts, arg3)) {
12236                     return -TARGET_EFAULT;
12237                 }
12238                 if (target_to_host_timespec64(ts + 1, arg3 +
12239                                      sizeof(struct target__kernel_timespec))) {
12240                     return -TARGET_EFAULT;
12241                 }
12242                 tsp = ts;
12243             }
12244             if (!arg2)
12245                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12246             else {
12247                 p = lock_user_string(arg2);
12248                 if (!p) {
12249                     return -TARGET_EFAULT;
12250                 }
12251                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12252                 unlock_user(p, arg2, 0);
12253             }
12254         }
12255         return ret;
12256 #endif
12257 #ifdef TARGET_NR_futex
12258     case TARGET_NR_futex:
12259         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12260 #endif
12261 #ifdef TARGET_NR_futex_time64
12262     case TARGET_NR_futex_time64:
12263         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12264 #endif
12265 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12266     case TARGET_NR_inotify_init:
12267         ret = get_errno(sys_inotify_init());
12268         if (ret >= 0) {
12269             fd_trans_register(ret, &target_inotify_trans);
12270         }
12271         return ret;
12272 #endif
12273 #ifdef CONFIG_INOTIFY1
12274 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12275     case TARGET_NR_inotify_init1:
12276         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12277                                           fcntl_flags_tbl)));
12278         if (ret >= 0) {
12279             fd_trans_register(ret, &target_inotify_trans);
12280         }
12281         return ret;
12282 #endif
12283 #endif
12284 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12285     case TARGET_NR_inotify_add_watch:
12286         p = lock_user_string(arg2);
12287         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12288         unlock_user(p, arg2, 0);
12289         return ret;
12290 #endif
12291 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12292     case TARGET_NR_inotify_rm_watch:
12293         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12294 #endif
12295 
12296 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12297     case TARGET_NR_mq_open:
12298         {
12299             struct mq_attr posix_mq_attr;
12300             struct mq_attr *pposix_mq_attr;
12301             int host_flags;
12302 
12303             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12304             pposix_mq_attr = NULL;
12305             if (arg4) {
12306                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12307                     return -TARGET_EFAULT;
12308                 }
12309                 pposix_mq_attr = &posix_mq_attr;
12310             }
12311             p = lock_user_string(arg1 - 1);
12312             if (!p) {
12313                 return -TARGET_EFAULT;
12314             }
12315             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12316             unlock_user(p, arg1, 0);
12317         }
12318         return ret;
12319 
12320     case TARGET_NR_mq_unlink:
12321         p = lock_user_string(arg1 - 1);
12322         if (!p) {
12323             return -TARGET_EFAULT;
12324         }
12325         ret = get_errno(mq_unlink(p));
12326         unlock_user(p, arg1, 0);
12327         return ret;
12328 
12329 #ifdef TARGET_NR_mq_timedsend
12330     case TARGET_NR_mq_timedsend:
12331         {
12332             struct timespec ts;
12333 
12334             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12335             if (arg5 != 0) {
12336                 if (target_to_host_timespec(&ts, arg5)) {
12337                     return -TARGET_EFAULT;
12338                 }
12339                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12340                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12341                     return -TARGET_EFAULT;
12342                 }
12343             } else {
12344                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12345             }
12346             unlock_user(p, arg2, arg3);
12347         }
12348         return ret;
12349 #endif
12350 #ifdef TARGET_NR_mq_timedsend_time64
12351     case TARGET_NR_mq_timedsend_time64:
12352         {
12353             struct timespec ts;
12354 
12355             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12356             if (arg5 != 0) {
12357                 if (target_to_host_timespec64(&ts, arg5)) {
12358                     return -TARGET_EFAULT;
12359                 }
12360                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12361                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12362                     return -TARGET_EFAULT;
12363                 }
12364             } else {
12365                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12366             }
12367             unlock_user(p, arg2, arg3);
12368         }
12369         return ret;
12370 #endif
12371 
12372 #ifdef TARGET_NR_mq_timedreceive
12373     case TARGET_NR_mq_timedreceive:
12374         {
12375             struct timespec ts;
12376             unsigned int prio;
12377 
12378             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12379             if (arg5 != 0) {
12380                 if (target_to_host_timespec(&ts, arg5)) {
12381                     return -TARGET_EFAULT;
12382                 }
12383                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12384                                                      &prio, &ts));
12385                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12386                     return -TARGET_EFAULT;
12387                 }
12388             } else {
12389                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12390                                                      &prio, NULL));
12391             }
12392             unlock_user(p, arg2, arg3);
12393             if (arg4 != 0)
12394                 put_user_u32(prio, arg4);
12395         }
12396         return ret;
12397 #endif
12398 #ifdef TARGET_NR_mq_timedreceive_time64
12399     case TARGET_NR_mq_timedreceive_time64:
12400         {
12401             struct timespec ts;
12402             unsigned int prio;
12403 
12404             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12405             if (arg5 != 0) {
12406                 if (target_to_host_timespec64(&ts, arg5)) {
12407                     return -TARGET_EFAULT;
12408                 }
12409                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12410                                                      &prio, &ts));
12411                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12412                     return -TARGET_EFAULT;
12413                 }
12414             } else {
12415                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12416                                                      &prio, NULL));
12417             }
12418             unlock_user(p, arg2, arg3);
12419             if (arg4 != 0) {
12420                 put_user_u32(prio, arg4);
12421             }
12422         }
12423         return ret;
12424 #endif
12425 
12426     /* Not implemented for now... */
12427 /*     case TARGET_NR_mq_notify: */
12428 /*         break; */
12429 
12430     case TARGET_NR_mq_getsetattr:
12431         {
12432             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12433             ret = 0;
12434             if (arg2 != 0) {
12435                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12436                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12437                                            &posix_mq_attr_out));
12438             } else if (arg3 != 0) {
12439                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12440             }
12441             if (ret == 0 && arg3 != 0) {
12442                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12443             }
12444         }
12445         return ret;
12446 #endif
12447 
12448 #ifdef CONFIG_SPLICE
12449 #ifdef TARGET_NR_tee
12450     case TARGET_NR_tee:
12451         {
12452             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12453         }
12454         return ret;
12455 #endif
12456 #ifdef TARGET_NR_splice
12457     case TARGET_NR_splice:
12458         {
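            /*
             * The optional in/out offsets are 64-bit values in guest memory:
             * copy them in before the call and write back whatever the host
             * syscall leaves there.
             */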
12459             loff_t loff_in, loff_out;
12460             loff_t *ploff_in = NULL, *ploff_out = NULL;
12461             if (arg2) {
12462                 if (get_user_u64(loff_in, arg2)) {
12463                     return -TARGET_EFAULT;
12464                 }
12465                 ploff_in = &loff_in;
12466             }
12467             if (arg4) {
12468                 if (get_user_u64(loff_out, arg4)) {
12469                     return -TARGET_EFAULT;
12470                 }
12471                 ploff_out = &loff_out;
12472             }
12473             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12474             if (arg2) {
12475                 if (put_user_u64(loff_in, arg2)) {
12476                     return -TARGET_EFAULT;
12477                 }
12478             }
12479             if (arg4) {
12480                 if (put_user_u64(loff_out, arg4)) {
12481                     return -TARGET_EFAULT;
12482                 }
12483             }
12484         }
12485         return ret;
12486 #endif
12487 #ifdef TARGET_NR_vmsplice
12488     case TARGET_NR_vmsplice:
12489         {
12490             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12491             if (vec != NULL) {
12492                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12493                 unlock_iovec(vec, arg2, arg3, 0);
12494             } else {
12495                 ret = -host_to_target_errno(errno);
12496             }
12497         }
12498         return ret;
12499 #endif
12500 #endif /* CONFIG_SPLICE */
12501 #ifdef CONFIG_EVENTFD
12502 #if defined(TARGET_NR_eventfd)
12503     case TARGET_NR_eventfd:
12504         ret = get_errno(eventfd(arg1, 0));
12505         if (ret >= 0) {
12506             fd_trans_register(ret, &target_eventfd_trans);
12507         }
12508         return ret;
12509 #endif
12510 #if defined(TARGET_NR_eventfd2)
12511     case TARGET_NR_eventfd2:
12512     {
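        /*
         * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
         * and O_CLOEXEC, so translate those two bits from target to host.
         */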
12513         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12514         if (arg2 & TARGET_O_NONBLOCK) {
12515             host_flags |= O_NONBLOCK;
12516         }
12517         if (arg2 & TARGET_O_CLOEXEC) {
12518             host_flags |= O_CLOEXEC;
12519         }
12520         ret = get_errno(eventfd(arg1, host_flags));
12521         if (ret >= 0) {
12522             fd_trans_register(ret, &target_eventfd_trans);
12523         }
12524         return ret;
12525     }
12526 #endif
12527 #endif /* CONFIG_EVENTFD  */
12528 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12529     case TARGET_NR_fallocate:
12530 #if TARGET_ABI_BITS == 32
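        /*
         * On 32-bit ABIs the 64-bit offset and length arrive as the register
         * pairs (arg3, arg4) and (arg5, arg6).
         */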
12531         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12532                                   target_offset64(arg5, arg6)));
12533 #else
12534         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12535 #endif
12536         return ret;
12537 #endif
12538 #if defined(CONFIG_SYNC_FILE_RANGE)
12539 #if defined(TARGET_NR_sync_file_range)
12540     case TARGET_NR_sync_file_range:
12541 #if TARGET_ABI_BITS == 32
12542 #if defined(TARGET_MIPS)
12543         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12544                                         target_offset64(arg5, arg6), arg7));
12545 #else
12546         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12547                                         target_offset64(arg4, arg5), arg6));
12548 #endif /* !TARGET_MIPS */
12549 #else
12550         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12551 #endif
12552         return ret;
12553 #endif
12554 #if defined(TARGET_NR_sync_file_range2) || \
12555     defined(TARGET_NR_arm_sync_file_range)
12556 #if defined(TARGET_NR_sync_file_range2)
12557     case TARGET_NR_sync_file_range2:
12558 #endif
12559 #if defined(TARGET_NR_arm_sync_file_range)
12560     case TARGET_NR_arm_sync_file_range:
12561 #endif
12562         /* This is like sync_file_range but the arguments are reordered */
12563 #if TARGET_ABI_BITS == 32
12564         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12565                                         target_offset64(arg5, arg6), arg2));
12566 #else
12567         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12568 #endif
12569         return ret;
12570 #endif
12571 #endif
12572 #if defined(TARGET_NR_signalfd4)
12573     case TARGET_NR_signalfd4:
12574         return do_signalfd4(arg1, arg2, arg4);
12575 #endif
12576 #if defined(TARGET_NR_signalfd)
12577     case TARGET_NR_signalfd:
12578         return do_signalfd4(arg1, arg2, 0);
12579 #endif
12580 #if defined(CONFIG_EPOLL)
12581 #if defined(TARGET_NR_epoll_create)
12582     case TARGET_NR_epoll_create:
12583         return get_errno(epoll_create(arg1));
12584 #endif
12585 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12586     case TARGET_NR_epoll_create1:
12587         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12588 #endif
12589 #if defined(TARGET_NR_epoll_ctl)
12590     case TARGET_NR_epoll_ctl:
12591     {
12592         struct epoll_event ep;
12593         struct epoll_event *epp = 0;
12594         if (arg4) {
12595             if (arg2 != EPOLL_CTL_DEL) {
12596                 struct target_epoll_event *target_ep;
12597                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12598                     return -TARGET_EFAULT;
12599                 }
12600                 ep.events = tswap32(target_ep->events);
12601                 /*
12602                  * The epoll_data_t union is just opaque data to the kernel,
12603                  * so we transfer all 64 bits across and need not worry what
12604                  * actual data type it is.
12605                  */
12606                 ep.data.u64 = tswap64(target_ep->data.u64);
12607                 unlock_user_struct(target_ep, arg4, 0);
12608             }
12609             /*
12610              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12611              * non-null event pointer even though the argument is ignored.
12612              */
12614             epp = &ep;
12615         }
12616         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12617     }
12618 #endif
12619 
12620 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12621 #if defined(TARGET_NR_epoll_wait)
12622     case TARGET_NR_epoll_wait:
12623 #endif
12624 #if defined(TARGET_NR_epoll_pwait)
12625     case TARGET_NR_epoll_pwait:
12626 #endif
12627     {
12628         struct target_epoll_event *target_ep;
12629         struct epoll_event *ep;
12630         int epfd = arg1;
12631         int maxevents = arg3;
12632         int timeout = arg4;
12633 
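        /*
         * Reject out-of-range maxevents up front, mirroring the kernel's own
         * sanity check, before sizing the host event array from it.
         */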
12634         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12635             return -TARGET_EINVAL;
12636         }
12637 
12638         target_ep = lock_user(VERIFY_WRITE, arg2,
12639                               maxevents * sizeof(struct target_epoll_event), 1);
12640         if (!target_ep) {
12641             return -TARGET_EFAULT;
12642         }
12643 
12644         ep = g_try_new(struct epoll_event, maxevents);
12645         if (!ep) {
12646             unlock_user(target_ep, arg2, 0);
12647             return -TARGET_ENOMEM;
12648         }
12649 
12650         switch (num) {
12651 #if defined(TARGET_NR_epoll_pwait)
12652         case TARGET_NR_epoll_pwait:
12653         {
12654             target_sigset_t *target_set;
12655             sigset_t _set, *set = &_set;
12656 
12657             if (arg5) {
12658                 if (arg6 != sizeof(target_sigset_t)) {
12659                     ret = -TARGET_EINVAL;
12660                     break;
12661                 }
12662 
12663                 target_set = lock_user(VERIFY_READ, arg5,
12664                                        sizeof(target_sigset_t), 1);
12665                 if (!target_set) {
12666                     ret = -TARGET_EFAULT;
12667                     break;
12668                 }
12669                 target_to_host_sigset(set, target_set);
12670                 unlock_user(target_set, arg5, 0);
12671             } else {
12672                 set = NULL;
12673             }
12674 
12675             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12676                                              set, SIGSET_T_SIZE));
12677             break;
12678         }
12679 #endif
12680 #if defined(TARGET_NR_epoll_wait)
12681         case TARGET_NR_epoll_wait:
12682             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12683                                              NULL, 0));
12684             break;
12685 #endif
12686         default:
12687             ret = -TARGET_ENOSYS;
12688         }
12689         if (!is_error(ret)) {
12690             int i;
12691             for (i = 0; i < ret; i++) {
12692                 target_ep[i].events = tswap32(ep[i].events);
12693                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12694             }
12695             unlock_user(target_ep, arg2,
12696                         ret * sizeof(struct target_epoll_event));
12697         } else {
12698             unlock_user(target_ep, arg2, 0);
12699         }
12700         g_free(ep);
12701         return ret;
12702     }
12703 #endif
12704 #endif
12705 #ifdef TARGET_NR_prlimit64
12706     case TARGET_NR_prlimit64:
12707     {
12708         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12709         struct target_rlimit64 *target_rnew, *target_rold;
12710         struct host_rlimit64 rnew, rold, *rnewp = 0;
12711         int resource = target_to_host_resource(arg2);
12712 
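        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * applied: they would constrain the emulator itself rather than just
         * the guest, so for those only the old values are read back.
         */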
12713         if (arg3 && (resource != RLIMIT_AS &&
12714                      resource != RLIMIT_DATA &&
12715                      resource != RLIMIT_STACK)) {
12716             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12717                 return -TARGET_EFAULT;
12718             }
12719             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12720             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12721             unlock_user_struct(target_rnew, arg3, 0);
12722             rnewp = &rnew;
12723         }
12724 
12725         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12726         if (!is_error(ret) && arg4) {
12727             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12728                 return -TARGET_EFAULT;
12729             }
12730             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12731             target_rold->rlim_max = tswap64(rold.rlim_max);
12732             unlock_user_struct(target_rold, arg4, 1);
12733         }
12734         return ret;
12735     }
12736 #endif
12737 #ifdef TARGET_NR_gethostname
12738     case TARGET_NR_gethostname:
12739     {
12740         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12741         if (name) {
12742             ret = get_errno(gethostname(name, arg2));
12743             unlock_user(name, arg1, arg2);
12744         } else {
12745             ret = -TARGET_EFAULT;
12746         }
12747         return ret;
12748     }
12749 #endif
12750 #ifdef TARGET_NR_atomic_cmpxchg_32
12751     case TARGET_NR_atomic_cmpxchg_32:
12752     {
12753         /* should use start_exclusive from main.c */
12754         abi_ulong mem_value;
12755         if (get_user_u32(mem_value, arg6)) {
12756             target_siginfo_t info;
12757             info.si_signo = SIGSEGV;
12758             info.si_errno = 0;
12759             info.si_code = TARGET_SEGV_MAPERR;
12760             info._sifields._sigfault._addr = arg6;
12761             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12762                          QEMU_SI_FAULT, &info);
12763             ret = 0xdeadbeef;
12764 
12765         }
12766         if (mem_value == arg2)
12767             put_user_u32(arg1, arg6);
12768         return mem_value;
12769     }
12770 #endif
12771 #ifdef TARGET_NR_atomic_barrier
12772     case TARGET_NR_atomic_barrier:
12773         /* Like the kernel implementation and the
12774            qemu arm barrier, no-op this? */
12775         return 0;
12776 #endif
12777 
12778 #ifdef TARGET_NR_timer_create
12779     case TARGET_NR_timer_create:
12780     {
12781         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12782 
12783         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12784 
12785         int clkid = arg1;
12786         int timer_index = next_free_host_timer();
12787 
12788         if (timer_index < 0) {
12789             ret = -TARGET_EAGAIN;
12790         } else {
12791             timer_t *phtimer = g_posix_timers  + timer_index;
12792 
12793             if (arg2) {
12794                 phost_sevp = &host_sevp;
12795                 ret = target_to_host_sigevent(phost_sevp, arg2);
12796                 if (ret != 0) {
12797                     return ret;
12798                 }
12799             }
12800 
12801             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12802             if (ret) {
12803                 phtimer = NULL;
12804             } else {
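                /*
                 * Hand the guest a cookie built from TIMER_MAGIC plus the
                 * slot index; get_timer_id() later validates and decodes it.
                 */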
12805                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12806                     return -TARGET_EFAULT;
12807                 }
12808             }
12809         }
12810         return ret;
12811     }
12812 #endif
12813 
12814 #ifdef TARGET_NR_timer_settime
12815     case TARGET_NR_timer_settime:
12816     {
12817         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12818          * struct itimerspec * old_value */
12819         target_timer_t timerid = get_timer_id(arg1);
12820 
12821         if (timerid < 0) {
12822             ret = timerid;
12823         } else if (arg3 == 0) {
12824             ret = -TARGET_EINVAL;
12825         } else {
12826             timer_t htimer = g_posix_timers[timerid];
12827             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12828 
12829             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12830                 return -TARGET_EFAULT;
12831             }
12832             ret = get_errno(
12833                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12834             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12835                 return -TARGET_EFAULT;
12836             }
12837         }
12838         return ret;
12839     }
12840 #endif
12841 
12842 #ifdef TARGET_NR_timer_settime64
12843     case TARGET_NR_timer_settime64:
12844     {
12845         target_timer_t timerid = get_timer_id(arg1);
12846 
12847         if (timerid < 0) {
12848             ret = timerid;
12849         } else if (arg3 == 0) {
12850             ret = -TARGET_EINVAL;
12851         } else {
12852             timer_t htimer = g_posix_timers[timerid];
12853             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12854 
12855             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12856                 return -TARGET_EFAULT;
12857             }
12858             ret = get_errno(
12859                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12860             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12861                 return -TARGET_EFAULT;
12862             }
12863         }
12864         return ret;
12865     }
12866 #endif
12867 
12868 #ifdef TARGET_NR_timer_gettime
12869     case TARGET_NR_timer_gettime:
12870     {
12871         /* args: timer_t timerid, struct itimerspec *curr_value */
12872         target_timer_t timerid = get_timer_id(arg1);
12873 
12874         if (timerid < 0) {
12875             ret = timerid;
12876         } else if (!arg2) {
12877             ret = -TARGET_EFAULT;
12878         } else {
12879             timer_t htimer = g_posix_timers[timerid];
12880             struct itimerspec hspec;
12881             ret = get_errno(timer_gettime(htimer, &hspec));
12882 
12883             if (host_to_target_itimerspec(arg2, &hspec)) {
12884                 ret = -TARGET_EFAULT;
12885             }
12886         }
12887         return ret;
12888     }
12889 #endif
12890 
12891 #ifdef TARGET_NR_timer_gettime64
12892     case TARGET_NR_timer_gettime64:
12893     {
12894         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12895         target_timer_t timerid = get_timer_id(arg1);
12896 
12897         if (timerid < 0) {
12898             ret = timerid;
12899         } else if (!arg2) {
12900             ret = -TARGET_EFAULT;
12901         } else {
12902             timer_t htimer = g_posix_timers[timerid];
12903             struct itimerspec hspec;
12904             ret = get_errno(timer_gettime(htimer, &hspec));
12905 
12906             if (host_to_target_itimerspec64(arg2, &hspec)) {
12907                 ret = -TARGET_EFAULT;
12908             }
12909         }
12910         return ret;
12911     }
12912 #endif
12913 
12914 #ifdef TARGET_NR_timer_getoverrun
12915     case TARGET_NR_timer_getoverrun:
12916     {
12917         /* args: timer_t timerid */
12918         target_timer_t timerid = get_timer_id(arg1);
12919 
12920         if (timerid < 0) {
12921             ret = timerid;
12922         } else {
12923             timer_t htimer = g_posix_timers[timerid];
12924             ret = get_errno(timer_getoverrun(htimer));
12925         }
12926         return ret;
12927     }
12928 #endif
12929 
12930 #ifdef TARGET_NR_timer_delete
12931     case TARGET_NR_timer_delete:
12932     {
12933         /* args: timer_t timerid */
12934         target_timer_t timerid = get_timer_id(arg1);
12935 
12936         if (timerid < 0) {
12937             ret = timerid;
12938         } else {
12939             timer_t htimer = g_posix_timers[timerid];
12940             ret = get_errno(timer_delete(htimer));
12941             g_posix_timers[timerid] = 0;
12942         }
12943         return ret;
12944     }
12945 #endif
12946 
12947 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12948     case TARGET_NR_timerfd_create:
12949         return get_errno(timerfd_create(arg1,
12950                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12951 #endif
12952 
12953 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12954     case TARGET_NR_timerfd_gettime:
12955         {
12956             struct itimerspec its_curr;
12957 
12958             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12959 
12960             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12961                 return -TARGET_EFAULT;
12962             }
12963         }
12964         return ret;
12965 #endif
12966 
12967 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12968     case TARGET_NR_timerfd_gettime64:
12969         {
12970             struct itimerspec its_curr;
12971 
12972             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12973 
12974             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12975                 return -TARGET_EFAULT;
12976             }
12977         }
12978         return ret;
12979 #endif
12980 
12981 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12982     case TARGET_NR_timerfd_settime:
12983         {
12984             struct itimerspec its_new, its_old, *p_new;
12985 
12986             if (arg3) {
12987                 if (target_to_host_itimerspec(&its_new, arg3)) {
12988                     return -TARGET_EFAULT;
12989                 }
12990                 p_new = &its_new;
12991             } else {
12992                 p_new = NULL;
12993             }
12994 
12995             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12996 
12997             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12998                 return -TARGET_EFAULT;
12999             }
13000         }
13001         return ret;
13002 #endif
13003 
13004 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13005     case TARGET_NR_timerfd_settime64:
13006         {
13007             struct itimerspec its_new, its_old, *p_new;
13008 
13009             if (arg3) {
13010                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13011                     return -TARGET_EFAULT;
13012                 }
13013                 p_new = &its_new;
13014             } else {
13015                 p_new = NULL;
13016             }
13017 
13018             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13019 
13020             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13021                 return -TARGET_EFAULT;
13022             }
13023         }
13024         return ret;
13025 #endif
13026 
13027 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13028     case TARGET_NR_ioprio_get:
13029         return get_errno(ioprio_get(arg1, arg2));
13030 #endif
13031 
13032 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13033     case TARGET_NR_ioprio_set:
13034         return get_errno(ioprio_set(arg1, arg2, arg3));
13035 #endif
13036 
13037 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13038     case TARGET_NR_setns:
13039         return get_errno(setns(arg1, arg2));
13040 #endif
13041 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13042     case TARGET_NR_unshare:
13043         return get_errno(unshare(arg1));
13044 #endif
13045 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13046     case TARGET_NR_kcmp:
13047         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13048 #endif
13049 #ifdef TARGET_NR_swapcontext
13050     case TARGET_NR_swapcontext:
13051         /* PowerPC specific.  */
13052         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13053 #endif
13054 #ifdef TARGET_NR_memfd_create
13055     case TARGET_NR_memfd_create:
13056         p = lock_user_string(arg1);
13057         if (!p) {
13058             return -TARGET_EFAULT;
13059         }
13060         ret = get_errno(memfd_create(p, arg2));
13061         fd_trans_unregister(ret);
13062         unlock_user(p, arg1, 0);
13063         return ret;
13064 #endif
13065 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13066     case TARGET_NR_membarrier:
13067         return get_errno(membarrier(arg1, arg2));
13068 #endif
13069 
13070 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13071     case TARGET_NR_copy_file_range:
13072         {
13073             loff_t inoff, outoff;
13074             loff_t *pinoff = NULL, *poutoff = NULL;
13075 
13076             if (arg2) {
13077                 if (get_user_u64(inoff, arg2)) {
13078                     return -TARGET_EFAULT;
13079                 }
13080                 pinoff = &inoff;
13081             }
13082             if (arg4) {
13083                 if (get_user_u64(outoff, arg4)) {
13084                     return -TARGET_EFAULT;
13085                 }
13086                 poutoff = &outoff;
13087             }
13088             /* Do not sign-extend the count parameter. */
13089             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13090                                                  (abi_ulong)arg5, arg6));
13091             if (!is_error(ret) && ret > 0) {
13092                 if (arg2) {
13093                     if (put_user_u64(inoff, arg2)) {
13094                         return -TARGET_EFAULT;
13095                     }
13096                 }
13097                 if (arg4) {
13098                     if (put_user_u64(outoff, arg4)) {
13099                         return -TARGET_EFAULT;
13100                     }
13101                 }
13102             }
13103         }
13104         return ret;
13105 #endif
13106 
13107 #if defined(TARGET_NR_pivot_root)
13108     case TARGET_NR_pivot_root:
13109         {
13110             void *p2;
13111             p = lock_user_string(arg1); /* new_root */
13112             p2 = lock_user_string(arg2); /* put_old */
13113             if (!p || !p2) {
13114                 ret = -TARGET_EFAULT;
13115             } else {
13116                 ret = get_errno(pivot_root(p, p2));
13117             }
13118             unlock_user(p2, arg2, 0);
13119             unlock_user(p, arg1, 0);
13120         }
13121         return ret;
13122 #endif
13123 
13124     default:
13125         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13126         return -TARGET_ENOSYS;
13127     }
13128     return ret;
13129 }
13130 
13131 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13132                     abi_long arg2, abi_long arg3, abi_long arg4,
13133                     abi_long arg5, abi_long arg6, abi_long arg7,
13134                     abi_long arg8)
13135 {
13136     CPUState *cpu = env_cpu(cpu_env);
13137     abi_long ret;
13138 
13139 #ifdef DEBUG_ERESTARTSYS
13140     /* Debug-only code for exercising the syscall-restart code paths
13141      * in the per-architecture cpu main loops: restart every syscall
13142      * the guest makes once before letting it through.
13143      */
13144     {
13145         static bool flag;
13146         flag = !flag;
13147         if (flag) {
13148             return -TARGET_ERESTARTSYS;
13149         }
13150     }
13151 #endif
13152 
13153     record_syscall_start(cpu, num, arg1,
13154                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13155 
13156     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13157         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13158     }
13159 
13160     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13161                       arg5, arg6, arg7, arg8);
13162 
13163     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13164         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13165                           arg3, arg4, arg5, arg6);
13166     }
13167 
13168     record_syscall_return(cpu, num, ret);
13169     return ret;
13170 }
13171