1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
79 
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
86 
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef CONFIG_BTRFS
116 #include <linux/btrfs.h>
117 #endif
118 #ifdef HAVE_DRM_H
119 #include <libdrm/drm.h>
120 #include <libdrm/i915_drm.h>
121 #endif
122 #include "linux_loop.h"
123 #include "uname.h"
124 
125 #include "qemu.h"
126 #include "qemu/guest-random.h"
127 #include "qemu/selfmap.h"
128 #include "user/syscall-trace.h"
129 #include "qapi/error.h"
130 #include "fd-trans.h"
131 #include "tcg/tcg.h"
132 
133 #ifndef CLONE_IO
134 #define CLONE_IO                0x80000000      /* Clone io context */
135 #endif
136 
137 /* We can't directly call the host clone syscall, because this will
138  * badly confuse libc (breaking mutexes, for example). So we must
139  * divide clone flags into:
140  *  * flag combinations that look like pthread_create()
141  *  * flag combinations that look like fork()
142  *  * flags we can implement within QEMU itself
143  *  * flags we can't support and will return an error for
144  */
145 /* For thread creation, all these flags must be present; for
146  * fork, none must be present.
147  */
148 #define CLONE_THREAD_FLAGS                              \
149     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
150      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
151 
152 /* These flags are ignored:
153  * CLONE_DETACHED is now ignored by the kernel;
154  * CLONE_IO is just an optimisation hint to the I/O scheduler
155  */
156 #define CLONE_IGNORED_FLAGS                     \
157     (CLONE_DETACHED | CLONE_IO)
158 
159 /* Flags for fork which we can implement within QEMU itself */
160 #define CLONE_OPTIONAL_FORK_FLAGS               \
161     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
162      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
163 
164 /* Flags for thread creation which we can implement within QEMU itself */
165 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
166     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
167      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
168 
169 #define CLONE_INVALID_FORK_FLAGS                                        \
170     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
171 
172 #define CLONE_INVALID_THREAD_FLAGS                                      \
173     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
174        CLONE_IGNORED_FLAGS))
175 
176 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
177  * have almost all been allocated. We cannot support any of
178  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
179  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
180  * The checks against the invalid thread masks above will catch these.
181  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
182  */
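
/*
 * Illustrative note (not part of the original code): a typical glibc
 * pthread_create() issues clone() with roughly
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * That value contains all of CLONE_THREAD_FLAGS and otherwise only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it passes the CLONE_INVALID_THREAD_FLAGS
 * check and is handled as thread creation; a plain fork() passes flags
 * equivalent to just SIGCHLD (in CSIGNAL), which passes the
 * CLONE_INVALID_FORK_FLAGS check instead.
 */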
183 
184 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
185  * once. This exercises the codepaths for restart.
186  */
187 //#define DEBUG_ERESTARTSYS
188 
189 //#include <linux/msdos_fs.h>
190 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
191 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
192 
193 #undef _syscall0
194 #undef _syscall1
195 #undef _syscall2
196 #undef _syscall3
197 #undef _syscall4
198 #undef _syscall5
199 #undef _syscall6
200 
201 #define _syscall0(type,name)		\
202 static type name (void)			\
203 {					\
204 	return syscall(__NR_##name);	\
205 }
206 
207 #define _syscall1(type,name,type1,arg1)		\
208 static type name (type1 arg1)			\
209 {						\
210 	return syscall(__NR_##name, arg1);	\
211 }
212 
213 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
214 static type name (type1 arg1,type2 arg2)		\
215 {							\
216 	return syscall(__NR_##name, arg1, arg2);	\
217 }
218 
219 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
220 static type name (type1 arg1,type2 arg2,type3 arg3)		\
221 {								\
222 	return syscall(__NR_##name, arg1, arg2, arg3);		\
223 }
224 
225 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
227 {										\
228 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
229 }
230 
231 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
232 		  type5,arg5)							\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
234 {										\
235 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
236 }
237 
238 
239 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
240 		  type5,arg5,type6,arg6)					\
241 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
242                   type6 arg6)							\
243 {										\
244 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
245 }
246 
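/*
 * Expansion sketch (for illustration): a declaration such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * expands to
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * which is why each wrapper name needs a __NR_sys_* alias below mapping it
 * back to the real host syscall number.
 */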
247 
248 #define __NR_sys_uname __NR_uname
249 #define __NR_sys_getcwd1 __NR_getcwd
250 #define __NR_sys_getdents __NR_getdents
251 #define __NR_sys_getdents64 __NR_getdents64
252 #define __NR_sys_getpriority __NR_getpriority
253 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
254 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
255 #define __NR_sys_syslog __NR_syslog
256 #if defined(__NR_futex)
257 # define __NR_sys_futex __NR_futex
258 #endif
259 #if defined(__NR_futex_time64)
260 # define __NR_sys_futex_time64 __NR_futex_time64
261 #endif
262 #define __NR_sys_inotify_init __NR_inotify_init
263 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
264 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
265 #define __NR_sys_statx __NR_statx
266 
267 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
268 #define __NR__llseek __NR_lseek
269 #endif
270 
271 /* Newer kernel ports have llseek() instead of _llseek() */
272 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
273 #define TARGET_NR__llseek TARGET_NR_llseek
274 #endif
275 
276 #define __NR_sys_gettid __NR_gettid
277 _syscall0(int, sys_gettid)
278 
279 /* For the 64-bit guest on 32-bit host case we must emulate
280  * getdents using getdents64, because otherwise the host
281  * might hand us back more dirent records than we can fit
282  * into the guest buffer after structure format conversion.
283  * Otherwise we emulate getdents with getdents if the host has it.
284  */
285 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
286 #define EMULATE_GETDENTS_WITH_GETDENTS
287 #endif
288 
289 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
290 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
291 #endif
292 #if (defined(TARGET_NR_getdents) && \
293       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
294     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
295 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
296 #endif
297 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
298 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
299           loff_t *, res, uint, wh);
300 #endif
301 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
302 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
303           siginfo_t *, uinfo)
304 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
305 #ifdef __NR_exit_group
306 _syscall1(int,exit_group,int,error_code)
307 #endif
308 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
309 _syscall1(int,set_tid_address,int *,tidptr)
310 #endif
311 #if defined(__NR_futex)
312 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
313           const struct timespec *,timeout,int *,uaddr2,int,val3)
314 #endif
315 #if defined(__NR_futex_time64)
316 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
317           const struct timespec *,timeout,int *,uaddr2,int,val3)
318 #endif
319 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
320 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
321           unsigned long *, user_mask_ptr);
322 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
323 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
324           unsigned long *, user_mask_ptr);
325 #define __NR_sys_getcpu __NR_getcpu
326 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
327 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
328           void *, arg);
329 _syscall2(int, capget, struct __user_cap_header_struct *, header,
330           struct __user_cap_data_struct *, data);
331 _syscall2(int, capset, struct __user_cap_header_struct *, header,
332           struct __user_cap_data_struct *, data);
333 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
334 _syscall2(int, ioprio_get, int, which, int, who)
335 #endif
336 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
337 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
338 #endif
339 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
340 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
341 #endif
342 
343 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
344 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
345           unsigned long, idx1, unsigned long, idx2)
346 #endif
347 
348 /*
349  * It is assumed that struct statx is architecture independent.
350  */
351 #if defined(TARGET_NR_statx) && defined(__NR_statx)
352 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
353           unsigned int, mask, struct target_statx *, statxbuf)
354 #endif
355 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
356 _syscall2(int, membarrier, int, cmd, int, flags)
357 #endif
358 
359 static bitmask_transtbl fcntl_flags_tbl[] = {
360   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
361   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
362   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
363   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
364   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
365   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
366   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
367   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
368   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
369   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
370   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
371   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
372   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
373 #if defined(O_DIRECT)
374   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
375 #endif
376 #if defined(O_NOATIME)
377   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
378 #endif
379 #if defined(O_CLOEXEC)
380   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
381 #endif
382 #if defined(O_PATH)
383   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
384 #endif
385 #if defined(O_TMPFILE)
386   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
387 #endif
388   /* Don't terminate the list prematurely on 64-bit host+guest.  */
389 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
390   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
391 #endif
392   { 0, 0, 0, 0 }
393 };
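
/*
 * Reading the table above (editor's sketch; see the bitmask_transtbl
 * definition and target_to_host_bitmask()/host_to_target_bitmask()): each
 * row is roughly { target_mask, target_bits, host_mask, host_bits }.  For
 * example { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC } means "if the
 * guest flags ANDed with TARGET_O_SYNC equal TARGET_O_DSYNC, set O_DSYNC in
 * the host flags", and the same row is used in reverse for the
 * host-to-target direction.
 */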
394 
395 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
396 
397 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
398 #if defined(__NR_utimensat)
399 #define __NR_sys_utimensat __NR_utimensat
400 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
401           const struct timespec *,tsp,int,flags)
402 #else
403 static int sys_utimensat(int dirfd, const char *pathname,
404                          const struct timespec times[2], int flags)
405 {
406     errno = ENOSYS;
407     return -1;
408 }
409 #endif
410 #endif /* TARGET_NR_utimensat */
411 
412 #ifdef TARGET_NR_renameat2
413 #if defined(__NR_renameat2)
414 #define __NR_sys_renameat2 __NR_renameat2
415 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
416           const char *, new, unsigned int, flags)
417 #else
418 static int sys_renameat2(int oldfd, const char *old,
419                          int newfd, const char *new, int flags)
420 {
421     if (flags == 0) {
422         return renameat(oldfd, old, newfd, new);
423     }
424     errno = ENOSYS;
425     return -1;
426 }
427 #endif
428 #endif /* TARGET_NR_renameat2 */
429 
430 #ifdef CONFIG_INOTIFY
431 #include <sys/inotify.h>
432 
433 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
434 static int sys_inotify_init(void)
435 {
436   return (inotify_init());
437 }
438 #endif
439 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
440 static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
441 {
442   return (inotify_add_watch(fd, pathname, mask));
443 }
444 #endif
445 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
446 static int sys_inotify_rm_watch(int fd, int32_t wd)
447 {
448   return (inotify_rm_watch(fd, wd));
449 }
450 #endif
451 #ifdef CONFIG_INOTIFY1
452 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
453 static int sys_inotify_init1(int flags)
454 {
455   return (inotify_init1(flags));
456 }
457 #endif
458 #endif
459 #else
460 /* Userspace can usually survive runtime without inotify */
461 #undef TARGET_NR_inotify_init
462 #undef TARGET_NR_inotify_init1
463 #undef TARGET_NR_inotify_add_watch
464 #undef TARGET_NR_inotify_rm_watch
465 #endif /* CONFIG_INOTIFY  */
466 
467 #if defined(TARGET_NR_prlimit64)
468 #ifndef __NR_prlimit64
469 # define __NR_prlimit64 -1
470 #endif
471 #define __NR_sys_prlimit64 __NR_prlimit64
472 /* The glibc rlimit structure may not be that used by the underlying syscall */
473 struct host_rlimit64 {
474     uint64_t rlim_cur;
475     uint64_t rlim_max;
476 };
477 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
478           const struct host_rlimit64 *, new_limit,
479           struct host_rlimit64 *, old_limit)
480 #endif
481 
482 
483 #if defined(TARGET_NR_timer_create)
484 /* Maximum of 32 active POSIX timers allowed at any one time. */
485 static timer_t g_posix_timers[32] = { 0, };
486 
487 static inline int next_free_host_timer(void)
488 {
489     int k;
490     /* FIXME: Does finding the next free slot require a lock? */
491     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
492         if (g_posix_timers[k] == 0) {
493             g_posix_timers[k] = (timer_t) 1;
494             return k;
495         }
496     }
497     return -1;
498 }
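
/* A slot value of 0 means "free".  next_free_host_timer() reserves a slot by
 * storing the dummy value (timer_t)1; the slot is intended to be overwritten
 * with the real host timer handle by the timer_create handling.  (Descriptive
 * note only; as the FIXME says, this scan is not protected by a lock.)
 */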
499 #endif
500 
501 #define ERRNO_TABLE_SIZE 1200
502 
503 /* target_to_host_errno_table[] is initialized from
504  * host_to_target_errno_table[] in syscall_init(). */
505 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
506 };
507 
508 /*
509  * This list is the union of errno values overridden in asm-<arch>/errno.h
510  * minus the errnos that are not actually generic to all archs.
511  */
512 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
513     [EAGAIN]		= TARGET_EAGAIN,
514     [EIDRM]		= TARGET_EIDRM,
515     [ECHRNG]		= TARGET_ECHRNG,
516     [EL2NSYNC]		= TARGET_EL2NSYNC,
517     [EL3HLT]		= TARGET_EL3HLT,
518     [EL3RST]		= TARGET_EL3RST,
519     [ELNRNG]		= TARGET_ELNRNG,
520     [EUNATCH]		= TARGET_EUNATCH,
521     [ENOCSI]		= TARGET_ENOCSI,
522     [EL2HLT]		= TARGET_EL2HLT,
523     [EDEADLK]		= TARGET_EDEADLK,
524     [ENOLCK]		= TARGET_ENOLCK,
525     [EBADE]		= TARGET_EBADE,
526     [EBADR]		= TARGET_EBADR,
527     [EXFULL]		= TARGET_EXFULL,
528     [ENOANO]		= TARGET_ENOANO,
529     [EBADRQC]		= TARGET_EBADRQC,
530     [EBADSLT]		= TARGET_EBADSLT,
531     [EBFONT]		= TARGET_EBFONT,
532     [ENOSTR]		= TARGET_ENOSTR,
533     [ENODATA]		= TARGET_ENODATA,
534     [ETIME]		= TARGET_ETIME,
535     [ENOSR]		= TARGET_ENOSR,
536     [ENONET]		= TARGET_ENONET,
537     [ENOPKG]		= TARGET_ENOPKG,
538     [EREMOTE]		= TARGET_EREMOTE,
539     [ENOLINK]		= TARGET_ENOLINK,
540     [EADV]		= TARGET_EADV,
541     [ESRMNT]		= TARGET_ESRMNT,
542     [ECOMM]		= TARGET_ECOMM,
543     [EPROTO]		= TARGET_EPROTO,
544     [EDOTDOT]		= TARGET_EDOTDOT,
545     [EMULTIHOP]		= TARGET_EMULTIHOP,
546     [EBADMSG]		= TARGET_EBADMSG,
547     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
548     [EOVERFLOW]		= TARGET_EOVERFLOW,
549     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
550     [EBADFD]		= TARGET_EBADFD,
551     [EREMCHG]		= TARGET_EREMCHG,
552     [ELIBACC]		= TARGET_ELIBACC,
553     [ELIBBAD]		= TARGET_ELIBBAD,
554     [ELIBSCN]		= TARGET_ELIBSCN,
555     [ELIBMAX]		= TARGET_ELIBMAX,
556     [ELIBEXEC]		= TARGET_ELIBEXEC,
557     [EILSEQ]		= TARGET_EILSEQ,
558     [ENOSYS]		= TARGET_ENOSYS,
559     [ELOOP]		= TARGET_ELOOP,
560     [ERESTART]		= TARGET_ERESTART,
561     [ESTRPIPE]		= TARGET_ESTRPIPE,
562     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
563     [EUSERS]		= TARGET_EUSERS,
564     [ENOTSOCK]		= TARGET_ENOTSOCK,
565     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
566     [EMSGSIZE]		= TARGET_EMSGSIZE,
567     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
568     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
569     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
570     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
571     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
572     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
573     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
574     [EADDRINUSE]	= TARGET_EADDRINUSE,
575     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
576     [ENETDOWN]		= TARGET_ENETDOWN,
577     [ENETUNREACH]	= TARGET_ENETUNREACH,
578     [ENETRESET]		= TARGET_ENETRESET,
579     [ECONNABORTED]	= TARGET_ECONNABORTED,
580     [ECONNRESET]	= TARGET_ECONNRESET,
581     [ENOBUFS]		= TARGET_ENOBUFS,
582     [EISCONN]		= TARGET_EISCONN,
583     [ENOTCONN]		= TARGET_ENOTCONN,
584     [EUCLEAN]		= TARGET_EUCLEAN,
585     [ENOTNAM]		= TARGET_ENOTNAM,
586     [ENAVAIL]		= TARGET_ENAVAIL,
587     [EISNAM]		= TARGET_EISNAM,
588     [EREMOTEIO]		= TARGET_EREMOTEIO,
589     [EDQUOT]            = TARGET_EDQUOT,
590     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
591     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
592     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
593     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
594     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
595     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
596     [EALREADY]		= TARGET_EALREADY,
597     [EINPROGRESS]	= TARGET_EINPROGRESS,
598     [ESTALE]		= TARGET_ESTALE,
599     [ECANCELED]		= TARGET_ECANCELED,
600     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
601     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
602 #ifdef ENOKEY
603     [ENOKEY]		= TARGET_ENOKEY,
604 #endif
605 #ifdef EKEYEXPIRED
606     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
607 #endif
608 #ifdef EKEYREVOKED
609     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
610 #endif
611 #ifdef EKEYREJECTED
612     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
613 #endif
614 #ifdef EOWNERDEAD
615     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
616 #endif
617 #ifdef ENOTRECOVERABLE
618     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
619 #endif
620 #ifdef ENOMSG
621     [ENOMSG]            = TARGET_ENOMSG,
622 #endif
623 #ifdef ERFKILL
624     [ERFKILL]           = TARGET_ERFKILL,
625 #endif
626 #ifdef EHWPOISON
627     [EHWPOISON]         = TARGET_EHWPOISON,
628 #endif
629 };
630 
631 static inline int host_to_target_errno(int err)
632 {
633     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
634         host_to_target_errno_table[err]) {
635         return host_to_target_errno_table[err];
636     }
637     return err;
638 }
639 
640 static inline int target_to_host_errno(int err)
641 {
642     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
643         target_to_host_errno_table[err]) {
644         return target_to_host_errno_table[err];
645     }
646     return err;
647 }
648 
649 static inline abi_long get_errno(abi_long ret)
650 {
651     if (ret == -1)
652         return -host_to_target_errno(errno);
653     else
654         return ret;
655 }
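
/*
 * Typical use (illustration only): host wrappers return -1 and set errno on
 * failure, so callers do e.g.
 *
 *     ret = get_errno(safe_openat(dirfd, p, flags, mode));
 *     if (is_error(ret)) {
 *         ... propagate the negative target errno to the guest ...
 *     }
 */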
656 
657 const char *target_strerror(int err)
658 {
659     if (err == TARGET_ERESTARTSYS) {
660         return "To be restarted";
661     }
662     if (err == TARGET_QEMU_ESIGRETURN) {
663         return "Successful exit from sigreturn";
664     }
665 
666     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
667         return NULL;
668     }
669     return strerror(target_to_host_errno(err));
670 }
671 
672 #define safe_syscall0(type, name) \
673 static type safe_##name(void) \
674 { \
675     return safe_syscall(__NR_##name); \
676 }
677 
678 #define safe_syscall1(type, name, type1, arg1) \
679 static type safe_##name(type1 arg1) \
680 { \
681     return safe_syscall(__NR_##name, arg1); \
682 }
683 
684 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
685 static type safe_##name(type1 arg1, type2 arg2) \
686 { \
687     return safe_syscall(__NR_##name, arg1, arg2); \
688 }
689 
690 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
691 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
692 { \
693     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
694 }
695 
696 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
697     type4, arg4) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
699 { \
700     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
701 }
702 
703 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
704     type4, arg4, type5, arg5) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
706     type5 arg5) \
707 { \
708     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
709 }
710 
711 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
712     type4, arg4, type5, arg5, type6, arg6) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
714     type5 arg5, type6 arg6) \
715 { \
716     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
717 }
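
/*
 * Note on the safe_* wrappers (behaviour provided by safe_syscall(), defined
 * elsewhere): unlike a plain syscall(), a safe_ wrapper cooperates with the
 * guest signal handling so that a signal arriving just before or during a
 * blocking call is reported to the caller as -TARGET_ERESTARTSYS (via
 * get_errno()) rather than being lost, letting the main loop restart the
 * syscall after the signal has been delivered.
 */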
718 
719 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
720 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
721 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
722               int, flags, mode_t, mode)
723 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
724 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
725               struct rusage *, rusage)
726 #endif
727 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
728               int, options, struct rusage *, rusage)
729 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
730 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
731     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
732 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
733               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
734 #endif
735 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
736 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
737               struct timespec *, tsp, const sigset_t *, sigmask,
738               size_t, sigsetsize)
739 #endif
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741               int, maxevents, int, timeout, const sigset_t *, sigmask,
742               size_t, sigsetsize)
743 #if defined(__NR_futex)
744 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
745               const struct timespec *,timeout,int *,uaddr2,int,val3)
746 #endif
747 #if defined(__NR_futex_time64)
748 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
749               const struct timespec *,timeout,int *,uaddr2,int,val3)
750 #endif
751 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
752 safe_syscall2(int, kill, pid_t, pid, int, sig)
753 safe_syscall2(int, tkill, int, tid, int, sig)
754 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
755 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
756 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
757 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
758               unsigned long, pos_l, unsigned long, pos_h)
759 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
760               unsigned long, pos_l, unsigned long, pos_h)
761 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
762               socklen_t, addrlen)
763 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
764               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
765 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
766               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
767 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
768 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
769 safe_syscall2(int, flock, int, fd, int, operation)
770 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
771 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
772               const struct timespec *, uts, size_t, sigsetsize)
773 #endif
774 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
775               int, flags)
776 #if defined(TARGET_NR_nanosleep)
777 safe_syscall2(int, nanosleep, const struct timespec *, req,
778               struct timespec *, rem)
779 #endif
780 #if defined(TARGET_NR_clock_nanosleep) || \
781     defined(TARGET_NR_clock_nanosleep_time64)
782 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
783               const struct timespec *, req, struct timespec *, rem)
784 #endif
785 #ifdef __NR_ipc
786 #ifdef __s390x__
787 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
788               void *, ptr)
789 #else
790 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
791               void *, ptr, long, fifth)
792 #endif
793 #endif
794 #ifdef __NR_msgsnd
795 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
796               int, flags)
797 #endif
798 #ifdef __NR_msgrcv
799 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
800               long, msgtype, int, flags)
801 #endif
802 #ifdef __NR_semtimedop
803 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
804               unsigned, nsops, const struct timespec *, timeout)
805 #endif
806 #if defined(TARGET_NR_mq_timedsend) || \
807     defined(TARGET_NR_mq_timedsend_time64)
808 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
809               size_t, len, unsigned, prio, const struct timespec *, timeout)
810 #endif
811 #if defined(TARGET_NR_mq_timedreceive) || \
812     defined(TARGET_NR_mq_timedreceive_time64)
813 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
814               size_t, len, unsigned *, prio, const struct timespec *, timeout)
815 #endif
816 /* We do ioctl like this rather than via safe_syscall3 to preserve the
817  * "third argument might be integer or pointer or not present" behaviour of
818  * the libc function.
819  */
820 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
821 /* Similarly for fcntl. Note that callers must always:
822  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
823  *  use the flock64 struct rather than unsuffixed flock
824  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
825  */
826 #ifdef __NR_fcntl64
827 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
828 #else
829 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
830 #endif
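
/*
 * Usage sketch for the rule above (illustrative only):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * i.e. always the 64-bit command constants and struct flock64, so that file
 * locking uses 64-bit offsets on 32-bit and 64-bit hosts alike.
 */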
831 
832 static inline int host_to_target_sock_type(int host_type)
833 {
834     int target_type;
835 
836     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
837     case SOCK_DGRAM:
838         target_type = TARGET_SOCK_DGRAM;
839         break;
840     case SOCK_STREAM:
841         target_type = TARGET_SOCK_STREAM;
842         break;
843     default:
844         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
845         break;
846     }
847 
848 #if defined(SOCK_CLOEXEC)
849     if (host_type & SOCK_CLOEXEC) {
850         target_type |= TARGET_SOCK_CLOEXEC;
851     }
852 #endif
853 
854 #if defined(SOCK_NONBLOCK)
855     if (host_type & SOCK_NONBLOCK) {
856         target_type |= TARGET_SOCK_NONBLOCK;
857     }
858 #endif
859 
860     return target_type;
861 }
862 
863 static abi_ulong target_brk;
864 static abi_ulong target_original_brk;
865 static abi_ulong brk_page;
866 
867 void target_set_brk(abi_ulong new_brk)
868 {
869     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
870     brk_page = HOST_PAGE_ALIGN(target_brk);
871 }
872 
873 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
874 #define DEBUGF_BRK(message, args...)
875 
876 /* do_brk() must return target values and target errnos. */
877 abi_long do_brk(abi_ulong new_brk)
878 {
879     abi_long mapped_addr;
880     abi_ulong new_alloc_size;
881 
882     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
883 
884     if (!new_brk) {
885         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
886         return target_brk;
887     }
888     if (new_brk < target_original_brk) {
889         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
890                    target_brk);
891         return target_brk;
892     }
893 
894     /* If the new brk is less than the highest page reserved to the
895      * target heap allocation, set it and we're almost done...  */
896     if (new_brk <= brk_page) {
897         /* Heap contents are initialized to zero, as for anonymous
898          * mapped pages.  */
899         if (new_brk > target_brk) {
900             memset(g2h(target_brk), 0, new_brk - target_brk);
901         }
902         target_brk = new_brk;
903         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
904         return target_brk;
905     }
906 
907     /* We need to allocate more memory after the brk... Note that
908      * we don't use MAP_FIXED because that will map over the top of
909      * any existing mapping (like the one with the host libc or qemu
910      * itself); instead we treat "mapped but at wrong address" as
911      * a failure and unmap again.
912      */
913     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
914     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
915                                         PROT_READ|PROT_WRITE,
916                                         MAP_ANON|MAP_PRIVATE, 0, 0));
917 
918     if (mapped_addr == brk_page) {
919         /* Heap contents are initialized to zero, as for anonymous
920          * mapped pages.  Technically the new pages are already
921          * initialized to zero since they *are* anonymous mapped
922          * pages, however we have to take care with the contents that
923          * come from the remaining part of the previous page: it may
924          * contains garbage data due to a previous heap usage (grown
925          * then shrunken).  */
926         memset(g2h(target_brk), 0, brk_page - target_brk);
927 
928         target_brk = new_brk;
929         brk_page = HOST_PAGE_ALIGN(target_brk);
930         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
931             target_brk);
932         return target_brk;
933     } else if (mapped_addr != -1) {
934         /* Mapped but at wrong address, meaning there wasn't actually
935          * enough space for this brk.
936          */
937         target_munmap(mapped_addr, new_alloc_size);
938         mapped_addr = -1;
939         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
940     }
941     else {
942         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
943     }
944 
945 #if defined(TARGET_ALPHA)
946     /* We (partially) emulate OSF/1 on Alpha, which requires we
947        return a proper errno, not an unchanged brk value.  */
948     return -TARGET_ENOMEM;
949 #endif
950     /* For everything else, return the previous break. */
951     return target_brk;
952 }
953 
954 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
955     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
956 static inline abi_long copy_from_user_fdset(fd_set *fds,
957                                             abi_ulong target_fds_addr,
958                                             int n)
959 {
960     int i, nw, j, k;
961     abi_ulong b, *target_fds;
962 
963     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
964     if (!(target_fds = lock_user(VERIFY_READ,
965                                  target_fds_addr,
966                                  sizeof(abi_ulong) * nw,
967                                  1)))
968         return -TARGET_EFAULT;
969 
970     FD_ZERO(fds);
971     k = 0;
972     for (i = 0; i < nw; i++) {
973         /* grab the abi_ulong */
974         __get_user(b, &target_fds[i]);
975         for (j = 0; j < TARGET_ABI_BITS; j++) {
976             /* check the bit inside the abi_ulong */
977             if ((b >> j) & 1)
978                 FD_SET(k, fds);
979             k++;
980         }
981     }
982 
983     unlock_user(target_fds, target_fds_addr, 0);
984 
985     return 0;
986 }
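
/*
 * Worked example (numbers for illustration only): with n == 70 and a 32-bit
 * TARGET_ABI_BITS, nw == DIV_ROUND_UP(70, 32) == 3 abi_ulong words are
 * locked and read, and bit j of guest word i corresponds to host file
 * descriptor k == i * 32 + j.
 */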
987 
988 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
989                                                  abi_ulong target_fds_addr,
990                                                  int n)
991 {
992     if (target_fds_addr) {
993         if (copy_from_user_fdset(fds, target_fds_addr, n))
994             return -TARGET_EFAULT;
995         *fds_ptr = fds;
996     } else {
997         *fds_ptr = NULL;
998     }
999     return 0;
1000 }
1001 
1002 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1003                                           const fd_set *fds,
1004                                           int n)
1005 {
1006     int i, nw, j, k;
1007     abi_long v;
1008     abi_ulong *target_fds;
1009 
1010     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1011     if (!(target_fds = lock_user(VERIFY_WRITE,
1012                                  target_fds_addr,
1013                                  sizeof(abi_ulong) * nw,
1014                                  0)))
1015         return -TARGET_EFAULT;
1016 
1017     k = 0;
1018     for (i = 0; i < nw; i++) {
1019         v = 0;
1020         for (j = 0; j < TARGET_ABI_BITS; j++) {
1021             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1022             k++;
1023         }
1024         __put_user(v, &target_fds[i]);
1025     }
1026 
1027     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1028 
1029     return 0;
1030 }
1031 #endif
1032 
1033 #if defined(__alpha__)
1034 #define HOST_HZ 1024
1035 #else
1036 #define HOST_HZ 100
1037 #endif
1038 
1039 static inline abi_long host_to_target_clock_t(long ticks)
1040 {
1041 #if HOST_HZ == TARGET_HZ
1042     return ticks;
1043 #else
1044     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1045 #endif
1046 }
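
/*
 * Example (illustrative): on an Alpha host (HOST_HZ == 1024) running a
 * 100 Hz target, 2048 host ticks are reported to the guest as
 * (2048 * 100) / 1024 == 200 target clock ticks.
 */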
1047 
1048 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1049                                              const struct rusage *rusage)
1050 {
1051     struct target_rusage *target_rusage;
1052 
1053     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1054         return -TARGET_EFAULT;
1055     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1056     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1057     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1058     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1059     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1060     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1061     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1062     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1063     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1064     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1065     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1066     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1067     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1068     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1069     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1070     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1071     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1072     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1073     unlock_user_struct(target_rusage, target_addr, 1);
1074 
1075     return 0;
1076 }
1077 
1078 #ifdef TARGET_NR_setrlimit
1079 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1080 {
1081     abi_ulong target_rlim_swap;
1082     rlim_t result;
1083 
1084     target_rlim_swap = tswapal(target_rlim);
1085     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1086         return RLIM_INFINITY;
1087 
1088     result = target_rlim_swap;
1089     if (target_rlim_swap != (rlim_t)result)
1090         return RLIM_INFINITY;
1091 
1092     return result;
1093 }
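
/* Note: guest limits that cannot be represented in the host rlim_t, like
 * TARGET_RLIM_INFINITY itself, are passed to the host as RLIM_INFINITY. */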
1094 #endif
1095 
1096 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1097 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1098 {
1099     abi_ulong target_rlim_swap;
1100     abi_ulong result;
1101 
1102     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1103         target_rlim_swap = TARGET_RLIM_INFINITY;
1104     else
1105         target_rlim_swap = rlim;
1106     result = tswapal(target_rlim_swap);
1107 
1108     return result;
1109 }
1110 #endif
1111 
1112 static inline int target_to_host_resource(int code)
1113 {
1114     switch (code) {
1115     case TARGET_RLIMIT_AS:
1116         return RLIMIT_AS;
1117     case TARGET_RLIMIT_CORE:
1118         return RLIMIT_CORE;
1119     case TARGET_RLIMIT_CPU:
1120         return RLIMIT_CPU;
1121     case TARGET_RLIMIT_DATA:
1122         return RLIMIT_DATA;
1123     case TARGET_RLIMIT_FSIZE:
1124         return RLIMIT_FSIZE;
1125     case TARGET_RLIMIT_LOCKS:
1126         return RLIMIT_LOCKS;
1127     case TARGET_RLIMIT_MEMLOCK:
1128         return RLIMIT_MEMLOCK;
1129     case TARGET_RLIMIT_MSGQUEUE:
1130         return RLIMIT_MSGQUEUE;
1131     case TARGET_RLIMIT_NICE:
1132         return RLIMIT_NICE;
1133     case TARGET_RLIMIT_NOFILE:
1134         return RLIMIT_NOFILE;
1135     case TARGET_RLIMIT_NPROC:
1136         return RLIMIT_NPROC;
1137     case TARGET_RLIMIT_RSS:
1138         return RLIMIT_RSS;
1139     case TARGET_RLIMIT_RTPRIO:
1140         return RLIMIT_RTPRIO;
1141     case TARGET_RLIMIT_SIGPENDING:
1142         return RLIMIT_SIGPENDING;
1143     case TARGET_RLIMIT_STACK:
1144         return RLIMIT_STACK;
1145     default:
1146         return code;
1147     }
1148 }
1149 
1150 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1151                                               abi_ulong target_tv_addr)
1152 {
1153     struct target_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1156         return -TARGET_EFAULT;
1157     }
1158 
1159     __get_user(tv->tv_sec, &target_tv->tv_sec);
1160     __get_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 0);
1163 
1164     return 0;
1165 }
1166 
1167 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1168                                             const struct timeval *tv)
1169 {
1170     struct target_timeval *target_tv;
1171 
1172     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1173         return -TARGET_EFAULT;
1174     }
1175 
1176     __put_user(tv->tv_sec, &target_tv->tv_sec);
1177     __put_user(tv->tv_usec, &target_tv->tv_usec);
1178 
1179     unlock_user_struct(target_tv, target_tv_addr, 1);
1180 
1181     return 0;
1182 }
1183 
1184 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1185 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1186                                                 abi_ulong target_tv_addr)
1187 {
1188     struct target__kernel_sock_timeval *target_tv;
1189 
1190     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1191         return -TARGET_EFAULT;
1192     }
1193 
1194     __get_user(tv->tv_sec, &target_tv->tv_sec);
1195     __get_user(tv->tv_usec, &target_tv->tv_usec);
1196 
1197     unlock_user_struct(target_tv, target_tv_addr, 0);
1198 
1199     return 0;
1200 }
1201 #endif
1202 
1203 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1204                                               const struct timeval *tv)
1205 {
1206     struct target__kernel_sock_timeval *target_tv;
1207 
1208     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1209         return -TARGET_EFAULT;
1210     }
1211 
1212     __put_user(tv->tv_sec, &target_tv->tv_sec);
1213     __put_user(tv->tv_usec, &target_tv->tv_usec);
1214 
1215     unlock_user_struct(target_tv, target_tv_addr, 1);
1216 
1217     return 0;
1218 }
1219 
1220 #if defined(TARGET_NR_futex) || \
1221     defined(TARGET_NR_rt_sigtimedwait) || \
1222     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1223     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1224     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1225     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1226     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1227     defined(TARGET_NR_timer_settime) || \
1228     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1229 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1230                                                abi_ulong target_addr)
1231 {
1232     struct target_timespec *target_ts;
1233 
1234     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1235         return -TARGET_EFAULT;
1236     }
1237     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1238     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1239     unlock_user_struct(target_ts, target_addr, 0);
1240     return 0;
1241 }
1242 #endif
1243 
1244 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1245     defined(TARGET_NR_timer_settime64) || \
1246     defined(TARGET_NR_mq_timedsend_time64) || \
1247     defined(TARGET_NR_mq_timedreceive_time64) || \
1248     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1249     defined(TARGET_NR_clock_nanosleep_time64) || \
1250     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1251     defined(TARGET_NR_utimensat) || \
1252     defined(TARGET_NR_utimensat_time64) || \
1253     defined(TARGET_NR_semtimedop_time64) || \
1254     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1255 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1256                                                  abi_ulong target_addr)
1257 {
1258     struct target__kernel_timespec *target_ts;
1259 
1260     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1261         return -TARGET_EFAULT;
1262     }
1263     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1264     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1265     /* in 32bit mode, this drops the padding */
1266     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1267     unlock_user_struct(target_ts, target_addr, 0);
1268     return 0;
1269 }
1270 #endif
1271 
1272 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1273                                                struct timespec *host_ts)
1274 {
1275     struct target_timespec *target_ts;
1276 
1277     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1278         return -TARGET_EFAULT;
1279     }
1280     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1281     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1282     unlock_user_struct(target_ts, target_addr, 1);
1283     return 0;
1284 }
1285 
1286 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1287                                                  struct timespec *host_ts)
1288 {
1289     struct target__kernel_timespec *target_ts;
1290 
1291     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1292         return -TARGET_EFAULT;
1293     }
1294     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1295     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1296     unlock_user_struct(target_ts, target_addr, 1);
1297     return 0;
1298 }
1299 
1300 #if defined(TARGET_NR_gettimeofday)
1301 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1302                                              struct timezone *tz)
1303 {
1304     struct target_timezone *target_tz;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1307         return -TARGET_EFAULT;
1308     }
1309 
1310     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1311     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1312 
1313     unlock_user_struct(target_tz, target_tz_addr, 1);
1314 
1315     return 0;
1316 }
1317 #endif
1318 
1319 #if defined(TARGET_NR_settimeofday)
1320 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1321                                                abi_ulong target_tz_addr)
1322 {
1323     struct target_timezone *target_tz;
1324 
1325     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1326         return -TARGET_EFAULT;
1327     }
1328 
1329     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1330     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1331 
1332     unlock_user_struct(target_tz, target_tz_addr, 0);
1333 
1334     return 0;
1335 }
1336 #endif
1337 
1338 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1339 #include <mqueue.h>
1340 
1341 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1342                                               abi_ulong target_mq_attr_addr)
1343 {
1344     struct target_mq_attr *target_mq_attr;
1345 
1346     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1347                           target_mq_attr_addr, 1))
1348         return -TARGET_EFAULT;
1349 
1350     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1351     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1352     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1353     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1354 
1355     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1356 
1357     return 0;
1358 }
1359 
1360 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1361                                             const struct mq_attr *attr)
1362 {
1363     struct target_mq_attr *target_mq_attr;
1364 
1365     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1366                           target_mq_attr_addr, 0))
1367         return -TARGET_EFAULT;
1368 
1369     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1370     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1371     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1372     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1373 
1374     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1375 
1376     return 0;
1377 }
1378 #endif
1379 
1380 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1381 /* do_select() must return target values and target errnos. */
1382 static abi_long do_select(int n,
1383                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1384                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1385 {
1386     fd_set rfds, wfds, efds;
1387     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1388     struct timeval tv;
1389     struct timespec ts, *ts_ptr;
1390     abi_long ret;
1391 
1392     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1393     if (ret) {
1394         return ret;
1395     }
1396     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1397     if (ret) {
1398         return ret;
1399     }
1400     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1401     if (ret) {
1402         return ret;
1403     }
1404 
1405     if (target_tv_addr) {
1406         if (copy_from_user_timeval(&tv, target_tv_addr))
1407             return -TARGET_EFAULT;
1408         ts.tv_sec = tv.tv_sec;
1409         ts.tv_nsec = tv.tv_usec * 1000;
1410         ts_ptr = &ts;
1411     } else {
1412         ts_ptr = NULL;
1413     }
1414 
1415     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1416                                   ts_ptr, NULL));
1417 
1418     if (!is_error(ret)) {
1419         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1420             return -TARGET_EFAULT;
1421         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1422             return -TARGET_EFAULT;
1423         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1424             return -TARGET_EFAULT;
1425 
1426         if (target_tv_addr) {
1427             tv.tv_sec = ts.tv_sec;
1428             tv.tv_usec = ts.tv_nsec / 1000;
1429             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1430                 return -TARGET_EFAULT;
1431             }
1432         }
1433     }
1434 
1435     return ret;
1436 }
1437 
1438 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1439 static abi_long do_old_select(abi_ulong arg1)
1440 {
1441     struct target_sel_arg_struct *sel;
1442     abi_ulong inp, outp, exp, tvp;
1443     long nsel;
1444 
1445     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1446         return -TARGET_EFAULT;
1447     }
1448 
1449     nsel = tswapal(sel->n);
1450     inp = tswapal(sel->inp);
1451     outp = tswapal(sel->outp);
1452     exp = tswapal(sel->exp);
1453     tvp = tswapal(sel->tvp);
1454 
1455     unlock_user_struct(sel, arg1, 0);
1456 
1457     return do_select(nsel, inp, outp, exp, tvp);
1458 }
1459 #endif
1460 #endif
1461 
1462 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1463 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1464                             abi_long arg4, abi_long arg5, abi_long arg6,
1465                             bool time64)
1466 {
1467     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1468     fd_set rfds, wfds, efds;
1469     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1470     struct timespec ts, *ts_ptr;
1471     abi_long ret;
1472 
1473     /*
1474      * The 6th arg is actually two args smashed together,
1475      * so we cannot use the C library.
1476      */
1477     sigset_t set;
1478     struct {
1479         sigset_t *set;
1480         size_t size;
1481     } sig, *sig_ptr;
1482 
1483     abi_ulong arg_sigset, arg_sigsize, *arg7;
1484     target_sigset_t *target_sigset;
1485 
1486     n = arg1;
1487     rfd_addr = arg2;
1488     wfd_addr = arg3;
1489     efd_addr = arg4;
1490     ts_addr = arg5;
1491 
1492     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1493     if (ret) {
1494         return ret;
1495     }
1496     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1497     if (ret) {
1498         return ret;
1499     }
1500     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1501     if (ret) {
1502         return ret;
1503     }
1504 
1505     /*
1506      * This takes a timespec, and not a timeval, so we cannot
1507      * use the do_select() helper ...
1508      */
1509     if (ts_addr) {
1510         if (time64) {
1511             if (target_to_host_timespec64(&ts, ts_addr)) {
1512                 return -TARGET_EFAULT;
1513             }
1514         } else {
1515             if (target_to_host_timespec(&ts, ts_addr)) {
1516                 return -TARGET_EFAULT;
1517             }
1518         }
1519         ts_ptr = &ts;
1520     } else {
1521         ts_ptr = NULL;
1522     }
1523 
1524     /* Extract the two packed args for the sigset */
1525     if (arg6) {
1526         sig_ptr = &sig;
1527         sig.size = SIGSET_T_SIZE;
1528 
1529         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1530         if (!arg7) {
1531             return -TARGET_EFAULT;
1532         }
1533         arg_sigset = tswapal(arg7[0]);
1534         arg_sigsize = tswapal(arg7[1]);
1535         unlock_user(arg7, arg6, 0);
1536 
1537         if (arg_sigset) {
1538             sig.set = &set;
1539             if (arg_sigsize != sizeof(*target_sigset)) {
1540                 /* Like the kernel, we enforce correct size sigsets */
1541                 return -TARGET_EINVAL;
1542             }
1543             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1544                                       sizeof(*target_sigset), 1);
1545             if (!target_sigset) {
1546                 return -TARGET_EFAULT;
1547             }
1548             target_to_host_sigset(&set, target_sigset);
1549             unlock_user(target_sigset, arg_sigset, 0);
1550         } else {
1551             sig.set = NULL;
1552         }
1553     } else {
1554         sig_ptr = NULL;
1555     }
1556 
1557     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1558                                   ts_ptr, sig_ptr));
1559 
1560     if (!is_error(ret)) {
1561         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1562             return -TARGET_EFAULT;
1563         }
1564         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1565             return -TARGET_EFAULT;
1566         }
1567         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1568             return -TARGET_EFAULT;
1569         }
1570         if (time64) {
1571             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1572                 return -TARGET_EFAULT;
1573             }
1574         } else {
1575             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1576                 return -TARGET_EFAULT;
1577             }
1578         }
1579     }
1580     return ret;
1581 }
1582 #endif
1583 
1584 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1585     defined(TARGET_NR_ppoll_time64)
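/*
 * Shared implementation for poll, ppoll and ppoll_time64: convert the guest
 * pollfd array and the optional timeout and sigmask, call safe_ppoll(), then
 * write the revents (and for ppoll the remaining timeout) back to the guest.
 */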
1586 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1587                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1588 {
1589     struct target_pollfd *target_pfd;
1590     unsigned int nfds = arg2;
1591     struct pollfd *pfd;
1592     unsigned int i;
1593     abi_long ret;
1594 
1595     pfd = NULL;
1596     target_pfd = NULL;
1597     if (nfds) {
1598         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1599             return -TARGET_EINVAL;
1600         }
1601         target_pfd = lock_user(VERIFY_WRITE, arg1,
1602                                sizeof(struct target_pollfd) * nfds, 1);
1603         if (!target_pfd) {
1604             return -TARGET_EFAULT;
1605         }
1606 
1607         pfd = alloca(sizeof(struct pollfd) * nfds);
1608         for (i = 0; i < nfds; i++) {
1609             pfd[i].fd = tswap32(target_pfd[i].fd);
1610             pfd[i].events = tswap16(target_pfd[i].events);
1611         }
1612     }
1613     if (ppoll) {
1614         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1615         target_sigset_t *target_set;
1616         sigset_t _set, *set = &_set;
1617 
1618         if (arg3) {
1619             if (time64) {
1620                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1621                     unlock_user(target_pfd, arg1, 0);
1622                     return -TARGET_EFAULT;
1623                 }
1624             } else {
1625                 if (target_to_host_timespec(timeout_ts, arg3)) {
1626                     unlock_user(target_pfd, arg1, 0);
1627                     return -TARGET_EFAULT;
1628                 }
1629             }
1630         } else {
1631             timeout_ts = NULL;
1632         }
1633 
1634         if (arg4) {
1635             if (arg5 != sizeof(target_sigset_t)) {
1636                 unlock_user(target_pfd, arg1, 0);
1637                 return -TARGET_EINVAL;
1638             }
1639 
1640             target_set = lock_user(VERIFY_READ, arg4,
1641                                    sizeof(target_sigset_t), 1);
1642             if (!target_set) {
1643                 unlock_user(target_pfd, arg1, 0);
1644                 return -TARGET_EFAULT;
1645             }
1646             target_to_host_sigset(set, target_set);
1647         } else {
1648             set = NULL;
1649         }
1650 
1651         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1652                                    set, SIGSET_T_SIZE));
1653 
1654         if (!is_error(ret) && arg3) {
1655             if (time64) {
1656                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1657                     return -TARGET_EFAULT;
1658                 }
1659             } else {
1660                 if (host_to_target_timespec(arg3, timeout_ts)) {
1661                     return -TARGET_EFAULT;
1662                 }
1663             }
1664         }
1665         if (arg4) {
1666             unlock_user(target_set, arg4, 0);
1667         }
1668     } else {
1669           struct timespec ts, *pts;
1670 
1671           if (arg3 >= 0) {
1672               /* Convert ms to secs, ns */
1673               ts.tv_sec = arg3 / 1000;
1674               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1675               pts = &ts;
1676           } else {
1677               /* -ve poll() timeout means "infinite" */
1678               pts = NULL;
1679           }
1680           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1681     }
1682 
1683     if (!is_error(ret)) {
1684         for (i = 0; i < nfds; i++) {
1685             target_pfd[i].revents = tswap16(pfd[i].revents);
1686         }
1687     }
1688     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1689     return ret;
1690 }
1691 #endif
1692 
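/* Use the host pipe2() when the build has it; otherwise the flags variant
 * is unsupported and we report ENOSYS. */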
1693 static abi_long do_pipe2(int host_pipe[], int flags)
1694 {
1695 #ifdef CONFIG_PIPE2
1696     return pipe2(host_pipe, flags);
1697 #else
1698     return -ENOSYS;
1699 #endif
1700 }
1701 
1702 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1703                         int flags, int is_pipe2)
1704 {
1705     int host_pipe[2];
1706     abi_long ret;
1707     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1708 
1709     if (is_error(ret))
1710         return get_errno(ret);
1711 
1712     /* Several targets have special calling conventions for the original
1713        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1714     if (!is_pipe2) {
1715 #if defined(TARGET_ALPHA)
1716         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1717         return host_pipe[0];
1718 #elif defined(TARGET_MIPS)
1719         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1720         return host_pipe[0];
1721 #elif defined(TARGET_SH4)
1722         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1723         return host_pipe[0];
1724 #elif defined(TARGET_SPARC)
1725         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1726         return host_pipe[0];
1727 #endif
1728     }
1729 
1730     if (put_user_s32(host_pipe[0], pipedes)
1731         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1732         return -TARGET_EFAULT;
1733     return get_errno(ret);
1734 }
1735 
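/* Convert a guest ip_mreq/ip_mreqn multicast request; the interface index
 * is only present (and byte-swapped) for the larger ip_mreqn layout. */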
1736 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1737                                               abi_ulong target_addr,
1738                                               socklen_t len)
1739 {
1740     struct target_ip_mreqn *target_smreqn;
1741 
1742     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1743     if (!target_smreqn)
1744         return -TARGET_EFAULT;
1745     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1746     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1747     if (len == sizeof(struct target_ip_mreqn))
1748         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1749     unlock_user(target_smreqn, target_addr, 0);
1750 
1751     return 0;
1752 }
1753 
1754 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1755                                                abi_ulong target_addr,
1756                                                socklen_t len)
1757 {
1758     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1759     sa_family_t sa_family;
1760     struct target_sockaddr *target_saddr;
1761 
1762     if (fd_trans_target_to_host_addr(fd)) {
1763         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1764     }
1765 
1766     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1767     if (!target_saddr)
1768         return -TARGET_EFAULT;
1769 
1770     sa_family = tswap16(target_saddr->sa_family);
1771 
1772     /* Oops. The caller might send an incomplete sun_path; sun_path
1773      * must be terminated by \0 (see the manual page), but
1774      * unfortunately it is quite common to specify sockaddr_un
1775      * length as "strlen(x->sun_path)" while it should be
1776      * "strlen(...) + 1". We'll fix that here if needed.
1777      * The Linux kernel has a similar workaround.
1778      */
1779 
1780     if (sa_family == AF_UNIX) {
1781         if (len < unix_maxlen && len > 0) {
1782             char *cp = (char*)target_saddr;
1783 
1784             if ( cp[len-1] && !cp[len] )
1785                 len++;
1786         }
1787         if (len > unix_maxlen)
1788             len = unix_maxlen;
1789     }
1790 
1791     memcpy(addr, target_saddr, len);
1792     addr->sa_family = sa_family;
1793     if (sa_family == AF_NETLINK) {
1794         struct sockaddr_nl *nladdr;
1795 
1796         nladdr = (struct sockaddr_nl *)addr;
1797         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1798         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1799     } else if (sa_family == AF_PACKET) {
1800 	struct target_sockaddr_ll *lladdr;
1801 
1802 	lladdr = (struct target_sockaddr_ll *)addr;
1803 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1804 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1805     }
1806     unlock_user(target_saddr, target_addr, 0);
1807 
1808     return 0;
1809 }
1810 
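/* Copy a host sockaddr back to guest memory, byte-swapping the fields whose
 * representation differs (netlink pid/groups, packet ifindex/hatype, IPv6
 * scope id). A zero length is a no-op. */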
1811 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1812                                                struct sockaddr *addr,
1813                                                socklen_t len)
1814 {
1815     struct target_sockaddr *target_saddr;
1816 
1817     if (len == 0) {
1818         return 0;
1819     }
1820     assert(addr);
1821 
1822     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1823     if (!target_saddr)
1824         return -TARGET_EFAULT;
1825     memcpy(target_saddr, addr, len);
1826     if (len >= offsetof(struct target_sockaddr, sa_family) +
1827         sizeof(target_saddr->sa_family)) {
1828         target_saddr->sa_family = tswap16(addr->sa_family);
1829     }
1830     if (addr->sa_family == AF_NETLINK &&
1831         len >= sizeof(struct target_sockaddr_nl)) {
1832         struct target_sockaddr_nl *target_nl =
1833                (struct target_sockaddr_nl *)target_saddr;
1834         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1835         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1836     } else if (addr->sa_family == AF_PACKET) {
1837         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1838         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1839         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1840     } else if (addr->sa_family == AF_INET6 &&
1841                len >= sizeof(struct target_sockaddr_in6)) {
1842         struct target_sockaddr_in6 *target_in6 =
1843                (struct target_sockaddr_in6 *)target_saddr;
1844         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1845     }
1846     unlock_user(target_saddr, target_addr, len);
1847 
1848     return 0;
1849 }
1850 
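/* Convert the guest's ancillary data (control messages) into the host msghdr
 * allocated by the caller: SCM_RIGHTS file descriptors and SCM_CREDENTIALS
 * are converted field by field, anything else is copied verbatim with a
 * LOG_UNIMP warning. */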
1851 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1852                                            struct target_msghdr *target_msgh)
1853 {
1854     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1855     abi_long msg_controllen;
1856     abi_ulong target_cmsg_addr;
1857     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1858     socklen_t space = 0;
1859 
1860     msg_controllen = tswapal(target_msgh->msg_controllen);
1861     if (msg_controllen < sizeof (struct target_cmsghdr))
1862         goto the_end;
1863     target_cmsg_addr = tswapal(target_msgh->msg_control);
1864     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1865     target_cmsg_start = target_cmsg;
1866     if (!target_cmsg)
1867         return -TARGET_EFAULT;
1868 
1869     while (cmsg && target_cmsg) {
1870         void *data = CMSG_DATA(cmsg);
1871         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1872 
1873         int len = tswapal(target_cmsg->cmsg_len)
1874             - sizeof(struct target_cmsghdr);
1875 
1876         space += CMSG_SPACE(len);
1877         if (space > msgh->msg_controllen) {
1878             space -= CMSG_SPACE(len);
1879             /* This is a QEMU bug, since we allocated the payload
1880              * area ourselves (unlike overflow in host-to-target
1881              * conversion, which is just the guest giving us a buffer
1882              * that's too small). It can't happen for the payload types
1883              * we currently support; if it becomes an issue in future
1884              * we would need to improve our allocation strategy to
1885              * something more intelligent than "twice the size of the
1886              * target buffer we're reading from".
1887              */
1888             qemu_log_mask(LOG_UNIMP,
1889                           ("Unsupported ancillary data %d/%d: "
1890                            "unhandled msg size\n"),
1891                           tswap32(target_cmsg->cmsg_level),
1892                           tswap32(target_cmsg->cmsg_type));
1893             break;
1894         }
1895 
1896         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1897             cmsg->cmsg_level = SOL_SOCKET;
1898         } else {
1899             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1900         }
1901         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1902         cmsg->cmsg_len = CMSG_LEN(len);
1903 
1904         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1905             int *fd = (int *)data;
1906             int *target_fd = (int *)target_data;
1907             int i, numfds = len / sizeof(int);
1908 
1909             for (i = 0; i < numfds; i++) {
1910                 __get_user(fd[i], target_fd + i);
1911             }
1912         } else if (cmsg->cmsg_level == SOL_SOCKET
1913                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1914             struct ucred *cred = (struct ucred *)data;
1915             struct target_ucred *target_cred =
1916                 (struct target_ucred *)target_data;
1917 
1918             __get_user(cred->pid, &target_cred->pid);
1919             __get_user(cred->uid, &target_cred->uid);
1920             __get_user(cred->gid, &target_cred->gid);
1921         } else {
1922             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1923                           cmsg->cmsg_level, cmsg->cmsg_type);
1924             memcpy(data, target_data, len);
1925         }
1926 
1927         cmsg = CMSG_NXTHDR(msgh, cmsg);
1928         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1929                                          target_cmsg_start);
1930     }
1931     unlock_user(target_cmsg, target_cmsg_addr, 0);
1932  the_end:
1933     msgh->msg_controllen = space;
1934     return 0;
1935 }
1936 
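/* Convert host ancillary data back into the guest control buffer, resizing
 * payloads that differ between host and target (e.g. SO_TIMESTAMP's struct
 * timeval) and setting MSG_CTRUNC when the guest buffer is too small. */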
1937 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1938                                            struct msghdr *msgh)
1939 {
1940     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1941     abi_long msg_controllen;
1942     abi_ulong target_cmsg_addr;
1943     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1944     socklen_t space = 0;
1945 
1946     msg_controllen = tswapal(target_msgh->msg_controllen);
1947     if (msg_controllen < sizeof (struct target_cmsghdr))
1948         goto the_end;
1949     target_cmsg_addr = tswapal(target_msgh->msg_control);
1950     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1951     target_cmsg_start = target_cmsg;
1952     if (!target_cmsg)
1953         return -TARGET_EFAULT;
1954 
1955     while (cmsg && target_cmsg) {
1956         void *data = CMSG_DATA(cmsg);
1957         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1958 
1959         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1960         int tgt_len, tgt_space;
1961 
1962         /* We never copy a half-header but may copy half-data;
1963          * this is Linux's behaviour in put_cmsg(). Note that
1964          * truncation here is a guest problem (which we report
1965          * to the guest via the CTRUNC bit), unlike truncation
1966          * in target_to_host_cmsg, which is a QEMU bug.
1967          */
1968         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1969             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1970             break;
1971         }
1972 
1973         if (cmsg->cmsg_level == SOL_SOCKET) {
1974             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1975         } else {
1976             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1977         }
1978         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1979 
1980         /* Payload types which need a different size of payload on
1981          * the target must adjust tgt_len here.
1982          */
1983         tgt_len = len;
1984         switch (cmsg->cmsg_level) {
1985         case SOL_SOCKET:
1986             switch (cmsg->cmsg_type) {
1987             case SO_TIMESTAMP:
1988                 tgt_len = sizeof(struct target_timeval);
1989                 break;
1990             default:
1991                 break;
1992             }
1993             break;
1994         default:
1995             break;
1996         }
1997 
1998         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1999             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
2000             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
2001         }
2002 
2003         /* We must now copy-and-convert len bytes of payload
2004          * into tgt_len bytes of destination space. Bear in mind
2005          * that in both source and destination we may be dealing
2006          * with a truncated value!
2007          */
2008         switch (cmsg->cmsg_level) {
2009         case SOL_SOCKET:
2010             switch (cmsg->cmsg_type) {
2011             case SCM_RIGHTS:
2012             {
2013                 int *fd = (int *)data;
2014                 int *target_fd = (int *)target_data;
2015                 int i, numfds = tgt_len / sizeof(int);
2016 
2017                 for (i = 0; i < numfds; i++) {
2018                     __put_user(fd[i], target_fd + i);
2019                 }
2020                 break;
2021             }
2022             case SO_TIMESTAMP:
2023             {
2024                 struct timeval *tv = (struct timeval *)data;
2025                 struct target_timeval *target_tv =
2026                     (struct target_timeval *)target_data;
2027 
2028                 if (len != sizeof(struct timeval) ||
2029                     tgt_len != sizeof(struct target_timeval)) {
2030                     goto unimplemented;
2031                 }
2032 
2033                 /* copy struct timeval to target */
2034                 __put_user(tv->tv_sec, &target_tv->tv_sec);
2035                 __put_user(tv->tv_usec, &target_tv->tv_usec);
2036                 break;
2037             }
2038             case SCM_CREDENTIALS:
2039             {
2040                 struct ucred *cred = (struct ucred *)data;
2041                 struct target_ucred *target_cred =
2042                     (struct target_ucred *)target_data;
2043 
2044                 __put_user(cred->pid, &target_cred->pid);
2045                 __put_user(cred->uid, &target_cred->uid);
2046                 __put_user(cred->gid, &target_cred->gid);
2047                 break;
2048             }
2049             default:
2050                 goto unimplemented;
2051             }
2052             break;
2053 
2054         case SOL_IP:
2055             switch (cmsg->cmsg_type) {
2056             case IP_TTL:
2057             {
2058                 uint32_t *v = (uint32_t *)data;
2059                 uint32_t *t_int = (uint32_t *)target_data;
2060 
2061                 if (len != sizeof(uint32_t) ||
2062                     tgt_len != sizeof(uint32_t)) {
2063                     goto unimplemented;
2064                 }
2065                 __put_user(*v, t_int);
2066                 break;
2067             }
2068             case IP_RECVERR:
2069             {
2070                 struct errhdr_t {
2071                    struct sock_extended_err ee;
2072                    struct sockaddr_in offender;
2073                 };
2074                 struct errhdr_t *errh = (struct errhdr_t *)data;
2075                 struct errhdr_t *target_errh =
2076                     (struct errhdr_t *)target_data;
2077 
2078                 if (len != sizeof(struct errhdr_t) ||
2079                     tgt_len != sizeof(struct errhdr_t)) {
2080                     goto unimplemented;
2081                 }
2082                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2083                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2084                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2085                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2086                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2087                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2088                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2089                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2090                     (void *) &errh->offender, sizeof(errh->offender));
2091                 break;
2092             }
2093             default:
2094                 goto unimplemented;
2095             }
2096             break;
2097 
2098         case SOL_IPV6:
2099             switch (cmsg->cmsg_type) {
2100             case IPV6_HOPLIMIT:
2101             {
2102                 uint32_t *v = (uint32_t *)data;
2103                 uint32_t *t_int = (uint32_t *)target_data;
2104 
2105                 if (len != sizeof(uint32_t) ||
2106                     tgt_len != sizeof(uint32_t)) {
2107                     goto unimplemented;
2108                 }
2109                 __put_user(*v, t_int);
2110                 break;
2111             }
2112             case IPV6_RECVERR:
2113             {
2114                 struct errhdr6_t {
2115                    struct sock_extended_err ee;
2116                    struct sockaddr_in6 offender;
2117                 };
2118                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2119                 struct errhdr6_t *target_errh =
2120                     (struct errhdr6_t *)target_data;
2121 
2122                 if (len != sizeof(struct errhdr6_t) ||
2123                     tgt_len != sizeof(struct errhdr6_t)) {
2124                     goto unimplemented;
2125                 }
2126                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2127                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2128                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2129                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2130                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2131                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2132                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2133                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2134                     (void *) &errh->offender, sizeof(errh->offender));
2135                 break;
2136             }
2137             default:
2138                 goto unimplemented;
2139             }
2140             break;
2141 
2142         default:
2143         unimplemented:
2144             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2145                           cmsg->cmsg_level, cmsg->cmsg_type);
2146             memcpy(target_data, data, MIN(len, tgt_len));
2147             if (tgt_len > len) {
2148                 memset(target_data + len, 0, tgt_len - len);
2149             }
2150         }
2151 
2152         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2153         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2154         if (msg_controllen < tgt_space) {
2155             tgt_space = msg_controllen;
2156         }
2157         msg_controllen -= tgt_space;
2158         space += tgt_space;
2159         cmsg = CMSG_NXTHDR(msgh, cmsg);
2160         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2161                                          target_cmsg_start);
2162     }
2163     unlock_user(target_cmsg, target_cmsg_addr, space);
2164  the_end:
2165     target_msgh->msg_controllen = tswapal(space);
2166     return 0;
2167 }
2168 
2169 /* do_setsockopt() must return target values and target errnos. */
2170 static abi_long do_setsockopt(int sockfd, int level, int optname,
2171                               abi_ulong optval_addr, socklen_t optlen)
2172 {
2173     abi_long ret;
2174     int val;
2175     struct ip_mreqn *ip_mreq;
2176     struct ip_mreq_source *ip_mreq_source;
2177 
2178     switch(level) {
2179     case SOL_TCP:
2180         /* TCP options all take an 'int' value.  */
2181         if (optlen < sizeof(uint32_t))
2182             return -TARGET_EINVAL;
2183 
2184         if (get_user_u32(val, optval_addr))
2185             return -TARGET_EFAULT;
2186         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2187         break;
2188     case SOL_IP:
2189         switch(optname) {
2190         case IP_TOS:
2191         case IP_TTL:
2192         case IP_HDRINCL:
2193         case IP_ROUTER_ALERT:
2194         case IP_RECVOPTS:
2195         case IP_RETOPTS:
2196         case IP_PKTINFO:
2197         case IP_MTU_DISCOVER:
2198         case IP_RECVERR:
2199         case IP_RECVTTL:
2200         case IP_RECVTOS:
2201 #ifdef IP_FREEBIND
2202         case IP_FREEBIND:
2203 #endif
2204         case IP_MULTICAST_TTL:
2205         case IP_MULTICAST_LOOP:
2206             val = 0;
2207             if (optlen >= sizeof(uint32_t)) {
2208                 if (get_user_u32(val, optval_addr))
2209                     return -TARGET_EFAULT;
2210             } else if (optlen >= 1) {
2211                 if (get_user_u8(val, optval_addr))
2212                     return -TARGET_EFAULT;
2213             }
2214             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2215             break;
2216         case IP_ADD_MEMBERSHIP:
2217         case IP_DROP_MEMBERSHIP:
2218             if (optlen < sizeof (struct target_ip_mreq) ||
2219                 optlen > sizeof (struct target_ip_mreqn))
2220                 return -TARGET_EINVAL;
2221 
2222             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2223             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2224             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2225             break;
2226 
2227         case IP_BLOCK_SOURCE:
2228         case IP_UNBLOCK_SOURCE:
2229         case IP_ADD_SOURCE_MEMBERSHIP:
2230         case IP_DROP_SOURCE_MEMBERSHIP:
2231             if (optlen != sizeof (struct target_ip_mreq_source))
2232                 return -TARGET_EINVAL;
2233 
2234             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2235             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2236             unlock_user (ip_mreq_source, optval_addr, 0);
2237             break;
2238 
2239         default:
2240             goto unimplemented;
2241         }
2242         break;
2243     case SOL_IPV6:
2244         switch (optname) {
2245         case IPV6_MTU_DISCOVER:
2246         case IPV6_MTU:
2247         case IPV6_V6ONLY:
2248         case IPV6_RECVPKTINFO:
2249         case IPV6_UNICAST_HOPS:
2250         case IPV6_MULTICAST_HOPS:
2251         case IPV6_MULTICAST_LOOP:
2252         case IPV6_RECVERR:
2253         case IPV6_RECVHOPLIMIT:
2254         case IPV6_2292HOPLIMIT:
2255         case IPV6_CHECKSUM:
2256         case IPV6_ADDRFORM:
2257         case IPV6_2292PKTINFO:
2258         case IPV6_RECVTCLASS:
2259         case IPV6_RECVRTHDR:
2260         case IPV6_2292RTHDR:
2261         case IPV6_RECVHOPOPTS:
2262         case IPV6_2292HOPOPTS:
2263         case IPV6_RECVDSTOPTS:
2264         case IPV6_2292DSTOPTS:
2265         case IPV6_TCLASS:
2266 #ifdef IPV6_RECVPATHMTU
2267         case IPV6_RECVPATHMTU:
2268 #endif
2269 #ifdef IPV6_TRANSPARENT
2270         case IPV6_TRANSPARENT:
2271 #endif
2272 #ifdef IPV6_FREEBIND
2273         case IPV6_FREEBIND:
2274 #endif
2275 #ifdef IPV6_RECVORIGDSTADDR
2276         case IPV6_RECVORIGDSTADDR:
2277 #endif
2278             val = 0;
2279             if (optlen < sizeof(uint32_t)) {
2280                 return -TARGET_EINVAL;
2281             }
2282             if (get_user_u32(val, optval_addr)) {
2283                 return -TARGET_EFAULT;
2284             }
2285             ret = get_errno(setsockopt(sockfd, level, optname,
2286                                        &val, sizeof(val)));
2287             break;
2288         case IPV6_PKTINFO:
2289         {
2290             struct in6_pktinfo pki;
2291 
2292             if (optlen < sizeof(pki)) {
2293                 return -TARGET_EINVAL;
2294             }
2295 
2296             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2297                 return -TARGET_EFAULT;
2298             }
2299 
2300             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2301 
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &pki, sizeof(pki)));
2304             break;
2305         }
2306         case IPV6_ADD_MEMBERSHIP:
2307         case IPV6_DROP_MEMBERSHIP:
2308         {
2309             struct ipv6_mreq ipv6mreq;
2310 
2311             if (optlen < sizeof(ipv6mreq)) {
2312                 return -TARGET_EINVAL;
2313             }
2314 
2315             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2316                 return -TARGET_EFAULT;
2317             }
2318 
2319             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2320 
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        &ipv6mreq, sizeof(ipv6mreq)));
2323             break;
2324         }
2325         default:
2326             goto unimplemented;
2327         }
2328         break;
2329     case SOL_ICMPV6:
2330         switch (optname) {
2331         case ICMPV6_FILTER:
2332         {
2333             struct icmp6_filter icmp6f;
2334 
2335             if (optlen > sizeof(icmp6f)) {
2336                 optlen = sizeof(icmp6f);
2337             }
2338 
2339             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2340                 return -TARGET_EFAULT;
2341             }
2342 
2343             for (val = 0; val < 8; val++) {
2344                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2345             }
2346 
2347             ret = get_errno(setsockopt(sockfd, level, optname,
2348                                        &icmp6f, optlen));
2349             break;
2350         }
2351         default:
2352             goto unimplemented;
2353         }
2354         break;
2355     case SOL_RAW:
2356         switch (optname) {
2357         case ICMP_FILTER:
2358         case IPV6_CHECKSUM:
2359             /* those take a u32 value */
2360             if (optlen < sizeof(uint32_t)) {
2361                 return -TARGET_EINVAL;
2362             }
2363 
2364             if (get_user_u32(val, optval_addr)) {
2365                 return -TARGET_EFAULT;
2366             }
2367             ret = get_errno(setsockopt(sockfd, level, optname,
2368                                        &val, sizeof(val)));
2369             break;
2370 
2371         default:
2372             goto unimplemented;
2373         }
2374         break;
2375 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2376     case SOL_ALG:
2377         switch (optname) {
2378         case ALG_SET_KEY:
2379         {
2380             char *alg_key = g_malloc(optlen);
2381 
2382             if (!alg_key) {
2383                 return -TARGET_ENOMEM;
2384             }
2385             if (copy_from_user(alg_key, optval_addr, optlen)) {
2386                 g_free(alg_key);
2387                 return -TARGET_EFAULT;
2388             }
2389             ret = get_errno(setsockopt(sockfd, level, optname,
2390                                        alg_key, optlen));
2391             g_free(alg_key);
2392             break;
2393         }
2394         case ALG_SET_AEAD_AUTHSIZE:
2395         {
2396             ret = get_errno(setsockopt(sockfd, level, optname,
2397                                        NULL, optlen));
2398             break;
2399         }
2400         default:
2401             goto unimplemented;
2402         }
2403         break;
2404 #endif
2405     case TARGET_SOL_SOCKET:
2406         switch (optname) {
2407         case TARGET_SO_RCVTIMEO:
2408         {
2409                 struct timeval tv;
2410 
2411                 optname = SO_RCVTIMEO;
2412 
2413 set_timeout:
2414                 if (optlen != sizeof(struct target_timeval)) {
2415                     return -TARGET_EINVAL;
2416                 }
2417 
2418                 if (copy_from_user_timeval(&tv, optval_addr)) {
2419                     return -TARGET_EFAULT;
2420                 }
2421 
2422                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2423                                 &tv, sizeof(tv)));
2424                 return ret;
2425         }
2426         case TARGET_SO_SNDTIMEO:
2427                 optname = SO_SNDTIMEO;
2428                 goto set_timeout;
2429         case TARGET_SO_ATTACH_FILTER:
2430         {
2431                 struct target_sock_fprog *tfprog;
2432                 struct target_sock_filter *tfilter;
2433                 struct sock_fprog fprog;
2434                 struct sock_filter *filter;
2435                 int i;
2436 
2437                 if (optlen != sizeof(*tfprog)) {
2438                     return -TARGET_EINVAL;
2439                 }
2440                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2441                     return -TARGET_EFAULT;
2442                 }
2443                 if (!lock_user_struct(VERIFY_READ, tfilter,
2444                                       tswapal(tfprog->filter), 0)) {
2445                     unlock_user_struct(tfprog, optval_addr, 1);
2446                     return -TARGET_EFAULT;
2447                 }
2448 
2449                 fprog.len = tswap16(tfprog->len);
2450                 filter = g_try_new(struct sock_filter, fprog.len);
2451                 if (filter == NULL) {
2452                     unlock_user_struct(tfilter, tfprog->filter, 1);
2453                     unlock_user_struct(tfprog, optval_addr, 1);
2454                     return -TARGET_ENOMEM;
2455                 }
2456                 for (i = 0; i < fprog.len; i++) {
2457                     filter[i].code = tswap16(tfilter[i].code);
2458                     filter[i].jt = tfilter[i].jt;
2459                     filter[i].jf = tfilter[i].jf;
2460                     filter[i].k = tswap32(tfilter[i].k);
2461                 }
2462                 fprog.filter = filter;
2463 
2464                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2465                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2466                 g_free(filter);
2467 
2468                 unlock_user_struct(tfilter, tfprog->filter, 1);
2469                 unlock_user_struct(tfprog, optval_addr, 1);
2470                 return ret;
2471         }
2472 	case TARGET_SO_BINDTODEVICE:
2473 	{
2474 		char *dev_ifname, *addr_ifname;
2475 
2476 		if (optlen > IFNAMSIZ - 1) {
2477 		    optlen = IFNAMSIZ - 1;
2478 		}
2479 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2480 		if (!dev_ifname) {
2481 		    return -TARGET_EFAULT;
2482 		}
2483 		optname = SO_BINDTODEVICE;
2484 		addr_ifname = alloca(IFNAMSIZ);
2485 		memcpy(addr_ifname, dev_ifname, optlen);
2486 		addr_ifname[optlen] = 0;
2487 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2488                                            addr_ifname, optlen));
2489 		unlock_user (dev_ifname, optval_addr, 0);
2490 		return ret;
2491 	}
2492         case TARGET_SO_LINGER:
2493         {
2494                 struct linger lg;
2495                 struct target_linger *tlg;
2496 
2497                 if (optlen != sizeof(struct target_linger)) {
2498                     return -TARGET_EINVAL;
2499                 }
2500                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2501                     return -TARGET_EFAULT;
2502                 }
2503                 __get_user(lg.l_onoff, &tlg->l_onoff);
2504                 __get_user(lg.l_linger, &tlg->l_linger);
2505                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2506                                 &lg, sizeof(lg)));
2507                 unlock_user_struct(tlg, optval_addr, 0);
2508                 return ret;
2509         }
2510             /* Options with 'int' argument.  */
2511         case TARGET_SO_DEBUG:
2512 		optname = SO_DEBUG;
2513 		break;
2514         case TARGET_SO_REUSEADDR:
2515 		optname = SO_REUSEADDR;
2516 		break;
2517 #ifdef SO_REUSEPORT
2518         case TARGET_SO_REUSEPORT:
2519                 optname = SO_REUSEPORT;
2520                 break;
2521 #endif
2522         case TARGET_SO_TYPE:
2523 		optname = SO_TYPE;
2524 		break;
2525         case TARGET_SO_ERROR:
2526 		optname = SO_ERROR;
2527 		break;
2528         case TARGET_SO_DONTROUTE:
2529 		optname = SO_DONTROUTE;
2530 		break;
2531         case TARGET_SO_BROADCAST:
2532 		optname = SO_BROADCAST;
2533 		break;
2534         case TARGET_SO_SNDBUF:
2535 		optname = SO_SNDBUF;
2536 		break;
2537         case TARGET_SO_SNDBUFFORCE:
2538                 optname = SO_SNDBUFFORCE;
2539                 break;
2540         case TARGET_SO_RCVBUF:
2541 		optname = SO_RCVBUF;
2542 		break;
2543         case TARGET_SO_RCVBUFFORCE:
2544                 optname = SO_RCVBUFFORCE;
2545                 break;
2546         case TARGET_SO_KEEPALIVE:
2547 		optname = SO_KEEPALIVE;
2548 		break;
2549         case TARGET_SO_OOBINLINE:
2550 		optname = SO_OOBINLINE;
2551 		break;
2552         case TARGET_SO_NO_CHECK:
2553 		optname = SO_NO_CHECK;
2554 		break;
2555         case TARGET_SO_PRIORITY:
2556 		optname = SO_PRIORITY;
2557 		break;
2558 #ifdef SO_BSDCOMPAT
2559         case TARGET_SO_BSDCOMPAT:
2560 		optname = SO_BSDCOMPAT;
2561 		break;
2562 #endif
2563         case TARGET_SO_PASSCRED:
2564 		optname = SO_PASSCRED;
2565 		break;
2566         case TARGET_SO_PASSSEC:
2567                 optname = SO_PASSSEC;
2568                 break;
2569         case TARGET_SO_TIMESTAMP:
2570 		optname = SO_TIMESTAMP;
2571 		break;
2572         case TARGET_SO_RCVLOWAT:
2573 		optname = SO_RCVLOWAT;
2574 		break;
2575         default:
2576             goto unimplemented;
2577         }
2578 	if (optlen < sizeof(uint32_t))
2579             return -TARGET_EINVAL;
2580 
2581 	if (get_user_u32(val, optval_addr))
2582             return -TARGET_EFAULT;
2583 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2584         break;
2585 #ifdef SOL_NETLINK
2586     case SOL_NETLINK:
2587         switch (optname) {
2588         case NETLINK_PKTINFO:
2589         case NETLINK_ADD_MEMBERSHIP:
2590         case NETLINK_DROP_MEMBERSHIP:
2591         case NETLINK_BROADCAST_ERROR:
2592         case NETLINK_NO_ENOBUFS:
2593 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2594         case NETLINK_LISTEN_ALL_NSID:
2595         case NETLINK_CAP_ACK:
2596 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2597 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2598         case NETLINK_EXT_ACK:
2599 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2601         case NETLINK_GET_STRICT_CHK:
2602 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2603             break;
2604         default:
2605             goto unimplemented;
2606         }
2607         val = 0;
2608         if (optlen < sizeof(uint32_t)) {
2609             return -TARGET_EINVAL;
2610         }
2611         if (get_user_u32(val, optval_addr)) {
2612             return -TARGET_EFAULT;
2613         }
2614         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2615                                    sizeof(val)));
2616         break;
2617 #endif /* SOL_NETLINK */
2618     default:
2619     unimplemented:
2620         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2621                       level, optname);
2622         ret = -TARGET_ENOPROTOOPT;
2623     }
2624     return ret;
2625 }
2626 
2627 /* do_getsockopt() must return target values and target errnos. */
2628 static abi_long do_getsockopt(int sockfd, int level, int optname,
2629                               abi_ulong optval_addr, abi_ulong optlen)
2630 {
2631     abi_long ret;
2632     int len, val;
2633     socklen_t lv;
2634 
2635     switch(level) {
2636     case TARGET_SOL_SOCKET:
2637         level = SOL_SOCKET;
2638         switch (optname) {
2639         /* These don't just return a single integer */
2640         case TARGET_SO_PEERNAME:
2641             goto unimplemented;
2642         case TARGET_SO_RCVTIMEO: {
2643             struct timeval tv;
2644             socklen_t tvlen;
2645 
2646             optname = SO_RCVTIMEO;
2647 
2648 get_timeout:
2649             if (get_user_u32(len, optlen)) {
2650                 return -TARGET_EFAULT;
2651             }
2652             if (len < 0) {
2653                 return -TARGET_EINVAL;
2654             }
2655 
2656             tvlen = sizeof(tv);
2657             ret = get_errno(getsockopt(sockfd, level, optname,
2658                                        &tv, &tvlen));
2659             if (ret < 0) {
2660                 return ret;
2661             }
2662             if (len > sizeof(struct target_timeval)) {
2663                 len = sizeof(struct target_timeval);
2664             }
2665             if (copy_to_user_timeval(optval_addr, &tv)) {
2666                 return -TARGET_EFAULT;
2667             }
2668             if (put_user_u32(len, optlen)) {
2669                 return -TARGET_EFAULT;
2670             }
2671             break;
2672         }
2673         case TARGET_SO_SNDTIMEO:
2674             optname = SO_SNDTIMEO;
2675             goto get_timeout;
2676         case TARGET_SO_PEERCRED: {
2677             struct ucred cr;
2678             socklen_t crlen;
2679             struct target_ucred *tcr;
2680 
2681             if (get_user_u32(len, optlen)) {
2682                 return -TARGET_EFAULT;
2683             }
2684             if (len < 0) {
2685                 return -TARGET_EINVAL;
2686             }
2687 
2688             crlen = sizeof(cr);
2689             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2690                                        &cr, &crlen));
2691             if (ret < 0) {
2692                 return ret;
2693             }
2694             if (len > crlen) {
2695                 len = crlen;
2696             }
2697             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2698                 return -TARGET_EFAULT;
2699             }
2700             __put_user(cr.pid, &tcr->pid);
2701             __put_user(cr.uid, &tcr->uid);
2702             __put_user(cr.gid, &tcr->gid);
2703             unlock_user_struct(tcr, optval_addr, 1);
2704             if (put_user_u32(len, optlen)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             break;
2708         }
2709         case TARGET_SO_PEERSEC: {
2710             char *name;
2711 
2712             if (get_user_u32(len, optlen)) {
2713                 return -TARGET_EFAULT;
2714             }
2715             if (len < 0) {
2716                 return -TARGET_EINVAL;
2717             }
2718             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2719             if (!name) {
2720                 return -TARGET_EFAULT;
2721             }
2722             lv = len;
2723             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2724                                        name, &lv));
2725             if (put_user_u32(lv, optlen)) {
2726                 ret = -TARGET_EFAULT;
2727             }
2728             unlock_user(name, optval_addr, lv);
2729             break;
2730         }
2731         case TARGET_SO_LINGER:
2732         {
2733             struct linger lg;
2734             socklen_t lglen;
2735             struct target_linger *tlg;
2736 
2737             if (get_user_u32(len, optlen)) {
2738                 return -TARGET_EFAULT;
2739             }
2740             if (len < 0) {
2741                 return -TARGET_EINVAL;
2742             }
2743 
2744             lglen = sizeof(lg);
2745             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2746                                        &lg, &lglen));
2747             if (ret < 0) {
2748                 return ret;
2749             }
2750             if (len > lglen) {
2751                 len = lglen;
2752             }
2753             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2754                 return -TARGET_EFAULT;
2755             }
2756             __put_user(lg.l_onoff, &tlg->l_onoff);
2757             __put_user(lg.l_linger, &tlg->l_linger);
2758             unlock_user_struct(tlg, optval_addr, 1);
2759             if (put_user_u32(len, optlen)) {
2760                 return -TARGET_EFAULT;
2761             }
2762             break;
2763         }
2764         /* Options with 'int' argument.  */
2765         case TARGET_SO_DEBUG:
2766             optname = SO_DEBUG;
2767             goto int_case;
2768         case TARGET_SO_REUSEADDR:
2769             optname = SO_REUSEADDR;
2770             goto int_case;
2771 #ifdef SO_REUSEPORT
2772         case TARGET_SO_REUSEPORT:
2773             optname = SO_REUSEPORT;
2774             goto int_case;
2775 #endif
2776         case TARGET_SO_TYPE:
2777             optname = SO_TYPE;
2778             goto int_case;
2779         case TARGET_SO_ERROR:
2780             optname = SO_ERROR;
2781             goto int_case;
2782         case TARGET_SO_DONTROUTE:
2783             optname = SO_DONTROUTE;
2784             goto int_case;
2785         case TARGET_SO_BROADCAST:
2786             optname = SO_BROADCAST;
2787             goto int_case;
2788         case TARGET_SO_SNDBUF:
2789             optname = SO_SNDBUF;
2790             goto int_case;
2791         case TARGET_SO_RCVBUF:
2792             optname = SO_RCVBUF;
2793             goto int_case;
2794         case TARGET_SO_KEEPALIVE:
2795             optname = SO_KEEPALIVE;
2796             goto int_case;
2797         case TARGET_SO_OOBINLINE:
2798             optname = SO_OOBINLINE;
2799             goto int_case;
2800         case TARGET_SO_NO_CHECK:
2801             optname = SO_NO_CHECK;
2802             goto int_case;
2803         case TARGET_SO_PRIORITY:
2804             optname = SO_PRIORITY;
2805             goto int_case;
2806 #ifdef SO_BSDCOMPAT
2807         case TARGET_SO_BSDCOMPAT:
2808             optname = SO_BSDCOMPAT;
2809             goto int_case;
2810 #endif
2811         case TARGET_SO_PASSCRED:
2812             optname = SO_PASSCRED;
2813             goto int_case;
2814         case TARGET_SO_TIMESTAMP:
2815             optname = SO_TIMESTAMP;
2816             goto int_case;
2817         case TARGET_SO_RCVLOWAT:
2818             optname = SO_RCVLOWAT;
2819             goto int_case;
2820         case TARGET_SO_ACCEPTCONN:
2821             optname = SO_ACCEPTCONN;
2822             goto int_case;
2823         default:
2824             goto int_case;
2825         }
2826         break;
2827     case SOL_TCP:
2828         /* TCP options all take an 'int' value.  */
2829     int_case:
2830         if (get_user_u32(len, optlen))
2831             return -TARGET_EFAULT;
2832         if (len < 0)
2833             return -TARGET_EINVAL;
2834         lv = sizeof(lv);
2835         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2836         if (ret < 0)
2837             return ret;
2838         if (optname == SO_TYPE) {
2839             val = host_to_target_sock_type(val);
2840         }
2841         if (len > lv)
2842             len = lv;
2843         if (len == 4) {
2844             if (put_user_u32(val, optval_addr))
2845                 return -TARGET_EFAULT;
2846         } else {
2847             if (put_user_u8(val, optval_addr))
2848                 return -TARGET_EFAULT;
2849         }
2850         if (put_user_u32(len, optlen))
2851             return -TARGET_EFAULT;
2852         break;
2853     case SOL_IP:
2854         switch(optname) {
2855         case IP_TOS:
2856         case IP_TTL:
2857         case IP_HDRINCL:
2858         case IP_ROUTER_ALERT:
2859         case IP_RECVOPTS:
2860         case IP_RETOPTS:
2861         case IP_PKTINFO:
2862         case IP_MTU_DISCOVER:
2863         case IP_RECVERR:
2864         case IP_RECVTOS:
2865 #ifdef IP_FREEBIND
2866         case IP_FREEBIND:
2867 #endif
2868         case IP_MULTICAST_TTL:
2869         case IP_MULTICAST_LOOP:
2870             if (get_user_u32(len, optlen))
2871                 return -TARGET_EFAULT;
2872             if (len < 0)
2873                 return -TARGET_EINVAL;
2874             lv = sizeof(lv);
2875             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2876             if (ret < 0)
2877                 return ret;
2878             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2879                 len = 1;
2880                 if (put_user_u32(len, optlen)
2881                     || put_user_u8(val, optval_addr))
2882                     return -TARGET_EFAULT;
2883             } else {
2884                 if (len > sizeof(int))
2885                     len = sizeof(int);
2886                 if (put_user_u32(len, optlen)
2887                     || put_user_u32(val, optval_addr))
2888                     return -TARGET_EFAULT;
2889             }
2890             break;
2891         default:
2892             ret = -TARGET_ENOPROTOOPT;
2893             break;
2894         }
2895         break;
2896     case SOL_IPV6:
2897         switch (optname) {
2898         case IPV6_MTU_DISCOVER:
2899         case IPV6_MTU:
2900         case IPV6_V6ONLY:
2901         case IPV6_RECVPKTINFO:
2902         case IPV6_UNICAST_HOPS:
2903         case IPV6_MULTICAST_HOPS:
2904         case IPV6_MULTICAST_LOOP:
2905         case IPV6_RECVERR:
2906         case IPV6_RECVHOPLIMIT:
2907         case IPV6_2292HOPLIMIT:
2908         case IPV6_CHECKSUM:
2909         case IPV6_ADDRFORM:
2910         case IPV6_2292PKTINFO:
2911         case IPV6_RECVTCLASS:
2912         case IPV6_RECVRTHDR:
2913         case IPV6_2292RTHDR:
2914         case IPV6_RECVHOPOPTS:
2915         case IPV6_2292HOPOPTS:
2916         case IPV6_RECVDSTOPTS:
2917         case IPV6_2292DSTOPTS:
2918         case IPV6_TCLASS:
2919 #ifdef IPV6_RECVPATHMTU
2920         case IPV6_RECVPATHMTU:
2921 #endif
2922 #ifdef IPV6_TRANSPARENT
2923         case IPV6_TRANSPARENT:
2924 #endif
2925 #ifdef IPV6_FREEBIND
2926         case IPV6_FREEBIND:
2927 #endif
2928 #ifdef IPV6_RECVORIGDSTADDR
2929         case IPV6_RECVORIGDSTADDR:
2930 #endif
2931             if (get_user_u32(len, optlen))
2932                 return -TARGET_EFAULT;
2933             if (len < 0)
2934                 return -TARGET_EINVAL;
2935             lv = sizeof(lv);
2936             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2937             if (ret < 0)
2938                 return ret;
2939             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2940                 len = 1;
2941                 if (put_user_u32(len, optlen)
2942                     || put_user_u8(val, optval_addr))
2943                     return -TARGET_EFAULT;
2944             } else {
2945                 if (len > sizeof(int))
2946                     len = sizeof(int);
2947                 if (put_user_u32(len, optlen)
2948                     || put_user_u32(val, optval_addr))
2949                     return -TARGET_EFAULT;
2950             }
2951             break;
2952         default:
2953             ret = -TARGET_ENOPROTOOPT;
2954             break;
2955         }
2956         break;
2957 #ifdef SOL_NETLINK
2958     case SOL_NETLINK:
2959         switch (optname) {
2960         case NETLINK_PKTINFO:
2961         case NETLINK_BROADCAST_ERROR:
2962         case NETLINK_NO_ENOBUFS:
2963 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2964         case NETLINK_LISTEN_ALL_NSID:
2965         case NETLINK_CAP_ACK:
2966 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2967 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2968         case NETLINK_EXT_ACK:
2969 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2970 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2971         case NETLINK_GET_STRICT_CHK:
2972 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2973             if (get_user_u32(len, optlen)) {
2974                 return -TARGET_EFAULT;
2975             }
2976             if (len != sizeof(val)) {
2977                 return -TARGET_EINVAL;
2978             }
2979             lv = len;
2980             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2981             if (ret < 0) {
2982                 return ret;
2983             }
2984             if (put_user_u32(lv, optlen)
2985                 || put_user_u32(val, optval_addr)) {
2986                 return -TARGET_EFAULT;
2987             }
2988             break;
2989 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2990         case NETLINK_LIST_MEMBERSHIPS:
2991         {
2992             uint32_t *results;
2993             int i;
2994             if (get_user_u32(len, optlen)) {
2995                 return -TARGET_EFAULT;
2996             }
2997             if (len < 0) {
2998                 return -TARGET_EINVAL;
2999             }
3000             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
3001             if (!results) {
3002                 return -TARGET_EFAULT;
3003             }
3004             lv = len;
3005             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
3006             if (ret < 0) {
3007                 unlock_user(results, optval_addr, 0);
3008                 return ret;
3009             }
3010             /* swap host endianness to target endianness. */
3011             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
3012                 results[i] = tswap32(results[i]);
3013             }
3014             if (put_user_u32(lv, optlen)) {
3015                 return -TARGET_EFAULT;
3016             }
3017             unlock_user(results, optval_addr, 0);
3018             break;
3019         }
3020 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3021         default:
3022             goto unimplemented;
3023         }
3024         break;
3025 #endif /* SOL_NETLINK */
3026     default:
3027     unimplemented:
3028         qemu_log_mask(LOG_UNIMP,
3029                       "getsockopt level=%d optname=%d not yet supported\n",
3030                       level, optname);
3031         ret = -TARGET_EOPNOTSUPP;
3032         break;
3033     }
3034     return ret;
3035 }
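
/*
 * Illustrative sketch (not part of the original source): the byte-vs-int
 * handling above as seen from the guest.  When the guest passes an optlen
 * smaller than sizeof(int) and the option value fits in a byte, a single
 * byte and len == 1 are written back; otherwise len is clamped to
 * sizeof(int) and a full 32-bit value is stored.  IP_TTL is only an example
 * of such an option.
 */
#if 0
static void example_getsockopt_widths(int s)
{
    unsigned char byteval;
    int intval;
    socklen_t len;

    len = sizeof(byteval);                              /* 1-byte path */
    getsockopt(s, IPPROTO_IP, IP_TTL, &byteval, &len);  /* len stays 1 */

    len = sizeof(intval);                               /* 4-byte path */
    getsockopt(s, IPPROTO_IP, IP_TTL, &intval, &len);   /* len becomes 4 */
}
#endif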
3036 
3037 /* Convert target low/high pair representing file offset into the host
3038  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3039  * as the kernel doesn't handle them either.
3040  */
3041 static void target_to_host_low_high(abi_ulong tlow,
3042                                     abi_ulong thigh,
3043                                     unsigned long *hlow,
3044                                     unsigned long *hhigh)
3045 {
3046     uint64_t off = tlow |
3047         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3048         TARGET_LONG_BITS / 2;
3049 
3050     *hlow = off;
3051     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3052 }
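
/*
 * Worked example (illustrative, not part of the original source): for a
 * 32-bit target on a 64-bit host, tlow = 0x89abcdef and thigh = 0x01234567
 * combine to off = 0x0123456789abcdef; *hlow receives the whole value and
 * *hhigh becomes 0.  The shifts are done in two halves so that no single
 * shift equals the width of the operand, which would be undefined behaviour
 * for a 64-bit target or host long.
 */
#if 0
static void example_low_high(void)
{
    unsigned long hlow, hhigh;

    target_to_host_low_high(0x89abcdef, 0x01234567, &hlow, &hhigh);
    /* 64-bit host: hlow == 0x0123456789abcdefUL, hhigh == 0. */
}
#endif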
3053 
3054 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3055                                 abi_ulong count, int copy)
3056 {
3057     struct target_iovec *target_vec;
3058     struct iovec *vec;
3059     abi_ulong total_len, max_len;
3060     int i;
3061     int err = 0;
3062     bool bad_address = false;
3063 
3064     if (count == 0) {
3065         errno = 0;
3066         return NULL;
3067     }
3068     if (count > IOV_MAX) {
3069         errno = EINVAL;
3070         return NULL;
3071     }
3072 
3073     vec = g_try_new0(struct iovec, count);
3074     if (vec == NULL) {
3075         errno = ENOMEM;
3076         return NULL;
3077     }
3078 
3079     target_vec = lock_user(VERIFY_READ, target_addr,
3080                            count * sizeof(struct target_iovec), 1);
3081     if (target_vec == NULL) {
3082         err = EFAULT;
3083         goto fail2;
3084     }
3085 
3086     /* ??? If host page size > target page size, this will result in a
3087        value larger than what we can actually support.  */
3088     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3089     total_len = 0;
3090 
3091     for (i = 0; i < count; i++) {
3092         abi_ulong base = tswapal(target_vec[i].iov_base);
3093         abi_long len = tswapal(target_vec[i].iov_len);
3094 
3095         if (len < 0) {
3096             err = EINVAL;
3097             goto fail;
3098         } else if (len == 0) {
3099             /* Zero length pointer is ignored.  */
3100             vec[i].iov_base = 0;
3101         } else {
3102             vec[i].iov_base = lock_user(type, base, len, copy);
3103             /* If the first buffer pointer is bad, this is a fault.  But
3104              * subsequent bad buffers will result in a partial write; this
3105              * is realized by filling the vector with null pointers and
3106              * zero lengths. */
3107             if (!vec[i].iov_base) {
3108                 if (i == 0) {
3109                     err = EFAULT;
3110                     goto fail;
3111                 } else {
3112                     bad_address = true;
3113                 }
3114             }
3115             if (bad_address) {
3116                 len = 0;
3117             }
3118             if (len > max_len - total_len) {
3119                 len = max_len - total_len;
3120             }
3121         }
3122         vec[i].iov_len = len;
3123         total_len += len;
3124     }
3125 
3126     unlock_user(target_vec, target_addr, 0);
3127     return vec;
3128 
3129  fail:
3130     while (--i >= 0) {
3131         if (tswapal(target_vec[i].iov_len) > 0) {
3132             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3133         }
3134     }
3135     unlock_user(target_vec, target_addr, 0);
3136  fail2:
3137     g_free(vec);
3138     errno = err;
3139     return NULL;
3140 }
3141 
3142 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3143                          abi_ulong count, int copy)
3144 {
3145     struct target_iovec *target_vec;
3146     int i;
3147 
3148     target_vec = lock_user(VERIFY_READ, target_addr,
3149                            count * sizeof(struct target_iovec), 1);
3150     if (target_vec) {
3151         for (i = 0; i < count; i++) {
3152             abi_ulong base = tswapal(target_vec[i].iov_base);
3153             abi_long len = tswapal(target_vec[i].iov_len);
3154             if (len < 0) {
3155                 break;
3156             }
3157             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3158         }
3159         unlock_user(target_vec, target_addr, 0);
3160     }
3161 
3162     g_free(vec);
3163 }
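
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * caller locks the guest iovec, does the host I/O, then unlocks it, copying
 * data back only on the read path.  lock_iovec()'s error contract: a bad
 * pointer in the first element fails with EFAULT, while later bad elements
 * become zero-length entries so the host syscall performs a partial
 * transfer.  "fd", "target_iov" and "iovcnt" are example parameters.
 */
#if 0
static abi_long example_writev(int fd, abi_ulong target_iov, int iovcnt)
{
    struct iovec *vec = lock_iovec(VERIFY_READ, target_iov, iovcnt, 1);
    abi_long ret;

    if (vec == NULL) {
        return -host_to_target_errno(errno);    /* EFAULT, EINVAL or ENOMEM */
    }
    ret = get_errno(writev(fd, vec, iovcnt));
    unlock_iovec(vec, target_iov, iovcnt, 0);   /* 0: nothing to copy back */
    return ret;
}
#endif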
3164 
3165 static inline int target_to_host_sock_type(int *type)
3166 {
3167     int host_type = 0;
3168     int target_type = *type;
3169 
3170     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3171     case TARGET_SOCK_DGRAM:
3172         host_type = SOCK_DGRAM;
3173         break;
3174     case TARGET_SOCK_STREAM:
3175         host_type = SOCK_STREAM;
3176         break;
3177     default:
3178         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3179         break;
3180     }
3181     if (target_type & TARGET_SOCK_CLOEXEC) {
3182 #if defined(SOCK_CLOEXEC)
3183         host_type |= SOCK_CLOEXEC;
3184 #else
3185         return -TARGET_EINVAL;
3186 #endif
3187     }
3188     if (target_type & TARGET_SOCK_NONBLOCK) {
3189 #if defined(SOCK_NONBLOCK)
3190         host_type |= SOCK_NONBLOCK;
3191 #elif !defined(O_NONBLOCK)
3192         return -TARGET_EINVAL;
3193 #endif
3194     }
3195     *type = host_type;
3196     return 0;
3197 }
3198 
3199 /* Try to emulate socket type flags after socket creation.  */
3200 static int sock_flags_fixup(int fd, int target_type)
3201 {
3202 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3203     if (target_type & TARGET_SOCK_NONBLOCK) {
3204         int flags = fcntl(fd, F_GETFL);
3205         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3206             close(fd);
3207             return -TARGET_EINVAL;
3208         }
3209     }
3210 #endif
3211     return fd;
3212 }
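
/*
 * Minimal sketch of the same idiom (illustrative, not part of the original
 * source): on hosts without SOCK_NONBLOCK the flag cannot be passed to
 * socket(2), so non-blocking mode is applied after creation with fcntl().
 */
#if 0
static int example_set_nonblock(int fd)
{
    int flags = fcntl(fd, F_GETFL);

    if (flags == -1 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
        return -1;              /* caller decides whether to close fd */
    }
    return fd;
}
#endif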
3213 
3214 /* do_socket() Must return target values and target errnos. */
3215 static abi_long do_socket(int domain, int type, int protocol)
3216 {
3217     int target_type = type;
3218     int ret;
3219 
3220     ret = target_to_host_sock_type(&type);
3221     if (ret) {
3222         return ret;
3223     }
3224 
3225     if (domain == PF_NETLINK && !(
3226 #ifdef CONFIG_RTNETLINK
3227          protocol == NETLINK_ROUTE ||
3228 #endif
3229          protocol == NETLINK_KOBJECT_UEVENT ||
3230          protocol == NETLINK_AUDIT)) {
3231         return -TARGET_EPROTONOSUPPORT;
3232     }
3233 
3234     if (domain == AF_PACKET ||
3235         (domain == AF_INET && type == SOCK_PACKET)) {
3236         protocol = tswap16(protocol);
3237     }
3238 
3239     ret = get_errno(socket(domain, type, protocol));
3240     if (ret >= 0) {
3241         ret = sock_flags_fixup(ret, target_type);
3242         if (type == SOCK_PACKET) {
3243             /* Handle an obsolete case:
3244              * if the socket type is SOCK_PACKET, bind by name.
3245              */
3246             fd_trans_register(ret, &target_packet_trans);
3247         } else if (domain == PF_NETLINK) {
3248             switch (protocol) {
3249 #ifdef CONFIG_RTNETLINK
3250             case NETLINK_ROUTE:
3251                 fd_trans_register(ret, &target_netlink_route_trans);
3252                 break;
3253 #endif
3254             case NETLINK_KOBJECT_UEVENT:
3255                 /* nothing to do: messages are strings */
3256                 break;
3257             case NETLINK_AUDIT:
3258                 fd_trans_register(ret, &target_netlink_audit_trans);
3259                 break;
3260             default:
3261                 g_assert_not_reached();
3262             }
3263         }
3264     }
3265     return ret;
3266 }
3267 
3268 /* do_bind() Must return target values and target errnos. */
3269 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3270                         socklen_t addrlen)
3271 {
3272     void *addr;
3273     abi_long ret;
3274 
3275     if ((int)addrlen < 0) {
3276         return -TARGET_EINVAL;
3277     }
3278 
3279     addr = alloca(addrlen+1);
3280 
3281     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3282     if (ret)
3283         return ret;
3284 
3285     return get_errno(bind(sockfd, addr, addrlen));
3286 }
3287 
3288 /* do_connect() Must return target values and target errnos. */
3289 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3290                            socklen_t addrlen)
3291 {
3292     void *addr;
3293     abi_long ret;
3294 
3295     if ((int)addrlen < 0) {
3296         return -TARGET_EINVAL;
3297     }
3298 
3299     addr = alloca(addrlen+1);
3300 
3301     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3302     if (ret)
3303         return ret;
3304 
3305     return get_errno(safe_connect(sockfd, addr, addrlen));
3306 }
3307 
3308 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3309 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3310                                       int flags, int send)
3311 {
3312     abi_long ret, len;
3313     struct msghdr msg;
3314     abi_ulong count;
3315     struct iovec *vec;
3316     abi_ulong target_vec;
3317 
3318     if (msgp->msg_name) {
3319         msg.msg_namelen = tswap32(msgp->msg_namelen);
3320         msg.msg_name = alloca(msg.msg_namelen+1);
3321         ret = target_to_host_sockaddr(fd, msg.msg_name,
3322                                       tswapal(msgp->msg_name),
3323                                       msg.msg_namelen);
3324         if (ret == -TARGET_EFAULT) {
3325             /* For connected sockets msg_name and msg_namelen must
3326              * be ignored, so returning EFAULT immediately is wrong.
3327              * Instead, pass a bad msg_name to the host kernel, and
3328              * let it decide whether to return EFAULT or not.
3329              */
3330             msg.msg_name = (void *)-1;
3331         } else if (ret) {
3332             goto out2;
3333         }
3334     } else {
3335         msg.msg_name = NULL;
3336         msg.msg_namelen = 0;
3337     }
3338     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3339     msg.msg_control = alloca(msg.msg_controllen);
3340     memset(msg.msg_control, 0, msg.msg_controllen);
3341 
3342     msg.msg_flags = tswap32(msgp->msg_flags);
3343 
3344     count = tswapal(msgp->msg_iovlen);
3345     target_vec = tswapal(msgp->msg_iov);
3346 
3347     if (count > IOV_MAX) {
3348     /* sendmsg/recvmsg return a different errno for this condition than
3349          * readv/writev, so we must catch it here before lock_iovec() does.
3350          */
3351         ret = -TARGET_EMSGSIZE;
3352         goto out2;
3353     }
3354 
3355     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3356                      target_vec, count, send);
3357     if (vec == NULL) {
3358         ret = -host_to_target_errno(errno);
3359         goto out2;
3360     }
3361     msg.msg_iovlen = count;
3362     msg.msg_iov = vec;
3363 
3364     if (send) {
3365         if (fd_trans_target_to_host_data(fd)) {
3366             void *host_msg;
3367 
3368             host_msg = g_malloc(msg.msg_iov->iov_len);
3369             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3370             ret = fd_trans_target_to_host_data(fd)(host_msg,
3371                                                    msg.msg_iov->iov_len);
3372             if (ret >= 0) {
3373                 msg.msg_iov->iov_base = host_msg;
3374                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3375             }
3376             g_free(host_msg);
3377         } else {
3378             ret = target_to_host_cmsg(&msg, msgp);
3379             if (ret == 0) {
3380                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3381             }
3382         }
3383     } else {
3384         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3385         if (!is_error(ret)) {
3386             len = ret;
3387             if (fd_trans_host_to_target_data(fd)) {
3388                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3389                                                MIN(msg.msg_iov->iov_len, len));
3390             } else {
3391                 ret = host_to_target_cmsg(msgp, &msg);
3392             }
3393             if (!is_error(ret)) {
3394                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3395                 msgp->msg_flags = tswap32(msg.msg_flags);
3396                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3397                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3398                                     msg.msg_name, msg.msg_namelen);
3399                     if (ret) {
3400                         goto out;
3401                     }
3402                 }
3403 
3404                 ret = len;
3405             }
3406         }
3407     }
3408 
3409 out:
3410     unlock_iovec(vec, target_vec, count, !send);
3411 out2:
3412     return ret;
3413 }
3414 
3415 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3416                                int flags, int send)
3417 {
3418     abi_long ret;
3419     struct target_msghdr *msgp;
3420 
3421     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3422                           msgp,
3423                           target_msg,
3424                           send ? 1 : 0)) {
3425         return -TARGET_EFAULT;
3426     }
3427     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3428     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3429     return ret;
3430 }
3431 
3432 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3433  * so it might not have this *mmsg-specific flag either.
3434  */
3435 #ifndef MSG_WAITFORONE
3436 #define MSG_WAITFORONE 0x10000
3437 #endif
3438 
3439 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3440                                 unsigned int vlen, unsigned int flags,
3441                                 int send)
3442 {
3443     struct target_mmsghdr *mmsgp;
3444     abi_long ret = 0;
3445     int i;
3446 
3447     if (vlen > UIO_MAXIOV) {
3448         vlen = UIO_MAXIOV;
3449     }
3450 
3451     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3452     if (!mmsgp) {
3453         return -TARGET_EFAULT;
3454     }
3455 
3456     for (i = 0; i < vlen; i++) {
3457         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3458         if (is_error(ret)) {
3459             break;
3460         }
3461         mmsgp[i].msg_len = tswap32(ret);
3462         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3463         if (flags & MSG_WAITFORONE) {
3464             flags |= MSG_DONTWAIT;
3465         }
3466     }
3467 
3468     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3469 
3470     /* Return number of datagrams sent if we sent any at all;
3471      * otherwise return the error.
3472      */
3473     if (i) {
3474         return i;
3475     }
3476     return ret;
3477 }
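
/*
 * Illustrative sketch (not part of the original source, assumes a host libc
 * that provides recvmmsg()): the semantics the loop above emulates.  A guest
 * calling recvmmsg() with MSG_WAITFORONE blocks only for the first datagram;
 * after one message is received the remaining iterations behave as if
 * MSG_DONTWAIT were set, so the call returns once the queue is drained.
 */
#if 0
static int example_recvmmsg_waitforone(int fd, struct mmsghdr *msgs,
                                       unsigned int vlen)
{
    /* Blocks for the first message only; later messages are non-blocking. */
    return recvmmsg(fd, msgs, vlen, MSG_WAITFORONE, NULL);
}
#endif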
3478 
3479 /* do_accept4() Must return target values and target errnos. */
3480 static abi_long do_accept4(int fd, abi_ulong target_addr,
3481                            abi_ulong target_addrlen_addr, int flags)
3482 {
3483     socklen_t addrlen, ret_addrlen;
3484     void *addr;
3485     abi_long ret;
3486     int host_flags;
3487 
3488     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3489 
3490     if (target_addr == 0) {
3491         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3492     }
3493 
3494     /* linux returns EFAULT if addrlen pointer is invalid */
3495     if (get_user_u32(addrlen, target_addrlen_addr))
3496         return -TARGET_EFAULT;
3497 
3498     if ((int)addrlen < 0) {
3499         return -TARGET_EINVAL;
3500     }
3501 
3502     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3503         return -TARGET_EFAULT;
3504 
3505     addr = alloca(addrlen);
3506 
3507     ret_addrlen = addrlen;
3508     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3509     if (!is_error(ret)) {
3510         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3511         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3512             ret = -TARGET_EFAULT;
3513         }
3514     }
3515     return ret;
3516 }
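
/*
 * Illustrative sketch (not part of the original source): the value/result
 * addrlen handling above from the guest side.  The guest supplies its buffer
 * size, the kernel reports the real address length, and do_accept4() copies
 * back MIN(addrlen, ret_addrlen) bytes of address while always storing the
 * full ret_addrlen, matching accept(2) semantics for undersized buffers.
 */
#if 0
static int example_guest_accept(int listen_fd)
{
    struct sockaddr_storage ss;
    socklen_t len = sizeof(ss);     /* in: buffer size, out: real length */

    return accept(listen_fd, (struct sockaddr *)&ss, &len);
}
#endif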
3517 
3518 /* do_getpeername() Must return target values and target errnos. */
3519 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3520                                abi_ulong target_addrlen_addr)
3521 {
3522     socklen_t addrlen, ret_addrlen;
3523     void *addr;
3524     abi_long ret;
3525 
3526     if (get_user_u32(addrlen, target_addrlen_addr))
3527         return -TARGET_EFAULT;
3528 
3529     if ((int)addrlen < 0) {
3530         return -TARGET_EINVAL;
3531     }
3532 
3533     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3534         return -TARGET_EFAULT;
3535 
3536     addr = alloca(addrlen);
3537 
3538     ret_addrlen = addrlen;
3539     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3540     if (!is_error(ret)) {
3541         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3542         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3543             ret = -TARGET_EFAULT;
3544         }
3545     }
3546     return ret;
3547 }
3548 
3549 /* do_getsockname() Must return target values and target errnos. */
3550 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3551                                abi_ulong target_addrlen_addr)
3552 {
3553     socklen_t addrlen, ret_addrlen;
3554     void *addr;
3555     abi_long ret;
3556 
3557     if (get_user_u32(addrlen, target_addrlen_addr))
3558         return -TARGET_EFAULT;
3559 
3560     if ((int)addrlen < 0) {
3561         return -TARGET_EINVAL;
3562     }
3563 
3564     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3565         return -TARGET_EFAULT;
3566 
3567     addr = alloca(addrlen);
3568 
3569     ret_addrlen = addrlen;
3570     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3571     if (!is_error(ret)) {
3572         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3573         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3574             ret = -TARGET_EFAULT;
3575         }
3576     }
3577     return ret;
3578 }
3579 
3580 /* do_socketpair() Must return target values and target errnos. */
3581 static abi_long do_socketpair(int domain, int type, int protocol,
3582                               abi_ulong target_tab_addr)
3583 {
3584     int tab[2];
3585     abi_long ret;
3586 
3587     target_to_host_sock_type(&type);
3588 
3589     ret = get_errno(socketpair(domain, type, protocol, tab));
3590     if (!is_error(ret)) {
3591         if (put_user_s32(tab[0], target_tab_addr)
3592             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3593             ret = -TARGET_EFAULT;
3594     }
3595     return ret;
3596 }
3597 
3598 /* do_sendto() Must return target values and target errnos. */
3599 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3600                           abi_ulong target_addr, socklen_t addrlen)
3601 {
3602     void *addr;
3603     void *host_msg;
3604     void *copy_msg = NULL;
3605     abi_long ret;
3606 
3607     if ((int)addrlen < 0) {
3608         return -TARGET_EINVAL;
3609     }
3610 
3611     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3612     if (!host_msg)
3613         return -TARGET_EFAULT;
3614     if (fd_trans_target_to_host_data(fd)) {
3615         copy_msg = host_msg;
3616         host_msg = g_malloc(len);
3617         memcpy(host_msg, copy_msg, len);
3618         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3619         if (ret < 0) {
3620             goto fail;
3621         }
3622     }
3623     if (target_addr) {
3624         addr = alloca(addrlen+1);
3625         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3626         if (ret) {
3627             goto fail;
3628         }
3629         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3630     } else {
3631         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3632     }
3633 fail:
3634     if (copy_msg) {
3635         g_free(host_msg);
3636         host_msg = copy_msg;
3637     }
3638     unlock_user(host_msg, msg, 0);
3639     return ret;
3640 }
3641 
3642 /* do_recvfrom() Must return target values and target errnos. */
3643 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3644                             abi_ulong target_addr,
3645                             abi_ulong target_addrlen)
3646 {
3647     socklen_t addrlen, ret_addrlen;
3648     void *addr;
3649     void *host_msg;
3650     abi_long ret;
3651 
3652     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3653     if (!host_msg)
3654         return -TARGET_EFAULT;
3655     if (target_addr) {
3656         if (get_user_u32(addrlen, target_addrlen)) {
3657             ret = -TARGET_EFAULT;
3658             goto fail;
3659         }
3660         if ((int)addrlen < 0) {
3661             ret = -TARGET_EINVAL;
3662             goto fail;
3663         }
3664         addr = alloca(addrlen);
3665         ret_addrlen = addrlen;
3666         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3667                                       addr, &ret_addrlen));
3668     } else {
3669         addr = NULL; /* To keep compiler quiet.  */
3670         addrlen = 0; /* To keep compiler quiet.  */
3671         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3672     }
3673     if (!is_error(ret)) {
3674         if (fd_trans_host_to_target_data(fd)) {
3675             abi_long trans;
3676             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3677             if (is_error(trans)) {
3678                 ret = trans;
3679                 goto fail;
3680             }
3681         }
3682         if (target_addr) {
3683             host_to_target_sockaddr(target_addr, addr,
3684                                     MIN(addrlen, ret_addrlen));
3685             if (put_user_u32(ret_addrlen, target_addrlen)) {
3686                 ret = -TARGET_EFAULT;
3687                 goto fail;
3688             }
3689         }
3690         unlock_user(host_msg, msg, len);
3691     } else {
3692 fail:
3693         unlock_user(host_msg, msg, 0);
3694     }
3695     return ret;
3696 }
3697 
3698 #ifdef TARGET_NR_socketcall
3699 /* do_socketcall() must return target values and target errnos. */
3700 static abi_long do_socketcall(int num, abi_ulong vptr)
3701 {
3702     static const unsigned nargs[] = { /* number of arguments per operation */
3703         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3704         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3705         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3706         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3707         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3708         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3709         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3710         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3711         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3712         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3713         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3714         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3715         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3716         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3717         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3718         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3719         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3720         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3721         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3722         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3723     };
3724     abi_long a[6]; /* max 6 args */
3725     unsigned i;
3726 
3727     /* check the range of the first argument num */
3728     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3729     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3730         return -TARGET_EINVAL;
3731     }
3732     /* ensure we have space for args */
3733     if (nargs[num] > ARRAY_SIZE(a)) {
3734         return -TARGET_EINVAL;
3735     }
3736     /* collect the arguments in a[] according to nargs[] */
3737     for (i = 0; i < nargs[num]; ++i) {
3738         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3739             return -TARGET_EFAULT;
3740         }
3741     }
3742     /* now when we have the args, invoke the appropriate underlying function */
3743     switch (num) {
3744     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3745         return do_socket(a[0], a[1], a[2]);
3746     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3747         return do_bind(a[0], a[1], a[2]);
3748     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3749         return do_connect(a[0], a[1], a[2]);
3750     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3751         return get_errno(listen(a[0], a[1]));
3752     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3753         return do_accept4(a[0], a[1], a[2], 0);
3754     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3755         return do_getsockname(a[0], a[1], a[2]);
3756     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3757         return do_getpeername(a[0], a[1], a[2]);
3758     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3759         return do_socketpair(a[0], a[1], a[2], a[3]);
3760     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3761         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3762     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3763         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3764     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3765         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3766     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3767         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3768     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3769         return get_errno(shutdown(a[0], a[1]));
3770     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3771         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3772     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3773         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3774     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3775         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3776     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3777         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3778     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3779         return do_accept4(a[0], a[1], a[2], a[3]);
3780     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3781         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3782     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3783         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3784     default:
3785         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3786         return -TARGET_EINVAL;
3787     }
3788 }
3789 #endif
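
/*
 * Illustrative sketch (not part of the original source): the guest-side shape
 * of the multiplexed call that do_socketcall() above unpacks.  The guest
 * stores the real arguments as an array of longs and passes only the call
 * number and a pointer to that array; QEMU then fetches nargs[num] values
 * with get_user_ual() and dispatches.  The constant 1 is the guest's
 * SYS_SOCKET call number, and __NR_socketcall only exists on targets that
 * use this multiplexer.
 */
#if 0
static int example_guest_socket(int domain, int type, int protocol)
{
    long args[3] = { domain, type, protocol };

    /* Equivalent to socket(domain, type, protocol) on such targets. */
    return syscall(__NR_socketcall, 1 /* SYS_SOCKET */, args);
}
#endif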
3790 
3791 #define N_SHM_REGIONS	32
3792 
3793 static struct shm_region {
3794     abi_ulong start;
3795     abi_ulong size;
3796     bool in_use;
3797 } shm_regions[N_SHM_REGIONS];
3798 
3799 #ifndef TARGET_SEMID64_DS
3800 /* asm-generic version of this struct */
3801 struct target_semid64_ds
3802 {
3803   struct target_ipc_perm sem_perm;
3804   abi_ulong sem_otime;
3805 #if TARGET_ABI_BITS == 32
3806   abi_ulong __unused1;
3807 #endif
3808   abi_ulong sem_ctime;
3809 #if TARGET_ABI_BITS == 32
3810   abi_ulong __unused2;
3811 #endif
3812   abi_ulong sem_nsems;
3813   abi_ulong __unused3;
3814   abi_ulong __unused4;
3815 };
3816 #endif
3817 
3818 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3819                                                abi_ulong target_addr)
3820 {
3821     struct target_ipc_perm *target_ip;
3822     struct target_semid64_ds *target_sd;
3823 
3824     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3825         return -TARGET_EFAULT;
3826     target_ip = &(target_sd->sem_perm);
3827     host_ip->__key = tswap32(target_ip->__key);
3828     host_ip->uid = tswap32(target_ip->uid);
3829     host_ip->gid = tswap32(target_ip->gid);
3830     host_ip->cuid = tswap32(target_ip->cuid);
3831     host_ip->cgid = tswap32(target_ip->cgid);
3832 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3833     host_ip->mode = tswap32(target_ip->mode);
3834 #else
3835     host_ip->mode = tswap16(target_ip->mode);
3836 #endif
3837 #if defined(TARGET_PPC)
3838     host_ip->__seq = tswap32(target_ip->__seq);
3839 #else
3840     host_ip->__seq = tswap16(target_ip->__seq);
3841 #endif
3842     unlock_user_struct(target_sd, target_addr, 0);
3843     return 0;
3844 }
3845 
3846 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3847                                                struct ipc_perm *host_ip)
3848 {
3849     struct target_ipc_perm *target_ip;
3850     struct target_semid64_ds *target_sd;
3851 
3852     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3853         return -TARGET_EFAULT;
3854     target_ip = &(target_sd->sem_perm);
3855     target_ip->__key = tswap32(host_ip->__key);
3856     target_ip->uid = tswap32(host_ip->uid);
3857     target_ip->gid = tswap32(host_ip->gid);
3858     target_ip->cuid = tswap32(host_ip->cuid);
3859     target_ip->cgid = tswap32(host_ip->cgid);
3860 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3861     target_ip->mode = tswap32(host_ip->mode);
3862 #else
3863     target_ip->mode = tswap16(host_ip->mode);
3864 #endif
3865 #if defined(TARGET_PPC)
3866     target_ip->__seq = tswap32(host_ip->__seq);
3867 #else
3868     target_ip->__seq = tswap16(host_ip->__seq);
3869 #endif
3870     unlock_user_struct(target_sd, target_addr, 1);
3871     return 0;
3872 }
3873 
3874 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3875                                                abi_ulong target_addr)
3876 {
3877     struct target_semid64_ds *target_sd;
3878 
3879     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3880         return -TARGET_EFAULT;
3881     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3882         return -TARGET_EFAULT;
3883     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3884     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3885     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3886     unlock_user_struct(target_sd, target_addr, 0);
3887     return 0;
3888 }
3889 
3890 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3891                                                struct semid_ds *host_sd)
3892 {
3893     struct target_semid64_ds *target_sd;
3894 
3895     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3896         return -TARGET_EFAULT;
3897     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3898         return -TARGET_EFAULT;
3899     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3900     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3901     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3902     unlock_user_struct(target_sd, target_addr, 1);
3903     return 0;
3904 }
3905 
3906 struct target_seminfo {
3907     int semmap;
3908     int semmni;
3909     int semmns;
3910     int semmnu;
3911     int semmsl;
3912     int semopm;
3913     int semume;
3914     int semusz;
3915     int semvmx;
3916     int semaem;
3917 };
3918 
3919 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3920                                               struct seminfo *host_seminfo)
3921 {
3922     struct target_seminfo *target_seminfo;
3923     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3924         return -TARGET_EFAULT;
3925     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3926     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3927     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3928     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3929     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3930     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3931     __put_user(host_seminfo->semume, &target_seminfo->semume);
3932     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3933     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3934     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3935     unlock_user_struct(target_seminfo, target_addr, 1);
3936     return 0;
3937 }
3938 
3939 union semun {
3940 	int val;
3941 	struct semid_ds *buf;
3942 	unsigned short *array;
3943 	struct seminfo *__buf;
3944 };
3945 
3946 union target_semun {
3947 	int val;
3948 	abi_ulong buf;
3949 	abi_ulong array;
3950 	abi_ulong __buf;
3951 };
3952 
3953 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3954                                                abi_ulong target_addr)
3955 {
3956     int nsems;
3957     unsigned short *array;
3958     union semun semun;
3959     struct semid_ds semid_ds;
3960     int i, ret;
3961 
3962     semun.buf = &semid_ds;
3963 
3964     ret = semctl(semid, 0, IPC_STAT, semun);
3965     if (ret == -1)
3966         return get_errno(ret);
3967 
3968     nsems = semid_ds.sem_nsems;
3969 
3970     *host_array = g_try_new(unsigned short, nsems);
3971     if (!*host_array) {
3972         return -TARGET_ENOMEM;
3973     }
3974     array = lock_user(VERIFY_READ, target_addr,
3975                       nsems*sizeof(unsigned short), 1);
3976     if (!array) {
3977         g_free(*host_array);
3978         return -TARGET_EFAULT;
3979     }
3980 
3981     for(i=0; i<nsems; i++) {
3982         __get_user((*host_array)[i], &array[i]);
3983     }
3984     unlock_user(array, target_addr, 0);
3985 
3986     return 0;
3987 }
3988 
3989 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3990                                                unsigned short **host_array)
3991 {
3992     int nsems;
3993     unsigned short *array;
3994     union semun semun;
3995     struct semid_ds semid_ds;
3996     int i, ret;
3997 
3998     semun.buf = &semid_ds;
3999 
4000     ret = semctl(semid, 0, IPC_STAT, semun);
4001     if (ret == -1)
4002         return get_errno(ret);
4003 
4004     nsems = semid_ds.sem_nsems;
4005 
4006     array = lock_user(VERIFY_WRITE, target_addr,
4007                       nsems*sizeof(unsigned short), 0);
4008     if (!array)
4009         return -TARGET_EFAULT;
4010 
4011     for(i=0; i<nsems; i++) {
4012         __put_user((*host_array)[i], &array[i]);
4013     }
4014     g_free(*host_array);
4015     unlock_user(array, target_addr, 1);
4016 
4017     return 0;
4018 }
4019 
4020 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4021                                  abi_ulong target_arg)
4022 {
4023     union target_semun target_su = { .buf = target_arg };
4024     union semun arg;
4025     struct semid_ds dsarg;
4026     unsigned short *array = NULL;
4027     struct seminfo seminfo;
4028     abi_long ret = -TARGET_EINVAL;
4029     abi_long err;
4030     cmd &= 0xff;
4031 
4032     switch( cmd ) {
4033 	case GETVAL:
4034 	case SETVAL:
4035             /* In 64 bit cross-endian situations, we will erroneously pick up
4036              * the wrong half of the union for the "val" element.  To rectify
4037              * this, the entire 8-byte structure is byteswapped, followed by
4038 	     * a swap of the 4 byte val field. In other cases, the data is
4039 	     * already in proper host byte order. */
4040 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4041 		target_su.buf = tswapal(target_su.buf);
4042 		arg.val = tswap32(target_su.val);
4043 	    } else {
4044 		arg.val = target_su.val;
4045 	    }
4046             ret = get_errno(semctl(semid, semnum, cmd, arg));
4047             break;
4048 	case GETALL:
4049 	case SETALL:
4050             err = target_to_host_semarray(semid, &array, target_su.array);
4051             if (err)
4052                 return err;
4053             arg.array = array;
4054             ret = get_errno(semctl(semid, semnum, cmd, arg));
4055             err = host_to_target_semarray(semid, target_su.array, &array);
4056             if (err)
4057                 return err;
4058             break;
4059 	case IPC_STAT:
4060 	case IPC_SET:
4061 	case SEM_STAT:
4062             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4063             if (err)
4064                 return err;
4065             arg.buf = &dsarg;
4066             ret = get_errno(semctl(semid, semnum, cmd, arg));
4067             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4068             if (err)
4069                 return err;
4070             break;
4071 	case IPC_INFO:
4072 	case SEM_INFO:
4073             arg.__buf = &seminfo;
4074             ret = get_errno(semctl(semid, semnum, cmd, arg));
4075             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4076             if (err)
4077                 return err;
4078             break;
4079 	case IPC_RMID:
4080 	case GETPID:
4081 	case GETNCNT:
4082 	case GETZCNT:
4083             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4084             break;
4085     }
4086 
4087     return ret;
4088 }
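
/*
 * Illustrative sketch (not part of the original source): the guest-side view
 * of the GETALL path handled above.  The guest passes a union semun whose
 * "array" member points at nsems unsigned shorts; target_to_host_semarray()
 * sizes the host copy by asking the kernel with IPC_STAT and copies the
 * values element by element so their byte order matches the host.
 */
#if 0
static int example_guest_getall(int semid, unsigned short *vals)
{
    union semun arg;

    arg.array = vals;               /* guest buffer with nsems entries */
    return semctl(semid, 0, GETALL, arg);
}
#endif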
4089 
4090 struct target_sembuf {
4091     unsigned short sem_num;
4092     short sem_op;
4093     short sem_flg;
4094 };
4095 
4096 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4097                                              abi_ulong target_addr,
4098                                              unsigned nsops)
4099 {
4100     struct target_sembuf *target_sembuf;
4101     int i;
4102 
4103     target_sembuf = lock_user(VERIFY_READ, target_addr,
4104                               nsops*sizeof(struct target_sembuf), 1);
4105     if (!target_sembuf)
4106         return -TARGET_EFAULT;
4107 
4108     for(i=0; i<nsops; i++) {
4109         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4110         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4111         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4112     }
4113 
4114     unlock_user(target_sembuf, target_addr, 0);
4115 
4116     return 0;
4117 }
4118 
4119 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4120     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4121 
4122 /*
4123  * This macro is required to handle the s390 variants, which pass the
4124  * arguments in a different order than the default.
4125  */
4126 #ifdef __s390x__
4127 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4128   (__nsops), (__timeout), (__sops)
4129 #else
4130 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4131   (__nsops), 0, (__sops), (__timeout)
4132 #endif
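
/*
 * Expansion sketch (illustrative, not part of the original source): how the
 * SEMTIMEDOP_IPC_ARGS() macro above rewrites the safe_ipc() call made in
 * do_semtimedop() below for the two sys_ipc calling conventions.
 */
#if 0
/* Default hosts: six-argument sys_ipc, timeout passed last after a 0 pad. */
ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                         nsops, 0, sops, (long)pts));
/* s390x: five-argument sys_ipc, timeout takes the third argument slot. */
ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                         nsops, (long)pts, sops));
#endif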
4133 
4134 static inline abi_long do_semtimedop(int semid,
4135                                      abi_long ptr,
4136                                      unsigned nsops,
4137                                      abi_long timeout, bool time64)
4138 {
4139     struct sembuf *sops;
4140     struct timespec ts, *pts = NULL;
4141     abi_long ret;
4142 
4143     if (timeout) {
4144         pts = &ts;
4145         if (time64) {
4146             if (target_to_host_timespec64(pts, timeout)) {
4147                 return -TARGET_EFAULT;
4148             }
4149         } else {
4150             if (target_to_host_timespec(pts, timeout)) {
4151                 return -TARGET_EFAULT;
4152             }
4153         }
4154     }
4155 
4156     if (nsops > TARGET_SEMOPM) {
4157         return -TARGET_E2BIG;
4158     }
4159 
4160     sops = g_new(struct sembuf, nsops);
4161 
4162     if (target_to_host_sembuf(sops, ptr, nsops)) {
4163         g_free(sops);
4164         return -TARGET_EFAULT;
4165     }
4166 
4167     ret = -TARGET_ENOSYS;
4168 #ifdef __NR_semtimedop
4169     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4170 #endif
4171 #ifdef __NR_ipc
4172     if (ret == -TARGET_ENOSYS) {
4173         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4174                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4175     }
4176 #endif
4177     g_free(sops);
4178     return ret;
4179 }
4180 #endif
4181 
4182 struct target_msqid_ds
4183 {
4184     struct target_ipc_perm msg_perm;
4185     abi_ulong msg_stime;
4186 #if TARGET_ABI_BITS == 32
4187     abi_ulong __unused1;
4188 #endif
4189     abi_ulong msg_rtime;
4190 #if TARGET_ABI_BITS == 32
4191     abi_ulong __unused2;
4192 #endif
4193     abi_ulong msg_ctime;
4194 #if TARGET_ABI_BITS == 32
4195     abi_ulong __unused3;
4196 #endif
4197     abi_ulong __msg_cbytes;
4198     abi_ulong msg_qnum;
4199     abi_ulong msg_qbytes;
4200     abi_ulong msg_lspid;
4201     abi_ulong msg_lrpid;
4202     abi_ulong __unused4;
4203     abi_ulong __unused5;
4204 };
4205 
4206 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4207                                                abi_ulong target_addr)
4208 {
4209     struct target_msqid_ds *target_md;
4210 
4211     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4212         return -TARGET_EFAULT;
4213     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4214         return -TARGET_EFAULT;
4215     host_md->msg_stime = tswapal(target_md->msg_stime);
4216     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4217     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4218     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4219     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4220     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4221     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4222     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4223     unlock_user_struct(target_md, target_addr, 0);
4224     return 0;
4225 }
4226 
4227 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4228                                                struct msqid_ds *host_md)
4229 {
4230     struct target_msqid_ds *target_md;
4231 
4232     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4233         return -TARGET_EFAULT;
4234     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4235         return -TARGET_EFAULT;
4236     target_md->msg_stime = tswapal(host_md->msg_stime);
4237     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4238     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4239     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4240     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4241     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4242     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4243     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4244     unlock_user_struct(target_md, target_addr, 1);
4245     return 0;
4246 }
4247 
4248 struct target_msginfo {
4249     int msgpool;
4250     int msgmap;
4251     int msgmax;
4252     int msgmnb;
4253     int msgmni;
4254     int msgssz;
4255     int msgtql;
4256     unsigned short int msgseg;
4257 };
4258 
4259 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4260                                               struct msginfo *host_msginfo)
4261 {
4262     struct target_msginfo *target_msginfo;
4263     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4264         return -TARGET_EFAULT;
4265     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4266     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4267     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4268     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4269     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4270     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4271     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4272     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4273     unlock_user_struct(target_msginfo, target_addr, 1);
4274     return 0;
4275 }
4276 
4277 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4278 {
4279     struct msqid_ds dsarg;
4280     struct msginfo msginfo;
4281     abi_long ret = -TARGET_EINVAL;
4282 
4283     cmd &= 0xff;
4284 
4285     switch (cmd) {
4286     case IPC_STAT:
4287     case IPC_SET:
4288     case MSG_STAT:
4289         if (target_to_host_msqid_ds(&dsarg,ptr))
4290             return -TARGET_EFAULT;
4291         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4292         if (host_to_target_msqid_ds(ptr,&dsarg))
4293             return -TARGET_EFAULT;
4294         break;
4295     case IPC_RMID:
4296         ret = get_errno(msgctl(msgid, cmd, NULL));
4297         break;
4298     case IPC_INFO:
4299     case MSG_INFO:
4300         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4301         if (host_to_target_msginfo(ptr, &msginfo))
4302             return -TARGET_EFAULT;
4303         break;
4304     }
4305 
4306     return ret;
4307 }
4308 
4309 struct target_msgbuf {
4310     abi_long mtype;
4311     char	mtext[1];
4312 };
4313 
4314 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4315                                  ssize_t msgsz, int msgflg)
4316 {
4317     struct target_msgbuf *target_mb;
4318     struct msgbuf *host_mb;
4319     abi_long ret = 0;
4320 
4321     if (msgsz < 0) {
4322         return -TARGET_EINVAL;
4323     }
4324 
4325     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4326         return -TARGET_EFAULT;
4327     host_mb = g_try_malloc(msgsz + sizeof(long));
4328     if (!host_mb) {
4329         unlock_user_struct(target_mb, msgp, 0);
4330         return -TARGET_ENOMEM;
4331     }
4332     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4333     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4334     ret = -TARGET_ENOSYS;
4335 #ifdef __NR_msgsnd
4336     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4337 #endif
4338 #ifdef __NR_ipc
4339     if (ret == -TARGET_ENOSYS) {
4340 #ifdef __s390x__
4341         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4342                                  host_mb));
4343 #else
4344         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4345                                  host_mb, 0));
4346 #endif
4347     }
4348 #endif
4349     g_free(host_mb);
4350     unlock_user_struct(target_mb, msgp, 0);
4351 
4352     return ret;
4353 }
4354 
4355 #ifdef __NR_ipc
4356 #if defined(__sparc__)
4357 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4358 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4359 #elif defined(__s390x__)
4360 /* The s390 sys_ipc variant has only five parameters.  */
4361 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4362     ((long int[]){(long int)__msgp, __msgtyp})
4363 #else
4364 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4365     ((long int[]){(long int)__msgp, __msgtyp}), 0
4366 #endif
4367 #endif
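
/*
 * Expansion sketch (illustrative, not part of the original source): how
 * MSGRCV_ARGS() above packs the final two msgrcv arguments for the
 * safe_ipc() call in do_msgrcv() below.
 */
#if 0
/* Default hosts: pointer and type go through a temporary array ("kludge"),
 * followed by a 0 pad argument. */
ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
                         ((long int[]){(long int)host_mb, msgtyp}), 0));
/* SPARC: the two values are passed directly, no temporary array. */
ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
                         host_mb, msgtyp));
#endif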
4368 
4369 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4370                                  ssize_t msgsz, abi_long msgtyp,
4371                                  int msgflg)
4372 {
4373     struct target_msgbuf *target_mb;
4374     char *target_mtext;
4375     struct msgbuf *host_mb;
4376     abi_long ret = 0;
4377 
4378     if (msgsz < 0) {
4379         return -TARGET_EINVAL;
4380     }
4381 
4382     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4383         return -TARGET_EFAULT;
4384 
4385     host_mb = g_try_malloc(msgsz + sizeof(long));
4386     if (!host_mb) {
4387         ret = -TARGET_ENOMEM;
4388         goto end;
4389     }
4390     ret = -TARGET_ENOSYS;
4391 #ifdef __NR_msgrcv
4392     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4393 #endif
4394 #ifdef __NR_ipc
4395     if (ret == -TARGET_ENOSYS) {
4396         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4397                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4398     }
4399 #endif
4400 
4401     if (ret > 0) {
4402         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4403         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4404         if (!target_mtext) {
4405             ret = -TARGET_EFAULT;
4406             goto end;
4407         }
4408         memcpy(target_mb->mtext, host_mb->mtext, ret);
4409         unlock_user(target_mtext, target_mtext_addr, ret);
4410     }
4411 
4412     target_mb->mtype = tswapal(host_mb->mtype);
4413 
4414 end:
4415     if (target_mb)
4416         unlock_user_struct(target_mb, msgp, 1);
4417     g_free(host_mb);
4418     return ret;
4419 }
4420 
4421 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4422                                                abi_ulong target_addr)
4423 {
4424     struct target_shmid_ds *target_sd;
4425 
4426     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4427         return -TARGET_EFAULT;
4428     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4429         return -TARGET_EFAULT;
4430     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4431     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4432     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4433     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4434     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4435     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4436     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4437     unlock_user_struct(target_sd, target_addr, 0);
4438     return 0;
4439 }
4440 
4441 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4442                                                struct shmid_ds *host_sd)
4443 {
4444     struct target_shmid_ds *target_sd;
4445 
4446     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4447         return -TARGET_EFAULT;
4448     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4449         return -TARGET_EFAULT;
4450     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4451     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4452     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4453     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4454     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4455     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4456     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4457     unlock_user_struct(target_sd, target_addr, 1);
4458     return 0;
4459 }
4460 
4461 struct  target_shminfo {
4462     abi_ulong shmmax;
4463     abi_ulong shmmin;
4464     abi_ulong shmmni;
4465     abi_ulong shmseg;
4466     abi_ulong shmall;
4467 };
4468 
4469 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4470                                               struct shminfo *host_shminfo)
4471 {
4472     struct target_shminfo *target_shminfo;
4473     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4474         return -TARGET_EFAULT;
4475     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4476     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4477     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4478     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4479     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4480     unlock_user_struct(target_shminfo, target_addr, 1);
4481     return 0;
4482 }
4483 
4484 struct target_shm_info {
4485     int used_ids;
4486     abi_ulong shm_tot;
4487     abi_ulong shm_rss;
4488     abi_ulong shm_swp;
4489     abi_ulong swap_attempts;
4490     abi_ulong swap_successes;
4491 };
4492 
4493 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4494                                                struct shm_info *host_shm_info)
4495 {
4496     struct target_shm_info *target_shm_info;
4497     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4498         return -TARGET_EFAULT;
4499     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4500     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4501     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4502     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4503     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4504     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4505     unlock_user_struct(target_shm_info, target_addr, 1);
4506     return 0;
4507 }
4508 
4509 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4510 {
4511     struct shmid_ds dsarg;
4512     struct shminfo shminfo;
4513     struct shm_info shm_info;
4514     abi_long ret = -TARGET_EINVAL;
4515 
4516     cmd &= 0xff;
4517 
4518     switch(cmd) {
4519     case IPC_STAT:
4520     case IPC_SET:
4521     case SHM_STAT:
4522         if (target_to_host_shmid_ds(&dsarg, buf))
4523             return -TARGET_EFAULT;
4524         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4525         if (host_to_target_shmid_ds(buf, &dsarg))
4526             return -TARGET_EFAULT;
4527         break;
4528     case IPC_INFO:
4529         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4530         if (host_to_target_shminfo(buf, &shminfo))
4531             return -TARGET_EFAULT;
4532         break;
4533     case SHM_INFO:
4534         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4535         if (host_to_target_shm_info(buf, &shm_info))
4536             return -TARGET_EFAULT;
4537         break;
4538     case IPC_RMID:
4539     case SHM_LOCK:
4540     case SHM_UNLOCK:
4541         ret = get_errno(shmctl(shmid, cmd, NULL));
4542         break;
4543     }
4544 
4545     return ret;
4546 }
4547 
4548 #ifndef TARGET_FORCE_SHMLBA
4549 /* For most architectures, SHMLBA is the same as the page size;
4550  * some architectures have larger values, in which case they should
4551  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4552  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4553  * and defining its own value for SHMLBA.
4554  *
4555  * The kernel also permits SHMLBA to be set by the architecture to a
4556  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4557  * this means that addresses are rounded to the large size if
4558  * SHM_RND is set but addresses not aligned to that size are not rejected
4559  * as long as they are at least page-aligned. Since the only architecture
4560  * which uses this is ia64 this code doesn't provide for that oddity.
4561  */
4562 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4563 {
4564     return TARGET_PAGE_SIZE;
4565 }
4566 #endif
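/*
 * For illustration only (this sketch is not used by the build): an
 * architecture needing a larger alignment would define TARGET_FORCE_SHMLBA
 * in its target_syscall.h and provide its own helper, e.g. with a
 * hypothetical four-page requirement:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */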
4567 
4568 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4569                                  int shmid, abi_ulong shmaddr, int shmflg)
4570 {
4571     abi_long raddr;
4572     void *host_raddr;
4573     struct shmid_ds shm_info;
4574     int i, ret;
4575     abi_ulong shmlba;
4576 
4577     /* find out the length of the shared memory segment */
4578     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4579     if (is_error(ret)) {
4580         /* can't get length, bail out */
4581         return ret;
4582     }
4583 
4584     shmlba = target_shmlba(cpu_env);
4585 
4586     if (shmaddr & (shmlba - 1)) {
4587         if (shmflg & SHM_RND) {
4588             shmaddr &= ~(shmlba - 1);
4589         } else {
4590             return -TARGET_EINVAL;
4591         }
4592     }
4593     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4594         return -TARGET_EINVAL;
4595     }
4596 
4597     mmap_lock();
4598 
4599     if (shmaddr)
4600         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4601     else {
4602         abi_ulong mmap_start;
4603 
4604         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4605         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4606 
4607         if (mmap_start == -1) {
4608             errno = ENOMEM;
4609             host_raddr = (void *)-1;
4610         } else
4611             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4612     }
4613 
4614     if (host_raddr == (void *)-1) {
4615         mmap_unlock();
4616         return get_errno((long)host_raddr);
4617     }
4618     raddr = h2g((unsigned long)host_raddr);
4619 
4620     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4621                    PAGE_VALID | PAGE_READ |
4622                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4623 
4624     for (i = 0; i < N_SHM_REGIONS; i++) {
4625         if (!shm_regions[i].in_use) {
4626             shm_regions[i].in_use = true;
4627             shm_regions[i].start = raddr;
4628             shm_regions[i].size = shm_info.shm_segsz;
4629             break;
4630         }
4631     }
4632 
4633     mmap_unlock();
4634     return raddr;
4635 
4636 }
4637 
4638 static inline abi_long do_shmdt(abi_ulong shmaddr)
4639 {
4640     int i;
4641     abi_long rv;
4642 
4643     mmap_lock();
4644 
4645     for (i = 0; i < N_SHM_REGIONS; ++i) {
4646         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4647             shm_regions[i].in_use = false;
4648             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4649             break;
4650         }
4651     }
4652     rv = get_errno(shmdt(g2h(shmaddr)));
4653 
4654     mmap_unlock();
4655 
4656     return rv;
4657 }
4658 
4659 #ifdef TARGET_NR_ipc
4660 /* ??? This only works with linear mappings.  */
4661 /* do_ipc() must return target values and target errnos. */
4662 static abi_long do_ipc(CPUArchState *cpu_env,
4663                        unsigned int call, abi_long first,
4664                        abi_long second, abi_long third,
4665                        abi_long ptr, abi_long fifth)
4666 {
4667     int version;
4668     abi_long ret = 0;
4669 
4670     version = call >> 16;
4671     call &= 0xffff;
4672 
4673     switch (call) {
4674     case IPCOP_semop:
4675         ret = do_semtimedop(first, ptr, second, 0, false);
4676         break;
4677     case IPCOP_semtimedop:
4678     /*
4679      * The s390 sys_ipc variant has only five parameters instead of six
4680      * (as for default variant) and the only difference is the handling of
4681      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4682      * to a struct timespec where the generic variant uses fifth parameter.
4683      */
4684 #if defined(TARGET_S390X)
4685         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4686 #else
4687         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4688 #endif
4689         break;
4690 
4691     case IPCOP_semget:
4692         ret = get_errno(semget(first, second, third));
4693         break;
4694 
4695     case IPCOP_semctl: {
4696         /* The semun argument to semctl is passed by value, so dereference the
4697          * ptr argument. */
4698         abi_ulong atptr;
4699         get_user_ual(atptr, ptr);
4700         ret = do_semctl(first, second, third, atptr);
4701         break;
4702     }
4703 
4704     case IPCOP_msgget:
4705         ret = get_errno(msgget(first, second));
4706         break;
4707 
4708     case IPCOP_msgsnd:
4709         ret = do_msgsnd(first, ptr, second, third);
4710         break;
4711 
4712     case IPCOP_msgctl:
4713         ret = do_msgctl(first, second, ptr);
4714         break;
4715 
4716     case IPCOP_msgrcv:
4717         switch (version) {
4718         case 0:
4719             {
4720                 struct target_ipc_kludge {
4721                     abi_long msgp;
4722                     abi_long msgtyp;
4723                 } *tmp;
4724 
4725                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4726                     ret = -TARGET_EFAULT;
4727                     break;
4728                 }
4729 
4730                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4731 
4732                 unlock_user_struct(tmp, ptr, 0);
4733                 break;
4734             }
4735         default:
4736             ret = do_msgrcv(first, ptr, second, fifth, third);
4737         }
4738         break;
4739 
4740     case IPCOP_shmat:
4741         switch (version) {
4742         default:
4743         {
4744             abi_ulong raddr;
4745             raddr = do_shmat(cpu_env, first, ptr, second);
4746             if (is_error(raddr))
4747                 return get_errno(raddr);
4748             if (put_user_ual(raddr, third))
4749                 return -TARGET_EFAULT;
4750             break;
4751         }
4752         case 1:
4753             ret = -TARGET_EINVAL;
4754             break;
4755         }
4756 	break;
4757     case IPCOP_shmdt:
4758         ret = do_shmdt(ptr);
4759 	break;
4760 
4761     case IPCOP_shmget:
4762 	/* IPC_* flag values are the same on all linux platforms */
4763 	ret = get_errno(shmget(first, second, third));
4764 	break;
4765 
4766 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4767     case IPCOP_shmctl:
4768         ret = do_shmctl(first, second, ptr);
4769         break;
4770     default:
4771         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4772                       call, version);
4773 	ret = -TARGET_ENOSYS;
4774 	break;
4775     }
4776     return ret;
4777 }
4778 #endif
4779 
4780 /* kernel structure types definitions */
4781 
4782 #define STRUCT(name, ...) STRUCT_ ## name,
4783 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4784 enum {
4785 #include "syscall_types.h"
4786 STRUCT_MAX
4787 };
4788 #undef STRUCT
4789 #undef STRUCT_SPECIAL
4790 
4791 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4792 #define STRUCT_SPECIAL(name)
4793 #include "syscall_types.h"
4794 #undef STRUCT
4795 #undef STRUCT_SPECIAL
4796 
4797 #define MAX_STRUCT_SIZE 4096
4798 
4799 #ifdef CONFIG_FIEMAP
4800 /* So fiemap access checks don't overflow on 32 bit systems.
4801  * This is very slightly smaller than the limit imposed by
4802  * the underlying kernel.
4803  */
4804 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4805                             / sizeof(struct fiemap_extent))
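/*
 * Rough scale, assuming typical sizes of 32 bytes for struct fiemap and
 * 56 bytes for struct fiemap_extent: the cap works out to about
 * (UINT_MAX - 32) / 56, i.e. on the order of 76 million extents, so only
 * absurdly large fm_extent_count values are rejected by this check.
 */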
4806 
4807 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4808                                        int fd, int cmd, abi_long arg)
4809 {
4810     /* The parameter for this ioctl is a struct fiemap followed
4811      * by an array of struct fiemap_extent whose size is set
4812      * in fiemap->fm_extent_count. The array is filled in by the
4813      * ioctl.
4814      */
4815     int target_size_in, target_size_out;
4816     struct fiemap *fm;
4817     const argtype *arg_type = ie->arg_type;
4818     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4819     void *argptr, *p;
4820     abi_long ret;
4821     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4822     uint32_t outbufsz;
4823     int free_fm = 0;
4824 
4825     assert(arg_type[0] == TYPE_PTR);
4826     assert(ie->access == IOC_RW);
4827     arg_type++;
4828     target_size_in = thunk_type_size(arg_type, 0);
4829     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4830     if (!argptr) {
4831         return -TARGET_EFAULT;
4832     }
4833     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4834     unlock_user(argptr, arg, 0);
4835     fm = (struct fiemap *)buf_temp;
4836     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4837         return -TARGET_EINVAL;
4838     }
4839 
4840     outbufsz = sizeof (*fm) +
4841         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4842 
4843     if (outbufsz > MAX_STRUCT_SIZE) {
4844         /* We can't fit all the extents into the fixed size buffer.
4845          * Allocate one that is large enough and use it instead.
4846          */
4847         fm = g_try_malloc(outbufsz);
4848         if (!fm) {
4849             return -TARGET_ENOMEM;
4850         }
4851         memcpy(fm, buf_temp, sizeof(struct fiemap));
4852         free_fm = 1;
4853     }
4854     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4855     if (!is_error(ret)) {
4856         target_size_out = target_size_in;
4857         /* An extent_count of 0 means we were only counting the extents
4858          * so there are no structs to copy
4859          */
4860         if (fm->fm_extent_count != 0) {
4861             target_size_out += fm->fm_mapped_extents * extent_size;
4862         }
4863         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4864         if (!argptr) {
4865             ret = -TARGET_EFAULT;
4866         } else {
4867             /* Convert the struct fiemap */
4868             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4869             if (fm->fm_extent_count != 0) {
4870                 p = argptr + target_size_in;
4871                 /* ...and then all the struct fiemap_extents */
4872                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4873                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4874                                   THUNK_TARGET);
4875                     p += extent_size;
4876                 }
4877             }
4878             unlock_user(argptr, arg, target_size_out);
4879         }
4880     }
4881     if (free_fm) {
4882         g_free(fm);
4883     }
4884     return ret;
4885 }
4886 #endif
4887 
4888 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4889                                 int fd, int cmd, abi_long arg)
4890 {
4891     const argtype *arg_type = ie->arg_type;
4892     int target_size;
4893     void *argptr;
4894     int ret;
4895     struct ifconf *host_ifconf;
4896     uint32_t outbufsz;
4897     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4898     int target_ifreq_size;
4899     int nb_ifreq;
4900     int free_buf = 0;
4901     int i;
4902     int target_ifc_len;
4903     abi_long target_ifc_buf;
4904     int host_ifc_len;
4905     char *host_ifc_buf;
4906 
4907     assert(arg_type[0] == TYPE_PTR);
4908     assert(ie->access == IOC_RW);
4909 
4910     arg_type++;
4911     target_size = thunk_type_size(arg_type, 0);
4912 
4913     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4914     if (!argptr)
4915         return -TARGET_EFAULT;
4916     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4917     unlock_user(argptr, arg, 0);
4918 
4919     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4920     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4921     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4922 
4923     if (target_ifc_buf != 0) {
4924         target_ifc_len = host_ifconf->ifc_len;
4925         nb_ifreq = target_ifc_len / target_ifreq_size;
4926         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4927 
4928         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4929         if (outbufsz > MAX_STRUCT_SIZE) {
4930             /*
4931              * We can't fit all the extents into the fixed size buffer.
4932              * Allocate one that is large enough and use it instead.
4933              */
4934             host_ifconf = malloc(outbufsz);
4935             if (!host_ifconf) {
4936                 return -TARGET_ENOMEM;
4937             }
4938             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4939             free_buf = 1;
4940         }
4941         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4942 
4943         host_ifconf->ifc_len = host_ifc_len;
4944     } else {
4945       host_ifc_buf = NULL;
4946     }
4947     host_ifconf->ifc_buf = host_ifc_buf;
4948 
4949     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4950     if (!is_error(ret)) {
4951 	/* convert host ifc_len to target ifc_len */
4952 
4953         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4954         target_ifc_len = nb_ifreq * target_ifreq_size;
4955         host_ifconf->ifc_len = target_ifc_len;
4956 
4957 	/* restore target ifc_buf */
4958 
4959         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4960 
4961 	/* copy struct ifconf to target user */
4962 
4963         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4964         if (!argptr)
4965             return -TARGET_EFAULT;
4966         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4967         unlock_user(argptr, arg, target_size);
4968 
4969         if (target_ifc_buf != 0) {
4970             /* copy ifreq[] to target user */
4971             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4972             for (i = 0; i < nb_ifreq ; i++) {
4973                 thunk_convert(argptr + i * target_ifreq_size,
4974                               host_ifc_buf + i * sizeof(struct ifreq),
4975                               ifreq_arg_type, THUNK_TARGET);
4976             }
4977             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4978         }
4979     }
4980 
4981     if (free_buf) {
4982         free(host_ifconf);
4983     }
4984 
4985     return ret;
4986 }
4987 
4988 #if defined(CONFIG_USBFS)
4989 #if HOST_LONG_BITS > 64
4990 #error USBDEVFS thunks do not support >64 bit hosts yet.
4991 #endif
4992 struct live_urb {
4993     uint64_t target_urb_adr;
4994     uint64_t target_buf_adr;
4995     char *target_buf_ptr;
4996     struct usbdevfs_urb host_urb;
4997 };
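/*
 * &lurb->host_urb is the pointer handed to the host kernel, so the value
 * that USBDEVFS_REAPURB returns can be mapped back to the enclosing
 * live_urb with offsetof(struct live_urb, host_urb).  target_urb_adr is
 * the guest's URB address and doubles as the hash table key, which is how
 * USBDEVFS_DISCARDURB (which only receives the guest address) finds the
 * matching host URB.
 */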
4998 
4999 static GHashTable *usbdevfs_urb_hashtable(void)
5000 {
5001     static GHashTable *urb_hashtable;
5002 
5003     if (!urb_hashtable) {
5004         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5005     }
5006     return urb_hashtable;
5007 }
5008 
5009 static void urb_hashtable_insert(struct live_urb *urb)
5010 {
5011     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5012     g_hash_table_insert(urb_hashtable, urb, urb);
5013 }
5014 
5015 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5016 {
5017     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5018     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5019 }
5020 
5021 static void urb_hashtable_remove(struct live_urb *urb)
5022 {
5023     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5024     g_hash_table_remove(urb_hashtable, urb);
5025 }
5026 
5027 static abi_long
5028 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5029                           int fd, int cmd, abi_long arg)
5030 {
5031     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5032     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5033     struct live_urb *lurb;
5034     void *argptr;
5035     uint64_t hurb;
5036     int target_size;
5037     uintptr_t target_urb_adr;
5038     abi_long ret;
5039 
5040     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5041 
5042     memset(buf_temp, 0, sizeof(uint64_t));
5043     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5044     if (is_error(ret)) {
5045         return ret;
5046     }
5047 
5048     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5049     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5050     if (!lurb->target_urb_adr) {
5051         return -TARGET_EFAULT;
5052     }
5053     urb_hashtable_remove(lurb);
5054     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5055         lurb->host_urb.buffer_length);
5056     lurb->target_buf_ptr = NULL;
5057 
5058     /* restore the guest buffer pointer */
5059     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5060 
5061     /* update the guest urb struct */
5062     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5063     if (!argptr) {
5064         g_free(lurb);
5065         return -TARGET_EFAULT;
5066     }
5067     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5068     unlock_user(argptr, lurb->target_urb_adr, target_size);
5069 
5070     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5071     /* write back the urb handle */
5072     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5073     if (!argptr) {
5074         g_free(lurb);
5075         return -TARGET_EFAULT;
5076     }
5077 
5078     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5079     target_urb_adr = lurb->target_urb_adr;
5080     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5081     unlock_user(argptr, arg, target_size);
5082 
5083     g_free(lurb);
5084     return ret;
5085 }
5086 
5087 static abi_long
5088 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5089                              uint8_t *buf_temp __attribute__((unused)),
5090                              int fd, int cmd, abi_long arg)
5091 {
5092     struct live_urb *lurb;
5093 
5094     /* map target address back to host URB with metadata. */
5095     lurb = urb_hashtable_lookup(arg);
5096     if (!lurb) {
5097         return -TARGET_EFAULT;
5098     }
5099     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5100 }
5101 
5102 static abi_long
5103 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5104                             int fd, int cmd, abi_long arg)
5105 {
5106     const argtype *arg_type = ie->arg_type;
5107     int target_size;
5108     abi_long ret;
5109     void *argptr;
5110     int rw_dir;
5111     struct live_urb *lurb;
5112 
5113     /*
5114      * each submitted URB needs to map to a unique ID for the
5115      * kernel, and that unique ID needs to be a pointer to
5116      * host memory.  hence, we need to malloc for each URB.
5117      * isochronous transfers have a variable length struct.
5118      */
5119     arg_type++;
5120     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5121 
5122     /* construct host copy of urb and metadata */
5123     lurb = g_try_malloc0(sizeof(struct live_urb));
5124     if (!lurb) {
5125         return -TARGET_ENOMEM;
5126     }
5127 
5128     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5129     if (!argptr) {
5130         g_free(lurb);
5131         return -TARGET_EFAULT;
5132     }
5133     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5134     unlock_user(argptr, arg, 0);
5135 
5136     lurb->target_urb_adr = arg;
5137     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5138 
5139     /* buffer space used depends on endpoint type so lock the entire buffer */
5140     /* control type urbs should check the buffer contents for true direction */
5141     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5142     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5143         lurb->host_urb.buffer_length, 1);
5144     if (lurb->target_buf_ptr == NULL) {
5145         g_free(lurb);
5146         return -TARGET_EFAULT;
5147     }
5148 
5149     /* update buffer pointer in host copy */
5150     lurb->host_urb.buffer = lurb->target_buf_ptr;
5151 
5152     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5153     if (is_error(ret)) {
5154         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5155         g_free(lurb);
5156     } else {
5157         urb_hashtable_insert(lurb);
5158     }
5159 
5160     return ret;
5161 }
5162 #endif /* CONFIG_USBFS */
5163 
5164 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5165                             int cmd, abi_long arg)
5166 {
5167     void *argptr;
5168     struct dm_ioctl *host_dm;
5169     abi_long guest_data;
5170     uint32_t guest_data_size;
5171     int target_size;
5172     const argtype *arg_type = ie->arg_type;
5173     abi_long ret;
5174     void *big_buf = NULL;
5175     char *host_data;
5176 
5177     arg_type++;
5178     target_size = thunk_type_size(arg_type, 0);
5179     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5180     if (!argptr) {
5181         ret = -TARGET_EFAULT;
5182         goto out;
5183     }
5184     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5185     unlock_user(argptr, arg, 0);
5186 
5187     /* buf_temp is too small, so fetch things into a bigger buffer */
5188     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5189     memcpy(big_buf, buf_temp, target_size);
5190     buf_temp = big_buf;
5191     host_dm = big_buf;
5192 
5193     guest_data = arg + host_dm->data_start;
5194     if ((guest_data - arg) < 0) {
5195         ret = -TARGET_EINVAL;
5196         goto out;
5197     }
5198     guest_data_size = host_dm->data_size - host_dm->data_start;
5199     host_data = (char*)host_dm + host_dm->data_start;
5200 
5201     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5202     if (!argptr) {
5203         ret = -TARGET_EFAULT;
5204         goto out;
5205     }
5206 
5207     switch (ie->host_cmd) {
5208     case DM_REMOVE_ALL:
5209     case DM_LIST_DEVICES:
5210     case DM_DEV_CREATE:
5211     case DM_DEV_REMOVE:
5212     case DM_DEV_SUSPEND:
5213     case DM_DEV_STATUS:
5214     case DM_DEV_WAIT:
5215     case DM_TABLE_STATUS:
5216     case DM_TABLE_CLEAR:
5217     case DM_TABLE_DEPS:
5218     case DM_LIST_VERSIONS:
5219         /* no input data */
5220         break;
5221     case DM_DEV_RENAME:
5222     case DM_DEV_SET_GEOMETRY:
5223         /* data contains only strings */
5224         memcpy(host_data, argptr, guest_data_size);
5225         break;
5226     case DM_TARGET_MSG:
5227         memcpy(host_data, argptr, guest_data_size);
5228         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5229         break;
5230     case DM_TABLE_LOAD:
5231     {
5232         void *gspec = argptr;
5233         void *cur_data = host_data;
5234         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5235         int spec_size = thunk_type_size(arg_type, 0);
5236         int i;
5237 
5238         for (i = 0; i < host_dm->target_count; i++) {
5239             struct dm_target_spec *spec = cur_data;
5240             uint32_t next;
5241             int slen;
5242 
5243             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5244             slen = strlen((char*)gspec + spec_size) + 1;
5245             next = spec->next;
5246             spec->next = sizeof(*spec) + slen;
5247             strcpy((char*)&spec[1], gspec + spec_size);
5248             gspec += next;
5249             cur_data += spec->next;
5250         }
5251         break;
5252     }
5253     default:
5254         ret = -TARGET_EINVAL;
5255         unlock_user(argptr, guest_data, 0);
5256         goto out;
5257     }
5258     unlock_user(argptr, guest_data, 0);
5259 
5260     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5261     if (!is_error(ret)) {
5262         guest_data = arg + host_dm->data_start;
5263         guest_data_size = host_dm->data_size - host_dm->data_start;
5264         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5265         switch (ie->host_cmd) {
5266         case DM_REMOVE_ALL:
5267         case DM_DEV_CREATE:
5268         case DM_DEV_REMOVE:
5269         case DM_DEV_RENAME:
5270         case DM_DEV_SUSPEND:
5271         case DM_DEV_STATUS:
5272         case DM_TABLE_LOAD:
5273         case DM_TABLE_CLEAR:
5274         case DM_TARGET_MSG:
5275         case DM_DEV_SET_GEOMETRY:
5276             /* no return data */
5277             break;
5278         case DM_LIST_DEVICES:
5279         {
5280             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5281             uint32_t remaining_data = guest_data_size;
5282             void *cur_data = argptr;
5283             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5284             int nl_size = 12; /* can't use thunk_size due to alignment */
5285 
5286             while (1) {
5287                 uint32_t next = nl->next;
5288                 if (next) {
5289                     nl->next = nl_size + (strlen(nl->name) + 1);
5290                 }
5291                 if (remaining_data < nl->next) {
5292                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5293                     break;
5294                 }
5295                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5296                 strcpy(cur_data + nl_size, nl->name);
5297                 cur_data += nl->next;
5298                 remaining_data -= nl->next;
5299                 if (!next) {
5300                     break;
5301                 }
5302                 nl = (void*)nl + next;
5303             }
5304             break;
5305         }
5306         case DM_DEV_WAIT:
5307         case DM_TABLE_STATUS:
5308         {
5309             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5310             void *cur_data = argptr;
5311             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5312             int spec_size = thunk_type_size(arg_type, 0);
5313             int i;
5314 
5315             for (i = 0; i < host_dm->target_count; i++) {
5316                 uint32_t next = spec->next;
5317                 int slen = strlen((char*)&spec[1]) + 1;
5318                 spec->next = (cur_data - argptr) + spec_size + slen;
5319                 if (guest_data_size < spec->next) {
5320                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5321                     break;
5322                 }
5323                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5324                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5325                 cur_data = argptr + spec->next;
5326                 spec = (void*)host_dm + host_dm->data_start + next;
5327             }
5328             break;
5329         }
5330         case DM_TABLE_DEPS:
5331         {
5332             void *hdata = (void*)host_dm + host_dm->data_start;
5333             int count = *(uint32_t*)hdata;
5334             uint64_t *hdev = hdata + 8;
5335             uint64_t *gdev = argptr + 8;
5336             int i;
5337 
5338             *(uint32_t*)argptr = tswap32(count);
5339             for (i = 0; i < count; i++) {
5340                 *gdev = tswap64(*hdev);
5341                 gdev++;
5342                 hdev++;
5343             }
5344             break;
5345         }
5346         case DM_LIST_VERSIONS:
5347         {
5348             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5349             uint32_t remaining_data = guest_data_size;
5350             void *cur_data = argptr;
5351             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5352             int vers_size = thunk_type_size(arg_type, 0);
5353 
5354             while (1) {
5355                 uint32_t next = vers->next;
5356                 if (next) {
5357                     vers->next = vers_size + (strlen(vers->name) + 1);
5358                 }
5359                 if (remaining_data < vers->next) {
5360                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5361                     break;
5362                 }
5363                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5364                 strcpy(cur_data + vers_size, vers->name);
5365                 cur_data += vers->next;
5366                 remaining_data -= vers->next;
5367                 if (!next) {
5368                     break;
5369                 }
5370                 vers = (void*)vers + next;
5371             }
5372             break;
5373         }
5374         default:
5375             unlock_user(argptr, guest_data, 0);
5376             ret = -TARGET_EINVAL;
5377             goto out;
5378         }
5379         unlock_user(argptr, guest_data, guest_data_size);
5380 
5381         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5382         if (!argptr) {
5383             ret = -TARGET_EFAULT;
5384             goto out;
5385         }
5386         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5387         unlock_user(argptr, arg, target_size);
5388     }
5389 out:
5390     g_free(big_buf);
5391     return ret;
5392 }
5393 
5394 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5395                                int cmd, abi_long arg)
5396 {
5397     void *argptr;
5398     int target_size;
5399     const argtype *arg_type = ie->arg_type;
5400     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5401     abi_long ret;
5402 
5403     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5404     struct blkpg_partition host_part;
5405 
5406     /* Read and convert blkpg */
5407     arg_type++;
5408     target_size = thunk_type_size(arg_type, 0);
5409     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5410     if (!argptr) {
5411         ret = -TARGET_EFAULT;
5412         goto out;
5413     }
5414     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5415     unlock_user(argptr, arg, 0);
5416 
5417     switch (host_blkpg->op) {
5418     case BLKPG_ADD_PARTITION:
5419     case BLKPG_DEL_PARTITION:
5420         /* payload is struct blkpg_partition */
5421         break;
5422     default:
5423         /* Unknown opcode */
5424         ret = -TARGET_EINVAL;
5425         goto out;
5426     }
5427 
5428     /* Read and convert blkpg->data */
5429     arg = (abi_long)(uintptr_t)host_blkpg->data;
5430     target_size = thunk_type_size(part_arg_type, 0);
5431     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5432     if (!argptr) {
5433         ret = -TARGET_EFAULT;
5434         goto out;
5435     }
5436     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5437     unlock_user(argptr, arg, 0);
5438 
5439     /* Swizzle the data pointer to our local copy and call! */
5440     host_blkpg->data = &host_part;
5441     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5442 
5443 out:
5444     return ret;
5445 }
5446 
5447 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5448                                 int fd, int cmd, abi_long arg)
5449 {
5450     const argtype *arg_type = ie->arg_type;
5451     const StructEntry *se;
5452     const argtype *field_types;
5453     const int *dst_offsets, *src_offsets;
5454     int target_size;
5455     void *argptr;
5456     abi_ulong *target_rt_dev_ptr = NULL;
5457     unsigned long *host_rt_dev_ptr = NULL;
5458     abi_long ret;
5459     int i;
5460 
5461     assert(ie->access == IOC_W);
5462     assert(*arg_type == TYPE_PTR);
5463     arg_type++;
5464     assert(*arg_type == TYPE_STRUCT);
5465     target_size = thunk_type_size(arg_type, 0);
5466     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5467     if (!argptr) {
5468         return -TARGET_EFAULT;
5469     }
5470     arg_type++;
5471     assert(*arg_type == (int)STRUCT_rtentry);
5472     se = struct_entries + *arg_type++;
5473     assert(se->convert[0] == NULL);
5474     /* convert struct here to be able to catch rt_dev string */
5475     field_types = se->field_types;
5476     dst_offsets = se->field_offsets[THUNK_HOST];
5477     src_offsets = se->field_offsets[THUNK_TARGET];
5478     for (i = 0; i < se->nb_fields; i++) {
5479         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5480             assert(*field_types == TYPE_PTRVOID);
5481             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5482             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5483             if (*target_rt_dev_ptr != 0) {
5484                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5485                                                   tswapal(*target_rt_dev_ptr));
5486                 if (!*host_rt_dev_ptr) {
5487                     unlock_user(argptr, arg, 0);
5488                     return -TARGET_EFAULT;
5489                 }
5490             } else {
5491                 *host_rt_dev_ptr = 0;
5492             }
5493             field_types++;
5494             continue;
5495         }
5496         field_types = thunk_convert(buf_temp + dst_offsets[i],
5497                                     argptr + src_offsets[i],
5498                                     field_types, THUNK_HOST);
5499     }
5500     unlock_user(argptr, arg, 0);
5501 
5502     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5503 
5504     assert(host_rt_dev_ptr != NULL);
5505     assert(target_rt_dev_ptr != NULL);
5506     if (*host_rt_dev_ptr != 0) {
5507         unlock_user((void *)*host_rt_dev_ptr,
5508                     *target_rt_dev_ptr, 0);
5509     }
5510     return ret;
5511 }
5512 
5513 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5514                                      int fd, int cmd, abi_long arg)
5515 {
5516     int sig = target_to_host_signal(arg);
5517     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5518 }
5519 
5520 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5521                                     int fd, int cmd, abi_long arg)
5522 {
5523     struct timeval tv;
5524     abi_long ret;
5525 
5526     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5527     if (is_error(ret)) {
5528         return ret;
5529     }
5530 
5531     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5532         if (copy_to_user_timeval(arg, &tv)) {
5533             return -TARGET_EFAULT;
5534         }
5535     } else {
5536         if (copy_to_user_timeval64(arg, &tv)) {
5537             return -TARGET_EFAULT;
5538         }
5539     }
5540 
5541     return ret;
5542 }
5543 
5544 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5545                                       int fd, int cmd, abi_long arg)
5546 {
5547     struct timespec ts;
5548     abi_long ret;
5549 
5550     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5551     if (is_error(ret)) {
5552         return ret;
5553     }
5554 
5555     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5556         if (host_to_target_timespec(arg, &ts)) {
5557             return -TARGET_EFAULT;
5558         }
5559     } else {
5560         if (host_to_target_timespec64(arg, &ts)) {
5561             return -TARGET_EFAULT;
5562         }
5563     }
5564 
5565     return ret;
5566 }
5567 
5568 #ifdef TIOCGPTPEER
5569 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5570                                      int fd, int cmd, abi_long arg)
5571 {
5572     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5573     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5574 }
5575 #endif
5576 
5577 #ifdef HAVE_DRM_H
5578 
5579 static void unlock_drm_version(struct drm_version *host_ver,
5580                                struct target_drm_version *target_ver,
5581                                bool copy)
5582 {
5583     unlock_user(host_ver->name, target_ver->name,
5584                                 copy ? host_ver->name_len : 0);
5585     unlock_user(host_ver->date, target_ver->date,
5586                                 copy ? host_ver->date_len : 0);
5587     unlock_user(host_ver->desc, target_ver->desc,
5588                                 copy ? host_ver->desc_len : 0);
5589 }
5590 
5591 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5592                                           struct target_drm_version *target_ver)
5593 {
5594     memset(host_ver, 0, sizeof(*host_ver));
5595 
5596     __get_user(host_ver->name_len, &target_ver->name_len);
5597     if (host_ver->name_len) {
5598         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5599                                    target_ver->name_len, 0);
5600         if (!host_ver->name) {
5601             return -EFAULT;
5602         }
5603     }
5604 
5605     __get_user(host_ver->date_len, &target_ver->date_len);
5606     if (host_ver->date_len) {
5607         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5608                                    target_ver->date_len, 0);
5609         if (!host_ver->date) {
5610             goto err;
5611         }
5612     }
5613 
5614     __get_user(host_ver->desc_len, &target_ver->desc_len);
5615     if (host_ver->desc_len) {
5616         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5617                                    target_ver->desc_len, 0);
5618         if (!host_ver->desc) {
5619             goto err;
5620         }
5621     }
5622 
5623     return 0;
5624 err:
5625     unlock_drm_version(host_ver, target_ver, false);
5626     return -EFAULT;
5627 }
5628 
5629 static inline void host_to_target_drmversion(
5630                                           struct target_drm_version *target_ver,
5631                                           struct drm_version *host_ver)
5632 {
5633     __put_user(host_ver->version_major, &target_ver->version_major);
5634     __put_user(host_ver->version_minor, &target_ver->version_minor);
5635     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5636     __put_user(host_ver->name_len, &target_ver->name_len);
5637     __put_user(host_ver->date_len, &target_ver->date_len);
5638     __put_user(host_ver->desc_len, &target_ver->desc_len);
5639     unlock_drm_version(host_ver, target_ver, true);
5640 }
5641 
5642 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5643                              int fd, int cmd, abi_long arg)
5644 {
5645     struct drm_version *ver;
5646     struct target_drm_version *target_ver;
5647     abi_long ret;
5648 
5649     switch (ie->host_cmd) {
5650     case DRM_IOCTL_VERSION:
5651         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5652             return -TARGET_EFAULT;
5653         }
5654         ver = (struct drm_version *)buf_temp;
5655         ret = target_to_host_drmversion(ver, target_ver);
5656         if (!is_error(ret)) {
5657             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5658             if (is_error(ret)) {
5659                 unlock_drm_version(ver, target_ver, false);
5660             } else {
5661                 host_to_target_drmversion(target_ver, ver);
5662             }
5663         }
5664         unlock_user_struct(target_ver, arg, 0);
5665         return ret;
5666     }
5667     return -TARGET_ENOSYS;
5668 }
5669 
5670 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5671                                            struct drm_i915_getparam *gparam,
5672                                            int fd, abi_long arg)
5673 {
5674     abi_long ret;
5675     int value;
5676     struct target_drm_i915_getparam *target_gparam;
5677 
5678     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5679         return -TARGET_EFAULT;
5680     }
5681 
5682     __get_user(gparam->param, &target_gparam->param);
5683     gparam->value = &value;
5684     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5685     put_user_s32(value, target_gparam->value);
5686 
5687     unlock_user_struct(target_gparam, arg, 0);
5688     return ret;
5689 }
5690 
5691 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5692                                   int fd, int cmd, abi_long arg)
5693 {
5694     switch (ie->host_cmd) {
5695     case DRM_IOCTL_I915_GETPARAM:
5696         return do_ioctl_drm_i915_getparam(ie,
5697                                           (struct drm_i915_getparam *)buf_temp,
5698                                           fd, arg);
5699     default:
5700         return -TARGET_ENOSYS;
5701     }
5702 }
5703 
5704 #endif
5705 
5706 IOCTLEntry ioctl_entries[] = {
5707 #define IOCTL(cmd, access, ...) \
5708     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5709 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5710     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5711 #define IOCTL_IGNORE(cmd) \
5712     { TARGET_ ## cmd, 0, #cmd },
5713 #include "ioctls.h"
5714     { 0, 0, },
5715 };
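/*
 * The entries above are expanded from ioctls.h.  A plain entry is
 * typically of the form (illustrative, see ioctls.h for the real list):
 *
 *     IOCTL(TCGETS, IOC_R, MK_PTR(MK_STRUCT(STRUCT_termios)))
 *
 * Entries that need custom marshalling name a do_ioctl_* helper via
 * IOCTL_SPECIAL, and IOCTL_IGNORE covers ioctls that appear in target
 * headers but have no Linux implementation; do_ioctl() returns
 * -TARGET_ENOSYS for those.
 */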
5716 
5717 /* ??? Implement proper locking for ioctls.  */
5718 /* do_ioctl() Must return target values and target errnos. */
5719 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5720 {
5721     const IOCTLEntry *ie;
5722     const argtype *arg_type;
5723     abi_long ret;
5724     uint8_t buf_temp[MAX_STRUCT_SIZE];
5725     int target_size;
5726     void *argptr;
5727 
5728     ie = ioctl_entries;
5729     for(;;) {
5730         if (ie->target_cmd == 0) {
5731             qemu_log_mask(
5732                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5733             return -TARGET_ENOSYS;
5734         }
5735         if (ie->target_cmd == cmd)
5736             break;
5737         ie++;
5738     }
5739     arg_type = ie->arg_type;
5740     if (ie->do_ioctl) {
5741         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5742     } else if (!ie->host_cmd) {
5743         /* Some architectures define BSD ioctls in their headers
5744            that are not implemented in Linux.  */
5745         return -TARGET_ENOSYS;
5746     }
5747 
5748     switch(arg_type[0]) {
5749     case TYPE_NULL:
5750         /* no argument */
5751         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5752         break;
5753     case TYPE_PTRVOID:
5754     case TYPE_INT:
5755     case TYPE_LONG:
5756     case TYPE_ULONG:
5757         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5758         break;
5759     case TYPE_PTR:
5760         arg_type++;
5761         target_size = thunk_type_size(arg_type, 0);
5762         switch(ie->access) {
5763         case IOC_R:
5764             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5765             if (!is_error(ret)) {
5766                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5767                 if (!argptr)
5768                     return -TARGET_EFAULT;
5769                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5770                 unlock_user(argptr, arg, target_size);
5771             }
5772             break;
5773         case IOC_W:
5774             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5775             if (!argptr)
5776                 return -TARGET_EFAULT;
5777             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5778             unlock_user(argptr, arg, 0);
5779             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5780             break;
5781         default:
5782         case IOC_RW:
5783             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5784             if (!argptr)
5785                 return -TARGET_EFAULT;
5786             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5787             unlock_user(argptr, arg, 0);
5788             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5789             if (!is_error(ret)) {
5790                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5791                 if (!argptr)
5792                     return -TARGET_EFAULT;
5793                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5794                 unlock_user(argptr, arg, target_size);
5795             }
5796             break;
5797         }
5798         break;
5799     default:
5800         qemu_log_mask(LOG_UNIMP,
5801                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5802                       (long)cmd, arg_type[0]);
5803         ret = -TARGET_ENOSYS;
5804         break;
5805     }
5806     return ret;
5807 }
5808 
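/*
 * Each row in the termios translation tables below is
 * { target mask, target bits, host mask, host bits }: the *_to_*_bitmask()
 * helpers replace the masked target value by the corresponding host value
 * (and vice versa).  Single-bit flags repeat the same flag in all four
 * columns, while multi-bit fields such as CBAUD or CSIZE need one row per
 * possible value.
 */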
5809 static const bitmask_transtbl iflag_tbl[] = {
5810         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5811         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5812         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5813         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5814         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5815         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5816         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5817         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5818         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5819         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5820         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5821         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5822         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5823         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5824         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5825         { 0, 0, 0, 0 }
5826 };
5827 
5828 static const bitmask_transtbl oflag_tbl[] = {
5829 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5830 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5831 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5832 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5833 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5834 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5835 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5836 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5837 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5838 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5839 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5840 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5841 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5842 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5843 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5844 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5845 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5846 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5847 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5848 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5849 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5850 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5851 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5852 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5853 	{ 0, 0, 0, 0 }
5854 };
5855 
5856 static const bitmask_transtbl cflag_tbl[] = {
5857 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5858 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5859 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5860 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5861 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5862 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5863 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5864 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5865 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5866 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5867 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5868 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5869 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5870 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5871 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5872 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5873 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5874 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5875 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5876 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5877 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5878 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5879 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5880 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5881 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5882 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5883 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5884 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5885 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5886 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5887 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5888 	{ 0, 0, 0, 0 }
5889 };
5890 
5891 static const bitmask_transtbl lflag_tbl[] = {
5892   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5893   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5894   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5895   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5896   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5897   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5898   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5899   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5900   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5901   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5902   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5903   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5904   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5905   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5906   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5907   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5908   { 0, 0, 0, 0 }
5909 };
5910 
5911 static void target_to_host_termios (void *dst, const void *src)
5912 {
5913     struct host_termios *host = dst;
5914     const struct target_termios *target = src;
5915 
5916     host->c_iflag =
5917         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5918     host->c_oflag =
5919         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5920     host->c_cflag =
5921         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5922     host->c_lflag =
5923         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5924     host->c_line = target->c_line;
5925 
5926     memset(host->c_cc, 0, sizeof(host->c_cc));
5927     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5928     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5929     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5930     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5931     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5932     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5933     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5934     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5935     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5936     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5937     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5938     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5939     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5940     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5941     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5942     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5943     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5944 }
5945 
5946 static void host_to_target_termios (void *dst, const void *src)
5947 {
5948     struct target_termios *target = dst;
5949     const struct host_termios *host = src;
5950 
5951     target->c_iflag =
5952         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5953     target->c_oflag =
5954         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5955     target->c_cflag =
5956         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5957     target->c_lflag =
5958         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5959     target->c_line = host->c_line;
5960 
5961     memset(target->c_cc, 0, sizeof(target->c_cc));
5962     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5963     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5964     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5965     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5966     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5967     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5968     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5969     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5970     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5971     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5972     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5973     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5974     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5975     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5976     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5977     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5978     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5979 }
5980 
5981 static const StructEntry struct_termios_def = {
5982     .convert = { host_to_target_termios, target_to_host_termios },
5983     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5984     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5985     .print = print_termios,
5986 };
5987 
5988 static bitmask_transtbl mmap_flags_tbl[] = {
5989     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5990     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5991     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5992     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5993       MAP_ANONYMOUS, MAP_ANONYMOUS },
5994     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5995       MAP_GROWSDOWN, MAP_GROWSDOWN },
5996     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5997       MAP_DENYWRITE, MAP_DENYWRITE },
5998     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5999       MAP_EXECUTABLE, MAP_EXECUTABLE },
6000     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6001     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6002       MAP_NORESERVE, MAP_NORESERVE },
6003     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6004     /* MAP_STACK has been ignored by the kernel for quite some time.
6005        Recognize it for the target insofar as we do not want to pass
6006        it through to the host.  */
6007     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6008     { 0, 0, 0, 0 }
6009 };
6010 
6011 /*
6012  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6013  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6014  */
6015 #if defined(TARGET_I386)
6016 
6017 /* NOTE: there is really only one LDT shared by all threads */
6018 static uint8_t *ldt_table;
6019 
6020 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6021 {
6022     int size;
6023     void *p;
6024 
6025     if (!ldt_table)
6026         return 0;
6027     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6028     if (size > bytecount)
6029         size = bytecount;
6030     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6031     if (!p)
6032         return -TARGET_EFAULT;
6033     /* ??? Should this be byteswapped?  */
6034     memcpy(p, ldt_table, size);
6035     unlock_user(p, ptr, size);
6036     return size;
6037 }
6038 
6039 /* XXX: add locking support */
6040 static abi_long write_ldt(CPUX86State *env,
6041                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6042 {
6043     struct target_modify_ldt_ldt_s ldt_info;
6044     struct target_modify_ldt_ldt_s *target_ldt_info;
6045     int seg_32bit, contents, read_exec_only, limit_in_pages;
6046     int seg_not_present, useable, lm;
6047     uint32_t *lp, entry_1, entry_2;
6048 
6049     if (bytecount != sizeof(ldt_info))
6050         return -TARGET_EINVAL;
6051     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6052         return -TARGET_EFAULT;
6053     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6054     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6055     ldt_info.limit = tswap32(target_ldt_info->limit);
6056     ldt_info.flags = tswap32(target_ldt_info->flags);
6057     unlock_user_struct(target_ldt_info, ptr, 0);
6058 
6059     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6060         return -TARGET_EINVAL;
6061     seg_32bit = ldt_info.flags & 1;
6062     contents = (ldt_info.flags >> 1) & 3;
6063     read_exec_only = (ldt_info.flags >> 3) & 1;
6064     limit_in_pages = (ldt_info.flags >> 4) & 1;
6065     seg_not_present = (ldt_info.flags >> 5) & 1;
6066     useable = (ldt_info.flags >> 6) & 1;
6067 #ifdef TARGET_ABI32
6068     lm = 0;
6069 #else
6070     lm = (ldt_info.flags >> 7) & 1;
6071 #endif
6072     if (contents == 3) {
6073         if (oldmode)
6074             return -TARGET_EINVAL;
6075         if (seg_not_present == 0)
6076             return -TARGET_EINVAL;
6077     }
6078     /* allocate the LDT */
6079     if (!ldt_table) {
6080         env->ldt.base = target_mmap(0,
6081                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6082                                     PROT_READ|PROT_WRITE,
6083                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6084         if (env->ldt.base == -1)
6085             return -TARGET_ENOMEM;
6086         memset(g2h(env->ldt.base), 0,
6087                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6088         env->ldt.limit = 0xffff;
6089         ldt_table = g2h(env->ldt.base);
6090     }
6091 
6092     /* NOTE: same code as Linux kernel */
6093     /* Allow LDTs to be cleared by the user. */
6094     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6095         if (oldmode ||
6096             (contents == 0		&&
6097              read_exec_only == 1	&&
6098              seg_32bit == 0		&&
6099              limit_in_pages == 0	&&
6100              seg_not_present == 1	&&
6101              useable == 0 )) {
6102             entry_1 = 0;
6103             entry_2 = 0;
6104             goto install;
6105         }
6106     }
6107 
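    /*
     * Assemble the two 32-bit words of an x86 segment descriptor:
     * entry_1 carries base[15:0] and limit[15:0]; entry_2 carries
     * base[31:24], the G/D/L/AVL flag bits, limit[19:16], the present
     * bit, DPL, S, the type field and base[23:16].  The constant 0x7000
     * sets S=1 and DPL=3 (a user-mode descriptor).
     */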
6108     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6109         (ldt_info.limit & 0x0ffff);
6110     entry_2 = (ldt_info.base_addr & 0xff000000) |
6111         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6112         (ldt_info.limit & 0xf0000) |
6113         ((read_exec_only ^ 1) << 9) |
6114         (contents << 10) |
6115         ((seg_not_present ^ 1) << 15) |
6116         (seg_32bit << 22) |
6117         (limit_in_pages << 23) |
6118         (lm << 21) |
6119         0x7000;
6120     if (!oldmode)
6121         entry_2 |= (useable << 20);
6122 
6123     /* Install the new entry ...  */
6124 install:
6125     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6126     lp[0] = tswap32(entry_1);
6127     lp[1] = tswap32(entry_2);
6128     return 0;
6129 }
6130 
6131 /* specific and weird i386 syscalls */
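/* func selects the operation: 0 reads the LDT, 1 writes an entry using the
   legacy format, 0x11 writes an entry using the current format. */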
6132 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6133                               unsigned long bytecount)
6134 {
6135     abi_long ret;
6136 
6137     switch (func) {
6138     case 0:
6139         ret = read_ldt(ptr, bytecount);
6140         break;
6141     case 1:
6142         ret = write_ldt(env, ptr, bytecount, 1);
6143         break;
6144     case 0x11:
6145         ret = write_ldt(env, ptr, bytecount, 0);
6146         break;
6147     default:
6148         ret = -TARGET_ENOSYS;
6149         break;
6150     }
6151     return ret;
6152 }
6153 
6154 #if defined(TARGET_ABI32)
6155 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6156 {
6157     uint64_t *gdt_table = g2h(env->gdt.base);
6158     struct target_modify_ldt_ldt_s ldt_info;
6159     struct target_modify_ldt_ldt_s *target_ldt_info;
6160     int seg_32bit, contents, read_exec_only, limit_in_pages;
6161     int seg_not_present, useable, lm;
6162     uint32_t *lp, entry_1, entry_2;
6163     int i;
6164 
6165     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6166     if (!target_ldt_info)
6167         return -TARGET_EFAULT;
6168     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6169     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6170     ldt_info.limit = tswap32(target_ldt_info->limit);
6171     ldt_info.flags = tswap32(target_ldt_info->flags);
6172     if (ldt_info.entry_number == -1) {
6173         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6174             if (gdt_table[i] == 0) {
6175                 ldt_info.entry_number = i;
6176                 target_ldt_info->entry_number = tswap32(i);
6177                 break;
6178             }
6179         }
6180     }
6181     unlock_user_struct(target_ldt_info, ptr, 1);
6182 
6183     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6184         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6185            return -TARGET_EINVAL;
6186     seg_32bit = ldt_info.flags & 1;
6187     contents = (ldt_info.flags >> 1) & 3;
6188     read_exec_only = (ldt_info.flags >> 3) & 1;
6189     limit_in_pages = (ldt_info.flags >> 4) & 1;
6190     seg_not_present = (ldt_info.flags >> 5) & 1;
6191     useable = (ldt_info.flags >> 6) & 1;
6192 #ifdef TARGET_ABI32
6193     lm = 0;
6194 #else
6195     lm = (ldt_info.flags >> 7) & 1;
6196 #endif
6197 
6198     if (contents == 3) {
6199         if (seg_not_present == 0)
6200             return -TARGET_EINVAL;
6201     }
6202 
6203     /* NOTE: same code as Linux kernel */
6204     /* Allow LDTs to be cleared by the user. */
6205     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6206         if ((contents == 0             &&
6207              read_exec_only == 1       &&
6208              seg_32bit == 0            &&
6209              limit_in_pages == 0       &&
6210              seg_not_present == 1      &&
6211              useable == 0 )) {
6212             entry_1 = 0;
6213             entry_2 = 0;
6214             goto install;
6215         }
6216     }
6217 
6218     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6219         (ldt_info.limit & 0x0ffff);
6220     entry_2 = (ldt_info.base_addr & 0xff000000) |
6221         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6222         (ldt_info.limit & 0xf0000) |
6223         ((read_exec_only ^ 1) << 9) |
6224         (contents << 10) |
6225         ((seg_not_present ^ 1) << 15) |
6226         (seg_32bit << 22) |
6227         (limit_in_pages << 23) |
6228         (useable << 20) |
6229         (lm << 21) |
6230         0x7000;
6231 
6232     /* Install the new entry ...  */
6233 install:
6234     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6235     lp[0] = tswap32(entry_1);
6236     lp[1] = tswap32(entry_2);
6237     return 0;
6238 }
6239 
6240 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6241 {
6242     struct target_modify_ldt_ldt_s *target_ldt_info;
6243     uint64_t *gdt_table = g2h(env->gdt.base);
6244     uint32_t base_addr, limit, flags;
6245     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6246     int seg_not_present, useable, lm;
6247     uint32_t *lp, entry_1, entry_2;
6248 
6249     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6250     if (!target_ldt_info)
6251         return -TARGET_EFAULT;
6252     idx = tswap32(target_ldt_info->entry_number);
6253     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6254         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6255         unlock_user_struct(target_ldt_info, ptr, 1);
6256         return -TARGET_EINVAL;
6257     }
6258     lp = (uint32_t *)(gdt_table + idx);
6259     entry_1 = tswap32(lp[0]);
6260     entry_2 = tswap32(lp[1]);
6261 
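    /* Decode the descriptor words back into the base/limit/flags layout
       expected by the guest. */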
6262     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6263     contents = (entry_2 >> 10) & 3;
6264     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6265     seg_32bit = (entry_2 >> 22) & 1;
6266     limit_in_pages = (entry_2 >> 23) & 1;
6267     useable = (entry_2 >> 20) & 1;
6268 #ifdef TARGET_ABI32
6269     lm = 0;
6270 #else
6271     lm = (entry_2 >> 21) & 1;
6272 #endif
6273     flags = (seg_32bit << 0) | (contents << 1) |
6274         (read_exec_only << 3) | (limit_in_pages << 4) |
6275         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6276     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6277     base_addr = (entry_1 >> 16) |
6278         (entry_2 & 0xff000000) |
6279         ((entry_2 & 0xff) << 16);
6280     target_ldt_info->base_addr = tswapal(base_addr);
6281     target_ldt_info->limit = tswap32(limit);
6282     target_ldt_info->flags = tswap32(flags);
6283     unlock_user_struct(target_ldt_info, ptr, 1);
6284     return 0;
6285 }
6286 
6287 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6288 {
6289     return -TARGET_ENOSYS;
6290 }
6291 #else
6292 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6293 {
6294     abi_long ret = 0;
6295     abi_ulong val;
6296     int idx;
6297 
6298     switch(code) {
6299     case TARGET_ARCH_SET_GS:
6300     case TARGET_ARCH_SET_FS:
6301         if (code == TARGET_ARCH_SET_GS)
6302             idx = R_GS;
6303         else
6304             idx = R_FS;
6305         cpu_x86_load_seg(env, idx, 0);
6306         env->segs[idx].base = addr;
6307         break;
6308     case TARGET_ARCH_GET_GS:
6309     case TARGET_ARCH_GET_FS:
6310         if (code == TARGET_ARCH_GET_GS)
6311             idx = R_GS;
6312         else
6313             idx = R_FS;
6314         val = env->segs[idx].base;
6315         if (put_user(val, addr, abi_ulong))
6316             ret = -TARGET_EFAULT;
6317         break;
6318     default:
6319         ret = -TARGET_EINVAL;
6320         break;
6321     }
6322     return ret;
6323 }
6324 #endif /* defined(TARGET_ABI32) */
6325 
6326 #endif /* defined(TARGET_I386) */
6327 
6328 #define NEW_STACK_SIZE 0x40000
6329 
6330 
6331 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6332 typedef struct {
6333     CPUArchState *env;
6334     pthread_mutex_t mutex;
6335     pthread_cond_t cond;
6336     pthread_t thread;
6337     uint32_t tid;
6338     abi_ulong child_tidptr;
6339     abi_ulong parent_tidptr;
6340     sigset_t sigmask;
6341 } new_thread_info;
6342 
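/*
 * Handshake data for CLONE_VM clones: the child publishes its TID and
 * signals info->cond once it is running, while the parent keeps holding
 * clone_lock until it has finished setting up the new CPU state.
 */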
6343 static void *clone_func(void *arg)
6344 {
6345     new_thread_info *info = arg;
6346     CPUArchState *env;
6347     CPUState *cpu;
6348     TaskState *ts;
6349 
6350     rcu_register_thread();
6351     tcg_register_thread();
6352     env = info->env;
6353     cpu = env_cpu(env);
6354     thread_cpu = cpu;
6355     ts = (TaskState *)cpu->opaque;
6356     info->tid = sys_gettid();
6357     task_settid(ts);
6358     if (info->child_tidptr)
6359         put_user_u32(info->tid, info->child_tidptr);
6360     if (info->parent_tidptr)
6361         put_user_u32(info->tid, info->parent_tidptr);
6362     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6363     /* Enable signals.  */
6364     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6365     /* Signal to the parent that we're ready.  */
6366     pthread_mutex_lock(&info->mutex);
6367     pthread_cond_broadcast(&info->cond);
6368     pthread_mutex_unlock(&info->mutex);
6369     /* Wait until the parent has finished initializing the tls state.  */
6370     pthread_mutex_lock(&clone_lock);
6371     pthread_mutex_unlock(&clone_lock);
6372     cpu_loop(env);
6373     /* never exits */
6374     return NULL;
6375 }
6376 
6377 /* do_fork() must return host values and target errnos (unlike most
6378    do_*() functions). */
6379 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6380                    abi_ulong parent_tidptr, target_ulong newtls,
6381                    abi_ulong child_tidptr)
6382 {
6383     CPUState *cpu = env_cpu(env);
6384     int ret;
6385     TaskState *ts;
6386     CPUState *new_cpu;
6387     CPUArchState *new_env;
6388     sigset_t sigmask;
6389 
6390     flags &= ~CLONE_IGNORED_FLAGS;
6391 
6392     /* Emulate vfork() with fork() */
6393     if (flags & CLONE_VFORK)
6394         flags &= ~(CLONE_VFORK | CLONE_VM);
6395 
6396     if (flags & CLONE_VM) {
6397         TaskState *parent_ts = (TaskState *)cpu->opaque;
6398         new_thread_info info;
6399         pthread_attr_t attr;
6400 
6401         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6402             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6403             return -TARGET_EINVAL;
6404         }
6405 
6406         ts = g_new0(TaskState, 1);
6407         init_task_state(ts);
6408 
6409         /* Grab a mutex so that thread setup appears atomic.  */
6410         pthread_mutex_lock(&clone_lock);
6411 
6412         /* we create a new CPU instance. */
6413         new_env = cpu_copy(env);
6414         /* Init regs that differ from the parent.  */
6415         cpu_clone_regs_child(new_env, newsp, flags);
6416         cpu_clone_regs_parent(env, flags);
6417         new_cpu = env_cpu(new_env);
6418         new_cpu->opaque = ts;
6419         ts->bprm = parent_ts->bprm;
6420         ts->info = parent_ts->info;
6421         ts->signal_mask = parent_ts->signal_mask;
6422 
6423         if (flags & CLONE_CHILD_CLEARTID) {
6424             ts->child_tidptr = child_tidptr;
6425         }
6426 
6427         if (flags & CLONE_SETTLS) {
6428             cpu_set_tls (new_env, newtls);
6429         }
6430 
6431         memset(&info, 0, sizeof(info));
6432         pthread_mutex_init(&info.mutex, NULL);
6433         pthread_mutex_lock(&info.mutex);
6434         pthread_cond_init(&info.cond, NULL);
6435         info.env = new_env;
6436         if (flags & CLONE_CHILD_SETTID) {
6437             info.child_tidptr = child_tidptr;
6438         }
6439         if (flags & CLONE_PARENT_SETTID) {
6440             info.parent_tidptr = parent_tidptr;
6441         }
6442 
6443         ret = pthread_attr_init(&attr);
6444         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6445         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6446         /* It is not safe to deliver signals until the child has finished
6447            initializing, so temporarily block all signals.  */
6448         sigfillset(&sigmask);
6449         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6450         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6451 
6452         /* If this is our first additional thread, we need to ensure we
6453          * generate code for parallel execution and flush old translations.
6454          */
6455         if (!parallel_cpus) {
6456             parallel_cpus = true;
6457             tb_flush(cpu);
6458         }
6459 
6460         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6461         /* TODO: Free new CPU state if thread creation failed.  */
6462 
6463         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6464         pthread_attr_destroy(&attr);
6465         if (ret == 0) {
6466             /* Wait for the child to initialize.  */
6467             pthread_cond_wait(&info.cond, &info.mutex);
6468             ret = info.tid;
6469         } else {
6470             ret = -1;
6471         }
6472         pthread_mutex_unlock(&info.mutex);
6473         pthread_cond_destroy(&info.cond);
6474         pthread_mutex_destroy(&info.mutex);
6475         pthread_mutex_unlock(&clone_lock);
6476     } else {
6477         /* if there is no CLONE_VM, we consider it a fork */
6478         if (flags & CLONE_INVALID_FORK_FLAGS) {
6479             return -TARGET_EINVAL;
6480         }
6481 
6482         /* We can't support custom termination signals */
6483         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6484             return -TARGET_EINVAL;
6485         }
6486 
6487         if (block_signals()) {
6488             return -TARGET_ERESTARTSYS;
6489         }
6490 
6491         fork_start();
6492         ret = fork();
6493         if (ret == 0) {
6494             /* Child Process.  */
6495             cpu_clone_regs_child(env, newsp, flags);
6496             fork_end(1);
6497             /* There is a race condition here.  The parent process could
6498                theoretically read the TID in the child process before the child
6499                tid is set.  This would require using either ptrace
6500                (not implemented) or having *_tidptr point at a shared memory
6501                mapping.  We can't repeat the spinlock hack used above because
6502                the child process gets its own copy of the lock.  */
6503             if (flags & CLONE_CHILD_SETTID)
6504                 put_user_u32(sys_gettid(), child_tidptr);
6505             if (flags & CLONE_PARENT_SETTID)
6506                 put_user_u32(sys_gettid(), parent_tidptr);
6507             ts = (TaskState *)cpu->opaque;
6508             if (flags & CLONE_SETTLS)
6509                 cpu_set_tls (env, newtls);
6510             if (flags & CLONE_CHILD_CLEARTID)
6511                 ts->child_tidptr = child_tidptr;
6512         } else {
6513             cpu_clone_regs_parent(env, flags);
6514             fork_end(0);
6515         }
6516     }
6517     return ret;
6518 }
6519 
6520 /* warning: doesn't handle Linux-specific flags... */
6521 static int target_to_host_fcntl_cmd(int cmd)
6522 {
6523     int ret;
6524 
6525     switch(cmd) {
6526     case TARGET_F_DUPFD:
6527     case TARGET_F_GETFD:
6528     case TARGET_F_SETFD:
6529     case TARGET_F_GETFL:
6530     case TARGET_F_SETFL:
6531     case TARGET_F_OFD_GETLK:
6532     case TARGET_F_OFD_SETLK:
6533     case TARGET_F_OFD_SETLKW:
6534         ret = cmd;
6535         break;
6536     case TARGET_F_GETLK:
6537         ret = F_GETLK64;
6538         break;
6539     case TARGET_F_SETLK:
6540         ret = F_SETLK64;
6541         break;
6542     case TARGET_F_SETLKW:
6543         ret = F_SETLKW64;
6544         break;
6545     case TARGET_F_GETOWN:
6546         ret = F_GETOWN;
6547         break;
6548     case TARGET_F_SETOWN:
6549         ret = F_SETOWN;
6550         break;
6551     case TARGET_F_GETSIG:
6552         ret = F_GETSIG;
6553         break;
6554     case TARGET_F_SETSIG:
6555         ret = F_SETSIG;
6556         break;
6557 #if TARGET_ABI_BITS == 32
6558     case TARGET_F_GETLK64:
6559         ret = F_GETLK64;
6560         break;
6561     case TARGET_F_SETLK64:
6562         ret = F_SETLK64;
6563         break;
6564     case TARGET_F_SETLKW64:
6565         ret = F_SETLKW64;
6566         break;
6567 #endif
6568     case TARGET_F_SETLEASE:
6569         ret = F_SETLEASE;
6570         break;
6571     case TARGET_F_GETLEASE:
6572         ret = F_GETLEASE;
6573         break;
6574 #ifdef F_DUPFD_CLOEXEC
6575     case TARGET_F_DUPFD_CLOEXEC:
6576         ret = F_DUPFD_CLOEXEC;
6577         break;
6578 #endif
6579     case TARGET_F_NOTIFY:
6580         ret = F_NOTIFY;
6581         break;
6582 #ifdef F_GETOWN_EX
6583     case TARGET_F_GETOWN_EX:
6584         ret = F_GETOWN_EX;
6585         break;
6586 #endif
6587 #ifdef F_SETOWN_EX
6588     case TARGET_F_SETOWN_EX:
6589         ret = F_SETOWN_EX;
6590         break;
6591 #endif
6592 #ifdef F_SETPIPE_SZ
6593     case TARGET_F_SETPIPE_SZ:
6594         ret = F_SETPIPE_SZ;
6595         break;
6596     case TARGET_F_GETPIPE_SZ:
6597         ret = F_GETPIPE_SZ;
6598         break;
6599 #endif
6600     default:
6601         ret = -TARGET_EINVAL;
6602         break;
6603     }
6604 
6605 #if defined(__powerpc64__)
6606     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6607      * that are not supported by the kernel. The glibc fcntl wrapper adjusts
6608      * them to 5, 6 and 7 before making the syscall(). Since we make the
6609      * syscall directly, adjust to what is supported by the kernel.
6610      */
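    /* e.g. the glibc value 12 for F_GETLK64 is rewritten to the kernel's 5. */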
6611     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6612         ret -= F_GETLK64 - 5;
6613     }
6614 #endif
6615 
6616     return ret;
6617 }
6618 
6619 #define FLOCK_TRANSTBL \
6620     switch (type) { \
6621     TRANSTBL_CONVERT(F_RDLCK); \
6622     TRANSTBL_CONVERT(F_WRLCK); \
6623     TRANSTBL_CONVERT(F_UNLCK); \
6624     TRANSTBL_CONVERT(F_EXLCK); \
6625     TRANSTBL_CONVERT(F_SHLCK); \
6626     }
6627 
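/*
 * FLOCK_TRANSTBL is expanded twice with different TRANSTBL_CONVERT
 * definitions, once per translation direction, so the list of lock types
 * only has to be written once.
 */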
6628 static int target_to_host_flock(int type)
6629 {
6630 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6631     FLOCK_TRANSTBL
6632 #undef  TRANSTBL_CONVERT
6633     return -TARGET_EINVAL;
6634 }
6635 
6636 static int host_to_target_flock(int type)
6637 {
6638 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6639     FLOCK_TRANSTBL
6640 #undef  TRANSTBL_CONVERT
6641     /* if we don't know how to convert the value coming
6642      * from the host, we copy it to the target field as-is
6643      */
6644     return type;
6645 }
6646 
6647 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6648                                             abi_ulong target_flock_addr)
6649 {
6650     struct target_flock *target_fl;
6651     int l_type;
6652 
6653     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6654         return -TARGET_EFAULT;
6655     }
6656 
6657     __get_user(l_type, &target_fl->l_type);
6658     l_type = target_to_host_flock(l_type);
6659     if (l_type < 0) {
6660         return l_type;
6661     }
6662     fl->l_type = l_type;
6663     __get_user(fl->l_whence, &target_fl->l_whence);
6664     __get_user(fl->l_start, &target_fl->l_start);
6665     __get_user(fl->l_len, &target_fl->l_len);
6666     __get_user(fl->l_pid, &target_fl->l_pid);
6667     unlock_user_struct(target_fl, target_flock_addr, 0);
6668     return 0;
6669 }
6670 
6671 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6672                                           const struct flock64 *fl)
6673 {
6674     struct target_flock *target_fl;
6675     short l_type;
6676 
6677     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6678         return -TARGET_EFAULT;
6679     }
6680 
6681     l_type = host_to_target_flock(fl->l_type);
6682     __put_user(l_type, &target_fl->l_type);
6683     __put_user(fl->l_whence, &target_fl->l_whence);
6684     __put_user(fl->l_start, &target_fl->l_start);
6685     __put_user(fl->l_len, &target_fl->l_len);
6686     __put_user(fl->l_pid, &target_fl->l_pid);
6687     unlock_user_struct(target_fl, target_flock_addr, 1);
6688     return 0;
6689 }
6690 
6691 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6692 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6693 
6694 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6695 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6696                                                    abi_ulong target_flock_addr)
6697 {
6698     struct target_oabi_flock64 *target_fl;
6699     int l_type;
6700 
6701     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6702         return -TARGET_EFAULT;
6703     }
6704 
6705     __get_user(l_type, &target_fl->l_type);
6706     l_type = target_to_host_flock(l_type);
6707     if (l_type < 0) {
6708         return l_type;
6709     }
6710     fl->l_type = l_type;
6711     __get_user(fl->l_whence, &target_fl->l_whence);
6712     __get_user(fl->l_start, &target_fl->l_start);
6713     __get_user(fl->l_len, &target_fl->l_len);
6714     __get_user(fl->l_pid, &target_fl->l_pid);
6715     unlock_user_struct(target_fl, target_flock_addr, 0);
6716     return 0;
6717 }
6718 
6719 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6720                                                  const struct flock64 *fl)
6721 {
6722     struct target_oabi_flock64 *target_fl;
6723     short l_type;
6724 
6725     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6726         return -TARGET_EFAULT;
6727     }
6728 
6729     l_type = host_to_target_flock(fl->l_type);
6730     __put_user(l_type, &target_fl->l_type);
6731     __put_user(fl->l_whence, &target_fl->l_whence);
6732     __put_user(fl->l_start, &target_fl->l_start);
6733     __put_user(fl->l_len, &target_fl->l_len);
6734     __put_user(fl->l_pid, &target_fl->l_pid);
6735     unlock_user_struct(target_fl, target_flock_addr, 1);
6736     return 0;
6737 }
6738 #endif
6739 
6740 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6741                                               abi_ulong target_flock_addr)
6742 {
6743     struct target_flock64 *target_fl;
6744     int l_type;
6745 
6746     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6747         return -TARGET_EFAULT;
6748     }
6749 
6750     __get_user(l_type, &target_fl->l_type);
6751     l_type = target_to_host_flock(l_type);
6752     if (l_type < 0) {
6753         return l_type;
6754     }
6755     fl->l_type = l_type;
6756     __get_user(fl->l_whence, &target_fl->l_whence);
6757     __get_user(fl->l_start, &target_fl->l_start);
6758     __get_user(fl->l_len, &target_fl->l_len);
6759     __get_user(fl->l_pid, &target_fl->l_pid);
6760     unlock_user_struct(target_fl, target_flock_addr, 0);
6761     return 0;
6762 }
6763 
6764 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6765                                             const struct flock64 *fl)
6766 {
6767     struct target_flock64 *target_fl;
6768     short l_type;
6769 
6770     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6771         return -TARGET_EFAULT;
6772     }
6773 
6774     l_type = host_to_target_flock(fl->l_type);
6775     __put_user(l_type, &target_fl->l_type);
6776     __put_user(fl->l_whence, &target_fl->l_whence);
6777     __put_user(fl->l_start, &target_fl->l_start);
6778     __put_user(fl->l_len, &target_fl->l_len);
6779     __put_user(fl->l_pid, &target_fl->l_pid);
6780     unlock_user_struct(target_fl, target_flock_addr, 1);
6781     return 0;
6782 }
6783 
6784 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6785 {
6786     struct flock64 fl64;
6787 #ifdef F_GETOWN_EX
6788     struct f_owner_ex fox;
6789     struct target_f_owner_ex *target_fox;
6790 #endif
6791     abi_long ret;
6792     int host_cmd = target_to_host_fcntl_cmd(cmd);
6793 
6794     if (host_cmd == -TARGET_EINVAL)
6795         return host_cmd;
6796 
6797     switch(cmd) {
6798     case TARGET_F_GETLK:
6799         ret = copy_from_user_flock(&fl64, arg);
6800         if (ret) {
6801             return ret;
6802         }
6803         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6804         if (ret == 0) {
6805             ret = copy_to_user_flock(arg, &fl64);
6806         }
6807         break;
6808 
6809     case TARGET_F_SETLK:
6810     case TARGET_F_SETLKW:
6811         ret = copy_from_user_flock(&fl64, arg);
6812         if (ret) {
6813             return ret;
6814         }
6815         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6816         break;
6817 
6818     case TARGET_F_GETLK64:
6819     case TARGET_F_OFD_GETLK:
6820         ret = copy_from_user_flock64(&fl64, arg);
6821         if (ret) {
6822             return ret;
6823         }
6824         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6825         if (ret == 0) {
6826             ret = copy_to_user_flock64(arg, &fl64);
6827         }
6828         break;
6829     case TARGET_F_SETLK64:
6830     case TARGET_F_SETLKW64:
6831     case TARGET_F_OFD_SETLK:
6832     case TARGET_F_OFD_SETLKW:
6833         ret = copy_from_user_flock64(&fl64, arg);
6834         if (ret) {
6835             return ret;
6836         }
6837         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6838         break;
6839 
6840     case TARGET_F_GETFL:
6841         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6842         if (ret >= 0) {
6843             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6844         }
6845         break;
6846 
6847     case TARGET_F_SETFL:
6848         ret = get_errno(safe_fcntl(fd, host_cmd,
6849                                    target_to_host_bitmask(arg,
6850                                                           fcntl_flags_tbl)));
6851         break;
6852 
6853 #ifdef F_GETOWN_EX
6854     case TARGET_F_GETOWN_EX:
6855         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6856         if (ret >= 0) {
6857             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6858                 return -TARGET_EFAULT;
6859             target_fox->type = tswap32(fox.type);
6860             target_fox->pid = tswap32(fox.pid);
6861             unlock_user_struct(target_fox, arg, 1);
6862         }
6863         break;
6864 #endif
6865 
6866 #ifdef F_SETOWN_EX
6867     case TARGET_F_SETOWN_EX:
6868         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6869             return -TARGET_EFAULT;
6870         fox.type = tswap32(target_fox->type);
6871         fox.pid = tswap32(target_fox->pid);
6872         unlock_user_struct(target_fox, arg, 0);
6873         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6874         break;
6875 #endif
6876 
6877     case TARGET_F_SETSIG:
6878         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6879         break;
6880 
6881     case TARGET_F_GETSIG:
6882         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6883         break;
6884 
6885     case TARGET_F_SETOWN:
6886     case TARGET_F_GETOWN:
6887     case TARGET_F_SETLEASE:
6888     case TARGET_F_GETLEASE:
6889     case TARGET_F_SETPIPE_SZ:
6890     case TARGET_F_GETPIPE_SZ:
6891         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6892         break;
6893 
6894     default:
6895         ret = get_errno(safe_fcntl(fd, cmd, arg));
6896         break;
6897     }
6898     return ret;
6899 }
6900 
6901 #ifdef USE_UID16
6902 
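/*
 * Helpers for targets using the legacy 16-bit uid/gid ABI: ids that do not
 * fit in 16 bits are reported as the overflow id 65534, and a 16-bit -1
 * (conventionally meaning "no change") is widened back to a full -1.
 */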
6903 static inline int high2lowuid(int uid)
6904 {
6905     if (uid > 65535)
6906         return 65534;
6907     else
6908         return uid;
6909 }
6910 
6911 static inline int high2lowgid(int gid)
6912 {
6913     if (gid > 65535)
6914         return 65534;
6915     else
6916         return gid;
6917 }
6918 
6919 static inline int low2highuid(int uid)
6920 {
6921     if ((int16_t)uid == -1)
6922         return -1;
6923     else
6924         return uid;
6925 }
6926 
6927 static inline int low2highgid(int gid)
6928 {
6929     if ((int16_t)gid == -1)
6930         return -1;
6931     else
6932         return gid;
6933 }
6934 static inline int tswapid(int id)
6935 {
6936     return tswap16(id);
6937 }
6938 
6939 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6940 
6941 #else /* !USE_UID16 */
6942 static inline int high2lowuid(int uid)
6943 {
6944     return uid;
6945 }
6946 static inline int high2lowgid(int gid)
6947 {
6948     return gid;
6949 }
6950 static inline int low2highuid(int uid)
6951 {
6952     return uid;
6953 }
6954 static inline int low2highgid(int gid)
6955 {
6956     return gid;
6957 }
6958 static inline int tswapid(int id)
6959 {
6960     return tswap32(id);
6961 }
6962 
6963 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6964 
6965 #endif /* USE_UID16 */
6966 
6967 /* We must do direct syscalls for setting UID/GID, because we want to
6968  * implement the Linux system call semantics of "change only for this thread",
6969  * not the libc/POSIX semantics of "change for all threads in process".
6970  * (See http://ewontfix.com/17/ for more details.)
6971  * We use the 32-bit version of the syscalls if present; if it is not
6972  * then either the host architecture supports 32-bit UIDs natively with
6973  * the standard syscall, or the 16-bit UID is the best we can do.
6974  */
6975 #ifdef __NR_setuid32
6976 #define __NR_sys_setuid __NR_setuid32
6977 #else
6978 #define __NR_sys_setuid __NR_setuid
6979 #endif
6980 #ifdef __NR_setgid32
6981 #define __NR_sys_setgid __NR_setgid32
6982 #else
6983 #define __NR_sys_setgid __NR_setgid
6984 #endif
6985 #ifdef __NR_setresuid32
6986 #define __NR_sys_setresuid __NR_setresuid32
6987 #else
6988 #define __NR_sys_setresuid __NR_setresuid
6989 #endif
6990 #ifdef __NR_setresgid32
6991 #define __NR_sys_setresgid __NR_setresgid32
6992 #else
6993 #define __NR_sys_setresgid __NR_setresgid
6994 #endif
6995 
6996 _syscall1(int, sys_setuid, uid_t, uid)
6997 _syscall1(int, sys_setgid, gid_t, gid)
6998 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6999 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7000 
7001 void syscall_init(void)
7002 {
7003     IOCTLEntry *ie;
7004     const argtype *arg_type;
7005     int size;
7006     int i;
7007 
7008     thunk_init(STRUCT_MAX);
7009 
7010 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7011 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7012 #include "syscall_types.h"
7013 #undef STRUCT
7014 #undef STRUCT_SPECIAL
7015 
7016     /* Build target_to_host_errno_table[] from
7017      * host_to_target_errno_table[]. */
7018     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7019         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7020     }
7021 
7022     /* We patch the ioctl size if necessary. We rely on the fact that
7023        no ioctl has all the bits set to '1' in the size field */
7024     ie = ioctl_entries;
7025     while (ie->target_cmd != 0) {
7026         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7027             TARGET_IOC_SIZEMASK) {
7028             arg_type = ie->arg_type;
7029             if (arg_type[0] != TYPE_PTR) {
7030                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7031                         ie->target_cmd);
7032                 exit(1);
7033             }
7034             arg_type++;
7035             size = thunk_type_size(arg_type, 0);
7036             ie->target_cmd = (ie->target_cmd &
7037                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7038                 (size << TARGET_IOC_SIZESHIFT);
7039         }
7040 
7041         /* automatic consistency check if same arch */
7042 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7043     (defined(__x86_64__) && defined(TARGET_X86_64))
7044         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7045             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7046                     ie->name, ie->target_cmd, ie->host_cmd);
7047         }
7048 #endif
7049         ie++;
7050     }
7051 }
7052 
7053 #ifdef TARGET_NR_truncate64
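/*
 * On ABIs that pass 64-bit syscall arguments in aligned register pairs the
 * offset halves arrive one argument slot later, so shift them down before
 * recombining them with target_offset64().
 */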
7054 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7055                                          abi_long arg2,
7056                                          abi_long arg3,
7057                                          abi_long arg4)
7058 {
7059     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7060         arg2 = arg3;
7061         arg3 = arg4;
7062     }
7063     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7064 }
7065 #endif
7066 
7067 #ifdef TARGET_NR_ftruncate64
7068 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7069                                           abi_long arg2,
7070                                           abi_long arg3,
7071                                           abi_long arg4)
7072 {
7073     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7074         arg2 = arg3;
7075         arg3 = arg4;
7076     }
7077     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7078 }
7079 #endif
7080 
7081 #if defined(TARGET_NR_timer_settime) || \
7082     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7083 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7084                                                  abi_ulong target_addr)
7085 {
7086     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7087                                 offsetof(struct target_itimerspec,
7088                                          it_interval)) ||
7089         target_to_host_timespec(&host_its->it_value, target_addr +
7090                                 offsetof(struct target_itimerspec,
7091                                          it_value))) {
7092         return -TARGET_EFAULT;
7093     }
7094 
7095     return 0;
7096 }
7097 #endif
7098 
7099 #if defined(TARGET_NR_timer_settime64) || \
7100     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7101 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7102                                                    abi_ulong target_addr)
7103 {
7104     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7105                                   offsetof(struct target__kernel_itimerspec,
7106                                            it_interval)) ||
7107         target_to_host_timespec64(&host_its->it_value, target_addr +
7108                                   offsetof(struct target__kernel_itimerspec,
7109                                            it_value))) {
7110         return -TARGET_EFAULT;
7111     }
7112 
7113     return 0;
7114 }
7115 #endif
7116 
7117 #if ((defined(TARGET_NR_timerfd_gettime) || \
7118       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7119       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7120 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7121                                                  struct itimerspec *host_its)
7122 {
7123     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7124                                                        it_interval),
7125                                 &host_its->it_interval) ||
7126         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7127                                                        it_value),
7128                                 &host_its->it_value)) {
7129         return -TARGET_EFAULT;
7130     }
7131     return 0;
7132 }
7133 #endif
7134 
7135 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7136       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7137       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7138 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7139                                                    struct itimerspec *host_its)
7140 {
7141     if (host_to_target_timespec64(target_addr +
7142                                   offsetof(struct target__kernel_itimerspec,
7143                                            it_interval),
7144                                   &host_its->it_interval) ||
7145         host_to_target_timespec64(target_addr +
7146                                   offsetof(struct target__kernel_itimerspec,
7147                                            it_value),
7148                                   &host_its->it_value)) {
7149         return -TARGET_EFAULT;
7150     }
7151     return 0;
7152 }
7153 #endif
7154 
7155 #if defined(TARGET_NR_adjtimex) || \
7156     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7157 static inline abi_long target_to_host_timex(struct timex *host_tx,
7158                                             abi_long target_addr)
7159 {
7160     struct target_timex *target_tx;
7161 
7162     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7163         return -TARGET_EFAULT;
7164     }
7165 
7166     __get_user(host_tx->modes, &target_tx->modes);
7167     __get_user(host_tx->offset, &target_tx->offset);
7168     __get_user(host_tx->freq, &target_tx->freq);
7169     __get_user(host_tx->maxerror, &target_tx->maxerror);
7170     __get_user(host_tx->esterror, &target_tx->esterror);
7171     __get_user(host_tx->status, &target_tx->status);
7172     __get_user(host_tx->constant, &target_tx->constant);
7173     __get_user(host_tx->precision, &target_tx->precision);
7174     __get_user(host_tx->tolerance, &target_tx->tolerance);
7175     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7176     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7177     __get_user(host_tx->tick, &target_tx->tick);
7178     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7179     __get_user(host_tx->jitter, &target_tx->jitter);
7180     __get_user(host_tx->shift, &target_tx->shift);
7181     __get_user(host_tx->stabil, &target_tx->stabil);
7182     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7183     __get_user(host_tx->calcnt, &target_tx->calcnt);
7184     __get_user(host_tx->errcnt, &target_tx->errcnt);
7185     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7186     __get_user(host_tx->tai, &target_tx->tai);
7187 
7188     unlock_user_struct(target_tx, target_addr, 0);
7189     return 0;
7190 }
7191 
7192 static inline abi_long host_to_target_timex(abi_long target_addr,
7193                                             struct timex *host_tx)
7194 {
7195     struct target_timex *target_tx;
7196 
7197     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7198         return -TARGET_EFAULT;
7199     }
7200 
7201     __put_user(host_tx->modes, &target_tx->modes);
7202     __put_user(host_tx->offset, &target_tx->offset);
7203     __put_user(host_tx->freq, &target_tx->freq);
7204     __put_user(host_tx->maxerror, &target_tx->maxerror);
7205     __put_user(host_tx->esterror, &target_tx->esterror);
7206     __put_user(host_tx->status, &target_tx->status);
7207     __put_user(host_tx->constant, &target_tx->constant);
7208     __put_user(host_tx->precision, &target_tx->precision);
7209     __put_user(host_tx->tolerance, &target_tx->tolerance);
7210     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7211     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7212     __put_user(host_tx->tick, &target_tx->tick);
7213     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7214     __put_user(host_tx->jitter, &target_tx->jitter);
7215     __put_user(host_tx->shift, &target_tx->shift);
7216     __put_user(host_tx->stabil, &target_tx->stabil);
7217     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7218     __put_user(host_tx->calcnt, &target_tx->calcnt);
7219     __put_user(host_tx->errcnt, &target_tx->errcnt);
7220     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7221     __put_user(host_tx->tai, &target_tx->tai);
7222 
7223     unlock_user_struct(target_tx, target_addr, 1);
7224     return 0;
7225 }
7226 #endif
7227 
7228 
7229 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
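/*
 * struct target__kernel_timex keeps its time field in a 64-bit timeval
 * layout, so that member is converted with the timeval64 helpers while the
 * remaining fields are copied one by one.
 */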
7230 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7231                                               abi_long target_addr)
7232 {
7233     struct target__kernel_timex *target_tx;
7234 
7235     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7236                                  offsetof(struct target__kernel_timex,
7237                                           time))) {
7238         return -TARGET_EFAULT;
7239     }
7240 
7241     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7242         return -TARGET_EFAULT;
7243     }
7244 
7245     __get_user(host_tx->modes, &target_tx->modes);
7246     __get_user(host_tx->offset, &target_tx->offset);
7247     __get_user(host_tx->freq, &target_tx->freq);
7248     __get_user(host_tx->maxerror, &target_tx->maxerror);
7249     __get_user(host_tx->esterror, &target_tx->esterror);
7250     __get_user(host_tx->status, &target_tx->status);
7251     __get_user(host_tx->constant, &target_tx->constant);
7252     __get_user(host_tx->precision, &target_tx->precision);
7253     __get_user(host_tx->tolerance, &target_tx->tolerance);
7254     __get_user(host_tx->tick, &target_tx->tick);
7255     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7256     __get_user(host_tx->jitter, &target_tx->jitter);
7257     __get_user(host_tx->shift, &target_tx->shift);
7258     __get_user(host_tx->stabil, &target_tx->stabil);
7259     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7260     __get_user(host_tx->calcnt, &target_tx->calcnt);
7261     __get_user(host_tx->errcnt, &target_tx->errcnt);
7262     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7263     __get_user(host_tx->tai, &target_tx->tai);
7264 
7265     unlock_user_struct(target_tx, target_addr, 0);
7266     return 0;
7267 }
7268 
7269 static inline abi_long host_to_target_timex64(abi_long target_addr,
7270                                               struct timex *host_tx)
7271 {
7272     struct target__kernel_timex *target_tx;
7273 
7274     if (copy_to_user_timeval64(target_addr +
7275                                offsetof(struct target__kernel_timex, time),
7276                                &host_tx->time)) {
7277         return -TARGET_EFAULT;
7278     }
7279 
7280     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7281         return -TARGET_EFAULT;
7282     }
7283 
7284     __put_user(host_tx->modes, &target_tx->modes);
7285     __put_user(host_tx->offset, &target_tx->offset);
7286     __put_user(host_tx->freq, &target_tx->freq);
7287     __put_user(host_tx->maxerror, &target_tx->maxerror);
7288     __put_user(host_tx->esterror, &target_tx->esterror);
7289     __put_user(host_tx->status, &target_tx->status);
7290     __put_user(host_tx->constant, &target_tx->constant);
7291     __put_user(host_tx->precision, &target_tx->precision);
7292     __put_user(host_tx->tolerance, &target_tx->tolerance);
7293     __put_user(host_tx->tick, &target_tx->tick);
7294     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7295     __put_user(host_tx->jitter, &target_tx->jitter);
7296     __put_user(host_tx->shift, &target_tx->shift);
7297     __put_user(host_tx->stabil, &target_tx->stabil);
7298     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7299     __put_user(host_tx->calcnt, &target_tx->calcnt);
7300     __put_user(host_tx->errcnt, &target_tx->errcnt);
7301     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7302     __put_user(host_tx->tai, &target_tx->tai);
7303 
7304     unlock_user_struct(target_tx, target_addr, 1);
7305     return 0;
7306 }
7307 #endif
7308 
7309 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7310                                                abi_ulong target_addr)
7311 {
7312     struct target_sigevent *target_sevp;
7313 
7314     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7315         return -TARGET_EFAULT;
7316     }
7317 
7318     /* This union is awkward on 64 bit systems because it has a 32 bit
7319      * integer and a pointer in it; we follow the conversion approach
7320      * used for handling sigval types in signal.c so the guest should get
7321      * the correct value back even if we did a 64 bit byteswap and it's
7322      * using the 32 bit integer.
7323      */
7324     host_sevp->sigev_value.sival_ptr =
7325         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7326     host_sevp->sigev_signo =
7327         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7328     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7329     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7330 
7331     unlock_user_struct(target_sevp, target_addr, 1);
7332     return 0;
7333 }
7334 
7335 #if defined(TARGET_NR_mlockall)
7336 static inline int target_to_host_mlockall_arg(int arg)
7337 {
7338     int result = 0;
7339 
7340     if (arg & TARGET_MCL_CURRENT) {
7341         result |= MCL_CURRENT;
7342     }
7343     if (arg & TARGET_MCL_FUTURE) {
7344         result |= MCL_FUTURE;
7345     }
7346 #ifdef MCL_ONFAULT
7347     if (arg & TARGET_MCL_ONFAULT) {
7348         result |= MCL_ONFAULT;
7349     }
7350 #endif
7351 
7352     return result;
7353 }
7354 #endif
7355 
7356 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7357      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7358      defined(TARGET_NR_newfstatat))
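/*
 * Copy a host struct stat into the guest's stat64 layout.  ARM EABI guests
 * use their own target_eabi_stat64 structure, which is handled separately
 * below.
 */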
7359 static inline abi_long host_to_target_stat64(void *cpu_env,
7360                                              abi_ulong target_addr,
7361                                              struct stat *host_st)
7362 {
7363 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7364     if (((CPUARMState *)cpu_env)->eabi) {
7365         struct target_eabi_stat64 *target_st;
7366 
7367         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7368             return -TARGET_EFAULT;
7369         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7370         __put_user(host_st->st_dev, &target_st->st_dev);
7371         __put_user(host_st->st_ino, &target_st->st_ino);
7372 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7373         __put_user(host_st->st_ino, &target_st->__st_ino);
7374 #endif
7375         __put_user(host_st->st_mode, &target_st->st_mode);
7376         __put_user(host_st->st_nlink, &target_st->st_nlink);
7377         __put_user(host_st->st_uid, &target_st->st_uid);
7378         __put_user(host_st->st_gid, &target_st->st_gid);
7379         __put_user(host_st->st_rdev, &target_st->st_rdev);
7380         __put_user(host_st->st_size, &target_st->st_size);
7381         __put_user(host_st->st_blksize, &target_st->st_blksize);
7382         __put_user(host_st->st_blocks, &target_st->st_blocks);
7383         __put_user(host_st->st_atime, &target_st->target_st_atime);
7384         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7385         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7386 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7387         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7388         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7389         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7390 #endif
7391         unlock_user_struct(target_st, target_addr, 1);
7392     } else
7393 #endif
7394     {
7395 #if defined(TARGET_HAS_STRUCT_STAT64)
7396         struct target_stat64 *target_st;
7397 #else
7398         struct target_stat *target_st;
7399 #endif
7400 
7401         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7402             return -TARGET_EFAULT;
7403         memset(target_st, 0, sizeof(*target_st));
7404         __put_user(host_st->st_dev, &target_st->st_dev);
7405         __put_user(host_st->st_ino, &target_st->st_ino);
7406 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7407         __put_user(host_st->st_ino, &target_st->__st_ino);
7408 #endif
7409         __put_user(host_st->st_mode, &target_st->st_mode);
7410         __put_user(host_st->st_nlink, &target_st->st_nlink);
7411         __put_user(host_st->st_uid, &target_st->st_uid);
7412         __put_user(host_st->st_gid, &target_st->st_gid);
7413         __put_user(host_st->st_rdev, &target_st->st_rdev);
7414         /* XXX: better use of kernel struct */
7415         __put_user(host_st->st_size, &target_st->st_size);
7416         __put_user(host_st->st_blksize, &target_st->st_blksize);
7417         __put_user(host_st->st_blocks, &target_st->st_blocks);
7418         __put_user(host_st->st_atime, &target_st->target_st_atime);
7419         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7420         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7421 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7422         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7423         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7424         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7425 #endif
7426         unlock_user_struct(target_st, target_addr, 1);
7427     }
7428 
7429     return 0;
7430 }
7431 #endif
7432 
7433 #if defined(TARGET_NR_statx) && defined(__NR_statx)
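/* Copy a statx result into guest memory, byte-swapping each field. */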
7434 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7435                                             abi_ulong target_addr)
7436 {
7437     struct target_statx *target_stx;
7438 
7439     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7440         return -TARGET_EFAULT;
7441     }
7442     memset(target_stx, 0, sizeof(*target_stx));
7443 
7444     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7445     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7446     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7447     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7448     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7449     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7450     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7451     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7452     __put_user(host_stx->stx_size, &target_stx->stx_size);
7453     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7454     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7455     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7456     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7457     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7458     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7459     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7460     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7461     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7462     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7463     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7464     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7465     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7466     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7467 
7468     unlock_user_struct(target_stx, target_addr, 1);
7469 
7470     return 0;
7471 }
7472 #endif
7473 
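/* Issue the host futex syscall, using the __NR_futex_time64 variant on
 * 32-bit hosts whose struct timespec carries a 64-bit tv_sec.
 */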
7474 static int do_sys_futex(int *uaddr, int op, int val,
7475                          const struct timespec *timeout, int *uaddr2,
7476                          int val3)
7477 {
7478 #if HOST_LONG_BITS == 64
7479 #if defined(__NR_futex)
7480     /* on a 64-bit host time_t is always 64 bits; there is no _time64 variant */
7481     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7482 
7483 #endif
7484 #else /* HOST_LONG_BITS == 64 */
7485 #if defined(__NR_futex_time64)
7486     if (sizeof(timeout->tv_sec) == 8) {
7487         /* _time64 function on 32bit arch */
7488         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7489     }
7490 #endif
7491 #if defined(__NR_futex)
7492     /* old function on 32bit arch */
7493     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7494 #endif
7495 #endif /* HOST_LONG_BITS == 64 */
7496     g_assert_not_reached();
7497 }
7498 
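/* Same as do_sys_futex(), but going through the safe_futex() wrappers and
 * converting the result to a -TARGET_* errno with get_errno().
 */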
7499 static int do_safe_futex(int *uaddr, int op, int val,
7500                          const struct timespec *timeout, int *uaddr2,
7501                          int val3)
7502 {
7503 #if HOST_LONG_BITS == 64
7504 #if defined(__NR_futex)
7505     /* on a 64-bit host time_t is always 64 bits; there is no _time64 variant */
7506     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7507 #endif
7508 #else /* HOST_LONG_BITS == 64 */
7509 #if defined(__NR_futex_time64)
7510     if (sizeof(timeout->tv_sec) == 8) {
7511         /* _time64 function on 32bit arch */
7512         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7513                                            val3));
7514     }
7515 #endif
7516 #if defined(__NR_futex)
7517     /* old function on 32bit arch */
7518     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7519 #endif
7520 #endif /* HOST_LONG_BITS == 64 */
7521     return -TARGET_ENOSYS;
7522 }
7523 
7524 /* ??? Using host futex calls even when target atomic operations
7525    are not really atomic probably breaks things.  However, implementing
7526    futexes locally would make futexes shared between multiple processes
7527    tricky.  In that case they are probably useless anyway, because guest
7528    atomic operations will not work either.  */
7529 #if defined(TARGET_NR_futex)
7530 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7531                     target_ulong uaddr2, int val3)
7532 {
7533     struct timespec ts, *pts;
7534     int base_op;
7535 
7536     /* ??? We assume FUTEX_* constants are the same on both host
7537        and target.  */
7538 #ifdef FUTEX_CMD_MASK
7539     base_op = op & FUTEX_CMD_MASK;
7540 #else
7541     base_op = op;
7542 #endif
7543     switch (base_op) {
7544     case FUTEX_WAIT:
7545     case FUTEX_WAIT_BITSET:
7546         if (timeout) {
7547             pts = &ts;
7548             if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
7549         } else {
7550             pts = NULL;
7551         }
7552         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7553     case FUTEX_WAKE:
7554         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7555     case FUTEX_FD:
7556         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7557     case FUTEX_REQUEUE:
7558     case FUTEX_CMP_REQUEUE:
7559     case FUTEX_WAKE_OP:
7560         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7561            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7562            But the prototype takes a `struct timespec *'; insert casts
7563            to satisfy the compiler.  We do not need to tswap TIMEOUT
7564            since it's not compared to guest memory.  */
7565         pts = (struct timespec *)(uintptr_t) timeout;
7566         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7567                              (base_op == FUTEX_CMP_REQUEUE
7568                                       ? tswap32(val3)
7569                                       : val3));
7570     default:
7571         return -TARGET_ENOSYS;
7572     }
7573 }
7574 #endif
7575 
7576 #if defined(TARGET_NR_futex_time64)
7577 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7578                            target_ulong uaddr2, int val3)
7579 {
7580     struct timespec ts, *pts;
7581     int base_op;
7582 
7583     /* ??? We assume FUTEX_* constants are the same on both host
7584        and target.  */
7585 #ifdef FUTEX_CMD_MASK
7586     base_op = op & FUTEX_CMD_MASK;
7587 #else
7588     base_op = op;
7589 #endif
7590     switch (base_op) {
7591     case FUTEX_WAIT:
7592     case FUTEX_WAIT_BITSET:
7593         if (timeout) {
7594             pts = &ts;
7595             if (target_to_host_timespec64(pts, timeout)) {
7596                 return -TARGET_EFAULT;
7597             }
7598         } else {
7599             pts = NULL;
7600         }
7601         return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7602     case FUTEX_WAKE:
7603         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7604     case FUTEX_FD:
7605         return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7606     case FUTEX_REQUEUE:
7607     case FUTEX_CMP_REQUEUE:
7608     case FUTEX_WAKE_OP:
7609         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7610            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7611            But the prototype takes a `struct timespec *'; insert casts
7612            to satisfy the compiler.  We do not need to tswap TIMEOUT
7613            since it's not compared to guest memory.  */
7614         pts = (struct timespec *)(uintptr_t) timeout;
7615         return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7616                              (base_op == FUTEX_CMP_REQUEUE
7617                                       ? tswap32(val3)
7618                                       : val3));
7619     default:
7620         return -TARGET_ENOSYS;
7621     }
7622 }
7623 #endif
7624 
7625 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
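/* Implement name_to_handle_at(): pass the guest's file_handle buffer to the
 * host and copy the (otherwise opaque) result back, byte-swapping only the
 * handle_bytes and handle_type fields, then store the mount id.
 */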
7626 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7627                                      abi_long handle, abi_long mount_id,
7628                                      abi_long flags)
7629 {
7630     struct file_handle *target_fh;
7631     struct file_handle *fh;
7632     int mid = 0;
7633     abi_long ret;
7634     char *name;
7635     unsigned int size, total_size;
7636 
7637     if (get_user_s32(size, handle)) {
7638         return -TARGET_EFAULT;
7639     }
7640 
7641     name = lock_user_string(pathname);
7642     if (!name) {
7643         return -TARGET_EFAULT;
7644     }
7645 
7646     total_size = sizeof(struct file_handle) + size;
7647     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7648     if (!target_fh) {
7649         unlock_user(name, pathname, 0);
7650         return -TARGET_EFAULT;
7651     }
7652 
7653     fh = g_malloc0(total_size);
7654     fh->handle_bytes = size;
7655 
7656     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7657     unlock_user(name, pathname, 0);
7658 
7659     /* man name_to_handle_at(2):
7660      * Other than the use of the handle_bytes field, the caller should treat
7661      * the file_handle structure as an opaque data type
7662      */
7663 
7664     memcpy(target_fh, fh, total_size);
7665     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7666     target_fh->handle_type = tswap32(fh->handle_type);
7667     g_free(fh);
7668     unlock_user(target_fh, handle, total_size);
7669 
7670     if (put_user_s32(mid, mount_id)) {
7671         return -TARGET_EFAULT;
7672     }
7673 
7674     return ret;
7675 
7676 }
7677 #endif
7678 
7679 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
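/* Implement open_by_handle_at(): rebuild a host file_handle from the guest's
 * buffer and translate the open flags before calling the host syscall.
 */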
7680 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7681                                      abi_long flags)
7682 {
7683     struct file_handle *target_fh;
7684     struct file_handle *fh;
7685     unsigned int size, total_size;
7686     abi_long ret;
7687 
7688     if (get_user_s32(size, handle)) {
7689         return -TARGET_EFAULT;
7690     }
7691 
7692     total_size = sizeof(struct file_handle) + size;
7693     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7694     if (!target_fh) {
7695         return -TARGET_EFAULT;
7696     }
7697 
7698     fh = g_memdup(target_fh, total_size);
7699     fh->handle_bytes = size;
7700     fh->handle_type = tswap32(target_fh->handle_type);
7701 
7702     ret = get_errno(open_by_handle_at(mount_fd, fh,
7703                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7704 
7705     g_free(fh);
7706 
7707     unlock_user(target_fh, handle, total_size);
7708 
7709     return ret;
7710 }
7711 #endif
7712 
7713 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7714 
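/* Common implementation of signalfd()/signalfd4(): convert the guest signal
 * mask and flags, then register an fd translator for the new descriptor.
 */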
7715 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7716 {
7717     int host_flags;
7718     target_sigset_t *target_mask;
7719     sigset_t host_mask;
7720     abi_long ret;
7721 
7722     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7723         return -TARGET_EINVAL;
7724     }
7725     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7726         return -TARGET_EFAULT;
7727     }
7728 
7729     target_to_host_sigset(&host_mask, target_mask);
7730 
7731     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7732 
7733     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7734     if (ret >= 0) {
7735         fd_trans_register(ret, &target_signalfd_trans);
7736     }
7737 
7738     unlock_user_struct(target_mask, mask, 0);
7739 
7740     return ret;
7741 }
7742 #endif
7743 
7744 /* Map host to target signal numbers for the wait family of syscalls.
7745    Assume all other status bits are the same.  */
7746 int host_to_target_waitstatus(int status)
7747 {
7748     if (WIFSIGNALED(status)) {
7749         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7750     }
7751     if (WIFSTOPPED(status)) {
7752         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7753                | (status & 0xff);
7754     }
7755     return status;
7756 }
7757 
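/* Emulate /proc/self/cmdline using the argv strings recorded at exec time. */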
7758 static int open_self_cmdline(void *cpu_env, int fd)
7759 {
7760     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7761     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7762     int i;
7763 
7764     for (i = 0; i < bprm->argc; i++) {
7765         size_t len = strlen(bprm->argv[i]) + 1;
7766 
7767         if (write(fd, bprm->argv[i], len) != len) {
7768             return -1;
7769         }
7770     }
7771 
7772     return 0;
7773 }
7774 
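/* Emulate /proc/self/maps: walk the host's own mappings and report the
 * ranges that are valid guest memory, translated back to guest addresses.
 */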
7775 static int open_self_maps(void *cpu_env, int fd)
7776 {
7777     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7778     TaskState *ts = cpu->opaque;
7779     GSList *map_info = read_self_maps();
7780     GSList *s;
7781     int count;
7782 
7783     for (s = map_info; s; s = g_slist_next(s)) {
7784         MapInfo *e = (MapInfo *) s->data;
7785 
7786         if (h2g_valid(e->start)) {
7787             unsigned long min = e->start;
7788             unsigned long max = e->end;
7789             int flags = page_get_flags(h2g(min));
7790             const char *path;
7791 
7792             max = h2g_valid(max - 1) ?
7793                 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7794 
7795             if (page_check_range(h2g(min), max - min, flags) == -1) {
7796                 continue;
7797             }
7798 
7799             if (h2g(min) == ts->info->stack_limit) {
7800                 path = "[stack]";
7801             } else {
7802                 path = e->path;
7803             }
7804 
7805             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7806                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7807                             h2g(min), h2g(max - 1) + 1,
7808                             e->is_read ? 'r' : '-',
7809                             e->is_write ? 'w' : '-',
7810                             e->is_exec ? 'x' : '-',
7811                             e->is_priv ? 'p' : '-',
7812                             (uint64_t) e->offset, e->dev, e->inode);
7813             if (path) {
7814                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7815             } else {
7816                 dprintf(fd, "\n");
7817             }
7818         }
7819     }
7820 
7821     free_self_maps(map_info);
7822 
7823 #ifdef TARGET_VSYSCALL_PAGE
7824     /*
7825      * We only support execution from the vsyscall page.
7826      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7827      */
7828     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7829                     " --xp 00000000 00:00 0",
7830                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7831     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7832 #endif
7833 
7834     return 0;
7835 }
7836 
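/* Emulate /proc/self/stat.  Only the pid, comm and start-of-stack fields
 * are filled in; every other field is reported as 0.
 */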
7837 static int open_self_stat(void *cpu_env, int fd)
7838 {
7839     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7840     TaskState *ts = cpu->opaque;
7841     g_autoptr(GString) buf = g_string_new(NULL);
7842     int i;
7843 
7844     for (i = 0; i < 44; i++) {
7845         if (i == 0) {
7846             /* pid */
7847             g_string_printf(buf, FMT_pid " ", getpid());
7848         } else if (i == 1) {
7849             /* app name */
7850             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7851             bin = bin ? bin + 1 : ts->bprm->argv[0];
7852             g_string_printf(buf, "(%.15s) ", bin);
7853         } else if (i == 27) {
7854             /* stack bottom */
7855             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7856         } else {
7857             /* for the rest, there is MasterCard: just report 0 */
7858             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7859         }
7860 
7861         if (write(fd, buf->str, buf->len) != buf->len) {
7862             return -1;
7863         }
7864     }
7865 
7866     return 0;
7867 }
7868 
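/* Emulate /proc/self/auxv by copying the guest's saved auxiliary vector
 * out of its stack into the file.
 */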
7869 static int open_self_auxv(void *cpu_env, int fd)
7870 {
7871     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7872     TaskState *ts = cpu->opaque;
7873     abi_ulong auxv = ts->info->saved_auxv;
7874     abi_ulong len = ts->info->auxv_len;
7875     char *ptr;
7876 
7877     /*
7878      * The auxiliary vector is stored on the target process's stack;
7879      * read the whole vector and copy it to the file.
7880      */
7881     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7882     if (ptr != NULL) {
7883         while (len > 0) {
7884             ssize_t r;
7885             r = write(fd, ptr, len);
7886             if (r <= 0) {
7887                 break;
7888             }
7889             len -= r;
7890             ptr += r;
7891         }
7892         lseek(fd, 0, SEEK_SET);
7893         unlock_user(ptr, auxv, len);
7894     }
7895 
7896     return 0;
7897 }
7898 
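/* Return 1 if filename names the given entry in this process's own /proc
 * directory (via either /proc/self/ or /proc/<pid>/), 0 otherwise.
 */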
7899 static int is_proc_myself(const char *filename, const char *entry)
7900 {
7901     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7902         filename += strlen("/proc/");
7903         if (!strncmp(filename, "self/", strlen("self/"))) {
7904             filename += strlen("self/");
7905         } else if (*filename >= '1' && *filename <= '9') {
7906             char myself[80];
7907             snprintf(myself, sizeof(myself), "%d/", getpid());
7908             if (!strncmp(filename, myself, strlen(myself))) {
7909                 filename += strlen(myself);
7910             } else {
7911                 return 0;
7912             }
7913         } else {
7914             return 0;
7915         }
7916         if (!strcmp(filename, entry)) {
7917             return 1;
7918         }
7919     }
7920     return 0;
7921 }
7922 
7923 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7924     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7925 static int is_proc(const char *filename, const char *entry)
7926 {
7927     return strcmp(filename, entry) == 0;
7928 }
7929 #endif
7930 
7931 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7932 static int open_net_route(void *cpu_env, int fd)
7933 {
7934     FILE *fp;
7935     char *line = NULL;
7936     size_t len = 0;
7937     ssize_t read;
7938 
7939     fp = fopen("/proc/net/route", "r");
7940     if (fp == NULL) {
7941         return -1;
7942     }
7943 
7944     /* read header */
7945 
7946     read = getline(&line, &len, fp);
7947     dprintf(fd, "%s", line);
7948 
7949     /* read routes */
7950 
7951     while ((read = getline(&line, &len, fp)) != -1) {
7952         char iface[16];
7953         uint32_t dest, gw, mask;
7954         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7955         int fields;
7956 
7957         fields = sscanf(line,
7958                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7959                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7960                         &mask, &mtu, &window, &irtt);
7961         if (fields != 11) {
7962             continue;
7963         }
7964         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7965                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7966                 metric, tswap32(mask), mtu, window, irtt);
7967     }
7968 
7969     free(line);
7970     fclose(fp);
7971 
7972     return 0;
7973 }
7974 #endif
7975 
7976 #if defined(TARGET_SPARC)
7977 static int open_cpuinfo(void *cpu_env, int fd)
7978 {
7979     dprintf(fd, "type\t\t: sun4u\n");
7980     return 0;
7981 }
7982 #endif
7983 
7984 #if defined(TARGET_HPPA)
7985 static int open_cpuinfo(void *cpu_env, int fd)
7986 {
7987     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7988     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7989     dprintf(fd, "capabilities\t: os32\n");
7990     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7991     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7992     return 0;
7993 }
7994 #endif
7995 
7996 #if defined(TARGET_M68K)
7997 static int open_hardware(void *cpu_env, int fd)
7998 {
7999     dprintf(fd, "Model:\t\tqemu-m68k\n");
8000     return 0;
8001 }
8002 #endif
8003 
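/* Handle open()/openat() for the guest.  /proc/self/exe is redirected to the
 * binary being emulated, and a few /proc files whose contents must differ
 * from the host's (self/maps, self/stat, self/auxv, self/cmdline and,
 * depending on the target, cpuinfo, hardware or net/route) are emulated by
 * writing synthetic contents to an unlinked temporary file.  Everything else
 * is passed to the host openat().
 */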
8004 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8005 {
8006     struct fake_open {
8007         const char *filename;
8008         int (*fill)(void *cpu_env, int fd);
8009         int (*cmp)(const char *s1, const char *s2);
8010     };
8011     const struct fake_open *fake_open;
8012     static const struct fake_open fakes[] = {
8013         { "maps", open_self_maps, is_proc_myself },
8014         { "stat", open_self_stat, is_proc_myself },
8015         { "auxv", open_self_auxv, is_proc_myself },
8016         { "cmdline", open_self_cmdline, is_proc_myself },
8017 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8018         { "/proc/net/route", open_net_route, is_proc },
8019 #endif
8020 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8021         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8022 #endif
8023 #if defined(TARGET_M68K)
8024         { "/proc/hardware", open_hardware, is_proc },
8025 #endif
8026         { NULL, NULL, NULL }
8027     };
8028 
8029     if (is_proc_myself(pathname, "exe")) {
8030         int execfd = qemu_getauxval(AT_EXECFD);
8031         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8032     }
8033 
8034     for (fake_open = fakes; fake_open->filename; fake_open++) {
8035         if (fake_open->cmp(pathname, fake_open->filename)) {
8036             break;
8037         }
8038     }
8039 
8040     if (fake_open->filename) {
8041         const char *tmpdir;
8042         char filename[PATH_MAX];
8043         int fd, r;
8044 
8045         /* create a temporary file to hold the synthetic contents */
8046         tmpdir = getenv("TMPDIR");
8047         if (!tmpdir)
8048             tmpdir = "/tmp";
8049         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8050         fd = mkstemp(filename);
8051         if (fd < 0) {
8052             return fd;
8053         }
8054         unlink(filename);
8055 
8056         if ((r = fake_open->fill(cpu_env, fd))) {
8057             int e = errno;
8058             close(fd);
8059             errno = e;
8060             return r;
8061         }
8062         lseek(fd, 0, SEEK_SET);
8063 
8064         return fd;
8065     }
8066 
8067     return safe_openat(dirfd, path(pathname), flags, mode);
8068 }
8069 
8070 #define TIMER_MAGIC 0x0caf0000
8071 #define TIMER_MAGIC_MASK 0xffff0000
8072 
8073 /* Convert QEMU provided timer ID back to internal 16bit index format */
8074 static target_timer_t get_timer_id(abi_long arg)
8075 {
8076     target_timer_t timerid = arg;
8077 
8078     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8079         return -TARGET_EINVAL;
8080     }
8081 
8082     timerid &= 0xffff;
8083 
8084     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8085         return -TARGET_EINVAL;
8086     }
8087 
8088     return timerid;
8089 }
8090 
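/* Convert a CPU affinity mask from guest memory into the host's
 * unsigned long bitmap representation, one bit at a time.
 */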
8091 static int target_to_host_cpu_mask(unsigned long *host_mask,
8092                                    size_t host_size,
8093                                    abi_ulong target_addr,
8094                                    size_t target_size)
8095 {
8096     unsigned target_bits = sizeof(abi_ulong) * 8;
8097     unsigned host_bits = sizeof(*host_mask) * 8;
8098     abi_ulong *target_mask;
8099     unsigned i, j;
8100 
8101     assert(host_size >= target_size);
8102 
8103     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8104     if (!target_mask) {
8105         return -TARGET_EFAULT;
8106     }
8107     memset(host_mask, 0, host_size);
8108 
8109     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8110         unsigned bit = i * target_bits;
8111         abi_ulong val;
8112 
8113         __get_user(val, &target_mask[i]);
8114         for (j = 0; j < target_bits; j++, bit++) {
8115             if (val & (1UL << j)) {
8116                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8117             }
8118         }
8119     }
8120 
8121     unlock_user(target_mask, target_addr, 0);
8122     return 0;
8123 }
8124 
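/* Convert a host CPU affinity bitmap back into the guest's abi_ulong
 * representation in guest memory.
 */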
8125 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8126                                    size_t host_size,
8127                                    abi_ulong target_addr,
8128                                    size_t target_size)
8129 {
8130     unsigned target_bits = sizeof(abi_ulong) * 8;
8131     unsigned host_bits = sizeof(*host_mask) * 8;
8132     abi_ulong *target_mask;
8133     unsigned i, j;
8134 
8135     assert(host_size >= target_size);
8136 
8137     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8138     if (!target_mask) {
8139         return -TARGET_EFAULT;
8140     }
8141 
8142     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8143         unsigned bit = i * target_bits;
8144         abi_ulong val = 0;
8145 
8146         for (j = 0; j < target_bits; j++, bit++) {
8147             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8148                 val |= 1UL << j;
8149             }
8150         }
8151         __put_user(val, &target_mask[i]);
8152     }
8153 
8154     unlock_user(target_mask, target_addr, target_size);
8155     return 0;
8156 }
8157 
8158 /* This is an internal helper for do_syscall that gives it a single
8159  * return point, so that actions such as logging of syscall results
8160  * can be performed in one place.
8161  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8162  */
8163 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8164                             abi_long arg2, abi_long arg3, abi_long arg4,
8165                             abi_long arg5, abi_long arg6, abi_long arg7,
8166                             abi_long arg8)
8167 {
8168     CPUState *cpu = env_cpu(cpu_env);
8169     abi_long ret;
8170 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8171     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8172     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8173     || defined(TARGET_NR_statx)
8174     struct stat st;
8175 #endif
8176 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8177     || defined(TARGET_NR_fstatfs)
8178     struct statfs stfs;
8179 #endif
8180     void *p;
8181 
8182     switch(num) {
8183     case TARGET_NR_exit:
8184         /* In old applications this may be used to implement _exit(2).
8185            However in threaded applications it is used for thread termination,
8186            and _exit_group is used for application termination.
8187            Do thread termination if we have more than one thread.  */
8188 
8189         if (block_signals()) {
8190             return -TARGET_ERESTARTSYS;
8191         }
8192 
8193         pthread_mutex_lock(&clone_lock);
8194 
8195         if (CPU_NEXT(first_cpu)) {
8196             TaskState *ts = cpu->opaque;
8197 
8198             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8199             object_unref(OBJECT(cpu));
8200             /*
8201              * At this point the CPU should be unrealized and removed
8202              * from cpu lists. We can clean-up the rest of the thread
8203              * data without the lock held.
8204              */
8205 
8206             pthread_mutex_unlock(&clone_lock);
8207 
8208             if (ts->child_tidptr) {
8209                 put_user_u32(0, ts->child_tidptr);
8210                 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8211                           NULL, NULL, 0);
8212             }
8213             thread_cpu = NULL;
8214             g_free(ts);
8215             rcu_unregister_thread();
8216             pthread_exit(NULL);
8217         }
8218 
8219         pthread_mutex_unlock(&clone_lock);
8220         preexit_cleanup(cpu_env, arg1);
8221         _exit(arg1);
8222         return 0; /* avoid warning */
8223     case TARGET_NR_read:
8224         if (arg2 == 0 && arg3 == 0) {
8225             return get_errno(safe_read(arg1, 0, 0));
8226         } else {
8227             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8228                 return -TARGET_EFAULT;
8229             ret = get_errno(safe_read(arg1, p, arg3));
8230             if (ret >= 0 &&
8231                 fd_trans_host_to_target_data(arg1)) {
8232                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8233             }
8234             unlock_user(p, arg2, ret);
8235         }
8236         return ret;
8237     case TARGET_NR_write:
8238         if (arg2 == 0 && arg3 == 0) {
8239             return get_errno(safe_write(arg1, 0, 0));
8240         }
8241         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8242             return -TARGET_EFAULT;
8243         if (fd_trans_target_to_host_data(arg1)) {
8244             void *copy = g_malloc(arg3);
8245             memcpy(copy, p, arg3);
8246             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8247             if (ret >= 0) {
8248                 ret = get_errno(safe_write(arg1, copy, ret));
8249             }
8250             g_free(copy);
8251         } else {
8252             ret = get_errno(safe_write(arg1, p, arg3));
8253         }
8254         unlock_user(p, arg2, 0);
8255         return ret;
8256 
8257 #ifdef TARGET_NR_open
8258     case TARGET_NR_open:
8259         if (!(p = lock_user_string(arg1)))
8260             return -TARGET_EFAULT;
8261         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8262                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8263                                   arg3));
8264         fd_trans_unregister(ret);
8265         unlock_user(p, arg1, 0);
8266         return ret;
8267 #endif
8268     case TARGET_NR_openat:
8269         if (!(p = lock_user_string(arg2)))
8270             return -TARGET_EFAULT;
8271         ret = get_errno(do_openat(cpu_env, arg1, p,
8272                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8273                                   arg4));
8274         fd_trans_unregister(ret);
8275         unlock_user(p, arg2, 0);
8276         return ret;
8277 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8278     case TARGET_NR_name_to_handle_at:
8279         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8280         return ret;
8281 #endif
8282 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8283     case TARGET_NR_open_by_handle_at:
8284         ret = do_open_by_handle_at(arg1, arg2, arg3);
8285         fd_trans_unregister(ret);
8286         return ret;
8287 #endif
8288     case TARGET_NR_close:
8289         fd_trans_unregister(arg1);
8290         return get_errno(close(arg1));
8291 
8292     case TARGET_NR_brk:
8293         return do_brk(arg1);
8294 #ifdef TARGET_NR_fork
8295     case TARGET_NR_fork:
8296         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8297 #endif
8298 #ifdef TARGET_NR_waitpid
8299     case TARGET_NR_waitpid:
8300         {
8301             int status;
8302             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8303             if (!is_error(ret) && arg2 && ret
8304                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8305                 return -TARGET_EFAULT;
8306         }
8307         return ret;
8308 #endif
8309 #ifdef TARGET_NR_waitid
8310     case TARGET_NR_waitid:
8311         {
8312             siginfo_t info;
8313             info.si_pid = 0;
8314             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8315             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8316                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8317                     return -TARGET_EFAULT;
8318                 host_to_target_siginfo(p, &info);
8319                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8320             }
8321         }
8322         return ret;
8323 #endif
8324 #ifdef TARGET_NR_creat /* not on alpha */
8325     case TARGET_NR_creat:
8326         if (!(p = lock_user_string(arg1)))
8327             return -TARGET_EFAULT;
8328         ret = get_errno(creat(p, arg2));
8329         fd_trans_unregister(ret);
8330         unlock_user(p, arg1, 0);
8331         return ret;
8332 #endif
8333 #ifdef TARGET_NR_link
8334     case TARGET_NR_link:
8335         {
8336             void * p2;
8337             p = lock_user_string(arg1);
8338             p2 = lock_user_string(arg2);
8339             if (!p || !p2)
8340                 ret = -TARGET_EFAULT;
8341             else
8342                 ret = get_errno(link(p, p2));
8343             unlock_user(p2, arg2, 0);
8344             unlock_user(p, arg1, 0);
8345         }
8346         return ret;
8347 #endif
8348 #if defined(TARGET_NR_linkat)
8349     case TARGET_NR_linkat:
8350         {
8351             void * p2 = NULL;
8352             if (!arg2 || !arg4)
8353                 return -TARGET_EFAULT;
8354             p  = lock_user_string(arg2);
8355             p2 = lock_user_string(arg4);
8356             if (!p || !p2)
8357                 ret = -TARGET_EFAULT;
8358             else
8359                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8360             unlock_user(p, arg2, 0);
8361             unlock_user(p2, arg4, 0);
8362         }
8363         return ret;
8364 #endif
8365 #ifdef TARGET_NR_unlink
8366     case TARGET_NR_unlink:
8367         if (!(p = lock_user_string(arg1)))
8368             return -TARGET_EFAULT;
8369         ret = get_errno(unlink(p));
8370         unlock_user(p, arg1, 0);
8371         return ret;
8372 #endif
8373 #if defined(TARGET_NR_unlinkat)
8374     case TARGET_NR_unlinkat:
8375         if (!(p = lock_user_string(arg2)))
8376             return -TARGET_EFAULT;
8377         ret = get_errno(unlinkat(arg1, p, arg3));
8378         unlock_user(p, arg2, 0);
8379         return ret;
8380 #endif
8381     case TARGET_NR_execve:
8382         {
8383             char **argp, **envp;
8384             int argc, envc;
8385             abi_ulong gp;
8386             abi_ulong guest_argp;
8387             abi_ulong guest_envp;
8388             abi_ulong addr;
8389             char **q;
8390             int total_size = 0;
8391 
8392             argc = 0;
8393             guest_argp = arg2;
8394             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8395                 if (get_user_ual(addr, gp))
8396                     return -TARGET_EFAULT;
8397                 if (!addr)
8398                     break;
8399                 argc++;
8400             }
8401             envc = 0;
8402             guest_envp = arg3;
8403             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8404                 if (get_user_ual(addr, gp))
8405                     return -TARGET_EFAULT;
8406                 if (!addr)
8407                     break;
8408                 envc++;
8409             }
8410 
8411             argp = g_new0(char *, argc + 1);
8412             envp = g_new0(char *, envc + 1);
8413 
8414             for (gp = guest_argp, q = argp; gp;
8415                   gp += sizeof(abi_ulong), q++) {
8416                 if (get_user_ual(addr, gp))
8417                     goto execve_efault;
8418                 if (!addr)
8419                     break;
8420                 if (!(*q = lock_user_string(addr)))
8421                     goto execve_efault;
8422                 total_size += strlen(*q) + 1;
8423             }
8424             *q = NULL;
8425 
8426             for (gp = guest_envp, q = envp; gp;
8427                   gp += sizeof(abi_ulong), q++) {
8428                 if (get_user_ual(addr, gp))
8429                     goto execve_efault;
8430                 if (!addr)
8431                     break;
8432                 if (!(*q = lock_user_string(addr)))
8433                     goto execve_efault;
8434                 total_size += strlen(*q) + 1;
8435             }
8436             *q = NULL;
8437 
8438             if (!(p = lock_user_string(arg1)))
8439                 goto execve_efault;
8440             /* Although execve() is not an interruptible syscall it is
8441              * a special case where we must use the safe_syscall wrapper:
8442              * if we allow a signal to happen before we make the host
8443              * syscall then we will 'lose' it, because at the point of
8444              * execve the process leaves QEMU's control. So we use the
8445              * safe syscall wrapper to ensure that we either take the
8446              * signal as a guest signal, or else it does not happen
8447              * before the execve completes and makes it the other
8448              * program's problem.
8449              */
8450             ret = get_errno(safe_execve(p, argp, envp));
8451             unlock_user(p, arg1, 0);
8452 
8453             goto execve_end;
8454 
8455         execve_efault:
8456             ret = -TARGET_EFAULT;
8457 
8458         execve_end:
8459             for (gp = guest_argp, q = argp; *q;
8460                   gp += sizeof(abi_ulong), q++) {
8461                 if (get_user_ual(addr, gp)
8462                     || !addr)
8463                     break;
8464                 unlock_user(*q, addr, 0);
8465             }
8466             for (gp = guest_envp, q = envp; *q;
8467                   gp += sizeof(abi_ulong), q++) {
8468                 if (get_user_ual(addr, gp)
8469                     || !addr)
8470                     break;
8471                 unlock_user(*q, addr, 0);
8472             }
8473 
8474             g_free(argp);
8475             g_free(envp);
8476         }
8477         return ret;
8478     case TARGET_NR_chdir:
8479         if (!(p = lock_user_string(arg1)))
8480             return -TARGET_EFAULT;
8481         ret = get_errno(chdir(p));
8482         unlock_user(p, arg1, 0);
8483         return ret;
8484 #ifdef TARGET_NR_time
8485     case TARGET_NR_time:
8486         {
8487             time_t host_time;
8488             ret = get_errno(time(&host_time));
8489             if (!is_error(ret)
8490                 && arg1
8491                 && put_user_sal(host_time, arg1))
8492                 return -TARGET_EFAULT;
8493         }
8494         return ret;
8495 #endif
8496 #ifdef TARGET_NR_mknod
8497     case TARGET_NR_mknod:
8498         if (!(p = lock_user_string(arg1)))
8499             return -TARGET_EFAULT;
8500         ret = get_errno(mknod(p, arg2, arg3));
8501         unlock_user(p, arg1, 0);
8502         return ret;
8503 #endif
8504 #if defined(TARGET_NR_mknodat)
8505     case TARGET_NR_mknodat:
8506         if (!(p = lock_user_string(arg2)))
8507             return -TARGET_EFAULT;
8508         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8509         unlock_user(p, arg2, 0);
8510         return ret;
8511 #endif
8512 #ifdef TARGET_NR_chmod
8513     case TARGET_NR_chmod:
8514         if (!(p = lock_user_string(arg1)))
8515             return -TARGET_EFAULT;
8516         ret = get_errno(chmod(p, arg2));
8517         unlock_user(p, arg1, 0);
8518         return ret;
8519 #endif
8520 #ifdef TARGET_NR_lseek
8521     case TARGET_NR_lseek:
8522         return get_errno(lseek(arg1, arg2, arg3));
8523 #endif
8524 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8525     /* Alpha specific */
8526     case TARGET_NR_getxpid:
8527         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8528         return get_errno(getpid());
8529 #endif
8530 #ifdef TARGET_NR_getpid
8531     case TARGET_NR_getpid:
8532         return get_errno(getpid());
8533 #endif
8534     case TARGET_NR_mount:
8535         {
8536             /* need to look at the data field */
8537             void *p2, *p3;
8538 
8539             if (arg1) {
8540                 p = lock_user_string(arg1);
8541                 if (!p) {
8542                     return -TARGET_EFAULT;
8543                 }
8544             } else {
8545                 p = NULL;
8546             }
8547 
8548             p2 = lock_user_string(arg2);
8549             if (!p2) {
8550                 if (arg1) {
8551                     unlock_user(p, arg1, 0);
8552                 }
8553                 return -TARGET_EFAULT;
8554             }
8555 
8556             if (arg3) {
8557                 p3 = lock_user_string(arg3);
8558                 if (!p3) {
8559                     if (arg1) {
8560                         unlock_user(p, arg1, 0);
8561                     }
8562                     unlock_user(p2, arg2, 0);
8563                     return -TARGET_EFAULT;
8564                 }
8565             } else {
8566                 p3 = NULL;
8567             }
8568 
8569             /* FIXME - arg5 should be locked, but it isn't clear how to
8570              * do that since it's not guaranteed to be a NULL-terminated
8571              * string.
8572              */
8573             if (!arg5) {
8574                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8575             } else {
8576                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8577             }
8578             ret = get_errno(ret);
8579 
8580             if (arg1) {
8581                 unlock_user(p, arg1, 0);
8582             }
8583             unlock_user(p2, arg2, 0);
8584             if (arg3) {
8585                 unlock_user(p3, arg3, 0);
8586             }
8587         }
8588         return ret;
8589 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8590 #if defined(TARGET_NR_umount)
8591     case TARGET_NR_umount:
8592 #endif
8593 #if defined(TARGET_NR_oldumount)
8594     case TARGET_NR_oldumount:
8595 #endif
8596         if (!(p = lock_user_string(arg1)))
8597             return -TARGET_EFAULT;
8598         ret = get_errno(umount(p));
8599         unlock_user(p, arg1, 0);
8600         return ret;
8601 #endif
8602 #ifdef TARGET_NR_stime /* not on alpha */
8603     case TARGET_NR_stime:
8604         {
8605             struct timespec ts;
8606             ts.tv_nsec = 0;
8607             if (get_user_sal(ts.tv_sec, arg1)) {
8608                 return -TARGET_EFAULT;
8609             }
8610             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8611         }
8612 #endif
8613 #ifdef TARGET_NR_alarm /* not on alpha */
8614     case TARGET_NR_alarm:
8615         return alarm(arg1);
8616 #endif
8617 #ifdef TARGET_NR_pause /* not on alpha */
8618     case TARGET_NR_pause:
8619         if (!block_signals()) {
8620             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8621         }
8622         return -TARGET_EINTR;
8623 #endif
8624 #ifdef TARGET_NR_utime
8625     case TARGET_NR_utime:
8626         {
8627             struct utimbuf tbuf, *host_tbuf;
8628             struct target_utimbuf *target_tbuf;
8629             if (arg2) {
8630                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8631                     return -TARGET_EFAULT;
8632                 tbuf.actime = tswapal(target_tbuf->actime);
8633                 tbuf.modtime = tswapal(target_tbuf->modtime);
8634                 unlock_user_struct(target_tbuf, arg2, 0);
8635                 host_tbuf = &tbuf;
8636             } else {
8637                 host_tbuf = NULL;
8638             }
8639             if (!(p = lock_user_string(arg1)))
8640                 return -TARGET_EFAULT;
8641             ret = get_errno(utime(p, host_tbuf));
8642             unlock_user(p, arg1, 0);
8643         }
8644         return ret;
8645 #endif
8646 #ifdef TARGET_NR_utimes
8647     case TARGET_NR_utimes:
8648         {
8649             struct timeval *tvp, tv[2];
8650             if (arg2) {
8651                 if (copy_from_user_timeval(&tv[0], arg2)
8652                     || copy_from_user_timeval(&tv[1],
8653                                               arg2 + sizeof(struct target_timeval)))
8654                     return -TARGET_EFAULT;
8655                 tvp = tv;
8656             } else {
8657                 tvp = NULL;
8658             }
8659             if (!(p = lock_user_string(arg1)))
8660                 return -TARGET_EFAULT;
8661             ret = get_errno(utimes(p, tvp));
8662             unlock_user(p, arg1, 0);
8663         }
8664         return ret;
8665 #endif
8666 #if defined(TARGET_NR_futimesat)
8667     case TARGET_NR_futimesat:
8668         {
8669             struct timeval *tvp, tv[2];
8670             if (arg3) {
8671                 if (copy_from_user_timeval(&tv[0], arg3)
8672                     || copy_from_user_timeval(&tv[1],
8673                                               arg3 + sizeof(struct target_timeval)))
8674                     return -TARGET_EFAULT;
8675                 tvp = tv;
8676             } else {
8677                 tvp = NULL;
8678             }
8679             if (!(p = lock_user_string(arg2))) {
8680                 return -TARGET_EFAULT;
8681             }
8682             ret = get_errno(futimesat(arg1, path(p), tvp));
8683             unlock_user(p, arg2, 0);
8684         }
8685         return ret;
8686 #endif
8687 #ifdef TARGET_NR_access
8688     case TARGET_NR_access:
8689         if (!(p = lock_user_string(arg1))) {
8690             return -TARGET_EFAULT;
8691         }
8692         ret = get_errno(access(path(p), arg2));
8693         unlock_user(p, arg1, 0);
8694         return ret;
8695 #endif
8696 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8697     case TARGET_NR_faccessat:
8698         if (!(p = lock_user_string(arg2))) {
8699             return -TARGET_EFAULT;
8700         }
8701         ret = get_errno(faccessat(arg1, p, arg3, 0));
8702         unlock_user(p, arg2, 0);
8703         return ret;
8704 #endif
8705 #ifdef TARGET_NR_nice /* not on alpha */
8706     case TARGET_NR_nice:
8707         return get_errno(nice(arg1));
8708 #endif
8709     case TARGET_NR_sync:
8710         sync();
8711         return 0;
8712 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8713     case TARGET_NR_syncfs:
8714         return get_errno(syncfs(arg1));
8715 #endif
8716     case TARGET_NR_kill:
8717         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8718 #ifdef TARGET_NR_rename
8719     case TARGET_NR_rename:
8720         {
8721             void *p2;
8722             p = lock_user_string(arg1);
8723             p2 = lock_user_string(arg2);
8724             if (!p || !p2)
8725                 ret = -TARGET_EFAULT;
8726             else
8727                 ret = get_errno(rename(p, p2));
8728             unlock_user(p2, arg2, 0);
8729             unlock_user(p, arg1, 0);
8730         }
8731         return ret;
8732 #endif
8733 #if defined(TARGET_NR_renameat)
8734     case TARGET_NR_renameat:
8735         {
8736             void *p2;
8737             p  = lock_user_string(arg2);
8738             p2 = lock_user_string(arg4);
8739             if (!p || !p2)
8740                 ret = -TARGET_EFAULT;
8741             else
8742                 ret = get_errno(renameat(arg1, p, arg3, p2));
8743             unlock_user(p2, arg4, 0);
8744             unlock_user(p, arg2, 0);
8745         }
8746         return ret;
8747 #endif
8748 #if defined(TARGET_NR_renameat2)
8749     case TARGET_NR_renameat2:
8750         {
8751             void *p2;
8752             p  = lock_user_string(arg2);
8753             p2 = lock_user_string(arg4);
8754             if (!p || !p2) {
8755                 ret = -TARGET_EFAULT;
8756             } else {
8757                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8758             }
8759             unlock_user(p2, arg4, 0);
8760             unlock_user(p, arg2, 0);
8761         }
8762         return ret;
8763 #endif
8764 #ifdef TARGET_NR_mkdir
8765     case TARGET_NR_mkdir:
8766         if (!(p = lock_user_string(arg1)))
8767             return -TARGET_EFAULT;
8768         ret = get_errno(mkdir(p, arg2));
8769         unlock_user(p, arg1, 0);
8770         return ret;
8771 #endif
8772 #if defined(TARGET_NR_mkdirat)
8773     case TARGET_NR_mkdirat:
8774         if (!(p = lock_user_string(arg2)))
8775             return -TARGET_EFAULT;
8776         ret = get_errno(mkdirat(arg1, p, arg3));
8777         unlock_user(p, arg2, 0);
8778         return ret;
8779 #endif
8780 #ifdef TARGET_NR_rmdir
8781     case TARGET_NR_rmdir:
8782         if (!(p = lock_user_string(arg1)))
8783             return -TARGET_EFAULT;
8784         ret = get_errno(rmdir(p));
8785         unlock_user(p, arg1, 0);
8786         return ret;
8787 #endif
8788     case TARGET_NR_dup:
8789         ret = get_errno(dup(arg1));
8790         if (ret >= 0) {
8791             fd_trans_dup(arg1, ret);
8792         }
8793         return ret;
8794 #ifdef TARGET_NR_pipe
8795     case TARGET_NR_pipe:
8796         return do_pipe(cpu_env, arg1, 0, 0);
8797 #endif
8798 #ifdef TARGET_NR_pipe2
8799     case TARGET_NR_pipe2:
8800         return do_pipe(cpu_env, arg1,
8801                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8802 #endif
8803     case TARGET_NR_times:
8804         {
8805             struct target_tms *tmsp;
8806             struct tms tms;
8807             ret = get_errno(times(&tms));
8808             if (arg1) {
8809                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8810                 if (!tmsp)
8811                     return -TARGET_EFAULT;
8812                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8813                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8814                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8815                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8816             }
8817             if (!is_error(ret))
8818                 ret = host_to_target_clock_t(ret);
8819         }
8820         return ret;
8821     case TARGET_NR_acct:
8822         if (arg1 == 0) {
8823             ret = get_errno(acct(NULL));
8824         } else {
8825             if (!(p = lock_user_string(arg1))) {
8826                 return -TARGET_EFAULT;
8827             }
8828             ret = get_errno(acct(path(p)));
8829             unlock_user(p, arg1, 0);
8830         }
8831         return ret;
8832 #ifdef TARGET_NR_umount2
8833     case TARGET_NR_umount2:
8834         if (!(p = lock_user_string(arg1)))
8835             return -TARGET_EFAULT;
8836         ret = get_errno(umount2(p, arg2));
8837         unlock_user(p, arg1, 0);
8838         return ret;
8839 #endif
8840     case TARGET_NR_ioctl:
8841         return do_ioctl(arg1, arg2, arg3);
8842 #ifdef TARGET_NR_fcntl
8843     case TARGET_NR_fcntl:
8844         return do_fcntl(arg1, arg2, arg3);
8845 #endif
8846     case TARGET_NR_setpgid:
8847         return get_errno(setpgid(arg1, arg2));
8848     case TARGET_NR_umask:
8849         return get_errno(umask(arg1));
8850     case TARGET_NR_chroot:
8851         if (!(p = lock_user_string(arg1)))
8852             return -TARGET_EFAULT;
8853         ret = get_errno(chroot(p));
8854         unlock_user(p, arg1, 0);
8855         return ret;
8856 #ifdef TARGET_NR_dup2
8857     case TARGET_NR_dup2:
8858         ret = get_errno(dup2(arg1, arg2));
8859         if (ret >= 0) {
8860             fd_trans_dup(arg1, arg2);
8861         }
8862         return ret;
8863 #endif
8864 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8865     case TARGET_NR_dup3:
8866     {
8867         int host_flags;
8868 
8869         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8870             return -TARGET_EINVAL;
8871         }
8872         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8873         ret = get_errno(dup3(arg1, arg2, host_flags));
8874         if (ret >= 0) {
8875             fd_trans_dup(arg1, arg2);
8876         }
8877         return ret;
8878     }
8879 #endif
8880 #ifdef TARGET_NR_getppid /* not on alpha */
8881     case TARGET_NR_getppid:
8882         return get_errno(getppid());
8883 #endif
8884 #ifdef TARGET_NR_getpgrp
8885     case TARGET_NR_getpgrp:
8886         return get_errno(getpgrp());
8887 #endif
8888     case TARGET_NR_setsid:
8889         return get_errno(setsid());
8890 #ifdef TARGET_NR_sigaction
8891     case TARGET_NR_sigaction:
8892         {
8893 #if defined(TARGET_ALPHA)
8894             struct target_sigaction act, oact, *pact = 0;
8895             struct target_old_sigaction *old_act;
8896             if (arg2) {
8897                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8898                     return -TARGET_EFAULT;
8899                 act._sa_handler = old_act->_sa_handler;
8900                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8901                 act.sa_flags = old_act->sa_flags;
8902                 act.sa_restorer = 0;
8903                 unlock_user_struct(old_act, arg2, 0);
8904                 pact = &act;
8905             }
8906             ret = get_errno(do_sigaction(arg1, pact, &oact));
8907             if (!is_error(ret) && arg3) {
8908                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8909                     return -TARGET_EFAULT;
8910                 old_act->_sa_handler = oact._sa_handler;
8911                 old_act->sa_mask = oact.sa_mask.sig[0];
8912                 old_act->sa_flags = oact.sa_flags;
8913                 unlock_user_struct(old_act, arg3, 1);
8914             }
8915 #elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
8943 #else
8944             struct target_old_sigaction *old_act;
8945             struct target_sigaction act, oact, *pact;
8946             if (arg2) {
8947                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8948                     return -TARGET_EFAULT;
8949                 act._sa_handler = old_act->_sa_handler;
8950                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8951                 act.sa_flags = old_act->sa_flags;
8952                 act.sa_restorer = old_act->sa_restorer;
8953 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8954                 act.ka_restorer = 0;
8955 #endif
8956                 unlock_user_struct(old_act, arg2, 0);
8957                 pact = &act;
8958             } else {
8959                 pact = NULL;
8960             }
8961             ret = get_errno(do_sigaction(arg1, pact, &oact));
8962             if (!is_error(ret) && arg3) {
8963                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8964                     return -TARGET_EFAULT;
8965                 old_act->_sa_handler = oact._sa_handler;
8966                 old_act->sa_mask = oact.sa_mask.sig[0];
8967                 old_act->sa_flags = oact.sa_flags;
8968                 old_act->sa_restorer = oact.sa_restorer;
8969                 unlock_user_struct(old_act, arg3, 1);
8970             }
8971 #endif
8972         }
8973         return ret;
8974 #endif
8975     case TARGET_NR_rt_sigaction:
8976         {
8977 #if defined(TARGET_ALPHA)
8978             /* For Alpha and SPARC this is a 5 argument syscall, with
8979              * a 'restorer' parameter which must be copied into the
8980              * sa_restorer field of the sigaction struct.
8981              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8982              * and arg5 is the sigsetsize.
8983              * Alpha also has a separate rt_sigaction struct that it uses
8984              * here; SPARC uses the usual sigaction struct.
8985              */
8986             struct target_rt_sigaction *rt_act;
8987             struct target_sigaction act, oact, *pact = 0;
8988 
8989             if (arg4 != sizeof(target_sigset_t)) {
8990                 return -TARGET_EINVAL;
8991             }
8992             if (arg2) {
8993                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8994                     return -TARGET_EFAULT;
8995                 act._sa_handler = rt_act->_sa_handler;
8996                 act.sa_mask = rt_act->sa_mask;
8997                 act.sa_flags = rt_act->sa_flags;
8998                 act.sa_restorer = arg5;
8999                 unlock_user_struct(rt_act, arg2, 0);
9000                 pact = &act;
9001             }
9002             ret = get_errno(do_sigaction(arg1, pact, &oact));
9003             if (!is_error(ret) && arg3) {
9004                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
9005                     return -TARGET_EFAULT;
9006                 rt_act->_sa_handler = oact._sa_handler;
9007                 rt_act->sa_mask = oact.sa_mask;
9008                 rt_act->sa_flags = oact.sa_flags;
9009                 unlock_user_struct(rt_act, arg3, 1);
9010             }
9011 #else
9012 #ifdef TARGET_SPARC
9013             target_ulong restorer = arg4;
9014             target_ulong sigsetsize = arg5;
9015 #else
9016             target_ulong sigsetsize = arg4;
9017 #endif
9018             struct target_sigaction *act;
9019             struct target_sigaction *oact;
9020 
9021             if (sigsetsize != sizeof(target_sigset_t)) {
9022                 return -TARGET_EINVAL;
9023             }
9024             if (arg2) {
9025                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9026                     return -TARGET_EFAULT;
9027                 }
9028 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9029                 act->ka_restorer = restorer;
9030 #endif
9031             } else {
9032                 act = NULL;
9033             }
9034             if (arg3) {
9035                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9036                     ret = -TARGET_EFAULT;
9037                     goto rt_sigaction_fail;
9038                 }
            } else {
                oact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
            if (oact) {
                unlock_user_struct(oact, arg3, 1);
            }
9047 #endif
9048         }
9049         return ret;
9050 #ifdef TARGET_NR_sgetmask /* not on alpha */
9051     case TARGET_NR_sgetmask:
9052         {
9053             sigset_t cur_set;
9054             abi_ulong target_set;
9055             ret = do_sigprocmask(0, NULL, &cur_set);
9056             if (!ret) {
9057                 host_to_target_old_sigset(&target_set, &cur_set);
9058                 ret = target_set;
9059             }
9060         }
9061         return ret;
9062 #endif
9063 #ifdef TARGET_NR_ssetmask /* not on alpha */
9064     case TARGET_NR_ssetmask:
9065         {
9066             sigset_t set, oset;
9067             abi_ulong target_set = arg1;
9068             target_to_host_old_sigset(&set, &target_set);
9069             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9070             if (!ret) {
9071                 host_to_target_old_sigset(&target_set, &oset);
9072                 ret = target_set;
9073             }
9074         }
9075         return ret;
9076 #endif
9077 #ifdef TARGET_NR_sigprocmask
9078     case TARGET_NR_sigprocmask:
9079         {
9080 #if defined(TARGET_ALPHA)
9081             sigset_t set, oldset;
9082             abi_ulong mask;
9083             int how;
9084 
9085             switch (arg1) {
9086             case TARGET_SIG_BLOCK:
9087                 how = SIG_BLOCK;
9088                 break;
9089             case TARGET_SIG_UNBLOCK:
9090                 how = SIG_UNBLOCK;
9091                 break;
9092             case TARGET_SIG_SETMASK:
9093                 how = SIG_SETMASK;
9094                 break;
9095             default:
9096                 return -TARGET_EINVAL;
9097             }
9098             mask = arg2;
9099             target_to_host_old_sigset(&set, &mask);
9100 
9101             ret = do_sigprocmask(how, &set, &oldset);
9102             if (!is_error(ret)) {
9103                 host_to_target_old_sigset(&mask, &oldset);
9104                 ret = mask;
9105                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9106             }
9107 #else
9108             sigset_t set, oldset, *set_ptr;
9109             int how;
9110 
9111             if (arg2) {
9112                 switch (arg1) {
9113                 case TARGET_SIG_BLOCK:
9114                     how = SIG_BLOCK;
9115                     break;
9116                 case TARGET_SIG_UNBLOCK:
9117                     how = SIG_UNBLOCK;
9118                     break;
9119                 case TARGET_SIG_SETMASK:
9120                     how = SIG_SETMASK;
9121                     break;
9122                 default:
9123                     return -TARGET_EINVAL;
9124                 }
9125                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9126                     return -TARGET_EFAULT;
9127                 target_to_host_old_sigset(&set, p);
9128                 unlock_user(p, arg2, 0);
9129                 set_ptr = &set;
9130             } else {
9131                 how = 0;
9132                 set_ptr = NULL;
9133             }
9134             ret = do_sigprocmask(how, set_ptr, &oldset);
9135             if (!is_error(ret) && arg3) {
9136                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9137                     return -TARGET_EFAULT;
9138                 host_to_target_old_sigset(p, &oldset);
9139                 unlock_user(p, arg3, sizeof(target_sigset_t));
9140             }
9141 #endif
9142         }
9143         return ret;
9144 #endif
9145     case TARGET_NR_rt_sigprocmask:
9146         {
9147             int how = arg1;
9148             sigset_t set, oldset, *set_ptr;
9149 
9150             if (arg4 != sizeof(target_sigset_t)) {
9151                 return -TARGET_EINVAL;
9152             }
9153 
9154             if (arg2) {
9155                 switch(how) {
9156                 case TARGET_SIG_BLOCK:
9157                     how = SIG_BLOCK;
9158                     break;
9159                 case TARGET_SIG_UNBLOCK:
9160                     how = SIG_UNBLOCK;
9161                     break;
9162                 case TARGET_SIG_SETMASK:
9163                     how = SIG_SETMASK;
9164                     break;
9165                 default:
9166                     return -TARGET_EINVAL;
9167                 }
9168                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9169                     return -TARGET_EFAULT;
9170                 target_to_host_sigset(&set, p);
9171                 unlock_user(p, arg2, 0);
9172                 set_ptr = &set;
9173             } else {
9174                 how = 0;
9175                 set_ptr = NULL;
9176             }
9177             ret = do_sigprocmask(how, set_ptr, &oldset);
9178             if (!is_error(ret) && arg3) {
9179                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9180                     return -TARGET_EFAULT;
9181                 host_to_target_sigset(p, &oldset);
9182                 unlock_user(p, arg3, sizeof(target_sigset_t));
9183             }
9184         }
9185         return ret;
9186 #ifdef TARGET_NR_sigpending
9187     case TARGET_NR_sigpending:
9188         {
9189             sigset_t set;
9190             ret = get_errno(sigpending(&set));
9191             if (!is_error(ret)) {
9192                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9193                     return -TARGET_EFAULT;
9194                 host_to_target_old_sigset(p, &set);
9195                 unlock_user(p, arg1, sizeof(target_sigset_t));
9196             }
9197         }
9198         return ret;
9199 #endif
9200     case TARGET_NR_rt_sigpending:
9201         {
9202             sigset_t set;
9203 
9204             /* Yes, this check is >, not != like most. We follow the kernel's
9205              * logic and it does it like this because it implements
9206              * NR_sigpending through the same code path, and in that case
9207              * the old_sigset_t is smaller in size.
9208              */
9209             if (arg2 > sizeof(target_sigset_t)) {
9210                 return -TARGET_EINVAL;
9211             }
9212 
9213             ret = get_errno(sigpending(&set));
9214             if (!is_error(ret)) {
9215                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9216                     return -TARGET_EFAULT;
9217                 host_to_target_sigset(p, &set);
9218                 unlock_user(p, arg1, sizeof(target_sigset_t));
9219             }
9220         }
9221         return ret;
9222 #ifdef TARGET_NR_sigsuspend
9223     case TARGET_NR_sigsuspend:
9224         {
9225             TaskState *ts = cpu->opaque;
9226 #if defined(TARGET_ALPHA)
9227             abi_ulong mask = arg1;
9228             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9229 #else
9230             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9231                 return -TARGET_EFAULT;
9232             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9233             unlock_user(p, arg1, 0);
9234 #endif
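            /* safe_rt_sigsuspend() blocks until a signal arrives.  Unless
             * the syscall is being restarted (-TARGET_ERESTARTSYS), record
             * that we are in sigsuspend so the signal-return path is
             * expected to restore the saved mask.
             */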
9235             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9236                                                SIGSET_T_SIZE));
9237             if (ret != -TARGET_ERESTARTSYS) {
9238                 ts->in_sigsuspend = 1;
9239             }
9240         }
9241         return ret;
9242 #endif
9243     case TARGET_NR_rt_sigsuspend:
9244         {
9245             TaskState *ts = cpu->opaque;
9246 
9247             if (arg2 != sizeof(target_sigset_t)) {
9248                 return -TARGET_EINVAL;
9249             }
9250             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9251                 return -TARGET_EFAULT;
9252             target_to_host_sigset(&ts->sigsuspend_mask, p);
9253             unlock_user(p, arg1, 0);
9254             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9255                                                SIGSET_T_SIZE));
9256             if (ret != -TARGET_ERESTARTSYS) {
9257                 ts->in_sigsuspend = 1;
9258             }
9259         }
9260         return ret;
9261 #ifdef TARGET_NR_rt_sigtimedwait
9262     case TARGET_NR_rt_sigtimedwait:
9263         {
9264             sigset_t set;
9265             struct timespec uts, *puts;
9266             siginfo_t uinfo;
9267 
9268             if (arg4 != sizeof(target_sigset_t)) {
9269                 return -TARGET_EINVAL;
9270             }
9271 
9272             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9273                 return -TARGET_EFAULT;
9274             target_to_host_sigset(&set, p);
9275             unlock_user(p, arg1, 0);
9276             if (arg3) {
9277                 puts = &uts;
9278                 if (target_to_host_timespec(puts, arg3)) {
9279                     return -TARGET_EFAULT;
9280                 }
9281             } else {
9282                 puts = NULL;
9283             }
9284             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9285                                                  SIGSET_T_SIZE));
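            /* On success the return value is the host signal number that
             * was consumed; it is translated back to the target numbering
             * below.
             */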
9286             if (!is_error(ret)) {
9287                 if (arg2) {
9288                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9289                                   0);
9290                     if (!p) {
9291                         return -TARGET_EFAULT;
9292                     }
9293                     host_to_target_siginfo(p, &uinfo);
9294                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9295                 }
9296                 ret = host_to_target_signal(ret);
9297             }
9298         }
9299         return ret;
9300 #endif
9301 #ifdef TARGET_NR_rt_sigtimedwait_time64
9302     case TARGET_NR_rt_sigtimedwait_time64:
9303         {
9304             sigset_t set;
9305             struct timespec uts, *puts;
9306             siginfo_t uinfo;
9307 
9308             if (arg4 != sizeof(target_sigset_t)) {
9309                 return -TARGET_EINVAL;
9310             }
9311 
9312             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9313             if (!p) {
9314                 return -TARGET_EFAULT;
9315             }
9316             target_to_host_sigset(&set, p);
9317             unlock_user(p, arg1, 0);
9318             if (arg3) {
9319                 puts = &uts;
9320                 if (target_to_host_timespec64(puts, arg3)) {
9321                     return -TARGET_EFAULT;
9322                 }
9323             } else {
9324                 puts = NULL;
9325             }
9326             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9327                                                  SIGSET_T_SIZE));
9328             if (!is_error(ret)) {
9329                 if (arg2) {
9330                     p = lock_user(VERIFY_WRITE, arg2,
9331                                   sizeof(target_siginfo_t), 0);
9332                     if (!p) {
9333                         return -TARGET_EFAULT;
9334                     }
9335                     host_to_target_siginfo(p, &uinfo);
9336                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9337                 }
9338                 ret = host_to_target_signal(ret);
9339             }
9340         }
9341         return ret;
9342 #endif
9343     case TARGET_NR_rt_sigqueueinfo:
9344         {
9345             siginfo_t uinfo;
9346 
9347             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9348             if (!p) {
9349                 return -TARGET_EFAULT;
9350             }
9351             target_to_host_siginfo(&uinfo, p);
9352             unlock_user(p, arg3, 0);
9353             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9354         }
9355         return ret;
9356     case TARGET_NR_rt_tgsigqueueinfo:
9357         {
9358             siginfo_t uinfo;
9359 
9360             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9361             if (!p) {
9362                 return -TARGET_EFAULT;
9363             }
9364             target_to_host_siginfo(&uinfo, p);
9365             unlock_user(p, arg4, 0);
9366             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9367         }
9368         return ret;
9369 #ifdef TARGET_NR_sigreturn
9370     case TARGET_NR_sigreturn:
9371         if (block_signals()) {
9372             return -TARGET_ERESTARTSYS;
9373         }
9374         return do_sigreturn(cpu_env);
9375 #endif
9376     case TARGET_NR_rt_sigreturn:
9377         if (block_signals()) {
9378             return -TARGET_ERESTARTSYS;
9379         }
9380         return do_rt_sigreturn(cpu_env);
9381     case TARGET_NR_sethostname:
9382         if (!(p = lock_user_string(arg1)))
9383             return -TARGET_EFAULT;
9384         ret = get_errno(sethostname(p, arg2));
9385         unlock_user(p, arg1, 0);
9386         return ret;
9387 #ifdef TARGET_NR_setrlimit
9388     case TARGET_NR_setrlimit:
9389         {
9390             int resource = target_to_host_resource(arg1);
9391             struct target_rlimit *target_rlim;
9392             struct rlimit rlim;
9393             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9394                 return -TARGET_EFAULT;
9395             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9396             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9397             unlock_user_struct(target_rlim, arg2, 0);
9398             /*
9399              * If we just passed through resource limit settings for memory then
9400              * they would also apply to QEMU's own allocations, and QEMU will
9401              * crash or hang or die if its allocations fail. Ideally we would
9402              * track the guest allocations in QEMU and apply the limits ourselves.
9403              * For now, just tell the guest the call succeeded but don't actually
9404              * limit anything.
9405              */
9406             if (resource != RLIMIT_AS &&
9407                 resource != RLIMIT_DATA &&
9408                 resource != RLIMIT_STACK) {
9409                 return get_errno(setrlimit(resource, &rlim));
9410             } else {
9411                 return 0;
9412             }
9413         }
9414 #endif
9415 #ifdef TARGET_NR_getrlimit
9416     case TARGET_NR_getrlimit:
9417         {
9418             int resource = target_to_host_resource(arg1);
9419             struct target_rlimit *target_rlim;
9420             struct rlimit rlim;
9421 
9422             ret = get_errno(getrlimit(resource, &rlim));
9423             if (!is_error(ret)) {
9424                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9425                     return -TARGET_EFAULT;
9426                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9427                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9428                 unlock_user_struct(target_rlim, arg2, 1);
9429             }
9430         }
9431         return ret;
9432 #endif
9433     case TARGET_NR_getrusage:
9434         {
9435             struct rusage rusage;
9436             ret = get_errno(getrusage(arg1, &rusage));
9437             if (!is_error(ret)) {
9438                 ret = host_to_target_rusage(arg2, &rusage);
9439             }
9440         }
9441         return ret;
9442 #if defined(TARGET_NR_gettimeofday)
9443     case TARGET_NR_gettimeofday:
9444         {
9445             struct timeval tv;
9446             struct timezone tz;
9447 
9448             ret = get_errno(gettimeofday(&tv, &tz));
9449             if (!is_error(ret)) {
9450                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9451                     return -TARGET_EFAULT;
9452                 }
9453                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9454                     return -TARGET_EFAULT;
9455                 }
9456             }
9457         }
9458         return ret;
9459 #endif
9460 #if defined(TARGET_NR_settimeofday)
9461     case TARGET_NR_settimeofday:
9462         {
9463             struct timeval tv, *ptv = NULL;
9464             struct timezone tz, *ptz = NULL;
9465 
9466             if (arg1) {
9467                 if (copy_from_user_timeval(&tv, arg1)) {
9468                     return -TARGET_EFAULT;
9469                 }
9470                 ptv = &tv;
9471             }
9472 
9473             if (arg2) {
9474                 if (copy_from_user_timezone(&tz, arg2)) {
9475                     return -TARGET_EFAULT;
9476                 }
9477                 ptz = &tz;
9478             }
9479 
9480             return get_errno(settimeofday(ptv, ptz));
9481         }
9482 #endif
9483 #if defined(TARGET_NR_select)
9484     case TARGET_NR_select:
9485 #if defined(TARGET_WANT_NI_OLD_SELECT)
        /* Some architectures used to implement old_select here,
         * but now return ENOSYS for it.
         */
9489         ret = -TARGET_ENOSYS;
9490 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9491         ret = do_old_select(arg1);
9492 #else
9493         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9494 #endif
9495         return ret;
9496 #endif
9497 #ifdef TARGET_NR_pselect6
9498     case TARGET_NR_pselect6:
9499         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9500 #endif
9501 #ifdef TARGET_NR_pselect6_time64
9502     case TARGET_NR_pselect6_time64:
9503         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9504 #endif
9505 #ifdef TARGET_NR_symlink
9506     case TARGET_NR_symlink:
9507         {
9508             void *p2;
9509             p = lock_user_string(arg1);
9510             p2 = lock_user_string(arg2);
9511             if (!p || !p2)
9512                 ret = -TARGET_EFAULT;
9513             else
9514                 ret = get_errno(symlink(p, p2));
9515             unlock_user(p2, arg2, 0);
9516             unlock_user(p, arg1, 0);
9517         }
9518         return ret;
9519 #endif
9520 #if defined(TARGET_NR_symlinkat)
9521     case TARGET_NR_symlinkat:
9522         {
9523             void *p2;
9524             p  = lock_user_string(arg1);
9525             p2 = lock_user_string(arg3);
9526             if (!p || !p2)
9527                 ret = -TARGET_EFAULT;
9528             else
9529                 ret = get_errno(symlinkat(p, arg2, p2));
9530             unlock_user(p2, arg3, 0);
9531             unlock_user(p, arg1, 0);
9532         }
9533         return ret;
9534 #endif
9535 #ifdef TARGET_NR_readlink
9536     case TARGET_NR_readlink:
9537         {
9538             void *p2;
9539             p = lock_user_string(arg1);
9540             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9541             if (!p || !p2) {
9542                 ret = -TARGET_EFAULT;
9543             } else if (!arg3) {
9544                 /* Short circuit this for the magic exe check. */
9545                 ret = -TARGET_EINVAL;
9546             } else if (is_proc_myself((const char *)p, "exe")) {
9547                 char real[PATH_MAX], *temp;
9548                 temp = realpath(exec_path, real);
9549                 /* Return value is # of bytes that we wrote to the buffer. */
9550                 if (temp == NULL) {
9551                     ret = get_errno(-1);
9552                 } else {
9553                     /* Don't worry about sign mismatch as earlier mapping
9554                      * logic would have thrown a bad address error. */
9555                     ret = MIN(strlen(real), arg3);
9556                     /* We cannot NUL terminate the string. */
9557                     memcpy(p2, real, ret);
9558                 }
9559             } else {
9560                 ret = get_errno(readlink(path(p), p2, arg3));
9561             }
9562             unlock_user(p2, arg2, ret);
9563             unlock_user(p, arg1, 0);
9564         }
9565         return ret;
9566 #endif
9567 #if defined(TARGET_NR_readlinkat)
9568     case TARGET_NR_readlinkat:
9569         {
9570             void *p2;
9571             p  = lock_user_string(arg2);
9572             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9573             if (!p || !p2) {
9574                 ret = -TARGET_EFAULT;
9575             } else if (is_proc_myself((const char *)p, "exe")) {
9576                 char real[PATH_MAX], *temp;
9577                 temp = realpath(exec_path, real);
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* We cannot NUL terminate the string; truncate to fit. */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
9580             } else {
9581                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9582             }
9583             unlock_user(p2, arg3, ret);
9584             unlock_user(p, arg2, 0);
9585         }
9586         return ret;
9587 #endif
9588 #ifdef TARGET_NR_swapon
9589     case TARGET_NR_swapon:
9590         if (!(p = lock_user_string(arg1)))
9591             return -TARGET_EFAULT;
9592         ret = get_errno(swapon(p, arg2));
9593         unlock_user(p, arg1, 0);
9594         return ret;
9595 #endif
9596     case TARGET_NR_reboot:
9597         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 (the command string) is only used with
             * LINUX_REBOOT_CMD_RESTART2 and must be ignored in all
             * other cases.
             */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9607         }
9608         return ret;
9609 #ifdef TARGET_NR_mmap
9610     case TARGET_NR_mmap:
9611 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9612     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9613     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9614     || defined(TARGET_S390X)
9615         {
9616             abi_ulong *v;
9617             abi_ulong v1, v2, v3, v4, v5, v6;
9618             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9619                 return -TARGET_EFAULT;
9620             v1 = tswapal(v[0]);
9621             v2 = tswapal(v[1]);
9622             v3 = tswapal(v[2]);
9623             v4 = tswapal(v[3]);
9624             v5 = tswapal(v[4]);
9625             v6 = tswapal(v[5]);
9626             unlock_user(v, arg1, 0);
9627             ret = get_errno(target_mmap(v1, v2, v3,
9628                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9629                                         v5, v6));
9630         }
9631 #else
9632         ret = get_errno(target_mmap(arg1, arg2, arg3,
9633                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9634                                     arg5,
9635                                     arg6));
9636 #endif
9637         return ret;
9638 #endif
9639 #ifdef TARGET_NR_mmap2
9640     case TARGET_NR_mmap2:
9641 #ifndef MMAP_SHIFT
9642 #define MMAP_SHIFT 12
9643 #endif
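        /* The last argument is the file offset in units of
         * 1 << MMAP_SHIFT bytes, so scale it to a byte offset here.
         */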
9644         ret = target_mmap(arg1, arg2, arg3,
9645                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9646                           arg5, arg6 << MMAP_SHIFT);
9647         return get_errno(ret);
9648 #endif
9649     case TARGET_NR_munmap:
9650         return get_errno(target_munmap(arg1, arg2));
9651     case TARGET_NR_mprotect:
9652         {
9653             TaskState *ts = cpu->opaque;
9654             /* Special hack to detect libc making the stack executable.  */
9655             if ((arg3 & PROT_GROWSDOWN)
9656                 && arg1 >= ts->info->stack_limit
9657                 && arg1 <= ts->info->start_stack) {
9658                 arg3 &= ~PROT_GROWSDOWN;
9659                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9660                 arg1 = ts->info->stack_limit;
9661             }
9662         }
9663         return get_errno(target_mprotect(arg1, arg2, arg3));
9664 #ifdef TARGET_NR_mremap
9665     case TARGET_NR_mremap:
9666         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9667 #endif
9668         /* ??? msync/mlock/munlock are broken for softmmu.  */
9669 #ifdef TARGET_NR_msync
9670     case TARGET_NR_msync:
9671         return get_errno(msync(g2h(arg1), arg2, arg3));
9672 #endif
9673 #ifdef TARGET_NR_mlock
9674     case TARGET_NR_mlock:
9675         return get_errno(mlock(g2h(arg1), arg2));
9676 #endif
9677 #ifdef TARGET_NR_munlock
9678     case TARGET_NR_munlock:
9679         return get_errno(munlock(g2h(arg1), arg2));
9680 #endif
9681 #ifdef TARGET_NR_mlockall
9682     case TARGET_NR_mlockall:
9683         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9684 #endif
9685 #ifdef TARGET_NR_munlockall
9686     case TARGET_NR_munlockall:
9687         return get_errno(munlockall());
9688 #endif
9689 #ifdef TARGET_NR_truncate
9690     case TARGET_NR_truncate:
9691         if (!(p = lock_user_string(arg1)))
9692             return -TARGET_EFAULT;
9693         ret = get_errno(truncate(p, arg2));
9694         unlock_user(p, arg1, 0);
9695         return ret;
9696 #endif
9697 #ifdef TARGET_NR_ftruncate
9698     case TARGET_NR_ftruncate:
9699         return get_errno(ftruncate(arg1, arg2));
9700 #endif
9701     case TARGET_NR_fchmod:
9702         return get_errno(fchmod(arg1, arg2));
9703 #if defined(TARGET_NR_fchmodat)
9704     case TARGET_NR_fchmodat:
9705         if (!(p = lock_user_string(arg2)))
9706             return -TARGET_EFAULT;
9707         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9708         unlock_user(p, arg2, 0);
9709         return ret;
9710 #endif
9711     case TARGET_NR_getpriority:
9712         /* Note that negative values are valid for getpriority, so we must
9713            differentiate based on errno settings.  */
9714         errno = 0;
9715         ret = getpriority(arg1, arg2);
9716         if (ret == -1 && errno != 0) {
9717             return -host_to_target_errno(errno);
9718         }
9719 #ifdef TARGET_ALPHA
9720         /* Return value is the unbiased priority.  Signal no error.  */
9721         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9722 #else
9723         /* Return value is a biased priority to avoid negative numbers.  */
9724         ret = 20 - ret;
9725 #endif
9726         return ret;
9727     case TARGET_NR_setpriority:
9728         return get_errno(setpriority(arg1, arg2, arg3));
9729 #ifdef TARGET_NR_statfs
9730     case TARGET_NR_statfs:
9731         if (!(p = lock_user_string(arg1))) {
9732             return -TARGET_EFAULT;
9733         }
9734         ret = get_errno(statfs(path(p), &stfs));
9735         unlock_user(p, arg1, 0);
9736     convert_statfs:
9737         if (!is_error(ret)) {
9738             struct target_statfs *target_stfs;
9739 
9740             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9741                 return -TARGET_EFAULT;
9742             __put_user(stfs.f_type, &target_stfs->f_type);
9743             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9744             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9745             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9746             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9747             __put_user(stfs.f_files, &target_stfs->f_files);
9748             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9749             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9750             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9751             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9752             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9753 #ifdef _STATFS_F_FLAGS
9754             __put_user(stfs.f_flags, &target_stfs->f_flags);
9755 #else
9756             __put_user(0, &target_stfs->f_flags);
9757 #endif
9758             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9759             unlock_user_struct(target_stfs, arg2, 1);
9760         }
9761         return ret;
9762 #endif
9763 #ifdef TARGET_NR_fstatfs
9764     case TARGET_NR_fstatfs:
9765         ret = get_errno(fstatfs(arg1, &stfs));
9766         goto convert_statfs;
9767 #endif
9768 #ifdef TARGET_NR_statfs64
9769     case TARGET_NR_statfs64:
9770         if (!(p = lock_user_string(arg1))) {
9771             return -TARGET_EFAULT;
9772         }
9773         ret = get_errno(statfs(path(p), &stfs));
9774         unlock_user(p, arg1, 0);
9775     convert_statfs64:
9776         if (!is_error(ret)) {
9777             struct target_statfs64 *target_stfs;
9778 
9779             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9780                 return -TARGET_EFAULT;
9781             __put_user(stfs.f_type, &target_stfs->f_type);
9782             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9783             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9784             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9785             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9786             __put_user(stfs.f_files, &target_stfs->f_files);
9787             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9788             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9789             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9790             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9791             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9792 #ifdef _STATFS_F_FLAGS
9793             __put_user(stfs.f_flags, &target_stfs->f_flags);
9794 #else
9795             __put_user(0, &target_stfs->f_flags);
9796 #endif
9797             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9798             unlock_user_struct(target_stfs, arg3, 1);
9799         }
9800         return ret;
9801     case TARGET_NR_fstatfs64:
9802         ret = get_errno(fstatfs(arg1, &stfs));
9803         goto convert_statfs64;
9804 #endif
9805 #ifdef TARGET_NR_socketcall
9806     case TARGET_NR_socketcall:
9807         return do_socketcall(arg1, arg2);
9808 #endif
9809 #ifdef TARGET_NR_accept
9810     case TARGET_NR_accept:
9811         return do_accept4(arg1, arg2, arg3, 0);
9812 #endif
9813 #ifdef TARGET_NR_accept4
9814     case TARGET_NR_accept4:
9815         return do_accept4(arg1, arg2, arg3, arg4);
9816 #endif
9817 #ifdef TARGET_NR_bind
9818     case TARGET_NR_bind:
9819         return do_bind(arg1, arg2, arg3);
9820 #endif
9821 #ifdef TARGET_NR_connect
9822     case TARGET_NR_connect:
9823         return do_connect(arg1, arg2, arg3);
9824 #endif
9825 #ifdef TARGET_NR_getpeername
9826     case TARGET_NR_getpeername:
9827         return do_getpeername(arg1, arg2, arg3);
9828 #endif
9829 #ifdef TARGET_NR_getsockname
9830     case TARGET_NR_getsockname:
9831         return do_getsockname(arg1, arg2, arg3);
9832 #endif
9833 #ifdef TARGET_NR_getsockopt
9834     case TARGET_NR_getsockopt:
9835         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9836 #endif
9837 #ifdef TARGET_NR_listen
9838     case TARGET_NR_listen:
9839         return get_errno(listen(arg1, arg2));
9840 #endif
9841 #ifdef TARGET_NR_recv
9842     case TARGET_NR_recv:
9843         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9844 #endif
9845 #ifdef TARGET_NR_recvfrom
9846     case TARGET_NR_recvfrom:
9847         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9848 #endif
9849 #ifdef TARGET_NR_recvmsg
9850     case TARGET_NR_recvmsg:
9851         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9852 #endif
9853 #ifdef TARGET_NR_send
9854     case TARGET_NR_send:
9855         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9856 #endif
9857 #ifdef TARGET_NR_sendmsg
9858     case TARGET_NR_sendmsg:
9859         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9860 #endif
9861 #ifdef TARGET_NR_sendmmsg
9862     case TARGET_NR_sendmmsg:
9863         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9864 #endif
9865 #ifdef TARGET_NR_recvmmsg
9866     case TARGET_NR_recvmmsg:
9867         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9868 #endif
9869 #ifdef TARGET_NR_sendto
9870     case TARGET_NR_sendto:
9871         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9872 #endif
9873 #ifdef TARGET_NR_shutdown
9874     case TARGET_NR_shutdown:
9875         return get_errno(shutdown(arg1, arg2));
9876 #endif
9877 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9878     case TARGET_NR_getrandom:
9879         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9880         if (!p) {
9881             return -TARGET_EFAULT;
9882         }
9883         ret = get_errno(getrandom(p, arg2, arg3));
9884         unlock_user(p, arg1, ret);
9885         return ret;
9886 #endif
9887 #ifdef TARGET_NR_socket
9888     case TARGET_NR_socket:
9889         return do_socket(arg1, arg2, arg3);
9890 #endif
9891 #ifdef TARGET_NR_socketpair
9892     case TARGET_NR_socketpair:
9893         return do_socketpair(arg1, arg2, arg3, arg4);
9894 #endif
9895 #ifdef TARGET_NR_setsockopt
9896     case TARGET_NR_setsockopt:
9897         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9898 #endif
9899 #if defined(TARGET_NR_syslog)
9900     case TARGET_NR_syslog:
9901         {
9902             int len = arg2;
9903 
9904             switch (arg1) {
9905             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9906             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9907             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9908             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9909             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9910             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9911             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9912             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9913                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
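            /* The READ* actions copy data into the guest buffer, so map
             * it before calling the host syslog.
             */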
9914             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9915             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9916             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9917                 {
9918                     if (len < 0) {
9919                         return -TARGET_EINVAL;
9920                     }
9921                     if (len == 0) {
9922                         return 0;
9923                     }
9924                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9925                     if (!p) {
9926                         return -TARGET_EFAULT;
9927                     }
9928                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9929                     unlock_user(p, arg2, arg3);
9930                 }
9931                 return ret;
9932             default:
9933                 return -TARGET_EINVAL;
9934             }
9935         }
9936         break;
9937 #endif
9938     case TARGET_NR_setitimer:
9939         {
9940             struct itimerval value, ovalue, *pvalue;
9941 
9942             if (arg2) {
9943                 pvalue = &value;
9944                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9945                     || copy_from_user_timeval(&pvalue->it_value,
9946                                               arg2 + sizeof(struct target_timeval)))
9947                     return -TARGET_EFAULT;
9948             } else {
9949                 pvalue = NULL;
9950             }
9951             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9952             if (!is_error(ret) && arg3) {
9953                 if (copy_to_user_timeval(arg3,
9954                                          &ovalue.it_interval)
9955                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9956                                             &ovalue.it_value))
9957                     return -TARGET_EFAULT;
9958             }
9959         }
9960         return ret;
9961     case TARGET_NR_getitimer:
9962         {
9963             struct itimerval value;
9964 
9965             ret = get_errno(getitimer(arg1, &value));
9966             if (!is_error(ret) && arg2) {
9967                 if (copy_to_user_timeval(arg2,
9968                                          &value.it_interval)
9969                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9970                                             &value.it_value))
9971                     return -TARGET_EFAULT;
9972             }
9973         }
9974         return ret;
9975 #ifdef TARGET_NR_stat
9976     case TARGET_NR_stat:
9977         if (!(p = lock_user_string(arg1))) {
9978             return -TARGET_EFAULT;
9979         }
9980         ret = get_errno(stat(path(p), &st));
9981         unlock_user(p, arg1, 0);
9982         goto do_stat;
9983 #endif
9984 #ifdef TARGET_NR_lstat
9985     case TARGET_NR_lstat:
9986         if (!(p = lock_user_string(arg1))) {
9987             return -TARGET_EFAULT;
9988         }
9989         ret = get_errno(lstat(path(p), &st));
9990         unlock_user(p, arg1, 0);
9991         goto do_stat;
9992 #endif
9993 #ifdef TARGET_NR_fstat
9994     case TARGET_NR_fstat:
9995         {
9996             ret = get_errno(fstat(arg1, &st));
9997 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9998         do_stat:
9999 #endif
10000             if (!is_error(ret)) {
10001                 struct target_stat *target_st;
10002 
10003                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10004                     return -TARGET_EFAULT;
10005                 memset(target_st, 0, sizeof(*target_st));
10006                 __put_user(st.st_dev, &target_st->st_dev);
10007                 __put_user(st.st_ino, &target_st->st_ino);
10008                 __put_user(st.st_mode, &target_st->st_mode);
10009                 __put_user(st.st_uid, &target_st->st_uid);
10010                 __put_user(st.st_gid, &target_st->st_gid);
10011                 __put_user(st.st_nlink, &target_st->st_nlink);
10012                 __put_user(st.st_rdev, &target_st->st_rdev);
10013                 __put_user(st.st_size, &target_st->st_size);
10014                 __put_user(st.st_blksize, &target_st->st_blksize);
10015                 __put_user(st.st_blocks, &target_st->st_blocks);
10016                 __put_user(st.st_atime, &target_st->target_st_atime);
10017                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10018                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10019 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
10020     defined(TARGET_STAT_HAVE_NSEC)
10021                 __put_user(st.st_atim.tv_nsec,
10022                            &target_st->target_st_atime_nsec);
10023                 __put_user(st.st_mtim.tv_nsec,
10024                            &target_st->target_st_mtime_nsec);
10025                 __put_user(st.st_ctim.tv_nsec,
10026                            &target_st->target_st_ctime_nsec);
10027 #endif
10028                 unlock_user_struct(target_st, arg2, 1);
10029             }
10030         }
10031         return ret;
10032 #endif
10033     case TARGET_NR_vhangup:
10034         return get_errno(vhangup());
10035 #ifdef TARGET_NR_syscall
10036     case TARGET_NR_syscall:
10037         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10038                           arg6, arg7, arg8, 0);
10039 #endif
10040 #if defined(TARGET_NR_wait4)
10041     case TARGET_NR_wait4:
10042         {
10043             int status;
10044             abi_long status_ptr = arg2;
10045             struct rusage rusage, *rusage_ptr;
10046             abi_ulong target_rusage = arg4;
10047             abi_long rusage_err;
10048             if (target_rusage)
10049                 rusage_ptr = &rusage;
10050             else
10051                 rusage_ptr = NULL;
10052             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10053             if (!is_error(ret)) {
10054                 if (status_ptr && ret) {
10055                     status = host_to_target_waitstatus(status);
10056                     if (put_user_s32(status, status_ptr))
10057                         return -TARGET_EFAULT;
10058                 }
10059                 if (target_rusage) {
10060                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10061                     if (rusage_err) {
10062                         ret = rusage_err;
10063                     }
10064                 }
10065             }
10066         }
10067         return ret;
10068 #endif
10069 #ifdef TARGET_NR_swapoff
10070     case TARGET_NR_swapoff:
10071         if (!(p = lock_user_string(arg1)))
10072             return -TARGET_EFAULT;
10073         ret = get_errno(swapoff(p));
10074         unlock_user(p, arg1, 0);
10075         return ret;
10076 #endif
10077     case TARGET_NR_sysinfo:
10078         {
10079             struct target_sysinfo *target_value;
10080             struct sysinfo value;
10081             ret = get_errno(sysinfo(&value));
10082             if (!is_error(ret) && arg1)
10083             {
10084                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10085                     return -TARGET_EFAULT;
10086                 __put_user(value.uptime, &target_value->uptime);
10087                 __put_user(value.loads[0], &target_value->loads[0]);
10088                 __put_user(value.loads[1], &target_value->loads[1]);
10089                 __put_user(value.loads[2], &target_value->loads[2]);
10090                 __put_user(value.totalram, &target_value->totalram);
10091                 __put_user(value.freeram, &target_value->freeram);
10092                 __put_user(value.sharedram, &target_value->sharedram);
10093                 __put_user(value.bufferram, &target_value->bufferram);
10094                 __put_user(value.totalswap, &target_value->totalswap);
10095                 __put_user(value.freeswap, &target_value->freeswap);
10096                 __put_user(value.procs, &target_value->procs);
10097                 __put_user(value.totalhigh, &target_value->totalhigh);
10098                 __put_user(value.freehigh, &target_value->freehigh);
10099                 __put_user(value.mem_unit, &target_value->mem_unit);
10100                 unlock_user_struct(target_value, arg1, 1);
10101             }
10102         }
10103         return ret;
10104 #ifdef TARGET_NR_ipc
10105     case TARGET_NR_ipc:
10106         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10107 #endif
10108 #ifdef TARGET_NR_semget
10109     case TARGET_NR_semget:
10110         return get_errno(semget(arg1, arg2, arg3));
10111 #endif
10112 #ifdef TARGET_NR_semop
10113     case TARGET_NR_semop:
10114         return do_semtimedop(arg1, arg2, arg3, 0, false);
10115 #endif
10116 #ifdef TARGET_NR_semtimedop
10117     case TARGET_NR_semtimedop:
10118         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10119 #endif
10120 #ifdef TARGET_NR_semtimedop_time64
10121     case TARGET_NR_semtimedop_time64:
10122         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10123 #endif
10124 #ifdef TARGET_NR_semctl
10125     case TARGET_NR_semctl:
10126         return do_semctl(arg1, arg2, arg3, arg4);
10127 #endif
10128 #ifdef TARGET_NR_msgctl
10129     case TARGET_NR_msgctl:
10130         return do_msgctl(arg1, arg2, arg3);
10131 #endif
10132 #ifdef TARGET_NR_msgget
10133     case TARGET_NR_msgget:
10134         return get_errno(msgget(arg1, arg2));
10135 #endif
10136 #ifdef TARGET_NR_msgrcv
10137     case TARGET_NR_msgrcv:
10138         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10139 #endif
10140 #ifdef TARGET_NR_msgsnd
10141     case TARGET_NR_msgsnd:
10142         return do_msgsnd(arg1, arg2, arg3, arg4);
10143 #endif
10144 #ifdef TARGET_NR_shmget
10145     case TARGET_NR_shmget:
10146         return get_errno(shmget(arg1, arg2, arg3));
10147 #endif
10148 #ifdef TARGET_NR_shmctl
10149     case TARGET_NR_shmctl:
10150         return do_shmctl(arg1, arg2, arg3);
10151 #endif
10152 #ifdef TARGET_NR_shmat
10153     case TARGET_NR_shmat:
10154         return do_shmat(cpu_env, arg1, arg2, arg3);
10155 #endif
10156 #ifdef TARGET_NR_shmdt
10157     case TARGET_NR_shmdt:
10158         return do_shmdt(arg1);
10159 #endif
10160     case TARGET_NR_fsync:
10161         return get_errno(fsync(arg1));
10162     case TARGET_NR_clone:
10163         /* Linux manages to have three different orderings for its
10164          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10165          * match the kernel's CONFIG_CLONE_* settings.
10166          * Microblaze is further special in that it uses a sixth
10167          * implicit argument to clone for the TLS pointer.
10168          */
10169 #if defined(TARGET_MICROBLAZE)
10170         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10171 #elif defined(TARGET_CLONE_BACKWARDS)
10172         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10173 #elif defined(TARGET_CLONE_BACKWARDS2)
10174         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10175 #else
10176         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10177 #endif
10178         return ret;
10179 #ifdef __NR_exit_group
10180         /* new thread calls */
10181     case TARGET_NR_exit_group:
10182         preexit_cleanup(cpu_env, arg1);
10183         return get_errno(exit_group(arg1));
10184 #endif
10185     case TARGET_NR_setdomainname:
10186         if (!(p = lock_user_string(arg1)))
10187             return -TARGET_EFAULT;
10188         ret = get_errno(setdomainname(p, arg2));
10189         unlock_user(p, arg1, 0);
10190         return ret;
10191     case TARGET_NR_uname:
10192         /* no need to transcode because we use the linux syscall */
10193         {
10194             struct new_utsname * buf;
10195 
10196             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10197                 return -TARGET_EFAULT;
10198             ret = get_errno(sys_uname(buf));
10199             if (!is_error(ret)) {
10200                 /* Overwrite the native machine name with whatever is being
10201                    emulated. */
10202                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10203                           sizeof(buf->machine));
10204                 /* Allow the user to override the reported release.  */
10205                 if (qemu_uname_release && *qemu_uname_release) {
10206                     g_strlcpy(buf->release, qemu_uname_release,
10207                               sizeof(buf->release));
10208                 }
10209             }
10210             unlock_user_struct(buf, arg1, 1);
10211         }
10212         return ret;
10213 #ifdef TARGET_I386
10214     case TARGET_NR_modify_ldt:
10215         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10216 #if !defined(TARGET_X86_64)
10217     case TARGET_NR_vm86:
10218         return do_vm86(cpu_env, arg1, arg2);
10219 #endif
10220 #endif
10221 #if defined(TARGET_NR_adjtimex)
10222     case TARGET_NR_adjtimex:
10223         {
10224             struct timex host_buf;
10225 
10226             if (target_to_host_timex(&host_buf, arg1) != 0) {
10227                 return -TARGET_EFAULT;
10228             }
10229             ret = get_errno(adjtimex(&host_buf));
10230             if (!is_error(ret)) {
10231                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10232                     return -TARGET_EFAULT;
10233                 }
10234             }
10235         }
10236         return ret;
10237 #endif
10238 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10239     case TARGET_NR_clock_adjtime:
10240         {
10241             struct timex htx, *phtx = &htx;
10242 
10243             if (target_to_host_timex(phtx, arg2) != 0) {
10244                 return -TARGET_EFAULT;
10245             }
10246             ret = get_errno(clock_adjtime(arg1, phtx));
10247             if (!is_error(ret) && phtx) {
10248                 if (host_to_target_timex(arg2, phtx) != 0) {
10249                     return -TARGET_EFAULT;
10250                 }
10251             }
10252         }
10253         return ret;
10254 #endif
10255 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10256     case TARGET_NR_clock_adjtime64:
10257         {
10258             struct timex htx;
10259 
10260             if (target_to_host_timex64(&htx, arg2) != 0) {
10261                 return -TARGET_EFAULT;
10262             }
10263             ret = get_errno(clock_adjtime(arg1, &htx));
10264             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10265                     return -TARGET_EFAULT;
10266             }
10267         }
10268         return ret;
10269 #endif
10270     case TARGET_NR_getpgid:
10271         return get_errno(getpgid(arg1));
10272     case TARGET_NR_fchdir:
10273         return get_errno(fchdir(arg1));
10274     case TARGET_NR_personality:
10275         return get_errno(personality(arg1));
10276 #ifdef TARGET_NR__llseek /* Not on alpha */
10277     case TARGET_NR__llseek:
10278         {
10279             int64_t res;
10280 #if !defined(__NR_llseek)
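            /* On hosts without __NR_llseek (64-bit hosts), lseek() already
             * takes a 64-bit offset, so compose it from the two 32-bit
             * halves and call it directly.
             */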
10281             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10282             if (res == -1) {
10283                 ret = get_errno(res);
10284             } else {
10285                 ret = 0;
10286             }
10287 #else
10288             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10289 #endif
10290             if ((ret == 0) && put_user_s64(res, arg4)) {
10291                 return -TARGET_EFAULT;
10292             }
10293         }
10294         return ret;
10295 #endif
10296 #ifdef TARGET_NR_getdents
10297     case TARGET_NR_getdents:
10298 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10299 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10300         {
10301             struct target_dirent *target_dirp;
10302             struct linux_dirent *dirp;
10303             abi_long count = arg3;
10304 
10305             dirp = g_try_malloc(count);
10306             if (!dirp) {
10307                 return -TARGET_ENOMEM;
10308             }
10309 
10310             ret = get_errno(sys_getdents(arg1, dirp, count));
10311             if (!is_error(ret)) {
10312                 struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                    g_free(dirp);
                    return -TARGET_EFAULT;
                }
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
10339                 unlock_user(target_dirp, arg2, ret);
10340             }
10341             g_free(dirp);
10342         }
10343 #else
10344         {
10345             struct linux_dirent *dirp;
10346             abi_long count = arg3;
10347 
10348             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10349                 return -TARGET_EFAULT;
10350             ret = get_errno(sys_getdents(arg1, dirp, count));
10351             if (!is_error(ret)) {
10352                 struct linux_dirent *de;
10353                 int len = ret;
10354                 int reclen;
10355                 de = dirp;
10356                 while (len > 0) {
10357                     reclen = de->d_reclen;
10358                     if (reclen > len)
10359                         break;
10360                     de->d_reclen = tswap16(reclen);
10361                     tswapls(&de->d_ino);
10362                     tswapls(&de->d_off);
10363                     de = (struct linux_dirent *)((char *)de + reclen);
10364                     len -= reclen;
10365                 }
10366             }
10367             unlock_user(dirp, arg2, ret);
10368         }
10369 #endif
10370 #else
10371         /* Implement getdents in terms of getdents64 */
10372         {
10373             struct linux_dirent64 *dirp;
10374             abi_long count = arg3;
10375 
10376             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10377             if (!dirp) {
10378                 return -TARGET_EFAULT;
10379             }
10380             ret = get_errno(sys_getdents64(arg1, dirp, count));
10381             if (!is_error(ret)) {
10382                 /* Convert the dirent64 structs to target dirent.  We do this
10383                  * in-place, since we can guarantee that a target_dirent is no
10384                  * larger than a dirent64; however this means we have to be
10385                  * careful to read everything before writing in the new format.
10386                  */
10387                 struct linux_dirent64 *de;
10388                 struct target_dirent *tde;
10389                 int len = ret;
10390                 int tlen = 0;
10391 
10392                 de = dirp;
10393                 tde = (struct target_dirent *)dirp;
10394                 while (len > 0) {
10395                     int namelen, treclen;
10396                     int reclen = de->d_reclen;
10397                     uint64_t ino = de->d_ino;
10398                     int64_t off = de->d_off;
10399                     uint8_t type = de->d_type;
10400 
10401                     namelen = strlen(de->d_name);
10402                     treclen = offsetof(struct target_dirent, d_name)
10403                         + namelen + 2;
10404                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10405 
10406                     memmove(tde->d_name, de->d_name, namelen + 1);
10407                     tde->d_ino = tswapal(ino);
10408                     tde->d_off = tswapal(off);
10409                     tde->d_reclen = tswap16(treclen);
10410                     /* The target_dirent type is in what was formerly a padding
10411                      * byte at the end of the structure:
10412                      */
10413                     *(((char *)tde) + treclen - 1) = type;
10414 
10415                     de = (struct linux_dirent64 *)((char *)de + reclen);
10416                     tde = (struct target_dirent *)((char *)tde + treclen);
10417                     len -= reclen;
10418                     tlen += treclen;
10419                 }
10420                 ret = tlen;
10421             }
10422             unlock_user(dirp, arg2, ret);
10423         }
10424 #endif
10425         return ret;
10426 #endif /* TARGET_NR_getdents */
10427 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10428     case TARGET_NR_getdents64:
10429         {
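            /*
             * The host and target linux_dirent64 layouts match, so the
             * records only need to be byte-swapped in place.
             */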
10430             struct linux_dirent64 *dirp;
10431             abi_long count = arg3;
10432             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10433                 return -TARGET_EFAULT;
10434             ret = get_errno(sys_getdents64(arg1, dirp, count));
10435             if (!is_error(ret)) {
10436                 struct linux_dirent64 *de;
10437                 int len = ret;
10438                 int reclen;
10439                 de = dirp;
10440                 while (len > 0) {
10441                     reclen = de->d_reclen;
10442                     if (reclen > len)
10443                         break;
10444                     de->d_reclen = tswap16(reclen);
10445                     tswap64s((uint64_t *)&de->d_ino);
10446                     tswap64s((uint64_t *)&de->d_off);
10447                     de = (struct linux_dirent64 *)((char *)de + reclen);
10448                     len -= reclen;
10449                 }
10450             }
10451             unlock_user(dirp, arg2, ret);
10452         }
10453         return ret;
10454 #endif /* TARGET_NR_getdents64 */
10455 #if defined(TARGET_NR__newselect)
10456     case TARGET_NR__newselect:
10457         return do_select(arg1, arg2, arg3, arg4, arg5);
10458 #endif
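    /*
     * poll, ppoll and ppoll_time64 share one helper; the two boolean
     * arguments select ppoll semantics (sigmask plus timeout pointer)
     * and the 64-bit time_t timespec layout respectively.
     */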
10459 #ifdef TARGET_NR_poll
10460     case TARGET_NR_poll:
10461         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10462 #endif
10463 #ifdef TARGET_NR_ppoll
10464     case TARGET_NR_ppoll:
10465         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10466 #endif
10467 #ifdef TARGET_NR_ppoll_time64
10468     case TARGET_NR_ppoll_time64:
10469         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10470 #endif
10471     case TARGET_NR_flock:
10472         /* NOTE: the flock constant seems to be the same for every
10473            Linux platform */
10474         return get_errno(safe_flock(arg1, arg2));
10475     case TARGET_NR_readv:
10476         {
10477             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10478             if (vec != NULL) {
10479                 ret = get_errno(safe_readv(arg1, vec, arg3));
10480                 unlock_iovec(vec, arg2, arg3, 1);
10481             } else {
10482                 ret = -host_to_target_errno(errno);
10483             }
10484         }
10485         return ret;
10486     case TARGET_NR_writev:
10487         {
10488             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10489             if (vec != NULL) {
10490                 ret = get_errno(safe_writev(arg1, vec, arg3));
10491                 unlock_iovec(vec, arg2, arg3, 0);
10492             } else {
10493                 ret = -host_to_target_errno(errno);
10494             }
10495         }
10496         return ret;
10497 #if defined(TARGET_NR_preadv)
10498     case TARGET_NR_preadv:
10499         {
10500             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10501             if (vec != NULL) {
10502                 unsigned long low, high;
10503 
10504                 target_to_host_low_high(arg4, arg5, &low, &high);
10505                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10506                 unlock_iovec(vec, arg2, arg3, 1);
10507             } else {
10508                 ret = -host_to_target_errno(errno);
            }
10510         }
10511         return ret;
10512 #endif
10513 #if defined(TARGET_NR_pwritev)
10514     case TARGET_NR_pwritev:
10515         {
10516             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10517             if (vec != NULL) {
10518                 unsigned long low, high;
10519 
10520                 target_to_host_low_high(arg4, arg5, &low, &high);
10521                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10522                 unlock_iovec(vec, arg2, arg3, 0);
10523             } else {
10524                 ret = -host_to_target_errno(errno);
            }
10526         }
10527         return ret;
10528 #endif
10529     case TARGET_NR_getsid:
10530         return get_errno(getsid(arg1));
10531 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10532     case TARGET_NR_fdatasync:
10533         return get_errno(fdatasync(arg1));
10534 #endif
10535     case TARGET_NR_sched_getaffinity:
10536         {
10537             unsigned int mask_size;
10538             unsigned long *mask;
10539 
10540             /*
10541              * sched_getaffinity needs multiples of ulong, so need to take
10542              * care of mismatches between target ulong and host ulong sizes.
10543              */
10544             if (arg2 & (sizeof(abi_ulong) - 1)) {
10545                 return -TARGET_EINVAL;
10546             }
10547             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10548 
10549             mask = alloca(mask_size);
10550             memset(mask, 0, mask_size);
10551             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10552 
10553             if (!is_error(ret)) {
10554                 if (ret > arg2) {
                    /* More data returned than fits in the caller's buffer.
10556                      * This only happens if sizeof(abi_long) < sizeof(long)
10557                      * and the caller passed us a buffer holding an odd number
10558                      * of abi_longs. If the host kernel is actually using the
10559                      * extra 4 bytes then fail EINVAL; otherwise we can just
10560                      * ignore them and only copy the interesting part.
10561                      */
10562                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10563                     if (numcpus > arg2 * 8) {
10564                         return -TARGET_EINVAL;
10565                     }
10566                     ret = arg2;
10567                 }
10568 
10569                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10570                     return -TARGET_EFAULT;
10571                 }
10572             }
10573         }
10574         return ret;
10575     case TARGET_NR_sched_setaffinity:
10576         {
10577             unsigned int mask_size;
10578             unsigned long *mask;
10579 
10580             /*
10581              * sched_setaffinity needs multiples of ulong, so need to take
10582              * care of mismatches between target ulong and host ulong sizes.
10583              */
10584             if (arg2 & (sizeof(abi_ulong) - 1)) {
10585                 return -TARGET_EINVAL;
10586             }
10587             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10588             mask = alloca(mask_size);
10589 
10590             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10591             if (ret) {
10592                 return ret;
10593             }
10594 
10595             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10596         }
10597     case TARGET_NR_getcpu:
10598         {
10599             unsigned cpu, node;
10600             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10601                                        arg2 ? &node : NULL,
10602                                        NULL));
10603             if (is_error(ret)) {
10604                 return ret;
10605             }
10606             if (arg1 && put_user_u32(cpu, arg1)) {
10607                 return -TARGET_EFAULT;
10608             }
10609             if (arg2 && put_user_u32(node, arg2)) {
10610                 return -TARGET_EFAULT;
10611             }
10612         }
10613         return ret;
10614     case TARGET_NR_sched_setparam:
10615         {
10616             struct sched_param *target_schp;
10617             struct sched_param schp;
10618 
10619             if (arg2 == 0) {
10620                 return -TARGET_EINVAL;
10621             }
10622             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10623                 return -TARGET_EFAULT;
10624             schp.sched_priority = tswap32(target_schp->sched_priority);
10625             unlock_user_struct(target_schp, arg2, 0);
10626             return get_errno(sched_setparam(arg1, &schp));
10627         }
10628     case TARGET_NR_sched_getparam:
10629         {
10630             struct sched_param *target_schp;
10631             struct sched_param schp;
10632 
10633             if (arg2 == 0) {
10634                 return -TARGET_EINVAL;
10635             }
10636             ret = get_errno(sched_getparam(arg1, &schp));
10637             if (!is_error(ret)) {
10638                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10639                     return -TARGET_EFAULT;
10640                 target_schp->sched_priority = tswap32(schp.sched_priority);
10641                 unlock_user_struct(target_schp, arg2, 1);
10642             }
10643         }
10644         return ret;
10645     case TARGET_NR_sched_setscheduler:
10646         {
10647             struct sched_param *target_schp;
10648             struct sched_param schp;
10649             if (arg3 == 0) {
10650                 return -TARGET_EINVAL;
10651             }
10652             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10653                 return -TARGET_EFAULT;
10654             schp.sched_priority = tswap32(target_schp->sched_priority);
10655             unlock_user_struct(target_schp, arg3, 0);
10656             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10657         }
10658     case TARGET_NR_sched_getscheduler:
10659         return get_errno(sched_getscheduler(arg1));
10660     case TARGET_NR_sched_yield:
10661         return get_errno(sched_yield());
10662     case TARGET_NR_sched_get_priority_max:
10663         return get_errno(sched_get_priority_max(arg1));
10664     case TARGET_NR_sched_get_priority_min:
10665         return get_errno(sched_get_priority_min(arg1));
10666 #ifdef TARGET_NR_sched_rr_get_interval
10667     case TARGET_NR_sched_rr_get_interval:
10668         {
10669             struct timespec ts;
10670             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10671             if (!is_error(ret)) {
10672                 ret = host_to_target_timespec(arg2, &ts);
10673             }
10674         }
10675         return ret;
10676 #endif
10677 #ifdef TARGET_NR_sched_rr_get_interval_time64
10678     case TARGET_NR_sched_rr_get_interval_time64:
10679         {
10680             struct timespec ts;
10681             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10682             if (!is_error(ret)) {
10683                 ret = host_to_target_timespec64(arg2, &ts);
10684             }
10685         }
10686         return ret;
10687 #endif
10688 #if defined(TARGET_NR_nanosleep)
10689     case TARGET_NR_nanosleep:
10690         {
10691             struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2 &&
                host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
10697         }
10698         return ret;
10699 #endif
10700     case TARGET_NR_prctl:
10701         switch (arg1) {
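        /*
         * Options that take pointer arguments or touch emulated CPU
         * state are handled explicitly; anything else is passed through
         * to the host prctl() in the default case.
         */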
10702         case PR_GET_PDEATHSIG:
10703         {
10704             int deathsig;
10705             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10706             if (!is_error(ret) && arg2
10707                 && put_user_s32(deathsig, arg2)) {
10708                 return -TARGET_EFAULT;
10709             }
10710             return ret;
10711         }
10712 #ifdef PR_GET_NAME
10713         case PR_GET_NAME:
10714         {
10715             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10716             if (!name) {
10717                 return -TARGET_EFAULT;
10718             }
10719             ret = get_errno(prctl(arg1, (unsigned long)name,
10720                                   arg3, arg4, arg5));
10721             unlock_user(name, arg2, 16);
10722             return ret;
10723         }
10724         case PR_SET_NAME:
10725         {
10726             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10727             if (!name) {
10728                 return -TARGET_EFAULT;
10729             }
10730             ret = get_errno(prctl(arg1, (unsigned long)name,
10731                                   arg3, arg4, arg5));
10732             unlock_user(name, arg2, 0);
10733             return ret;
10734         }
10735 #endif
10736 #ifdef TARGET_MIPS
10737         case TARGET_PR_GET_FP_MODE:
10738         {
10739             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10740             ret = 0;
10741             if (env->CP0_Status & (1 << CP0St_FR)) {
10742                 ret |= TARGET_PR_FP_MODE_FR;
10743             }
10744             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10745                 ret |= TARGET_PR_FP_MODE_FRE;
10746             }
10747             return ret;
10748         }
10749         case TARGET_PR_SET_FP_MODE:
10750         {
10751             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10752             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10753             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10754             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10755             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10756 
10757             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10758                                             TARGET_PR_FP_MODE_FRE;
10759 
10760             /* If nothing to change, return right away, successfully.  */
10761             if (old_fr == new_fr && old_fre == new_fre) {
10762                 return 0;
10763             }
10764             /* Check the value is valid */
10765             if (arg2 & ~known_bits) {
10766                 return -TARGET_EOPNOTSUPP;
10767             }
10768             /* Setting FRE without FR is not supported.  */
10769             if (new_fre && !new_fr) {
10770                 return -TARGET_EOPNOTSUPP;
10771             }
10772             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10773                 /* FR1 is not supported */
10774                 return -TARGET_EOPNOTSUPP;
10775             }
10776             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10777                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10778                 /* cannot set FR=0 */
10779                 return -TARGET_EOPNOTSUPP;
10780             }
10781             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10782                 /* Cannot set FRE=1 */
10783                 return -TARGET_EOPNOTSUPP;
10784             }
10785 
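            /*
             * Switching between FR=0 (even/odd singles paired into
             * doubles) and FR=1 (32 independent 64-bit registers)
             * changes how the FPU register file is viewed, so repack the
             * odd/even register halves to preserve the guest-visible
             * values.
             */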
10786             int i;
10787             fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32; i += 2) {
10789                 if (!old_fr && new_fr) {
10790                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10791                 } else if (old_fr && !new_fr) {
10792                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10793                 }
10794             }
10795 
10796             if (new_fr) {
10797                 env->CP0_Status |= (1 << CP0St_FR);
10798                 env->hflags |= MIPS_HFLAG_F64;
10799             } else {
10800                 env->CP0_Status &= ~(1 << CP0St_FR);
10801                 env->hflags &= ~MIPS_HFLAG_F64;
10802             }
10803             if (new_fre) {
10804                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10805                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10806                     env->hflags |= MIPS_HFLAG_FRE;
10807                 }
10808             } else {
10809                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10810                 env->hflags &= ~MIPS_HFLAG_FRE;
10811             }
10812 
10813             return 0;
10814         }
10815 #endif /* MIPS */
10816 #ifdef TARGET_AARCH64
10817         case TARGET_PR_SVE_SET_VL:
10818             /*
10819              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10820              * PR_SVE_VL_INHERIT.  Note the kernel definition
10821              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10822              * even though the current architectural maximum is VQ=16.
10823              */
10824             ret = -TARGET_EINVAL;
10825             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10826                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10827                 CPUARMState *env = cpu_env;
10828                 ARMCPU *cpu = env_archcpu(env);
10829                 uint32_t vq, old_vq;
10830 
10831                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10832                 vq = MAX(arg2 / 16, 1);
10833                 vq = MIN(vq, cpu->sve_max_vq);
10834 
10835                 if (vq < old_vq) {
10836                     aarch64_sve_narrow_vq(env, vq);
10837                 }
10838                 env->vfp.zcr_el[1] = vq - 1;
10839                 arm_rebuild_hflags(env);
10840                 ret = vq * 16;
10841             }
10842             return ret;
10843         case TARGET_PR_SVE_GET_VL:
10844             ret = -TARGET_EINVAL;
10845             {
10846                 ARMCPU *cpu = env_archcpu(cpu_env);
10847                 if (cpu_isar_feature(aa64_sve, cpu)) {
10848                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10849                 }
10850             }
10851             return ret;
10852         case TARGET_PR_PAC_RESET_KEYS:
10853             {
10854                 CPUARMState *env = cpu_env;
10855                 ARMCPU *cpu = env_archcpu(env);
10856 
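                /*
                 * Regenerate the requested pointer authentication keys
                 * from the guest RNG; arg2 == 0 selects all keys.
                 */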
10857                 if (arg3 || arg4 || arg5) {
10858                     return -TARGET_EINVAL;
10859                 }
10860                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10861                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10862                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10863                                TARGET_PR_PAC_APGAKEY);
10864                     int ret = 0;
10865                     Error *err = NULL;
10866 
10867                     if (arg2 == 0) {
10868                         arg2 = all;
10869                     } else if (arg2 & ~all) {
10870                         return -TARGET_EINVAL;
10871                     }
10872                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10873                         ret |= qemu_guest_getrandom(&env->keys.apia,
10874                                                     sizeof(ARMPACKey), &err);
10875                     }
10876                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10877                         ret |= qemu_guest_getrandom(&env->keys.apib,
10878                                                     sizeof(ARMPACKey), &err);
10879                     }
10880                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10881                         ret |= qemu_guest_getrandom(&env->keys.apda,
10882                                                     sizeof(ARMPACKey), &err);
10883                     }
10884                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10885                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10886                                                     sizeof(ARMPACKey), &err);
10887                     }
10888                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10889                         ret |= qemu_guest_getrandom(&env->keys.apga,
10890                                                     sizeof(ARMPACKey), &err);
10891                     }
10892                     if (ret != 0) {
10893                         /*
10894                          * Some unknown failure in the crypto.  The best
10895                          * we can do is log it and fail the syscall.
10896                          * The real syscall cannot fail this way.
10897                          */
10898                         qemu_log_mask(LOG_UNIMP,
10899                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10900                                       error_get_pretty(err));
10901                         error_free(err);
10902                         return -TARGET_EIO;
10903                     }
10904                     return 0;
10905                 }
10906             }
10907             return -TARGET_EINVAL;
10908 #endif /* AARCH64 */
10909         case PR_GET_SECCOMP:
10910         case PR_SET_SECCOMP:
10911             /* Disable seccomp to prevent the target disabling syscalls we
10912              * need. */
10913             return -TARGET_EINVAL;
10914         default:
10915             /* Most prctl options have no pointer arguments */
10916             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10917         }
10918         break;
10919 #ifdef TARGET_NR_arch_prctl
10920     case TARGET_NR_arch_prctl:
10921         return do_arch_prctl(cpu_env, arg1, arg2);
10922 #endif
10923 #ifdef TARGET_NR_pread64
10924     case TARGET_NR_pread64:
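        /*
         * Some ABIs pass 64-bit syscall arguments in aligned register
         * pairs, which inserts a padding register before the offset;
         * shift the arguments down so arg4/arg5 hold the offset.
         */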
10925         if (regpairs_aligned(cpu_env, num)) {
10926             arg4 = arg5;
10927             arg5 = arg6;
10928         }
10929         if (arg2 == 0 && arg3 == 0) {
10930             /* Special-case NULL buffer and zero length, which should succeed */
10931             p = 0;
10932         } else {
10933             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10934             if (!p) {
10935                 return -TARGET_EFAULT;
10936             }
10937         }
10938         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10939         unlock_user(p, arg2, ret);
10940         return ret;
10941     case TARGET_NR_pwrite64:
10942         if (regpairs_aligned(cpu_env, num)) {
10943             arg4 = arg5;
10944             arg5 = arg6;
10945         }
10946         if (arg2 == 0 && arg3 == 0) {
10947             /* Special-case NULL buffer and zero length, which should succeed */
10948             p = 0;
10949         } else {
10950             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10951             if (!p) {
10952                 return -TARGET_EFAULT;
10953             }
10954         }
10955         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10956         unlock_user(p, arg2, 0);
10957         return ret;
10958 #endif
10959     case TARGET_NR_getcwd:
10960         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10961             return -TARGET_EFAULT;
10962         ret = get_errno(sys_getcwd1(p, arg2));
10963         unlock_user(p, arg1, ret);
10964         return ret;
10965     case TARGET_NR_capget:
10966     case TARGET_NR_capset:
10967     {
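        /*
         * capget and capset share the marshalling below; only the copy
         * direction for the data array and the host call differ.
         */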
10968         struct target_user_cap_header *target_header;
10969         struct target_user_cap_data *target_data = NULL;
10970         struct __user_cap_header_struct header;
10971         struct __user_cap_data_struct data[2];
10972         struct __user_cap_data_struct *dataptr = NULL;
10973         int i, target_datalen;
10974         int data_items = 1;
10975 
10976         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10977             return -TARGET_EFAULT;
10978         }
10979         header.version = tswap32(target_header->version);
10980         header.pid = tswap32(target_header->pid);
10981 
10982         if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Versions 2 and up take a pointer to two user_data structs */
10984             data_items = 2;
10985         }
10986 
10987         target_datalen = sizeof(*target_data) * data_items;
10988 
10989         if (arg2) {
10990             if (num == TARGET_NR_capget) {
10991                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10992             } else {
10993                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10994             }
10995             if (!target_data) {
10996                 unlock_user_struct(target_header, arg1, 0);
10997                 return -TARGET_EFAULT;
10998             }
10999 
11000             if (num == TARGET_NR_capset) {
11001                 for (i = 0; i < data_items; i++) {
11002                     data[i].effective = tswap32(target_data[i].effective);
11003                     data[i].permitted = tswap32(target_data[i].permitted);
11004                     data[i].inheritable = tswap32(target_data[i].inheritable);
11005                 }
11006             }
11007 
11008             dataptr = data;
11009         }
11010 
11011         if (num == TARGET_NR_capget) {
11012             ret = get_errno(capget(&header, dataptr));
11013         } else {
11014             ret = get_errno(capset(&header, dataptr));
11015         }
11016 
11017         /* The kernel always updates version for both capget and capset */
11018         target_header->version = tswap32(header.version);
11019         unlock_user_struct(target_header, arg1, 1);
11020 
11021         if (arg2) {
11022             if (num == TARGET_NR_capget) {
11023                 for (i = 0; i < data_items; i++) {
11024                     target_data[i].effective = tswap32(data[i].effective);
11025                     target_data[i].permitted = tswap32(data[i].permitted);
11026                     target_data[i].inheritable = tswap32(data[i].inheritable);
11027                 }
11028                 unlock_user(target_data, arg2, target_datalen);
11029             } else {
11030                 unlock_user(target_data, arg2, 0);
11031             }
11032         }
11033         return ret;
11034     }
11035     case TARGET_NR_sigaltstack:
11036         return do_sigaltstack(arg1, arg2,
11037                               get_sp_from_cpustate((CPUArchState *)cpu_env));
11038 
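    /*
     * sendfile and sendfile64 differ only in the width of the offset
     * word read from guest memory; both map onto the host sendfile(),
     * writing the updated offset back when one was supplied.
     */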
11039 #ifdef CONFIG_SENDFILE
11040 #ifdef TARGET_NR_sendfile
11041     case TARGET_NR_sendfile:
11042     {
11043         off_t *offp = NULL;
11044         off_t off;
11045         if (arg3) {
11046             ret = get_user_sal(off, arg3);
11047             if (is_error(ret)) {
11048                 return ret;
11049             }
11050             offp = &off;
11051         }
11052         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11053         if (!is_error(ret) && arg3) {
11054             abi_long ret2 = put_user_sal(off, arg3);
11055             if (is_error(ret2)) {
11056                 ret = ret2;
11057             }
11058         }
11059         return ret;
11060     }
11061 #endif
11062 #ifdef TARGET_NR_sendfile64
11063     case TARGET_NR_sendfile64:
11064     {
11065         off_t *offp = NULL;
11066         off_t off;
11067         if (arg3) {
11068             ret = get_user_s64(off, arg3);
11069             if (is_error(ret)) {
11070                 return ret;
11071             }
11072             offp = &off;
11073         }
11074         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11075         if (!is_error(ret) && arg3) {
11076             abi_long ret2 = put_user_s64(off, arg3);
11077             if (is_error(ret2)) {
11078                 ret = ret2;
11079             }
11080         }
11081         return ret;
11082     }
11083 #endif
11084 #endif
11085 #ifdef TARGET_NR_vfork
11086     case TARGET_NR_vfork:
11087         return get_errno(do_fork(cpu_env,
11088                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11089                          0, 0, 0, 0));
11090 #endif
11091 #ifdef TARGET_NR_ugetrlimit
11092     case TARGET_NR_ugetrlimit:
11093     {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
11105         return ret;
11106     }
11107 #endif
11108 #ifdef TARGET_NR_truncate64
11109     case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11113         unlock_user(p, arg1, 0);
11114         return ret;
11115 #endif
11116 #ifdef TARGET_NR_ftruncate64
11117     case TARGET_NR_ftruncate64:
11118         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11119 #endif
11120 #ifdef TARGET_NR_stat64
11121     case TARGET_NR_stat64:
11122         if (!(p = lock_user_string(arg1))) {
11123             return -TARGET_EFAULT;
11124         }
11125         ret = get_errno(stat(path(p), &st));
11126         unlock_user(p, arg1, 0);
11127         if (!is_error(ret))
11128             ret = host_to_target_stat64(cpu_env, arg2, &st);
11129         return ret;
11130 #endif
11131 #ifdef TARGET_NR_lstat64
11132     case TARGET_NR_lstat64:
11133         if (!(p = lock_user_string(arg1))) {
11134             return -TARGET_EFAULT;
11135         }
11136         ret = get_errno(lstat(path(p), &st));
11137         unlock_user(p, arg1, 0);
11138         if (!is_error(ret))
11139             ret = host_to_target_stat64(cpu_env, arg2, &st);
11140         return ret;
11141 #endif
11142 #ifdef TARGET_NR_fstat64
11143     case TARGET_NR_fstat64:
11144         ret = get_errno(fstat(arg1, &st));
11145         if (!is_error(ret))
11146             ret = host_to_target_stat64(cpu_env, arg2, &st);
11147         return ret;
11148 #endif
11149 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11150 #ifdef TARGET_NR_fstatat64
11151     case TARGET_NR_fstatat64:
11152 #endif
11153 #ifdef TARGET_NR_newfstatat
11154     case TARGET_NR_newfstatat:
11155 #endif
11156         if (!(p = lock_user_string(arg2))) {
11157             return -TARGET_EFAULT;
11158         }
11159         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11160         unlock_user(p, arg2, 0);
11161         if (!is_error(ret))
11162             ret = host_to_target_stat64(cpu_env, arg3, &st);
11163         return ret;
11164 #endif
11165 #if defined(TARGET_NR_statx)
11166     case TARGET_NR_statx:
11167         {
11168             struct target_statx *target_stx;
11169             int dirfd = arg1;
11170             int flags = arg3;
11171 
11172             p = lock_user_string(arg2);
11173             if (p == NULL) {
11174                 return -TARGET_EFAULT;
11175             }
11176 #if defined(__NR_statx)
11177             {
11178                 /*
11179                  * It is assumed that struct statx is architecture independent.
11180                  */
11181                 struct target_statx host_stx;
11182                 int mask = arg4;
11183 
11184                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11185                 if (!is_error(ret)) {
11186                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11187                         unlock_user(p, arg2, 0);
11188                         return -TARGET_EFAULT;
11189                     }
11190                 }
11191 
11192                 if (ret != -TARGET_ENOSYS) {
11193                     unlock_user(p, arg2, 0);
11194                     return ret;
11195                 }
11196             }
11197 #endif
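            /*
             * Either the host has no statx() or it returned ENOSYS:
             * fall back to fstatat() and build a statx result from the
             * ordinary stat fields.
             */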
11198             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11199             unlock_user(p, arg2, 0);
11200 
11201             if (!is_error(ret)) {
11202                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11203                     return -TARGET_EFAULT;
11204                 }
11205                 memset(target_stx, 0, sizeof(*target_stx));
11206                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11207                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11208                 __put_user(st.st_ino, &target_stx->stx_ino);
11209                 __put_user(st.st_mode, &target_stx->stx_mode);
11210                 __put_user(st.st_uid, &target_stx->stx_uid);
11211                 __put_user(st.st_gid, &target_stx->stx_gid);
11212                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11213                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11214                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11215                 __put_user(st.st_size, &target_stx->stx_size);
11216                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11217                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11218                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11219                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11220                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11221                 unlock_user_struct(target_stx, arg5, 1);
11222             }
11223         }
11224         return ret;
11225 #endif
11226 #ifdef TARGET_NR_lchown
11227     case TARGET_NR_lchown:
11228         if (!(p = lock_user_string(arg1)))
11229             return -TARGET_EFAULT;
11230         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11231         unlock_user(p, arg1, 0);
11232         return ret;
11233 #endif
11234 #ifdef TARGET_NR_getuid
11235     case TARGET_NR_getuid:
11236         return get_errno(high2lowuid(getuid()));
11237 #endif
11238 #ifdef TARGET_NR_getgid
11239     case TARGET_NR_getgid:
11240         return get_errno(high2lowgid(getgid()));
11241 #endif
11242 #ifdef TARGET_NR_geteuid
11243     case TARGET_NR_geteuid:
11244         return get_errno(high2lowuid(geteuid()));
11245 #endif
11246 #ifdef TARGET_NR_getegid
11247     case TARGET_NR_getegid:
11248         return get_errno(high2lowgid(getegid()));
11249 #endif
11250     case TARGET_NR_setreuid:
11251         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11252     case TARGET_NR_setregid:
11253         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11254     case TARGET_NR_getgroups:
11255         {
11256             int gidsetsize = arg1;
11257             target_id *target_grouplist;
11258             gid_t *grouplist;
11259             int i;
11260 
11261             grouplist = alloca(gidsetsize * sizeof(gid_t));
11262             ret = get_errno(getgroups(gidsetsize, grouplist));
11263             if (gidsetsize == 0)
11264                 return ret;
11265             if (!is_error(ret)) {
11266                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11267                 if (!target_grouplist)
11268                     return -TARGET_EFAULT;
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
11271                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11272             }
11273         }
11274         return ret;
11275     case TARGET_NR_setgroups:
11276         {
11277             int gidsetsize = arg1;
11278             target_id *target_grouplist;
11279             gid_t *grouplist = NULL;
11280             int i;
11281             if (gidsetsize) {
11282                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11283                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11284                 if (!target_grouplist) {
11285                     return -TARGET_EFAULT;
11286                 }
11287                 for (i = 0; i < gidsetsize; i++) {
11288                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11289                 }
11290                 unlock_user(target_grouplist, arg2, 0);
11291             }
11292             return get_errno(setgroups(gidsetsize, grouplist));
11293         }
11294     case TARGET_NR_fchown:
11295         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11296 #if defined(TARGET_NR_fchownat)
11297     case TARGET_NR_fchownat:
11298         if (!(p = lock_user_string(arg2)))
11299             return -TARGET_EFAULT;
11300         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11301                                  low2highgid(arg4), arg5));
11302         unlock_user(p, arg2, 0);
11303         return ret;
11304 #endif
11305 #ifdef TARGET_NR_setresuid
11306     case TARGET_NR_setresuid:
11307         return get_errno(sys_setresuid(low2highuid(arg1),
11308                                        low2highuid(arg2),
11309                                        low2highuid(arg3)));
11310 #endif
11311 #ifdef TARGET_NR_getresuid
11312     case TARGET_NR_getresuid:
11313         {
11314             uid_t ruid, euid, suid;
11315             ret = get_errno(getresuid(&ruid, &euid, &suid));
11316             if (!is_error(ret)) {
11317                 if (put_user_id(high2lowuid(ruid), arg1)
11318                     || put_user_id(high2lowuid(euid), arg2)
11319                     || put_user_id(high2lowuid(suid), arg3))
11320                     return -TARGET_EFAULT;
11321             }
11322         }
11323         return ret;
11324 #endif
11325 #ifdef TARGET_NR_getresgid
11326     case TARGET_NR_setresgid:
11327         return get_errno(sys_setresgid(low2highgid(arg1),
11328                                        low2highgid(arg2),
11329                                        low2highgid(arg3)));
11330 #endif
11331 #ifdef TARGET_NR_getresgid
11332     case TARGET_NR_getresgid:
11333         {
11334             gid_t rgid, egid, sgid;
11335             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11336             if (!is_error(ret)) {
11337                 if (put_user_id(high2lowgid(rgid), arg1)
11338                     || put_user_id(high2lowgid(egid), arg2)
11339                     || put_user_id(high2lowgid(sgid), arg3))
11340                     return -TARGET_EFAULT;
11341             }
11342         }
11343         return ret;
11344 #endif
11345 #ifdef TARGET_NR_chown
11346     case TARGET_NR_chown:
11347         if (!(p = lock_user_string(arg1)))
11348             return -TARGET_EFAULT;
11349         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11350         unlock_user(p, arg1, 0);
11351         return ret;
11352 #endif
11353     case TARGET_NR_setuid:
11354         return get_errno(sys_setuid(low2highuid(arg1)));
11355     case TARGET_NR_setgid:
11356         return get_errno(sys_setgid(low2highgid(arg1)));
11357     case TARGET_NR_setfsuid:
11358         return get_errno(setfsuid(arg1));
11359     case TARGET_NR_setfsgid:
11360         return get_errno(setfsgid(arg1));
11361 
11362 #ifdef TARGET_NR_lchown32
11363     case TARGET_NR_lchown32:
11364         if (!(p = lock_user_string(arg1)))
11365             return -TARGET_EFAULT;
11366         ret = get_errno(lchown(p, arg2, arg3));
11367         unlock_user(p, arg1, 0);
11368         return ret;
11369 #endif
11370 #ifdef TARGET_NR_getuid32
11371     case TARGET_NR_getuid32:
11372         return get_errno(getuid());
11373 #endif
11374 
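    /*
     * The Alpha getx[ug]id calls return two values: the effective ID in
     * register a4 and the real ID as the normal syscall result.
     */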
11375 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
11384 #endif
11385 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
11394 #endif
11395 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11396     /* Alpha specific */
11397     case TARGET_NR_osf_getsysinfo:
11398         ret = -TARGET_EOPNOTSUPP;
11399         switch (arg1) {
11400           case TARGET_GSI_IEEE_FP_CONTROL:
11401             {
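                /*
                 * The exception status bits live in the hardware FPCR;
                 * merge them into the saved software completion control
                 * word before returning it to the guest.
                 */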
11402                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11403                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11404 
11405                 swcr &= ~SWCR_STATUS_MASK;
11406                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11407 
                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
11410                 ret = 0;
11411             }
11412             break;
11413 
11414           /* case GSI_IEEE_STATE_AT_SIGNAL:
11415              -- Not implemented in linux kernel.
11416              case GSI_UACPROC:
11417              -- Retrieves current unaligned access state; not much used.
11418              case GSI_PROC_TYPE:
11419              -- Retrieves implver information; surely not used.
11420              case GSI_GET_HWRPB:
11421              -- Grabs a copy of the HWRPB; surely not used.
11422           */
11423         }
11424         return ret;
11425 #endif
11426 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11427     /* Alpha specific */
11428     case TARGET_NR_osf_setsysinfo:
11429         ret = -TARGET_EOPNOTSUPP;
11430         switch (arg1) {
11431           case TARGET_SSI_IEEE_FP_CONTROL:
11432             {
11433                 uint64_t swcr, fpcr;
11434 
11435                 if (get_user_u64 (swcr, arg2)) {
11436                     return -TARGET_EFAULT;
11437                 }
11438 
11439                 /*
11440                  * The kernel calls swcr_update_status to update the
11441                  * status bits from the fpcr at every point that it
11442                  * could be queried.  Therefore, we store the status
11443                  * bits only in FPCR.
11444                  */
11445                 ((CPUAlphaState *)cpu_env)->swcr
11446                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11447 
11448                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11449                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11450                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11451                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11452                 ret = 0;
11453             }
11454             break;
11455 
11456           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11457             {
11458                 uint64_t exc, fpcr, fex;
11459 
11460                 if (get_user_u64(exc, arg2)) {
11461                     return -TARGET_EFAULT;
11462                 }
11463                 exc &= SWCR_STATUS_MASK;
11464                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11465 
11466                 /* Old exceptions are not signaled.  */
11467                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11468                 fex = exc & ~fex;
11469                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11470                 fex &= ((CPUArchState *)cpu_env)->swcr;
11471 
11472                 /* Update the hardware fpcr.  */
11473                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11474                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11475 
11476                 if (fex) {
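                    /*
                     * Raise SIGFPE for the newly signalled, enabled
                     * exceptions, mapping the exception bits to a
                     * si_code (later checks take precedence).
                     */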
11477                     int si_code = TARGET_FPE_FLTUNK;
11478                     target_siginfo_t info;
11479 
11480                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11481                         si_code = TARGET_FPE_FLTUND;
11482                     }
11483                     if (fex & SWCR_TRAP_ENABLE_INE) {
11484                         si_code = TARGET_FPE_FLTRES;
11485                     }
11486                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11487                         si_code = TARGET_FPE_FLTUND;
11488                     }
11489                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11490                         si_code = TARGET_FPE_FLTOVF;
11491                     }
11492                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11493                         si_code = TARGET_FPE_FLTDIV;
11494                     }
11495                     if (fex & SWCR_TRAP_ENABLE_INV) {
11496                         si_code = TARGET_FPE_FLTINV;
11497                     }
11498 
11499                     info.si_signo = SIGFPE;
11500                     info.si_errno = 0;
11501                     info.si_code = si_code;
11502                     info._sifields._sigfault._addr
11503                         = ((CPUArchState *)cpu_env)->pc;
11504                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11505                                  QEMU_SI_FAULT, &info);
11506                 }
11507                 ret = 0;
11508             }
11509             break;
11510 
11511           /* case SSI_NVPAIRS:
11512              -- Used with SSIN_UACPROC to enable unaligned accesses.
11513              case SSI_IEEE_STATE_AT_SIGNAL:
11514              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11515              -- Not implemented in linux kernel
11516           */
11517         }
11518         return ret;
11519 #endif
11520 #ifdef TARGET_NR_osf_sigprocmask
11521     /* Alpha specific.  */
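    /*
     * Unlike rt_sigprocmask, the OSF/1 variant passes the old-style
     * signal mask by value and returns the previous mask as the
     * syscall result.
     */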
11522     case TARGET_NR_osf_sigprocmask:
11523         {
11524             abi_ulong mask;
11525             int how;
11526             sigset_t set, oldset;
11527 
            switch (arg1) {
11529             case TARGET_SIG_BLOCK:
11530                 how = SIG_BLOCK;
11531                 break;
11532             case TARGET_SIG_UNBLOCK:
11533                 how = SIG_UNBLOCK;
11534                 break;
11535             case TARGET_SIG_SETMASK:
11536                 how = SIG_SETMASK;
11537                 break;
11538             default:
11539                 return -TARGET_EINVAL;
11540             }
11541             mask = arg2;
11542             target_to_host_old_sigset(&set, &mask);
11543             ret = do_sigprocmask(how, &set, &oldset);
11544             if (!ret) {
11545                 host_to_target_old_sigset(&mask, &oldset);
11546                 ret = mask;
11547             }
11548         }
11549         return ret;
11550 #endif
11551 
11552 #ifdef TARGET_NR_getgid32
11553     case TARGET_NR_getgid32:
11554         return get_errno(getgid());
11555 #endif
11556 #ifdef TARGET_NR_geteuid32
11557     case TARGET_NR_geteuid32:
11558         return get_errno(geteuid());
11559 #endif
11560 #ifdef TARGET_NR_getegid32
11561     case TARGET_NR_getegid32:
11562         return get_errno(getegid());
11563 #endif
11564 #ifdef TARGET_NR_setreuid32
11565     case TARGET_NR_setreuid32:
11566         return get_errno(setreuid(arg1, arg2));
11567 #endif
11568 #ifdef TARGET_NR_setregid32
11569     case TARGET_NR_setregid32:
11570         return get_errno(setregid(arg1, arg2));
11571 #endif
11572 #ifdef TARGET_NR_getgroups32
11573     case TARGET_NR_getgroups32:
11574         {
11575             int gidsetsize = arg1;
11576             uint32_t *target_grouplist;
11577             gid_t *grouplist;
11578             int i;
11579 
11580             grouplist = alloca(gidsetsize * sizeof(gid_t));
11581             ret = get_errno(getgroups(gidsetsize, grouplist));
11582             if (gidsetsize == 0)
11583                 return ret;
11584             if (!is_error(ret)) {
11585                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11586                 if (!target_grouplist) {
11587                     return -TARGET_EFAULT;
11588                 }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
11591                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11592             }
11593         }
11594         return ret;
11595 #endif
11596 #ifdef TARGET_NR_setgroups32
11597     case TARGET_NR_setgroups32:
11598         {
11599             int gidsetsize = arg1;
11600             uint32_t *target_grouplist;
11601             gid_t *grouplist;
11602             int i;
11603 
11604             grouplist = alloca(gidsetsize * sizeof(gid_t));
11605             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11606             if (!target_grouplist) {
11607                 return -TARGET_EFAULT;
11608             }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
11611             unlock_user(target_grouplist, arg2, 0);
11612             return get_errno(setgroups(gidsetsize, grouplist));
11613         }
11614 #endif
11615 #ifdef TARGET_NR_fchown32
11616     case TARGET_NR_fchown32:
11617         return get_errno(fchown(arg1, arg2, arg3));
11618 #endif
11619 #ifdef TARGET_NR_setresuid32
11620     case TARGET_NR_setresuid32:
11621         return get_errno(sys_setresuid(arg1, arg2, arg3));
11622 #endif
11623 #ifdef TARGET_NR_getresuid32
11624     case TARGET_NR_getresuid32:
11625         {
11626             uid_t ruid, euid, suid;
11627             ret = get_errno(getresuid(&ruid, &euid, &suid));
11628             if (!is_error(ret)) {
11629                 if (put_user_u32(ruid, arg1)
11630                     || put_user_u32(euid, arg2)
11631                     || put_user_u32(suid, arg3))
11632                     return -TARGET_EFAULT;
11633             }
11634         }
11635         return ret;
11636 #endif
11637 #ifdef TARGET_NR_setresgid32
11638     case TARGET_NR_setresgid32:
11639         return get_errno(sys_setresgid(arg1, arg2, arg3));
11640 #endif
11641 #ifdef TARGET_NR_getresgid32
11642     case TARGET_NR_getresgid32:
11643         {
11644             gid_t rgid, egid, sgid;
11645             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11646             if (!is_error(ret)) {
11647                 if (put_user_u32(rgid, arg1)
11648                     || put_user_u32(egid, arg2)
11649                     || put_user_u32(sgid, arg3))
11650                     return -TARGET_EFAULT;
11651             }
11652         }
11653         return ret;
11654 #endif
11655 #ifdef TARGET_NR_chown32
11656     case TARGET_NR_chown32:
11657         if (!(p = lock_user_string(arg1)))
11658             return -TARGET_EFAULT;
11659         ret = get_errno(chown(p, arg2, arg3));
11660         unlock_user(p, arg1, 0);
11661         return ret;
11662 #endif
11663 #ifdef TARGET_NR_setuid32
11664     case TARGET_NR_setuid32:
11665         return get_errno(sys_setuid(arg1));
11666 #endif
11667 #ifdef TARGET_NR_setgid32
11668     case TARGET_NR_setgid32:
11669         return get_errno(sys_setgid(arg1));
11670 #endif
11671 #ifdef TARGET_NR_setfsuid32
11672     case TARGET_NR_setfsuid32:
11673         return get_errno(setfsuid(arg1));
11674 #endif
11675 #ifdef TARGET_NR_setfsgid32
11676     case TARGET_NR_setfsgid32:
11677         return get_errno(setfsgid(arg1));
11678 #endif
11679 #ifdef TARGET_NR_mincore
11680     case TARGET_NR_mincore:
11681         {
11682             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11683             if (!a) {
11684                 return -TARGET_ENOMEM;
11685             }
11686             p = lock_user_string(arg3);
11687             if (!p) {
11688                 ret = -TARGET_EFAULT;
11689             } else {
11690                 ret = get_errno(mincore(a, arg2, p));
11691                 unlock_user(p, arg3, ret);
11692             }
11693             unlock_user(a, arg1, 0);
11694         }
11695         return ret;
11696 #endif
11697 #ifdef TARGET_NR_arm_fadvise64_64
11698     case TARGET_NR_arm_fadvise64_64:
11699         /* arm_fadvise64_64 looks like fadvise64_64 but
11700          * with different argument order: fd, advice, offset, len
11701          * rather than the usual fd, offset, len, advice.
11702          * Note that offset and len are both 64-bit so appear as
11703          * pairs of 32-bit registers.
11704          */
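        /*
         * For illustration: a 32-bit guest prefetching 4 KiB at an 8 GiB
         * file offset arrives here with the offset split across arg3/arg4
         * and the length split across arg5/arg6; target_offset64() joins
         * each register pair back into one 64-bit value for the host
         * posix_fadvise() call below.
         */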
11705         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11706                             target_offset64(arg5, arg6), arg2);
11707         return -host_to_target_errno(ret);
11708 #endif
11709 
11710 #if TARGET_ABI_BITS == 32
11711 
11712 #ifdef TARGET_NR_fadvise64_64
11713     case TARGET_NR_fadvise64_64:
11714 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11715         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11716         ret = arg2;
11717         arg2 = arg3;
11718         arg3 = arg4;
11719         arg4 = arg5;
11720         arg5 = arg6;
11721         arg6 = ret;
11722 #else
11723         /* 6 args: fd, offset (high, low), len (high, low), advice */
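        /*
         * Note: on some 32-bit ABIs a 64-bit argument must start in an
         * even-numbered register, so a padding register is inserted and
         * every later argument shifts up by one slot; regpairs_aligned()
         * reports whether the current target does this, in which case the
         * arguments are re-packed below.
         */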
11724         if (regpairs_aligned(cpu_env, num)) {
11725             /* offset is in (3,4), len in (5,6) and advice in 7 */
11726             arg2 = arg3;
11727             arg3 = arg4;
11728             arg4 = arg5;
11729             arg5 = arg6;
11730             arg6 = arg7;
11731         }
11732 #endif
11733         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11734                             target_offset64(arg4, arg5), arg6);
11735         return -host_to_target_errno(ret);
11736 #endif
11737 
11738 #ifdef TARGET_NR_fadvise64
11739     case TARGET_NR_fadvise64:
11740         /* 5 args: fd, offset (high, low), len, advice */
11741         if (regpairs_aligned(cpu_env, num)) {
11742             /* offset is in (3,4), len in 5 and advice in 6 */
11743             arg2 = arg3;
11744             arg3 = arg4;
11745             arg4 = arg5;
11746             arg5 = arg6;
11747         }
11748         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11749         return -host_to_target_errno(ret);
11750 #endif
11751 
11752 #else /* not a 32-bit ABI */
11753 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11754 #ifdef TARGET_NR_fadvise64_64
11755     case TARGET_NR_fadvise64_64:
11756 #endif
11757 #ifdef TARGET_NR_fadvise64
11758     case TARGET_NR_fadvise64:
11759 #endif
11760 #ifdef TARGET_S390X
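        /*
         * The s390x guest ABI defines POSIX_FADV_DONTNEED/NOREUSE as 6/7
         * rather than the generic 4/5, so remap them (and turn the generic
         * values into ones the host will reject) before calling the host.
         */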
11761         switch (arg4) {
11762         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11763         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11764         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11765         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11766         default: break;
11767         }
11768 #endif
11769         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11770 #endif
11771 #endif /* end of 64-bit ABI fadvise handling */
11772 
11773 #ifdef TARGET_NR_madvise
11774     case TARGET_NR_madvise:
11775         /* A straight passthrough may not be safe because qemu sometimes
11776            turns private file-backed mappings into anonymous mappings.
11777            This will break MADV_DONTNEED.
11778            This is a hint, so ignoring and returning success is ok.  */
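        /*
         * Example of the problem: MADV_DONTNEED on a private file-backed
         * mapping normally makes later reads re-fault the original file
         * contents, but if QEMU has quietly replaced that mapping with an
         * anonymous one, the same call would leave the pages zero-filled
         * instead, changing guest-visible behaviour.
         */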
11779         return 0;
11780 #endif
11781 #ifdef TARGET_NR_fcntl64
11782     case TARGET_NR_fcntl64:
11783     {
11784         int cmd;
11785         struct flock64 fl;
11786         from_flock64_fn *copyfrom = copy_from_user_flock64;
11787         to_flock64_fn *copyto = copy_to_user_flock64;
11788 
11789 #ifdef TARGET_ARM
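        /*
         * The OABI variants are needed because the old ARM ABI only aligns
         * 64-bit members to 4 bytes, so the guest's struct flock64 layout
         * (padding around the 64-bit fields) differs from the EABI one and
         * has to be converted with different helpers.
         */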
11790         if (!((CPUARMState *)cpu_env)->eabi) {
11791             copyfrom = copy_from_user_oabi_flock64;
11792             copyto = copy_to_user_oabi_flock64;
11793         }
11794 #endif
11795 
11796         cmd = target_to_host_fcntl_cmd(arg2);
11797         if (cmd == -TARGET_EINVAL) {
11798             return cmd;
11799         }
11800 
11801         switch (arg2) {
11802         case TARGET_F_GETLK64:
11803             ret = copyfrom(&fl, arg3);
11804             if (ret) {
11805                 break;
11806             }
11807             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11808             if (ret == 0) {
11809                 ret = copyto(arg3, &fl);
11810             }
11811             break;
11812 
11813         case TARGET_F_SETLK64:
11814         case TARGET_F_SETLKW64:
11815             ret = copyfrom(&fl, arg3);
11816             if (ret) {
11817                 break;
11818             }
11819             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11820             break;
11821         default:
11822             ret = do_fcntl(arg1, arg2, arg3);
11823             break;
11824         }
11825         return ret;
11826     }
11827 #endif
11828 #ifdef TARGET_NR_cacheflush
11829     case TARGET_NR_cacheflush:
11830         /* self-modifying code is handled automatically, so nothing needed */
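        /*
         * QEMU invalidates its translated blocks whenever guest code pages
         * are written, so there is no guest-visible cache that needs an
         * explicit flush here.
         */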
11831         return 0;
11832 #endif
11833 #ifdef TARGET_NR_getpagesize
11834     case TARGET_NR_getpagesize:
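        /* Report the guest's page size, which may differ from the host's. */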
11835         return TARGET_PAGE_SIZE;
11836 #endif
11837     case TARGET_NR_gettid:
11838         return get_errno(sys_gettid());
11839 #ifdef TARGET_NR_readahead
11840     case TARGET_NR_readahead:
11841 #if TARGET_ABI_BITS == 32
11842         if (regpairs_aligned(cpu_env, num)) {
11843             arg2 = arg3;
11844             arg3 = arg4;
11845             arg4 = arg5;
11846         }
11847         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11848 #else
11849         ret = get_errno(readahead(arg1, arg2, arg3));
11850 #endif
11851         return ret;
11852 #endif
11853 #ifdef CONFIG_ATTR
11854 #ifdef TARGET_NR_setxattr
11855     case TARGET_NR_listxattr:
11856     case TARGET_NR_llistxattr:
11857     {
11858         void *p, *b = 0;
11859         if (arg2) {
11860             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11861             if (!b) {
11862                 return -TARGET_EFAULT;
11863             }
11864         }
11865         p = lock_user_string(arg1);
11866         if (p) {
11867             if (num == TARGET_NR_listxattr) {
11868                 ret = get_errno(listxattr(p, b, arg3));
11869             } else {
11870                 ret = get_errno(llistxattr(p, b, arg3));
11871             }
11872         } else {
11873             ret = -TARGET_EFAULT;
11874         }
11875         unlock_user(p, arg1, 0);
11876         unlock_user(b, arg2, arg3);
11877         return ret;
11878     }
11879     case TARGET_NR_flistxattr:
11880     {
11881         void *b = 0;
11882         if (arg2) {
11883             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11884             if (!b) {
11885                 return -TARGET_EFAULT;
11886             }
11887         }
11888         ret = get_errno(flistxattr(arg1, b, arg3));
11889         unlock_user(b, arg2, arg3);
11890         return ret;
11891     }
11892     case TARGET_NR_setxattr:
11893     case TARGET_NR_lsetxattr:
11894         {
11895             void *p, *n, *v = 0;
11896             if (arg3) {
11897                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11898                 if (!v) {
11899                     return -TARGET_EFAULT;
11900                 }
11901             }
11902             p = lock_user_string(arg1);
11903             n = lock_user_string(arg2);
11904             if (p && n) {
11905                 if (num == TARGET_NR_setxattr) {
11906                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11907                 } else {
11908                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11909                 }
11910             } else {
11911                 ret = -TARGET_EFAULT;
11912             }
11913             unlock_user(p, arg1, 0);
11914             unlock_user(n, arg2, 0);
11915             unlock_user(v, arg3, 0);
11916         }
11917         return ret;
11918     case TARGET_NR_fsetxattr:
11919         {
11920             void *n, *v = 0;
11921             if (arg3) {
11922                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11923                 if (!v) {
11924                     return -TARGET_EFAULT;
11925                 }
11926             }
11927             n = lock_user_string(arg2);
11928             if (n) {
11929                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11930             } else {
11931                 ret = -TARGET_EFAULT;
11932             }
11933             unlock_user(n, arg2, 0);
11934             unlock_user(v, arg3, 0);
11935         }
11936         return ret;
11937     case TARGET_NR_getxattr:
11938     case TARGET_NR_lgetxattr:
11939         {
11940             void *p, *n, *v = 0;
11941             if (arg3) {
11942                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11943                 if (!v) {
11944                     return -TARGET_EFAULT;
11945                 }
11946             }
11947             p = lock_user_string(arg1);
11948             n = lock_user_string(arg2);
11949             if (p && n) {
11950                 if (num == TARGET_NR_getxattr) {
11951                     ret = get_errno(getxattr(p, n, v, arg4));
11952                 } else {
11953                     ret = get_errno(lgetxattr(p, n, v, arg4));
11954                 }
11955             } else {
11956                 ret = -TARGET_EFAULT;
11957             }
11958             unlock_user(p, arg1, 0);
11959             unlock_user(n, arg2, 0);
11960             unlock_user(v, arg3, arg4);
11961         }
11962         return ret;
11963     case TARGET_NR_fgetxattr:
11964         {
11965             void *n, *v = 0;
11966             if (arg3) {
11967                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11968                 if (!v) {
11969                     return -TARGET_EFAULT;
11970                 }
11971             }
11972             n = lock_user_string(arg2);
11973             if (n) {
11974                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11975             } else {
11976                 ret = -TARGET_EFAULT;
11977             }
11978             unlock_user(n, arg2, 0);
11979             unlock_user(v, arg3, arg4);
11980         }
11981         return ret;
11982     case TARGET_NR_removexattr:
11983     case TARGET_NR_lremovexattr:
11984         {
11985             void *p, *n;
11986             p = lock_user_string(arg1);
11987             n = lock_user_string(arg2);
11988             if (p && n) {
11989                 if (num == TARGET_NR_removexattr) {
11990                     ret = get_errno(removexattr(p, n));
11991                 } else {
11992                     ret = get_errno(lremovexattr(p, n));
11993                 }
11994             } else {
11995                 ret = -TARGET_EFAULT;
11996             }
11997             unlock_user(p, arg1, 0);
11998             unlock_user(n, arg2, 0);
11999         }
12000         return ret;
12001     case TARGET_NR_fremovexattr:
12002         {
12003             void *n;
12004             n = lock_user_string(arg2);
12005             if (n) {
12006                 ret = get_errno(fremovexattr(arg1, n));
12007             } else {
12008                 ret = -TARGET_EFAULT;
12009             }
12010             unlock_user(n, arg2, 0);
12011         }
12012         return ret;
12013 #endif
12014 #endif /* CONFIG_ATTR */
12015 #ifdef TARGET_NR_set_thread_area
12016     case TARGET_NR_set_thread_area:
12017 #if defined(TARGET_MIPS)
12018       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12019       return 0;
12020 #elif defined(TARGET_CRIS)
12021       if (arg1 & 0xff) {
12022           ret = -TARGET_EINVAL;
12023       } else {
12024           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12025           ret = 0;
12026       }
12027       return ret;
12028 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12029       return do_set_thread_area(cpu_env, arg1);
12030 #elif defined(TARGET_M68K)
12031       {
12032           TaskState *ts = cpu->opaque;
12033           ts->tp_value = arg1;
12034           return 0;
12035       }
12036 #else
12037       return -TARGET_ENOSYS;
12038 #endif
12039 #endif
12040 #ifdef TARGET_NR_get_thread_area
12041     case TARGET_NR_get_thread_area:
12042 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12043         return do_get_thread_area(cpu_env, arg1);
12044 #elif defined(TARGET_M68K)
12045         {
12046             TaskState *ts = cpu->opaque;
12047             return ts->tp_value;
12048         }
12049 #else
12050         return -TARGET_ENOSYS;
12051 #endif
12052 #endif
12053 #ifdef TARGET_NR_getdomainname
12054     case TARGET_NR_getdomainname:
12055         return -TARGET_ENOSYS;
12056 #endif
12057 
12058 #ifdef TARGET_NR_clock_settime
12059     case TARGET_NR_clock_settime:
12060     {
12061         struct timespec ts;
12062 
12063         ret = target_to_host_timespec(&ts, arg2);
12064         if (!is_error(ret)) {
12065             ret = get_errno(clock_settime(arg1, &ts));
12066         }
12067         return ret;
12068     }
12069 #endif
12070 #ifdef TARGET_NR_clock_settime64
12071     case TARGET_NR_clock_settime64:
12072     {
12073         struct timespec ts;
12074 
12075         ret = target_to_host_timespec64(&ts, arg2);
12076         if (!is_error(ret)) {
12077             ret = get_errno(clock_settime(arg1, &ts));
12078         }
12079         return ret;
12080     }
12081 #endif
12082 #ifdef TARGET_NR_clock_gettime
12083     case TARGET_NR_clock_gettime:
12084     {
12085         struct timespec ts;
12086         ret = get_errno(clock_gettime(arg1, &ts));
12087         if (!is_error(ret)) {
12088             ret = host_to_target_timespec(arg2, &ts);
12089         }
12090         return ret;
12091     }
12092 #endif
12093 #ifdef TARGET_NR_clock_gettime64
12094     case TARGET_NR_clock_gettime64:
12095     {
12096         struct timespec ts;
12097         ret = get_errno(clock_gettime(arg1, &ts));
12098         if (!is_error(ret)) {
12099             ret = host_to_target_timespec64(arg2, &ts);
12100         }
12101         return ret;
12102     }
12103 #endif
12104 #ifdef TARGET_NR_clock_getres
12105     case TARGET_NR_clock_getres:
12106     {
12107         struct timespec ts;
12108         ret = get_errno(clock_getres(arg1, &ts));
12109         if (!is_error(ret) && arg2 && host_to_target_timespec(arg2, &ts)) {
12110             return -TARGET_EFAULT;
12111         }
12112         return ret;
12113     }
12114 #endif
12115 #ifdef TARGET_NR_clock_getres_time64
12116     case TARGET_NR_clock_getres_time64:
12117     {
12118         struct timespec ts;
12119         ret = get_errno(clock_getres(arg1, &ts));
12120         if (!is_error(ret) && arg2 && host_to_target_timespec64(arg2, &ts)) {
12121             return -TARGET_EFAULT;
12122         }
12123         return ret;
12124     }
12125 #endif
12126 #ifdef TARGET_NR_clock_nanosleep
12127     case TARGET_NR_clock_nanosleep:
12128     {
12129         struct timespec ts;
12130         if (target_to_host_timespec(&ts, arg3)) {
12131             return -TARGET_EFAULT;
12132         }
12133         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12134                                              &ts, arg4 ? &ts : NULL));
12135         /*
12136          * If the call is interrupted by a signal handler, it fails with
12137          * -TARGET_EINTR. In that case, if arg4 is not NULL and arg2 is not
12138          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12139          */
12140         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12141             host_to_target_timespec(arg4, &ts)) {
12142             return -TARGET_EFAULT;
12143         }
12144 
12145         return ret;
12146     }
12147 #endif
12148 #ifdef TARGET_NR_clock_nanosleep_time64
12149     case TARGET_NR_clock_nanosleep_time64:
12150     {
12151         struct timespec ts;
12152 
12153         if (target_to_host_timespec64(&ts, arg3)) {
12154             return -TARGET_EFAULT;
12155         }
12156 
12157         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12158                                              &ts, arg4 ? &ts : NULL));
12159 
12160         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12161             host_to_target_timespec64(arg4, &ts)) {
12162             return -TARGET_EFAULT;
12163         }
12164         return ret;
12165     }
12166 #endif
12167 
12168 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12169     case TARGET_NR_set_tid_address:
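        /*
         * g2h() turns the guest address into the corresponding host
         * pointer, so the host kernel will clear and futex-wake the
         * child-tid word directly in guest memory when the thread exits.
         */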
12170         return get_errno(set_tid_address((int *)g2h(arg1)));
12171 #endif
12172 
12173     case TARGET_NR_tkill:
12174         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12175 
12176     case TARGET_NR_tgkill:
12177         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12178                          target_to_host_signal(arg3)));
12179 
12180 #ifdef TARGET_NR_set_robust_list
12181     case TARGET_NR_set_robust_list:
12182     case TARGET_NR_get_robust_list:
12183         /* The ABI for supporting robust futexes has userspace pass
12184          * the kernel a pointer to a linked list which is updated by
12185          * userspace after the syscall; the list is walked by the kernel
12186          * when the thread exits. Since the linked list in QEMU guest
12187          * memory isn't a valid linked list for the host and we have
12188          * no way to reliably intercept the thread-death event, we can't
12189          * support these. Silently return ENOSYS so that guest userspace
12190          * falls back to a non-robust futex implementation (which should
12191          * be OK except in the corner case of the guest crashing while
12192          * holding a mutex that is shared with another process via
12193          * shared memory).
12194          */
12195         return -TARGET_ENOSYS;
12196 #endif
12197 
12198 #if defined(TARGET_NR_utimensat)
12199     case TARGET_NR_utimensat:
12200         {
12201             struct timespec *tsp, ts[2];
12202             if (!arg3) {
12203                 tsp = NULL;
12204             } else {
12205                 if (target_to_host_timespec(ts, arg3)) {
12206                     return -TARGET_EFAULT;
12207                 }
12208                 if (target_to_host_timespec(ts + 1, arg3 +
12209                                             sizeof(struct target_timespec))) {
12210                     return -TARGET_EFAULT;
12211                 }
12212                 tsp = ts;
12213             }
12214             if (!arg2) {
12215                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12216             } else {
12217                 if (!(p = lock_user_string(arg2))) {
12218                     return -TARGET_EFAULT;
12219                 }
12220                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12221                 unlock_user(p, arg2, 0);
12222             }
12223         }
12224         return ret;
12225 #endif
12226 #ifdef TARGET_NR_utimensat_time64
12227     case TARGET_NR_utimensat_time64:
12228         {
12229             struct timespec *tsp, ts[2];
12230             if (!arg3) {
12231                 tsp = NULL;
12232             } else {
12233                 if (target_to_host_timespec64(ts, arg3)) {
12234                     return -TARGET_EFAULT;
12235                 }
12236                 if (target_to_host_timespec64(ts + 1, arg3 +
12237                                      sizeof(struct target__kernel_timespec))) {
12238                     return -TARGET_EFAULT;
12239                 }
12240                 tsp = ts;
12241             }
12242             if (!arg2) {
12243                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12244             } else {
12245                 p = lock_user_string(arg2);
12246                 if (!p) {
12247                     return -TARGET_EFAULT;
12248                 }
12249                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12250                 unlock_user(p, arg2, 0);
12251             }
12252         }
12253         return ret;
12254 #endif
12255 #ifdef TARGET_NR_futex
12256     case TARGET_NR_futex:
12257         return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12258 #endif
12259 #ifdef TARGET_NR_futex_time64
12260     case TARGET_NR_futex_time64:
12261         return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
12262 #endif
12263 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12264     case TARGET_NR_inotify_init:
12265         ret = get_errno(sys_inotify_init());
12266         if (ret >= 0) {
12267             fd_trans_register(ret, &target_inotify_trans);
12268         }
12269         return ret;
12270 #endif
12271 #ifdef CONFIG_INOTIFY1
12272 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12273     case TARGET_NR_inotify_init1:
12274         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12275                                           fcntl_flags_tbl)));
12276         if (ret >= 0) {
12277             fd_trans_register(ret, &target_inotify_trans);
12278         }
12279         return ret;
12280 #endif
12281 #endif
12282 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12283     case TARGET_NR_inotify_add_watch:
12284         p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
12285         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12286         unlock_user(p, arg2, 0);
12287         return ret;
12288 #endif
12289 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12290     case TARGET_NR_inotify_rm_watch:
12291         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12292 #endif
12293 
12294 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12295     case TARGET_NR_mq_open:
12296         {
12297             struct mq_attr posix_mq_attr;
12298             struct mq_attr *pposix_mq_attr;
12299             int host_flags;
12300 
12301             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12302             pposix_mq_attr = NULL;
12303             if (arg4) {
12304                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12305                     return -TARGET_EFAULT;
12306                 }
12307                 pposix_mq_attr = &posix_mq_attr;
12308             }
12309             p = lock_user_string(arg1 - 1);
12310             if (!p) {
12311                 return -TARGET_EFAULT;
12312             }
12313             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12314             unlock_user(p, arg1, 0);
12315         }
12316         return ret;
12317 
12318     case TARGET_NR_mq_unlink:
12319         p = lock_user_string(arg1 - 1);
12320         if (!p) {
12321             return -TARGET_EFAULT;
12322         }
12323         ret = get_errno(mq_unlink(p));
12324         unlock_user(p, arg1, 0);
12325         return ret;
12326 
12327 #ifdef TARGET_NR_mq_timedsend
12328     case TARGET_NR_mq_timedsend:
12329         {
12330             struct timespec ts;
12331 
12332             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12333             if (arg5 != 0) {
12334                 if (target_to_host_timespec(&ts, arg5)) {
12335                     return -TARGET_EFAULT;
12336                 }
12337                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12338                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12339                     return -TARGET_EFAULT;
12340                 }
12341             } else {
12342                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12343             }
12344             unlock_user(p, arg2, arg3);
12345         }
12346         return ret;
12347 #endif
12348 #ifdef TARGET_NR_mq_timedsend_time64
12349     case TARGET_NR_mq_timedsend_time64:
12350         {
12351             struct timespec ts;
12352 
12353             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12354             if (arg5 != 0) {
12355                 if (target_to_host_timespec64(&ts, arg5)) {
12356                     return -TARGET_EFAULT;
12357                 }
12358                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12359                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12360                     return -TARGET_EFAULT;
12361                 }
12362             } else {
12363                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12364             }
12365             unlock_user(p, arg2, arg3);
12366         }
12367         return ret;
12368 #endif
12369 
12370 #ifdef TARGET_NR_mq_timedreceive
12371     case TARGET_NR_mq_timedreceive:
12372         {
12373             struct timespec ts;
12374             unsigned int prio;
12375 
12376             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12377             if (arg5 != 0) {
12378                 if (target_to_host_timespec(&ts, arg5)) {
12379                     return -TARGET_EFAULT;
12380                 }
12381                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12382                                                      &prio, &ts));
12383                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12384                     return -TARGET_EFAULT;
12385                 }
12386             } else {
12387                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12388                                                      &prio, NULL));
12389             }
12390             unlock_user(p, arg2, arg3);
12391             if (arg4 != 0)
12392                 put_user_u32(prio, arg4);
12393         }
12394         return ret;
12395 #endif
12396 #ifdef TARGET_NR_mq_timedreceive_time64
12397     case TARGET_NR_mq_timedreceive_time64:
12398         {
12399             struct timespec ts;
12400             unsigned int prio;
12401 
12402             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12403             if (arg5 != 0) {
12404                 if (target_to_host_timespec64(&ts, arg5)) {
12405                     return -TARGET_EFAULT;
12406                 }
12407                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12408                                                      &prio, &ts));
12409                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12410                     return -TARGET_EFAULT;
12411                 }
12412             } else {
12413                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12414                                                      &prio, NULL));
12415             }
12416             unlock_user(p, arg2, arg3);
12417             if (arg4 != 0) {
12418                 put_user_u32(prio, arg4);
12419             }
12420         }
12421         return ret;
12422 #endif
12423 
12424     /* Not implemented for now... */
12425 /*     case TARGET_NR_mq_notify: */
12426 /*         break; */
12427 
12428     case TARGET_NR_mq_getsetattr:
12429         {
12430             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12431             ret = 0;
12432             if (arg2 != 0) {
12433                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12434                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12435                                            &posix_mq_attr_out));
12436             } else if (arg3 != 0) {
12437                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12438             }
12439             if (ret == 0 && arg3 != 0) {
12440                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12441             }
12442         }
12443         return ret;
12444 #endif
12445 
12446 #ifdef CONFIG_SPLICE
12447 #ifdef TARGET_NR_tee
12448     case TARGET_NR_tee:
12449         {
12450             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12451         }
12452         return ret;
12453 #endif
12454 #ifdef TARGET_NR_splice
12455     case TARGET_NR_splice:
12456         {
12457             loff_t loff_in, loff_out;
12458             loff_t *ploff_in = NULL, *ploff_out = NULL;
12459             if (arg2) {
12460                 if (get_user_u64(loff_in, arg2)) {
12461                     return -TARGET_EFAULT;
12462                 }
12463                 ploff_in = &loff_in;
12464             }
12465             if (arg4) {
12466                 if (get_user_u64(loff_out, arg4)) {
12467                     return -TARGET_EFAULT;
12468                 }
12469                 ploff_out = &loff_out;
12470             }
12471             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12472             if (arg2) {
12473                 if (put_user_u64(loff_in, arg2)) {
12474                     return -TARGET_EFAULT;
12475                 }
12476             }
12477             if (arg4) {
12478                 if (put_user_u64(loff_out, arg4)) {
12479                     return -TARGET_EFAULT;
12480                 }
12481             }
12482         }
12483         return ret;
12484 #endif
12485 #ifdef TARGET_NR_vmsplice
12486     case TARGET_NR_vmsplice:
12487         {
12488             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12489             if (vec != NULL) {
12490                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12491                 unlock_iovec(vec, arg2, arg3, 0);
12492             } else {
12493                 ret = -host_to_target_errno(errno);
12494             }
12495         }
12496         return ret;
12497 #endif
12498 #endif /* CONFIG_SPLICE */
12499 #ifdef CONFIG_EVENTFD
12500 #if defined(TARGET_NR_eventfd)
12501     case TARGET_NR_eventfd:
12502         ret = get_errno(eventfd(arg1, 0));
12503         if (ret >= 0) {
12504             fd_trans_register(ret, &target_eventfd_trans);
12505         }
12506         return ret;
12507 #endif
12508 #if defined(TARGET_NR_eventfd2)
12509     case TARGET_NR_eventfd2:
12510     {
12511         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12512         if (arg2 & TARGET_O_NONBLOCK) {
12513             host_flags |= O_NONBLOCK;
12514         }
12515         if (arg2 & TARGET_O_CLOEXEC) {
12516             host_flags |= O_CLOEXEC;
12517         }
12518         ret = get_errno(eventfd(arg1, host_flags));
12519         if (ret >= 0) {
12520             fd_trans_register(ret, &target_eventfd_trans);
12521         }
12522         return ret;
12523     }
12524 #endif
12525 #endif /* CONFIG_EVENTFD  */
12526 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12527     case TARGET_NR_fallocate:
12528 #if TARGET_ABI_BITS == 32
12529         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12530                                   target_offset64(arg5, arg6)));
12531 #else
12532         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12533 #endif
12534         return ret;
12535 #endif
12536 #if defined(CONFIG_SYNC_FILE_RANGE)
12537 #if defined(TARGET_NR_sync_file_range)
12538     case TARGET_NR_sync_file_range:
12539 #if TARGET_ABI_BITS == 32
12540 #if defined(TARGET_MIPS)
12541         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12542                                         target_offset64(arg5, arg6), arg7));
12543 #else
12544         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12545                                         target_offset64(arg4, arg5), arg6));
12546 #endif /* !TARGET_MIPS */
12547 #else
12548         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12549 #endif
12550         return ret;
12551 #endif
12552 #if defined(TARGET_NR_sync_file_range2) || \
12553     defined(TARGET_NR_arm_sync_file_range)
12554 #if defined(TARGET_NR_sync_file_range2)
12555     case TARGET_NR_sync_file_range2:
12556 #endif
12557 #if defined(TARGET_NR_arm_sync_file_range)
12558     case TARGET_NR_arm_sync_file_range:
12559 #endif
12560         /* This is like sync_file_range but the arguments are reordered */
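        /*
         * The reordering exists because ABIs that need 64-bit values in
         * aligned register pairs (notably Arm EABI) would otherwise waste
         * a register on padding; moving the flags argument forward keeps
         * offset and nbytes in properly aligned pairs.
         */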
12561 #if TARGET_ABI_BITS == 32
12562         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12563                                         target_offset64(arg5, arg6), arg2));
12564 #else
12565         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12566 #endif
12567         return ret;
12568 #endif
12569 #endif
12570 #if defined(TARGET_NR_signalfd4)
12571     case TARGET_NR_signalfd4:
12572         return do_signalfd4(arg1, arg2, arg4);
12573 #endif
12574 #if defined(TARGET_NR_signalfd)
12575     case TARGET_NR_signalfd:
12576         return do_signalfd4(arg1, arg2, 0);
12577 #endif
12578 #if defined(CONFIG_EPOLL)
12579 #if defined(TARGET_NR_epoll_create)
12580     case TARGET_NR_epoll_create:
12581         return get_errno(epoll_create(arg1));
12582 #endif
12583 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12584     case TARGET_NR_epoll_create1:
12585         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12586 #endif
12587 #if defined(TARGET_NR_epoll_ctl)
12588     case TARGET_NR_epoll_ctl:
12589     {
12590         struct epoll_event ep;
12591         struct epoll_event *epp = 0;
12592         if (arg4) {
12593             if (arg2 != EPOLL_CTL_DEL) {
12594                 struct target_epoll_event *target_ep;
12595                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12596                     return -TARGET_EFAULT;
12597                 }
12598                 ep.events = tswap32(target_ep->events);
12599                 /*
12600                  * The epoll_data_t union is just opaque data to the kernel,
12601                  * so we transfer all 64 bits across and need not worry what
12602                  * actual data type it is.
12603                  */
12604                 ep.data.u64 = tswap64(target_ep->data.u64);
12605                 unlock_user_struct(target_ep, arg4, 0);
12606             }
12607             /*
12608              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12609              * non-null pointer, even though this argument is ignored.
12611              */
12612             epp = &ep;
12613         }
12614         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12615     }
12616 #endif
12617 
12618 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12619 #if defined(TARGET_NR_epoll_wait)
12620     case TARGET_NR_epoll_wait:
12621 #endif
12622 #if defined(TARGET_NR_epoll_pwait)
12623     case TARGET_NR_epoll_pwait:
12624 #endif
12625     {
12626         struct target_epoll_event *target_ep;
12627         struct epoll_event *ep;
12628         int epfd = arg1;
12629         int maxevents = arg3;
12630         int timeout = arg4;
12631 
12632         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12633             return -TARGET_EINVAL;
12634         }
12635 
12636         target_ep = lock_user(VERIFY_WRITE, arg2,
12637                               maxevents * sizeof(struct target_epoll_event), 1);
12638         if (!target_ep) {
12639             return -TARGET_EFAULT;
12640         }
12641 
12642         ep = g_try_new(struct epoll_event, maxevents);
12643         if (!ep) {
12644             unlock_user(target_ep, arg2, 0);
12645             return -TARGET_ENOMEM;
12646         }
12647 
12648         switch (num) {
12649 #if defined(TARGET_NR_epoll_pwait)
12650         case TARGET_NR_epoll_pwait:
12651         {
12652             target_sigset_t *target_set;
12653             sigset_t _set, *set = &_set;
12654 
12655             if (arg5) {
12656                 if (arg6 != sizeof(target_sigset_t)) {
12657                     ret = -TARGET_EINVAL;
12658                     break;
12659                 }
12660 
12661                 target_set = lock_user(VERIFY_READ, arg5,
12662                                        sizeof(target_sigset_t), 1);
12663                 if (!target_set) {
12664                     ret = -TARGET_EFAULT;
12665                     break;
12666                 }
12667                 target_to_host_sigset(set, target_set);
12668                 unlock_user(target_set, arg5, 0);
12669             } else {
12670                 set = NULL;
12671             }
12672 
12673             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12674                                              set, SIGSET_T_SIZE));
12675             break;
12676         }
12677 #endif
12678 #if defined(TARGET_NR_epoll_wait)
12679         case TARGET_NR_epoll_wait:
12680             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12681                                              NULL, 0));
12682             break;
12683 #endif
12684         default:
12685             ret = -TARGET_ENOSYS;
12686         }
12687         if (!is_error(ret)) {
12688             int i;
12689             for (i = 0; i < ret; i++) {
12690                 target_ep[i].events = tswap32(ep[i].events);
12691                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12692             }
12693             unlock_user(target_ep, arg2,
12694                         ret * sizeof(struct target_epoll_event));
12695         } else {
12696             unlock_user(target_ep, arg2, 0);
12697         }
12698         g_free(ep);
12699         return ret;
12700     }
12701 #endif
12702 #endif
12703 #ifdef TARGET_NR_prlimit64
12704     case TARGET_NR_prlimit64:
12705     {
12706         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12707         struct target_rlimit64 *target_rnew, *target_rold;
12708         struct host_rlimit64 rnew, rold, *rnewp = 0;
12709         int resource = target_to_host_resource(arg2);
12710 
12711         if (arg3 && (resource != RLIMIT_AS &&
12712                      resource != RLIMIT_DATA &&
12713                      resource != RLIMIT_STACK)) {
12714             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12715                 return -TARGET_EFAULT;
12716             }
12717             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12718             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12719             unlock_user_struct(target_rnew, arg3, 0);
12720             rnewp = &rnew;
12721         }
12722 
12723         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12724         if (!is_error(ret) && arg4) {
12725             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12726                 return -TARGET_EFAULT;
12727             }
12728             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12729             target_rold->rlim_max = tswap64(rold.rlim_max);
12730             unlock_user_struct(target_rold, arg4, 1);
12731         }
12732         return ret;
12733     }
12734 #endif
12735 #ifdef TARGET_NR_gethostname
12736     case TARGET_NR_gethostname:
12737     {
12738         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12739         if (name) {
12740             ret = get_errno(gethostname(name, arg2));
12741             unlock_user(name, arg1, arg2);
12742         } else {
12743             ret = -TARGET_EFAULT;
12744         }
12745         return ret;
12746     }
12747 #endif
12748 #ifdef TARGET_NR_atomic_cmpxchg_32
12749     case TARGET_NR_atomic_cmpxchg_32:
12750     {
12751         /* should use start_exclusive from main.c */
12752         abi_ulong mem_value;
12753         if (get_user_u32(mem_value, arg6)) {
12754             target_siginfo_t info;
12755             info.si_signo = SIGSEGV;
12756             info.si_errno = 0;
12757             info.si_code = TARGET_SEGV_MAPERR;
12758             info._sifields._sigfault._addr = arg6;
12759             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12760                          QEMU_SI_FAULT, &info);
12761             ret = 0xdeadbeef;
12763         }
12764         if (mem_value == arg2)
12765             put_user_u32(arg1, arg6);
12766         return mem_value;
12767     }
12768 #endif
12769 #ifdef TARGET_NR_atomic_barrier
12770     case TARGET_NR_atomic_barrier:
12771         /* Like the kernel implementation and the
12772            qemu arm barrier, no-op this? */
12773         return 0;
12774 #endif
12775 
12776 #ifdef TARGET_NR_timer_create
12777     case TARGET_NR_timer_create:
12778     {
12779         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12780 
12781         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12782 
12783         int clkid = arg1;
12784         int timer_index = next_free_host_timer();
12785 
12786         if (timer_index < 0) {
12787             ret = -TARGET_EAGAIN;
12788         } else {
12789             timer_t *phtimer = g_posix_timers + timer_index;
12790 
12791             if (arg2) {
12792                 phost_sevp = &host_sevp;
12793                 ret = target_to_host_sigevent(phost_sevp, arg2);
12794                 if (ret != 0) {
12795                     return ret;
12796                 }
12797             }
12798 
12799             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12800             if (ret) {
12801                 phtimer = NULL;
12802             } else {
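                /*
                 * The id handed back to the guest encodes the slot index in
                 * g_posix_timers together with TIMER_MAGIC, so later timer_*
                 * calls can validate it in get_timer_id() and recover the
                 * host timer.
                 */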
12803                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12804                     return -TARGET_EFAULT;
12805                 }
12806             }
12807         }
12808         return ret;
12809     }
12810 #endif
12811 
12812 #ifdef TARGET_NR_timer_settime
12813     case TARGET_NR_timer_settime:
12814     {
12815         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12816          * struct itimerspec * old_value */
12817         target_timer_t timerid = get_timer_id(arg1);
12818 
12819         if (timerid < 0) {
12820             ret = timerid;
12821         } else if (arg3 == 0) {
12822             ret = -TARGET_EINVAL;
12823         } else {
12824             timer_t htimer = g_posix_timers[timerid];
12825             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12826 
12827             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12828                 return -TARGET_EFAULT;
12829             }
12830             ret = get_errno(
12831                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12832             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12833                 return -TARGET_EFAULT;
12834             }
12835         }
12836         return ret;
12837     }
12838 #endif
12839 
12840 #ifdef TARGET_NR_timer_settime64
12841     case TARGET_NR_timer_settime64:
12842     {
12843         target_timer_t timerid = get_timer_id(arg1);
12844 
12845         if (timerid < 0) {
12846             ret = timerid;
12847         } else if (arg3 == 0) {
12848             ret = -TARGET_EINVAL;
12849         } else {
12850             timer_t htimer = g_posix_timers[timerid];
12851             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12852 
12853             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12854                 return -TARGET_EFAULT;
12855             }
12856             ret = get_errno(
12857                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12858             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12859                 return -TARGET_EFAULT;
12860             }
12861         }
12862         return ret;
12863     }
12864 #endif
12865 
12866 #ifdef TARGET_NR_timer_gettime
12867     case TARGET_NR_timer_gettime:
12868     {
12869         /* args: timer_t timerid, struct itimerspec *curr_value */
12870         target_timer_t timerid = get_timer_id(arg1);
12871 
12872         if (timerid < 0) {
12873             ret = timerid;
12874         } else if (!arg2) {
12875             ret = -TARGET_EFAULT;
12876         } else {
12877             timer_t htimer = g_posix_timers[timerid];
12878             struct itimerspec hspec;
12879             ret = get_errno(timer_gettime(htimer, &hspec));
12880 
12881             if (host_to_target_itimerspec(arg2, &hspec)) {
12882                 ret = -TARGET_EFAULT;
12883             }
12884         }
12885         return ret;
12886     }
12887 #endif
12888 
12889 #ifdef TARGET_NR_timer_gettime64
12890     case TARGET_NR_timer_gettime64:
12891     {
12892         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12893         target_timer_t timerid = get_timer_id(arg1);
12894 
12895         if (timerid < 0) {
12896             ret = timerid;
12897         } else if (!arg2) {
12898             ret = -TARGET_EFAULT;
12899         } else {
12900             timer_t htimer = g_posix_timers[timerid];
12901             struct itimerspec hspec;
12902             ret = get_errno(timer_gettime(htimer, &hspec));
12903 
12904             if (host_to_target_itimerspec64(arg2, &hspec)) {
12905                 ret = -TARGET_EFAULT;
12906             }
12907         }
12908         return ret;
12909     }
12910 #endif
12911 
12912 #ifdef TARGET_NR_timer_getoverrun
12913     case TARGET_NR_timer_getoverrun:
12914     {
12915         /* args: timer_t timerid */
12916         target_timer_t timerid = get_timer_id(arg1);
12917 
12918         if (timerid < 0) {
12919             ret = timerid;
12920         } else {
12921             timer_t htimer = g_posix_timers[timerid];
12922             ret = get_errno(timer_getoverrun(htimer));
12923         }
12924         return ret;
12925     }
12926 #endif
12927 
12928 #ifdef TARGET_NR_timer_delete
12929     case TARGET_NR_timer_delete:
12930     {
12931         /* args: timer_t timerid */
12932         target_timer_t timerid = get_timer_id(arg1);
12933 
12934         if (timerid < 0) {
12935             ret = timerid;
12936         } else {
12937             timer_t htimer = g_posix_timers[timerid];
12938             ret = get_errno(timer_delete(htimer));
12939             g_posix_timers[timerid] = 0;
12940         }
12941         return ret;
12942     }
12943 #endif
12944 
12945 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12946     case TARGET_NR_timerfd_create:
12947         return get_errno(timerfd_create(arg1,
12948                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12949 #endif
12950 
12951 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12952     case TARGET_NR_timerfd_gettime:
12953         {
12954             struct itimerspec its_curr;
12955 
12956             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12957 
12958             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12959                 return -TARGET_EFAULT;
12960             }
12961         }
12962         return ret;
12963 #endif
12964 
12965 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12966     case TARGET_NR_timerfd_gettime64:
12967         {
12968             struct itimerspec its_curr;
12969 
12970             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12971 
12972             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12973                 return -TARGET_EFAULT;
12974             }
12975         }
12976         return ret;
12977 #endif
12978 
12979 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12980     case TARGET_NR_timerfd_settime:
12981         {
12982             struct itimerspec its_new, its_old, *p_new;
12983 
12984             if (arg3) {
12985                 if (target_to_host_itimerspec(&its_new, arg3)) {
12986                     return -TARGET_EFAULT;
12987                 }
12988                 p_new = &its_new;
12989             } else {
12990                 p_new = NULL;
12991             }
12992 
12993             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12994 
12995             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12996                 return -TARGET_EFAULT;
12997             }
12998         }
12999         return ret;
13000 #endif
13001 
13002 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13003     case TARGET_NR_timerfd_settime64:
13004         {
13005             struct itimerspec its_new, its_old, *p_new;
13006 
13007             if (arg3) {
13008                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13009                     return -TARGET_EFAULT;
13010                 }
13011                 p_new = &its_new;
13012             } else {
13013                 p_new = NULL;
13014             }
13015 
13016             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13017 
13018             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13019                 return -TARGET_EFAULT;
13020             }
13021         }
13022         return ret;
13023 #endif
13024 
13025 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13026     case TARGET_NR_ioprio_get:
13027         return get_errno(ioprio_get(arg1, arg2));
13028 #endif
13029 
13030 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13031     case TARGET_NR_ioprio_set:
13032         return get_errno(ioprio_set(arg1, arg2, arg3));
13033 #endif
13034 
13035 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13036     case TARGET_NR_setns:
13037         return get_errno(setns(arg1, arg2));
13038 #endif
13039 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13040     case TARGET_NR_unshare:
13041         return get_errno(unshare(arg1));
13042 #endif
13043 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13044     case TARGET_NR_kcmp:
13045         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13046 #endif
13047 #ifdef TARGET_NR_swapcontext
13048     case TARGET_NR_swapcontext:
13049         /* PowerPC specific.  */
13050         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13051 #endif
13052 #ifdef TARGET_NR_memfd_create
13053     case TARGET_NR_memfd_create:
13054         p = lock_user_string(arg1);
13055         if (!p) {
13056             return -TARGET_EFAULT;
13057         }
13058         ret = get_errno(memfd_create(p, arg2));
13059         fd_trans_unregister(ret);
13060         unlock_user(p, arg1, 0);
13061         return ret;
13062 #endif
13063 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13064     case TARGET_NR_membarrier:
13065         return get_errno(membarrier(arg1, arg2));
13066 #endif
13067 
13068     default:
13069         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13070         return -TARGET_ENOSYS;
13071     }
13072     return ret;
13073 }
13074 
13075 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13076                     abi_long arg2, abi_long arg3, abi_long arg4,
13077                     abi_long arg5, abi_long arg6, abi_long arg7,
13078                     abi_long arg8)
13079 {
13080     CPUState *cpu = env_cpu(cpu_env);
13081     abi_long ret;
13082 
13083 #ifdef DEBUG_ERESTARTSYS
13084     /* Debug-only code for exercising the syscall-restart code paths
13085      * in the per-architecture cpu main loops: restart every syscall
13086      * the guest makes once before letting it through.
13087      */
13088     {
13089         static bool flag;
13090         flag = !flag;
13091         if (flag) {
13092             return -TARGET_ERESTARTSYS;
13093         }
13094     }
13095 #endif
13096 
13097     record_syscall_start(cpu, num, arg1,
13098                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13099 
13100     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13101         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13102     }
13103 
13104     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13105                       arg5, arg6, arg7, arg8);
13106 
13107     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13108         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13109                           arg3, arg4, arg5, arg6);
13110     }
13111 
13112     record_syscall_return(cpu, num, ret);
13113     return ret;
13114 }
13115