xref: /qemu/linux-user/syscall.c (revision c5840b90)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77 
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84 
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112 
113 #include "qemu.h"
114 
115 #ifndef CLONE_IO
116 #define CLONE_IO                0x80000000      /* Clone io context */
117 #endif
118 
119 /* We can't directly call the host clone syscall, because this will
120  * badly confuse libc (breaking mutexes, for example). So we must
121  * divide clone flags into:
122  *  * flag combinations that look like pthread_create()
123  *  * flag combinations that look like fork()
124  *  * flags we can implement within QEMU itself
125  *  * flags we can't support and will return an error for
126  */
127 /* For thread creation, all these flags must be present; for
128  * fork, none must be present.
129  */
130 #define CLONE_THREAD_FLAGS                              \
131     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
132      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 
134 /* These flags are ignored:
135  * CLONE_DETACHED is now ignored by the kernel;
136  * CLONE_IO is just an optimisation hint to the I/O scheduler
137  */
138 #define CLONE_IGNORED_FLAGS                     \
139     (CLONE_DETACHED | CLONE_IO)
140 
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS               \
143     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
144      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
148     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
149      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 
151 #define CLONE_INVALID_FORK_FLAGS                                        \
152     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 
154 #define CLONE_INVALID_THREAD_FLAGS                                      \
155     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
156        CLONE_IGNORED_FLAGS))
157 
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159  * have almost all been allocated. We cannot support any of
160  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162  * The checks against the invalid thread masks above will catch these.
163  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
164  */
165 
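/* Illustrative sketch (not part of the original file): how a clone() request
 * can be classified with the masks above. The helper names are hypothetical;
 * the real checks are done inline in do_fork().
 */
static inline bool clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* every thread-creation flag present, and nothing we cannot support */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           !(flags & CLONE_INVALID_THREAD_FLAGS);
}

static inline bool clone_flags_look_like_fork(unsigned int flags)
{
    /* no thread-creation flags, and nothing outside the fork whitelist */
    return !(flags & CLONE_THREAD_FLAGS) &&
           !(flags & CLONE_INVALID_FORK_FLAGS);
}
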
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168  * once. This exercises the codepaths for restart.
169  */
170 //#define DEBUG_ERESTARTSYS
171 
172 //#include <linux/msdos_fs.h>
173 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
174 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
175 
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
183 
184 #define _syscall0(type,name)		\
185 static type name (void)			\
186 {					\
187 	return syscall(__NR_##name);	\
188 }
189 
190 #define _syscall1(type,name,type1,arg1)		\
191 static type name (type1 arg1)			\
192 {						\
193 	return syscall(__NR_##name, arg1);	\
194 }
195 
196 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
197 static type name (type1 arg1,type2 arg2)		\
198 {							\
199 	return syscall(__NR_##name, arg1, arg2);	\
200 }
201 
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
203 static type name (type1 arg1,type2 arg2,type3 arg3)		\
204 {								\
205 	return syscall(__NR_##name, arg1, arg2, arg3);		\
206 }
207 
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
210 {										\
211 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
212 }
213 
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
215 		  type5,arg5)							\
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
217 {										\
218 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
219 }
220 
221 
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
223 		  type5,arg5,type6,arg6)					\
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
225                   type6 arg6)							\
226 {										\
227 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
228 }
229 
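/* Illustrative note (not part of the original file): a declaration such as
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count);
 *
 * expands to a thin wrapper that traps directly to the host kernel, bypassing
 * any libc wrapper for that call:
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 */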
230 
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
247 
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
252 
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257    errno. */
258 static int gettid(void) {
259     return -ENOSYS;
260 }
261 #endif
262 
263 /* For the 64-bit guest on 32-bit host case we must emulate
264  * getdents using getdents64, because otherwise the host
265  * might hand us back more dirent records than we can fit
266  * into the guest buffer after structure format conversion.
267  * Otherwise we emulate the guest getdents with the host getdents if the host has it.
268  */
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
271 #endif
272 
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
275 #endif
276 #if (defined(TARGET_NR_getdents) && \
277       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
280 #endif
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
283           loff_t *, res, uint, wh);
284 #endif
285 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
286 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
287           siginfo_t *, uinfo)
288 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group,int,error_code)
291 #endif
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address,int *,tidptr)
294 #endif
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
297           const struct timespec *,timeout,int *,uaddr2,int,val3)
298 #endif
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
301           unsigned long *, user_mask_ptr);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
304           unsigned long *, user_mask_ptr);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
308           void *, arg);
309 _syscall2(int, capget, struct __user_cap_header_struct *, header,
310           struct __user_cap_data_struct *, data);
311 _syscall2(int, capset, struct __user_cap_header_struct *, header,
312           struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get, int, which, int, who)
315 #endif
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
318 #endif
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
321 #endif
322 
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
325           unsigned long, idx1, unsigned long, idx2)
326 #endif
327 
328 static bitmask_transtbl fcntl_flags_tbl[] = {
329   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
330   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
331   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
332   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
333   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
334   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
335   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
336   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
337   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
338   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
339   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
340   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
341   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
342 #if defined(O_DIRECT)
343   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
344 #endif
345 #if defined(O_NOATIME)
346   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
347 #endif
348 #if defined(O_CLOEXEC)
349   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
350 #endif
351 #if defined(O_PATH)
352   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
353 #endif
354 #if defined(O_TMPFILE)
355   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
356 #endif
357   /* Don't terminate the list prematurely on 64-bit host+guest.  */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
360 #endif
361   { 0, 0, 0, 0 }
362 };
363 
364 enum {
365     QEMU_IFLA_BR_UNSPEC,
366     QEMU_IFLA_BR_FORWARD_DELAY,
367     QEMU_IFLA_BR_HELLO_TIME,
368     QEMU_IFLA_BR_MAX_AGE,
369     QEMU_IFLA_BR_AGEING_TIME,
370     QEMU_IFLA_BR_STP_STATE,
371     QEMU_IFLA_BR_PRIORITY,
372     QEMU_IFLA_BR_VLAN_FILTERING,
373     QEMU_IFLA_BR_VLAN_PROTOCOL,
374     QEMU_IFLA_BR_GROUP_FWD_MASK,
375     QEMU_IFLA_BR_ROOT_ID,
376     QEMU_IFLA_BR_BRIDGE_ID,
377     QEMU_IFLA_BR_ROOT_PORT,
378     QEMU_IFLA_BR_ROOT_PATH_COST,
379     QEMU_IFLA_BR_TOPOLOGY_CHANGE,
380     QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
381     QEMU_IFLA_BR_HELLO_TIMER,
382     QEMU_IFLA_BR_TCN_TIMER,
383     QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
384     QEMU_IFLA_BR_GC_TIMER,
385     QEMU_IFLA_BR_GROUP_ADDR,
386     QEMU_IFLA_BR_FDB_FLUSH,
387     QEMU_IFLA_BR_MCAST_ROUTER,
388     QEMU_IFLA_BR_MCAST_SNOOPING,
389     QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
390     QEMU_IFLA_BR_MCAST_QUERIER,
391     QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
392     QEMU_IFLA_BR_MCAST_HASH_MAX,
393     QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
394     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
395     QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
396     QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
397     QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
398     QEMU_IFLA_BR_MCAST_QUERY_INTVL,
399     QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
400     QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
401     QEMU_IFLA_BR_NF_CALL_IPTABLES,
402     QEMU_IFLA_BR_NF_CALL_IP6TABLES,
403     QEMU_IFLA_BR_NF_CALL_ARPTABLES,
404     QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
405     QEMU_IFLA_BR_PAD,
406     QEMU_IFLA_BR_VLAN_STATS_ENABLED,
407     QEMU_IFLA_BR_MCAST_STATS_ENABLED,
408     QEMU_IFLA_BR_MCAST_IGMP_VERSION,
409     QEMU_IFLA_BR_MCAST_MLD_VERSION,
410     QEMU___IFLA_BR_MAX,
411 };
412 
413 enum {
414     QEMU_IFLA_UNSPEC,
415     QEMU_IFLA_ADDRESS,
416     QEMU_IFLA_BROADCAST,
417     QEMU_IFLA_IFNAME,
418     QEMU_IFLA_MTU,
419     QEMU_IFLA_LINK,
420     QEMU_IFLA_QDISC,
421     QEMU_IFLA_STATS,
422     QEMU_IFLA_COST,
423     QEMU_IFLA_PRIORITY,
424     QEMU_IFLA_MASTER,
425     QEMU_IFLA_WIRELESS,
426     QEMU_IFLA_PROTINFO,
427     QEMU_IFLA_TXQLEN,
428     QEMU_IFLA_MAP,
429     QEMU_IFLA_WEIGHT,
430     QEMU_IFLA_OPERSTATE,
431     QEMU_IFLA_LINKMODE,
432     QEMU_IFLA_LINKINFO,
433     QEMU_IFLA_NET_NS_PID,
434     QEMU_IFLA_IFALIAS,
435     QEMU_IFLA_NUM_VF,
436     QEMU_IFLA_VFINFO_LIST,
437     QEMU_IFLA_STATS64,
438     QEMU_IFLA_VF_PORTS,
439     QEMU_IFLA_PORT_SELF,
440     QEMU_IFLA_AF_SPEC,
441     QEMU_IFLA_GROUP,
442     QEMU_IFLA_NET_NS_FD,
443     QEMU_IFLA_EXT_MASK,
444     QEMU_IFLA_PROMISCUITY,
445     QEMU_IFLA_NUM_TX_QUEUES,
446     QEMU_IFLA_NUM_RX_QUEUES,
447     QEMU_IFLA_CARRIER,
448     QEMU_IFLA_PHYS_PORT_ID,
449     QEMU_IFLA_CARRIER_CHANGES,
450     QEMU_IFLA_PHYS_SWITCH_ID,
451     QEMU_IFLA_LINK_NETNSID,
452     QEMU_IFLA_PHYS_PORT_NAME,
453     QEMU_IFLA_PROTO_DOWN,
454     QEMU_IFLA_GSO_MAX_SEGS,
455     QEMU_IFLA_GSO_MAX_SIZE,
456     QEMU_IFLA_PAD,
457     QEMU_IFLA_XDP,
458     QEMU_IFLA_EVENT,
459     QEMU_IFLA_NEW_NETNSID,
460     QEMU_IFLA_IF_NETNSID,
461     QEMU_IFLA_CARRIER_UP_COUNT,
462     QEMU_IFLA_CARRIER_DOWN_COUNT,
463     QEMU_IFLA_NEW_IFINDEX,
464     QEMU___IFLA_MAX
465 };
466 
467 enum {
468     QEMU_IFLA_BRPORT_UNSPEC,
469     QEMU_IFLA_BRPORT_STATE,
470     QEMU_IFLA_BRPORT_PRIORITY,
471     QEMU_IFLA_BRPORT_COST,
472     QEMU_IFLA_BRPORT_MODE,
473     QEMU_IFLA_BRPORT_GUARD,
474     QEMU_IFLA_BRPORT_PROTECT,
475     QEMU_IFLA_BRPORT_FAST_LEAVE,
476     QEMU_IFLA_BRPORT_LEARNING,
477     QEMU_IFLA_BRPORT_UNICAST_FLOOD,
478     QEMU_IFLA_BRPORT_PROXYARP,
479     QEMU_IFLA_BRPORT_LEARNING_SYNC,
480     QEMU_IFLA_BRPORT_PROXYARP_WIFI,
481     QEMU_IFLA_BRPORT_ROOT_ID,
482     QEMU_IFLA_BRPORT_BRIDGE_ID,
483     QEMU_IFLA_BRPORT_DESIGNATED_PORT,
484     QEMU_IFLA_BRPORT_DESIGNATED_COST,
485     QEMU_IFLA_BRPORT_ID,
486     QEMU_IFLA_BRPORT_NO,
487     QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
488     QEMU_IFLA_BRPORT_CONFIG_PENDING,
489     QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
490     QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
491     QEMU_IFLA_BRPORT_HOLD_TIMER,
492     QEMU_IFLA_BRPORT_FLUSH,
493     QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
494     QEMU_IFLA_BRPORT_PAD,
495     QEMU_IFLA_BRPORT_MCAST_FLOOD,
496     QEMU_IFLA_BRPORT_MCAST_TO_UCAST,
497     QEMU_IFLA_BRPORT_VLAN_TUNNEL,
498     QEMU_IFLA_BRPORT_BCAST_FLOOD,
499     QEMU_IFLA_BRPORT_GROUP_FWD_MASK,
500     QEMU_IFLA_BRPORT_NEIGH_SUPPRESS,
501     QEMU___IFLA_BRPORT_MAX
502 };
503 
504 enum {
505     QEMU_IFLA_INFO_UNSPEC,
506     QEMU_IFLA_INFO_KIND,
507     QEMU_IFLA_INFO_DATA,
508     QEMU_IFLA_INFO_XSTATS,
509     QEMU_IFLA_INFO_SLAVE_KIND,
510     QEMU_IFLA_INFO_SLAVE_DATA,
511     QEMU___IFLA_INFO_MAX,
512 };
513 
514 enum {
515     QEMU_IFLA_INET_UNSPEC,
516     QEMU_IFLA_INET_CONF,
517     QEMU___IFLA_INET_MAX,
518 };
519 
520 enum {
521     QEMU_IFLA_INET6_UNSPEC,
522     QEMU_IFLA_INET6_FLAGS,
523     QEMU_IFLA_INET6_CONF,
524     QEMU_IFLA_INET6_STATS,
525     QEMU_IFLA_INET6_MCAST,
526     QEMU_IFLA_INET6_CACHEINFO,
527     QEMU_IFLA_INET6_ICMP6STATS,
528     QEMU_IFLA_INET6_TOKEN,
529     QEMU_IFLA_INET6_ADDR_GEN_MODE,
530     QEMU___IFLA_INET6_MAX
531 };
532 
533 enum {
534     QEMU_IFLA_XDP_UNSPEC,
535     QEMU_IFLA_XDP_FD,
536     QEMU_IFLA_XDP_ATTACHED,
537     QEMU_IFLA_XDP_FLAGS,
538     QEMU_IFLA_XDP_PROG_ID,
539     QEMU___IFLA_XDP_MAX,
540 };
541 
542 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
544 typedef struct TargetFdTrans {
545     TargetFdDataFunc host_to_target_data;
546     TargetFdDataFunc target_to_host_data;
547     TargetFdAddrFunc target_to_host_addr;
548 } TargetFdTrans;
549 
550 static TargetFdTrans **target_fd_trans;
551 
552 static unsigned int target_fd_max;
553 
554 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
555 {
556     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
557         return target_fd_trans[fd]->target_to_host_data;
558     }
559     return NULL;
560 }
561 
562 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
563 {
564     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
565         return target_fd_trans[fd]->host_to_target_data;
566     }
567     return NULL;
568 }
569 
570 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
571 {
572     if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
573         return target_fd_trans[fd]->target_to_host_addr;
574     }
575     return NULL;
576 }
577 
578 static void fd_trans_register(int fd, TargetFdTrans *trans)
579 {
580     unsigned int oldmax;
581 
582     if (fd >= target_fd_max) {
583         oldmax = target_fd_max;
584         target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
585         target_fd_trans = g_renew(TargetFdTrans *,
586                                   target_fd_trans, target_fd_max);
587         memset((void *)(target_fd_trans + oldmax), 0,
588                (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
589     }
590     target_fd_trans[fd] = trans;
591 }
592 
593 static void fd_trans_unregister(int fd)
594 {
595     if (fd >= 0 && fd < target_fd_max) {
596         target_fd_trans[fd] = NULL;
597     }
598 }
599 
600 static void fd_trans_dup(int oldfd, int newfd)
601 {
602     fd_trans_unregister(newfd);
603     if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
604         fd_trans_register(newfd, target_fd_trans[oldfd]);
605     }
606 }
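
/* Illustrative usage (not part of the original file): a subsystem that needs
 * per-fd data conversion registers a translator when the fd is created, and
 * the generic read()/recvmsg() paths consult the table afterwards. The
 * translator and callback names below are hypothetical.
 *
 *     static TargetFdTrans target_signalfd_trans = {
 *         .host_to_target_data = host_to_target_data_signalfd,
 *     };
 *     ...
 *     fd_trans_register(fd, &target_signalfd_trans);
 *     ...
 *     TargetFdDataFunc trans = fd_trans_host_to_target_data(fd);
 *     if (ret > 0 && trans) {
 *         ret = trans(host_buf, ret);
 *     }
 */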
607 
608 static int sys_getcwd1(char *buf, size_t size)
609 {
610   if (getcwd(buf, size) == NULL) {
611       /* getcwd() sets errno */
612       return (-1);
613   }
614   return strlen(buf)+1;
615 }
616 
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
621           const struct timespec *,tsp,int,flags)
622 #else
623 static int sys_utimensat(int dirfd, const char *pathname,
624                          const struct timespec times[2], int flags)
625 {
626     errno = ENOSYS;
627     return -1;
628 }
629 #endif
630 #endif /* TARGET_NR_utimensat */
631 
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
636           const char *, new, unsigned int, flags)
637 #else
638 static int sys_renameat2(int oldfd, const char *old,
639                          int newfd, const char *new, int flags)
640 {
641     if (flags == 0) {
642         return renameat(oldfd, old, newfd, new);
643     }
644     errno = ENOSYS;
645     return -1;
646 }
647 #endif
648 #endif /* TARGET_NR_renameat2 */
649 
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
652 
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
654 static int sys_inotify_init(void)
655 {
656   return (inotify_init());
657 }
658 #endif
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
660 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
661 {
662   return (inotify_add_watch(fd, pathname, mask));
663 }
664 #endif
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
666 static int sys_inotify_rm_watch(int fd, int32_t wd)
667 {
668   return (inotify_rm_watch(fd, wd));
669 }
670 #endif
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
673 static int sys_inotify_init1(int flags)
674 {
675   return (inotify_init1(flags));
676 }
677 #endif
678 #endif
679 #else
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY  */
686 
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
690 #endif
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be the one used by the underlying syscall */
693 struct host_rlimit64 {
694     uint64_t rlim_cur;
695     uint64_t rlim_max;
696 };
697 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
698           const struct host_rlimit64 *, new_limit,
699           struct host_rlimit64 *, old_limit)
700 #endif
701 
702 
703 #if defined(TARGET_NR_timer_create)
704 /* Maximum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers[32] = { 0, } ;
706 
707 static inline int next_free_host_timer(void)
708 {
709     int k ;
710     /* FIXME: Does finding the next free slot require a lock? */
711     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
712         if (g_posix_timers[k] == 0) {
713             g_posix_timers[k] = (timer_t) 1;
714             return k;
715         }
716     }
717     return -1;
718 }
719 #endif
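
/* Sketch (not part of the original file) of one way to resolve the FIXME in
 * next_free_host_timer() without a lock: claim the slot with an atomic
 * compare-and-swap so two guest threads can never both win index k. Uses the
 * GCC/Clang __sync builtin and is illustrative only.
 *
 *     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
 *         if (__sync_bool_compare_and_swap(&g_posix_timers[k],
 *                                          (timer_t)0, (timer_t)1)) {
 *             return k;
 *         }
 *     }
 *     return -1;
 */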
720 
721 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
722 #ifdef TARGET_ARM
723 static inline int regpairs_aligned(void *cpu_env, int num)
724 {
725     return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
726 }
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
731  * of registers which translates to the same as ARM/MIPS, because we start with
732  * r3 as arg1 */
733 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env, int num)
737 {
738     switch (num) {
739     case TARGET_NR_pread64:
740     case TARGET_NR_pwrite64:
741         return 1;
742 
743     default:
744         return 0;
745     }
746 }
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
749 #else
750 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
751 #endif
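
/* Illustrative note (not part of the original file): when regpairs_aligned()
 * returns 1, a 64-bit syscall argument starts in the next even register, so
 * the emulation skips one argument slot and then reassembles the value from
 * its two 32-bit halves, roughly:
 *
 *     if (regpairs_aligned(cpu_env, num)) {
 *         arg2 = arg3;            // shift the halves down by one slot
 *         arg3 = arg4;
 *     }
 *     offset = ((uint64_t)arg3 << 32) | (uint32_t)arg2;
 *
 * The half ordering shown assumes a little-endian pair; the real code picks
 * the order per target.
 */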
752 
753 #define ERRNO_TABLE_SIZE 1200
754 
755 /* target_to_host_errno_table[] is initialized from
756  * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
758 };
759 
760 /*
761  * This list is the union of errno values overridden in asm-<arch>/errno.h
762  * minus the errnos that are not actually generic to all archs.
763  */
764 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
765     [EAGAIN]		= TARGET_EAGAIN,
766     [EIDRM]		= TARGET_EIDRM,
767     [ECHRNG]		= TARGET_ECHRNG,
768     [EL2NSYNC]		= TARGET_EL2NSYNC,
769     [EL3HLT]		= TARGET_EL3HLT,
770     [EL3RST]		= TARGET_EL3RST,
771     [ELNRNG]		= TARGET_ELNRNG,
772     [EUNATCH]		= TARGET_EUNATCH,
773     [ENOCSI]		= TARGET_ENOCSI,
774     [EL2HLT]		= TARGET_EL2HLT,
775     [EDEADLK]		= TARGET_EDEADLK,
776     [ENOLCK]		= TARGET_ENOLCK,
777     [EBADE]		= TARGET_EBADE,
778     [EBADR]		= TARGET_EBADR,
779     [EXFULL]		= TARGET_EXFULL,
780     [ENOANO]		= TARGET_ENOANO,
781     [EBADRQC]		= TARGET_EBADRQC,
782     [EBADSLT]		= TARGET_EBADSLT,
783     [EBFONT]		= TARGET_EBFONT,
784     [ENOSTR]		= TARGET_ENOSTR,
785     [ENODATA]		= TARGET_ENODATA,
786     [ETIME]		= TARGET_ETIME,
787     [ENOSR]		= TARGET_ENOSR,
788     [ENONET]		= TARGET_ENONET,
789     [ENOPKG]		= TARGET_ENOPKG,
790     [EREMOTE]		= TARGET_EREMOTE,
791     [ENOLINK]		= TARGET_ENOLINK,
792     [EADV]		= TARGET_EADV,
793     [ESRMNT]		= TARGET_ESRMNT,
794     [ECOMM]		= TARGET_ECOMM,
795     [EPROTO]		= TARGET_EPROTO,
796     [EDOTDOT]		= TARGET_EDOTDOT,
797     [EMULTIHOP]		= TARGET_EMULTIHOP,
798     [EBADMSG]		= TARGET_EBADMSG,
799     [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
800     [EOVERFLOW]		= TARGET_EOVERFLOW,
801     [ENOTUNIQ]		= TARGET_ENOTUNIQ,
802     [EBADFD]		= TARGET_EBADFD,
803     [EREMCHG]		= TARGET_EREMCHG,
804     [ELIBACC]		= TARGET_ELIBACC,
805     [ELIBBAD]		= TARGET_ELIBBAD,
806     [ELIBSCN]		= TARGET_ELIBSCN,
807     [ELIBMAX]		= TARGET_ELIBMAX,
808     [ELIBEXEC]		= TARGET_ELIBEXEC,
809     [EILSEQ]		= TARGET_EILSEQ,
810     [ENOSYS]		= TARGET_ENOSYS,
811     [ELOOP]		= TARGET_ELOOP,
812     [ERESTART]		= TARGET_ERESTART,
813     [ESTRPIPE]		= TARGET_ESTRPIPE,
814     [ENOTEMPTY]		= TARGET_ENOTEMPTY,
815     [EUSERS]		= TARGET_EUSERS,
816     [ENOTSOCK]		= TARGET_ENOTSOCK,
817     [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
818     [EMSGSIZE]		= TARGET_EMSGSIZE,
819     [EPROTOTYPE]	= TARGET_EPROTOTYPE,
820     [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
821     [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
822     [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
823     [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
824     [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
825     [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
826     [EADDRINUSE]	= TARGET_EADDRINUSE,
827     [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
828     [ENETDOWN]		= TARGET_ENETDOWN,
829     [ENETUNREACH]	= TARGET_ENETUNREACH,
830     [ENETRESET]		= TARGET_ENETRESET,
831     [ECONNABORTED]	= TARGET_ECONNABORTED,
832     [ECONNRESET]	= TARGET_ECONNRESET,
833     [ENOBUFS]		= TARGET_ENOBUFS,
834     [EISCONN]		= TARGET_EISCONN,
835     [ENOTCONN]		= TARGET_ENOTCONN,
836     [EUCLEAN]		= TARGET_EUCLEAN,
837     [ENOTNAM]		= TARGET_ENOTNAM,
838     [ENAVAIL]		= TARGET_ENAVAIL,
839     [EISNAM]		= TARGET_EISNAM,
840     [EREMOTEIO]		= TARGET_EREMOTEIO,
841     [EDQUOT]            = TARGET_EDQUOT,
842     [ESHUTDOWN]		= TARGET_ESHUTDOWN,
843     [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
844     [ETIMEDOUT]		= TARGET_ETIMEDOUT,
845     [ECONNREFUSED]	= TARGET_ECONNREFUSED,
846     [EHOSTDOWN]		= TARGET_EHOSTDOWN,
847     [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
848     [EALREADY]		= TARGET_EALREADY,
849     [EINPROGRESS]	= TARGET_EINPROGRESS,
850     [ESTALE]		= TARGET_ESTALE,
851     [ECANCELED]		= TARGET_ECANCELED,
852     [ENOMEDIUM]		= TARGET_ENOMEDIUM,
853     [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
854 #ifdef ENOKEY
855     [ENOKEY]		= TARGET_ENOKEY,
856 #endif
857 #ifdef EKEYEXPIRED
858     [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
859 #endif
860 #ifdef EKEYREVOKED
861     [EKEYREVOKED]	= TARGET_EKEYREVOKED,
862 #endif
863 #ifdef EKEYREJECTED
864     [EKEYREJECTED]	= TARGET_EKEYREJECTED,
865 #endif
866 #ifdef EOWNERDEAD
867     [EOWNERDEAD]	= TARGET_EOWNERDEAD,
868 #endif
869 #ifdef ENOTRECOVERABLE
870     [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
871 #endif
872 #ifdef ENOMSG
873     [ENOMSG]            = TARGET_ENOMSG,
874 #endif
875 #ifdef ERFKILL
876     [ERFKILL]           = TARGET_ERFKILL,
877 #endif
878 #ifdef EHWPOISON
879     [EHWPOISON]         = TARGET_EHWPOISON,
880 #endif
881 };
882 
883 static inline int host_to_target_errno(int err)
884 {
885     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
886         host_to_target_errno_table[err]) {
887         return host_to_target_errno_table[err];
888     }
889     return err;
890 }
891 
892 static inline int target_to_host_errno(int err)
893 {
894     if (err >= 0 && err < ERRNO_TABLE_SIZE &&
895         target_to_host_errno_table[err]) {
896         return target_to_host_errno_table[err];
897     }
898     return err;
899 }
900 
901 static inline abi_long get_errno(abi_long ret)
902 {
903     if (ret == -1)
904         return -host_to_target_errno(errno);
905     else
906         return ret;
907 }
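
/* Illustrative usage (not part of the original file): host results are pushed
 * through get_errno() so a failing call comes back as a negative target errno,
 * e.g.
 *
 *     ret = get_errno(chdir(p));      // -1 with EACCES becomes -TARGET_EACCES
 *     if (is_error(ret)) {
 *         ...
 *     }
 */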
908 
909 const char *target_strerror(int err)
910 {
911     if (err == TARGET_ERESTARTSYS) {
912         return "To be restarted";
913     }
914     if (err == TARGET_QEMU_ESIGRETURN) {
915         return "Successful exit from sigreturn";
916     }
917 
918     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
919         return NULL;
920     }
921     return strerror(target_to_host_errno(err));
922 }
923 
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
926 { \
927     return safe_syscall(__NR_##name); \
928 }
929 
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
932 { \
933     return safe_syscall(__NR_##name, arg1); \
934 }
935 
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
938 { \
939     return safe_syscall(__NR_##name, arg1, arg2); \
940 }
941 
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
944 { \
945     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
946 }
947 
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
949     type4, arg4) \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
951 { \
952     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
953 }
954 
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956     type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
958     type5 arg5) \
959 { \
960     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
961 }
962 
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964     type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966     type5 arg5, type6 arg6) \
967 { \
968     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
969 }
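
/* Illustrative note (not part of the original file): the main syscall switch
 * uses these safe_* wrappers instead of the plain libc calls so that a guest
 * signal arriving during a blocking call interrupts it and lets the syscall
 * be restarted cleanly, e.g.
 *
 *     case TARGET_NR_read:
 *         ...
 *         ret = get_errno(safe_read(arg1, p, arg3));
 *         break;
 */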
970 
971 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
972 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
973 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
974               int, flags, mode_t, mode)
975 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
976               struct rusage *, rusage)
977 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
978               int, options, struct rusage *, rusage)
979 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
980 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
981               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
982 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
983               struct timespec *, tsp, const sigset_t *, sigmask,
984               size_t, sigsetsize)
985 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
986               int, maxevents, int, timeout, const sigset_t *, sigmask,
987               size_t, sigsetsize)
988 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
989               const struct timespec *,timeout,int *,uaddr2,int,val3)
990 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
991 safe_syscall2(int, kill, pid_t, pid, int, sig)
992 safe_syscall2(int, tkill, int, tid, int, sig)
993 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
994 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
995 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
996 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
997               unsigned long, pos_l, unsigned long, pos_h)
998 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
999               unsigned long, pos_l, unsigned long, pos_h)
1000 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
1001               socklen_t, addrlen)
1002 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
1003               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
1004 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
1005               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
1006 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
1007 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
1008 safe_syscall2(int, flock, int, fd, int, operation)
1009 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
1010               const struct timespec *, uts, size_t, sigsetsize)
1011 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
1012               int, flags)
1013 safe_syscall2(int, nanosleep, const struct timespec *, req,
1014               struct timespec *, rem)
1015 #ifdef TARGET_NR_clock_nanosleep
1016 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
1017               const struct timespec *, req, struct timespec *, rem)
1018 #endif
1019 #ifdef __NR_msgsnd
1020 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
1021               int, flags)
1022 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
1023               long, msgtype, int, flags)
1024 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
1025               unsigned, nsops, const struct timespec *, timeout)
1026 #else
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028  * wrappers for the sub-operations to hide this implementation detail.
1029  * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030  * for the call parameter because some structs in there conflict with the
1031  * sys/ipc.h ones. So we just define them here, and rely on them being
1032  * the same for all host architectures.
1033  */
1034 #define Q_SEMTIMEDOP 4
1035 #define Q_MSGSND 11
1036 #define Q_MSGRCV 12
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1038 
1039 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1040               void *, ptr, long, fifth)
1041 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1042 {
1043     return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1044 }
1045 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1046 {
1047     return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1048 }
1049 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1050                            const struct timespec *timeout)
1051 {
1052     return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1053                     (long)timeout);
1054 }
1055 #endif
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1058               size_t, len, unsigned, prio, const struct timespec *, timeout)
1059 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1060               size_t, len, unsigned *, prio, const struct timespec *, timeout)
1061 #endif
1062 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1063  * "third argument might be integer or pointer or not present" behaviour of
1064  * the libc function.
1065  */
1066 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069  *  use the flock64 struct rather than unsuffixed flock
1070  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1071  */
1072 #ifdef __NR_fcntl64
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1074 #else
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1076 #endif
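
/* Illustrative usage (not part of the original file), following the rules in
 * the note above: callers always pass the 64-bit command constants and a
 * struct flock64 so 32-bit and 64-bit hosts take the same path, e.g.
 *
 *     struct flock64 fl64;
 *     ...fill fl64 from the guest's lock description...
 *     ret = get_errno(safe_fcntl(fd, F_SETLKW64, &fl64));
 */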
1077 
1078 static inline int host_to_target_sock_type(int host_type)
1079 {
1080     int target_type;
1081 
1082     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1083     case SOCK_DGRAM:
1084         target_type = TARGET_SOCK_DGRAM;
1085         break;
1086     case SOCK_STREAM:
1087         target_type = TARGET_SOCK_STREAM;
1088         break;
1089     default:
1090         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1091         break;
1092     }
1093 
1094 #if defined(SOCK_CLOEXEC)
1095     if (host_type & SOCK_CLOEXEC) {
1096         target_type |= TARGET_SOCK_CLOEXEC;
1097     }
1098 #endif
1099 
1100 #if defined(SOCK_NONBLOCK)
1101     if (host_type & SOCK_NONBLOCK) {
1102         target_type |= TARGET_SOCK_NONBLOCK;
1103     }
1104 #endif
1105 
1106     return target_type;
1107 }
1108 
1109 static abi_ulong target_brk;
1110 static abi_ulong target_original_brk;
1111 static abi_ulong brk_page;
1112 
1113 void target_set_brk(abi_ulong new_brk)
1114 {
1115     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1116     brk_page = HOST_PAGE_ALIGN(target_brk);
1117 }
1118 
1119 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1120 #define DEBUGF_BRK(message, args...)
1121 
1122 /* do_brk() must return target values and target errnos. */
1123 abi_long do_brk(abi_ulong new_brk)
1124 {
1125     abi_long mapped_addr;
1126     abi_ulong new_alloc_size;
1127 
1128     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1129 
1130     if (!new_brk) {
1131         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1132         return target_brk;
1133     }
1134     if (new_brk < target_original_brk) {
1135         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1136                    target_brk);
1137         return target_brk;
1138     }
1139 
1140     /* If the new brk is less than the highest page reserved to the
1141      * target heap allocation, set it and we're almost done...  */
1142     if (new_brk <= brk_page) {
1143         /* Heap contents are initialized to zero, as for anonymous
1144          * mapped pages.  */
1145         if (new_brk > target_brk) {
1146             memset(g2h(target_brk), 0, new_brk - target_brk);
1147         }
1148         target_brk = new_brk;
1149         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1150         return target_brk;
1151     }
1152 
1153     /* We need to allocate more memory after the brk... Note that
1154      * we don't use MAP_FIXED because that will map over the top of
1155      * any existing mapping (like the one with the host libc or qemu
1156      * itself); instead we treat "mapped but at wrong address" as
1157      * a failure and unmap again.
1158      */
1159     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1160     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1161                                         PROT_READ|PROT_WRITE,
1162                                         MAP_ANON|MAP_PRIVATE, 0, 0));
1163 
1164     if (mapped_addr == brk_page) {
1165         /* Heap contents are initialized to zero, as for anonymous
1166          * mapped pages.  Technically the new pages are already
1167          * initialized to zero since they *are* anonymous mapped
1168          * pages, however we have to take care with the contents that
1169          * come from the remaining part of the previous page: it may
1170          * contain garbage data due to a previous heap usage (grown
1171          * then shrunk).  */
1172         memset(g2h(target_brk), 0, brk_page - target_brk);
1173 
1174         target_brk = new_brk;
1175         brk_page = HOST_PAGE_ALIGN(target_brk);
1176         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1177             target_brk);
1178         return target_brk;
1179     } else if (mapped_addr != -1) {
1180         /* Mapped but at wrong address, meaning there wasn't actually
1181          * enough space for this brk.
1182          */
1183         target_munmap(mapped_addr, new_alloc_size);
1184         mapped_addr = -1;
1185         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1186     }
1187     else {
1188         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1189     }
1190 
1191 #if defined(TARGET_ALPHA)
1192     /* We (partially) emulate OSF/1 on Alpha, which requires we
1193        return a proper errno, not an unchanged brk value.  */
1194     return -TARGET_ENOMEM;
1195 #endif
1196     /* For everything else, return the previous break. */
1197     return target_brk;
1198 }
1199 
1200 static inline abi_long copy_from_user_fdset(fd_set *fds,
1201                                             abi_ulong target_fds_addr,
1202                                             int n)
1203 {
1204     int i, nw, j, k;
1205     abi_ulong b, *target_fds;
1206 
1207     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1208     if (!(target_fds = lock_user(VERIFY_READ,
1209                                  target_fds_addr,
1210                                  sizeof(abi_ulong) * nw,
1211                                  1)))
1212         return -TARGET_EFAULT;
1213 
1214     FD_ZERO(fds);
1215     k = 0;
1216     for (i = 0; i < nw; i++) {
1217         /* grab the abi_ulong */
1218         __get_user(b, &target_fds[i]);
1219         for (j = 0; j < TARGET_ABI_BITS; j++) {
1220             /* check the bit inside the abi_ulong */
1221             if ((b >> j) & 1)
1222                 FD_SET(k, fds);
1223             k++;
1224         }
1225     }
1226 
1227     unlock_user(target_fds, target_fds_addr, 0);
1228 
1229     return 0;
1230 }
1231 
1232 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1233                                                  abi_ulong target_fds_addr,
1234                                                  int n)
1235 {
1236     if (target_fds_addr) {
1237         if (copy_from_user_fdset(fds, target_fds_addr, n))
1238             return -TARGET_EFAULT;
1239         *fds_ptr = fds;
1240     } else {
1241         *fds_ptr = NULL;
1242     }
1243     return 0;
1244 }
1245 
1246 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1247                                           const fd_set *fds,
1248                                           int n)
1249 {
1250     int i, nw, j, k;
1251     abi_long v;
1252     abi_ulong *target_fds;
1253 
1254     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1255     if (!(target_fds = lock_user(VERIFY_WRITE,
1256                                  target_fds_addr,
1257                                  sizeof(abi_ulong) * nw,
1258                                  0)))
1259         return -TARGET_EFAULT;
1260 
1261     k = 0;
1262     for (i = 0; i < nw; i++) {
1263         v = 0;
1264         for (j = 0; j < TARGET_ABI_BITS; j++) {
1265             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1266             k++;
1267         }
1268         __put_user(v, &target_fds[i]);
1269     }
1270 
1271     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1272 
1273     return 0;
1274 }
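
/* Worked example (not part of the original file): with TARGET_ABI_BITS == 32,
 * guest fd 70 lives in target_fds[70 / 32] == target_fds[2] at bit
 * 70 % 32 == 6. The loops above walk words and bits in exactly that order and
 * rely on __get_user()/__put_user() to byte-swap each abi_ulong when guest
 * and host endianness differ.
 */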
1275 
1276 #if defined(__alpha__)
1277 #define HOST_HZ 1024
1278 #else
1279 #define HOST_HZ 100
1280 #endif
1281 
1282 static inline abi_long host_to_target_clock_t(long ticks)
1283 {
1284 #if HOST_HZ == TARGET_HZ
1285     return ticks;
1286 #else
1287     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1288 #endif
1289 }
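
/* Worked example (not part of the original file): on an Alpha host
 * (HOST_HZ == 1024) emulating a 100 Hz target, 2048 host ticks are reported
 * to the guest as 2048 * 100 / 1024 == 200 target ticks.
 */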
1290 
1291 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1292                                              const struct rusage *rusage)
1293 {
1294     struct target_rusage *target_rusage;
1295 
1296     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1297         return -TARGET_EFAULT;
1298     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1299     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1300     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1301     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1302     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1303     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1304     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1305     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1306     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1307     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1308     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1309     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1310     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1311     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1312     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1313     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1314     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1315     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1316     unlock_user_struct(target_rusage, target_addr, 1);
1317 
1318     return 0;
1319 }
1320 
1321 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1322 {
1323     abi_ulong target_rlim_swap;
1324     rlim_t result;
1325 
1326     target_rlim_swap = tswapal(target_rlim);
1327     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1328         return RLIM_INFINITY;
1329 
1330     result = target_rlim_swap;
1331     if (target_rlim_swap != (rlim_t)result)
1332         return RLIM_INFINITY;
1333 
1334     return result;
1335 }
1336 
1337 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1338 {
1339     abi_ulong target_rlim_swap;
1340     abi_ulong result;
1341 
1342     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1343         target_rlim_swap = TARGET_RLIM_INFINITY;
1344     else
1345         target_rlim_swap = rlim;
1346     result = tswapal(target_rlim_swap);
1347 
1348     return result;
1349 }
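
/* Worked example (not part of the original file): on a 32-bit target a host
 * rlim_t of 8 GiB does not fit in an abi_ulong, so host_to_target_rlim()
 * reports TARGET_RLIM_INFINITY rather than silently truncating the value.
 */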
1350 
1351 static inline int target_to_host_resource(int code)
1352 {
1353     switch (code) {
1354     case TARGET_RLIMIT_AS:
1355         return RLIMIT_AS;
1356     case TARGET_RLIMIT_CORE:
1357         return RLIMIT_CORE;
1358     case TARGET_RLIMIT_CPU:
1359         return RLIMIT_CPU;
1360     case TARGET_RLIMIT_DATA:
1361         return RLIMIT_DATA;
1362     case TARGET_RLIMIT_FSIZE:
1363         return RLIMIT_FSIZE;
1364     case TARGET_RLIMIT_LOCKS:
1365         return RLIMIT_LOCKS;
1366     case TARGET_RLIMIT_MEMLOCK:
1367         return RLIMIT_MEMLOCK;
1368     case TARGET_RLIMIT_MSGQUEUE:
1369         return RLIMIT_MSGQUEUE;
1370     case TARGET_RLIMIT_NICE:
1371         return RLIMIT_NICE;
1372     case TARGET_RLIMIT_NOFILE:
1373         return RLIMIT_NOFILE;
1374     case TARGET_RLIMIT_NPROC:
1375         return RLIMIT_NPROC;
1376     case TARGET_RLIMIT_RSS:
1377         return RLIMIT_RSS;
1378     case TARGET_RLIMIT_RTPRIO:
1379         return RLIMIT_RTPRIO;
1380     case TARGET_RLIMIT_SIGPENDING:
1381         return RLIMIT_SIGPENDING;
1382     case TARGET_RLIMIT_STACK:
1383         return RLIMIT_STACK;
1384     default:
1385         return code;
1386     }
1387 }
1388 
1389 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1390                                               abi_ulong target_tv_addr)
1391 {
1392     struct target_timeval *target_tv;
1393 
1394     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1395         return -TARGET_EFAULT;
1396 
1397     __get_user(tv->tv_sec, &target_tv->tv_sec);
1398     __get_user(tv->tv_usec, &target_tv->tv_usec);
1399 
1400     unlock_user_struct(target_tv, target_tv_addr, 0);
1401 
1402     return 0;
1403 }
1404 
1405 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1406                                             const struct timeval *tv)
1407 {
1408     struct target_timeval *target_tv;
1409 
1410     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1411         return -TARGET_EFAULT;
1412 
1413     __put_user(tv->tv_sec, &target_tv->tv_sec);
1414     __put_user(tv->tv_usec, &target_tv->tv_usec);
1415 
1416     unlock_user_struct(target_tv, target_tv_addr, 1);
1417 
1418     return 0;
1419 }
1420 
1421 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1422                                                abi_ulong target_tz_addr)
1423 {
1424     struct target_timezone *target_tz;
1425 
1426     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1427         return -TARGET_EFAULT;
1428     }
1429 
1430     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1431     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1432 
1433     unlock_user_struct(target_tz, target_tz_addr, 0);
1434 
1435     return 0;
1436 }
1437 
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1439 #include <mqueue.h>
1440 
1441 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1442                                               abi_ulong target_mq_attr_addr)
1443 {
1444     struct target_mq_attr *target_mq_attr;
1445 
1446     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1447                           target_mq_attr_addr, 1))
1448         return -TARGET_EFAULT;
1449 
1450     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1451     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1452     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1453     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1454 
1455     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1456 
1457     return 0;
1458 }
1459 
1460 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1461                                             const struct mq_attr *attr)
1462 {
1463     struct target_mq_attr *target_mq_attr;
1464 
1465     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1466                           target_mq_attr_addr, 0))
1467         return -TARGET_EFAULT;
1468 
1469     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1470     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1471     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1472     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1473 
1474     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1475 
1476     return 0;
1477 }
1478 #endif
1479 
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1481 /* do_select() must return target values and target errnos. */
1482 static abi_long do_select(int n,
1483                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1484                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1485 {
1486     fd_set rfds, wfds, efds;
1487     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1488     struct timeval tv;
1489     struct timespec ts, *ts_ptr;
1490     abi_long ret;
1491 
1492     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1493     if (ret) {
1494         return ret;
1495     }
1496     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1497     if (ret) {
1498         return ret;
1499     }
1500     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1501     if (ret) {
1502         return ret;
1503     }
1504 
1505     if (target_tv_addr) {
1506         if (copy_from_user_timeval(&tv, target_tv_addr))
1507             return -TARGET_EFAULT;
1508         ts.tv_sec = tv.tv_sec;
1509         ts.tv_nsec = tv.tv_usec * 1000;
1510         ts_ptr = &ts;
1511     } else {
1512         ts_ptr = NULL;
1513     }
1514 
1515     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1516                                   ts_ptr, NULL));
1517 
1518     if (!is_error(ret)) {
1519         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1520             return -TARGET_EFAULT;
1521         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1522             return -TARGET_EFAULT;
1523         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1524             return -TARGET_EFAULT;
1525 
1526         if (target_tv_addr) {
1527             tv.tv_sec = ts.tv_sec;
1528             tv.tv_usec = ts.tv_nsec / 1000;
1529             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1530                 return -TARGET_EFAULT;
1531             }
1532         }
1533     }
1534 
1535     return ret;
1536 }
1537 
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long do_old_select(abi_ulong arg1)
1540 {
1541     struct target_sel_arg_struct *sel;
1542     abi_ulong inp, outp, exp, tvp;
1543     long nsel;
1544 
1545     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1546         return -TARGET_EFAULT;
1547     }
1548 
1549     nsel = tswapal(sel->n);
1550     inp = tswapal(sel->inp);
1551     outp = tswapal(sel->outp);
1552     exp = tswapal(sel->exp);
1553     tvp = tswapal(sel->tvp);
1554 
1555     unlock_user_struct(sel, arg1, 0);
1556 
1557     return do_select(nsel, inp, outp, exp, tvp);
1558 }
1559 #endif
1560 #endif
1561 
1562 static abi_long do_pipe2(int host_pipe[], int flags)
1563 {
1564 #ifdef CONFIG_PIPE2
1565     return pipe2(host_pipe, flags);
1566 #else
1567     return -ENOSYS;
1568 #endif
1569 }
1570 
1571 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1572                         int flags, int is_pipe2)
1573 {
1574     int host_pipe[2];
1575     abi_long ret;
1576     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1577 
1578     if (is_error(ret))
1579         return get_errno(ret);
1580 
1581     /* Several targets have special calling conventions for the original
1582        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1583     if (!is_pipe2) {
1584 #if defined(TARGET_ALPHA)
1585         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1586         return host_pipe[0];
1587 #elif defined(TARGET_MIPS)
1588         ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1589         return host_pipe[0];
1590 #elif defined(TARGET_SH4)
1591         ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
1592         return host_pipe[0];
1593 #elif defined(TARGET_SPARC)
1594         ((CPUSPARCState *)cpu_env)->regwptr[1] = host_pipe[1];
1595         return host_pipe[0];
1596 #endif
1597     }
1598 
1599     if (put_user_s32(host_pipe[0], pipedes)
1600         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1601         return -TARGET_EFAULT;
1602     return get_errno(ret);
1603 }
1604 
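/* Convert a guest ip_mreq/ip_mreqn multicast request into a host
 * struct ip_mreqn; imr_ifindex is only present (and byteswapped) when
 * the guest supplied the larger ip_mreqn layout.
 */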
1605 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1606                                               abi_ulong target_addr,
1607                                               socklen_t len)
1608 {
1609     struct target_ip_mreqn *target_smreqn;
1610 
1611     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1612     if (!target_smreqn)
1613         return -TARGET_EFAULT;
1614     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1615     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1616     if (len == sizeof(struct target_ip_mreqn))
1617         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1618     unlock_user(target_smreqn, target_addr, 0);
1619 
1620     return 0;
1621 }
1622 
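/* Copy a guest sockaddr into host form. An fd-specific translator, if
 * registered, takes precedence; otherwise the address family and the
 * family-specific fields (AF_NETLINK, AF_PACKET) are byteswapped here,
 * and over-short AF_UNIX sun_path lengths are fixed up below.
 */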
1623 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1624                                                abi_ulong target_addr,
1625                                                socklen_t len)
1626 {
1627     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1628     sa_family_t sa_family;
1629     struct target_sockaddr *target_saddr;
1630 
1631     if (fd_trans_target_to_host_addr(fd)) {
1632         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1633     }
1634 
1635     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1636     if (!target_saddr)
1637         return -TARGET_EFAULT;
1638 
1639     sa_family = tswap16(target_saddr->sa_family);
1640 
1641     /* Oops. The caller might send an incomplete sun_path; sun_path
1642      * must be terminated by \0 (see the manual page), but
1643      * unfortunately it is quite common to specify sockaddr_un
1644      * length as "strlen(x->sun_path)" while it should be
1645      * "strlen(...) + 1". We'll fix that here if needed.
1646      * The Linux kernel has a similar feature.
1647      */
1648 
1649     if (sa_family == AF_UNIX) {
1650         if (len < unix_maxlen && len > 0) {
1651             char *cp = (char *)target_saddr;
1652 
1653             if (cp[len - 1] && !cp[len])
1654                 len++;
1655         }
1656         if (len > unix_maxlen)
1657             len = unix_maxlen;
1658     }
1659 
1660     memcpy(addr, target_saddr, len);
1661     addr->sa_family = sa_family;
1662     if (sa_family == AF_NETLINK) {
1663         struct sockaddr_nl *nladdr;
1664 
1665         nladdr = (struct sockaddr_nl *)addr;
1666         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1667         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1668     } else if (sa_family == AF_PACKET) {
1669         struct target_sockaddr_ll *lladdr;
1670 
1671         lladdr = (struct target_sockaddr_ll *)addr;
1672         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1673         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1674     }
1675     unlock_user(target_saddr, target_addr, 0);
1676 
1677     return 0;
1678 }
1679 
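/* Copy a host sockaddr back to guest memory, byteswapping sa_family
 * (when there is room for it) and the family-specific fields of
 * AF_NETLINK, AF_PACKET and AF_INET6 addresses.
 */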
1680 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1681                                                struct sockaddr *addr,
1682                                                socklen_t len)
1683 {
1684     struct target_sockaddr *target_saddr;
1685 
1686     if (len == 0) {
1687         return 0;
1688     }
1689     assert(addr);
1690 
1691     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1692     if (!target_saddr)
1693         return -TARGET_EFAULT;
1694     memcpy(target_saddr, addr, len);
1695     if (len >= offsetof(struct target_sockaddr, sa_family) +
1696         sizeof(target_saddr->sa_family)) {
1697         target_saddr->sa_family = tswap16(addr->sa_family);
1698     }
1699     if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1700         struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1701         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1702         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1703     } else if (addr->sa_family == AF_PACKET) {
1704         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1705         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1706         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1707     } else if (addr->sa_family == AF_INET6 &&
1708                len >= sizeof(struct target_sockaddr_in6)) {
1709         struct target_sockaddr_in6 *target_in6 =
1710                (struct target_sockaddr_in6 *)target_saddr;
1711         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1712     }
1713     unlock_user(target_saddr, target_addr, len);
1714 
1715     return 0;
1716 }
1717 
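/* Convert guest ancillary data (control messages) into the host
 * msghdr: SCM_RIGHTS file descriptors and SCM_CREDENTIALS are
 * converted field by field, anything else is copied verbatim with a
 * warning.
 */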
1718 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1719                                            struct target_msghdr *target_msgh)
1720 {
1721     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1722     abi_long msg_controllen;
1723     abi_ulong target_cmsg_addr;
1724     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1725     socklen_t space = 0;
1726 
1727     msg_controllen = tswapal(target_msgh->msg_controllen);
1728     if (msg_controllen < sizeof (struct target_cmsghdr))
1729         goto the_end;
1730     target_cmsg_addr = tswapal(target_msgh->msg_control);
1731     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1732     target_cmsg_start = target_cmsg;
1733     if (!target_cmsg)
1734         return -TARGET_EFAULT;
1735 
1736     while (cmsg && target_cmsg) {
1737         void *data = CMSG_DATA(cmsg);
1738         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1739 
1740         int len = tswapal(target_cmsg->cmsg_len)
1741             - sizeof(struct target_cmsghdr);
1742 
1743         space += CMSG_SPACE(len);
1744         if (space > msgh->msg_controllen) {
1745             space -= CMSG_SPACE(len);
1746             /* This is a QEMU bug, since we allocated the payload
1747              * area ourselves (unlike overflow in host-to-target
1748              * conversion, which is just the guest giving us a buffer
1749              * that's too small). It can't happen for the payload types
1750              * we currently support; if it becomes an issue in future
1751              * we would need to improve our allocation strategy to
1752              * something more intelligent than "twice the size of the
1753              * target buffer we're reading from".
1754              */
1755             gemu_log("Host cmsg overflow\n");
1756             break;
1757         }
1758 
1759         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1760             cmsg->cmsg_level = SOL_SOCKET;
1761         } else {
1762             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1763         }
1764         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1765         cmsg->cmsg_len = CMSG_LEN(len);
1766 
1767         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1768             int *fd = (int *)data;
1769             int *target_fd = (int *)target_data;
1770             int i, numfds = len / sizeof(int);
1771 
1772             for (i = 0; i < numfds; i++) {
1773                 __get_user(fd[i], target_fd + i);
1774             }
1775         } else if (cmsg->cmsg_level == SOL_SOCKET
1776                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1777             struct ucred *cred = (struct ucred *)data;
1778             struct target_ucred *target_cred =
1779                 (struct target_ucred *)target_data;
1780 
1781             __get_user(cred->pid, &target_cred->pid);
1782             __get_user(cred->uid, &target_cred->uid);
1783             __get_user(cred->gid, &target_cred->gid);
1784         } else {
1785             gemu_log("Unsupported ancillary data: %d/%d\n",
1786                                         cmsg->cmsg_level, cmsg->cmsg_type);
1787             memcpy(data, target_data, len);
1788         }
1789 
1790         cmsg = CMSG_NXTHDR(msgh, cmsg);
1791         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1792                                          target_cmsg_start);
1793     }
1794     unlock_user(target_cmsg, target_cmsg_addr, 0);
1795  the_end:
1796     msgh->msg_controllen = space;
1797     return 0;
1798 }
1799 
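/* Convert host ancillary data back into the guest msghdr, adjusting
 * payload sizes where target and host layouts differ (e.g.
 * SO_TIMESTAMP) and reporting truncation to the guest via MSG_CTRUNC.
 */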
1800 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1801                                            struct msghdr *msgh)
1802 {
1803     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1804     abi_long msg_controllen;
1805     abi_ulong target_cmsg_addr;
1806     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1807     socklen_t space = 0;
1808 
1809     msg_controllen = tswapal(target_msgh->msg_controllen);
1810     if (msg_controllen < sizeof (struct target_cmsghdr))
1811         goto the_end;
1812     target_cmsg_addr = tswapal(target_msgh->msg_control);
1813     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1814     target_cmsg_start = target_cmsg;
1815     if (!target_cmsg)
1816         return -TARGET_EFAULT;
1817 
1818     while (cmsg && target_cmsg) {
1819         void *data = CMSG_DATA(cmsg);
1820         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1821 
1822         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1823         int tgt_len, tgt_space;
1824 
1825         /* We never copy a half-header but may copy half-data;
1826          * this is Linux's behaviour in put_cmsg(). Note that
1827          * truncation here is a guest problem (which we report
1828          * to the guest via the CTRUNC bit), unlike truncation
1829          * in target_to_host_cmsg, which is a QEMU bug.
1830          */
1831         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1832             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1833             break;
1834         }
1835 
1836         if (cmsg->cmsg_level == SOL_SOCKET) {
1837             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1838         } else {
1839             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1840         }
1841         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1842 
1843         /* Payload types which need a different size of payload on
1844          * the target must adjust tgt_len here.
1845          */
1846         tgt_len = len;
1847         switch (cmsg->cmsg_level) {
1848         case SOL_SOCKET:
1849             switch (cmsg->cmsg_type) {
1850             case SO_TIMESTAMP:
1851                 tgt_len = sizeof(struct target_timeval);
1852                 break;
1853             default:
1854                 break;
1855             }
1856             break;
1857         default:
1858             break;
1859         }
1860 
1861         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1862             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1864         }
1865 
1866         /* We must now copy-and-convert len bytes of payload
1867          * into tgt_len bytes of destination space. Bear in mind
1868          * that in both source and destination we may be dealing
1869          * with a truncated value!
1870          */
1871         switch (cmsg->cmsg_level) {
1872         case SOL_SOCKET:
1873             switch (cmsg->cmsg_type) {
1874             case SCM_RIGHTS:
1875             {
1876                 int *fd = (int *)data;
1877                 int *target_fd = (int *)target_data;
1878                 int i, numfds = tgt_len / sizeof(int);
1879 
1880                 for (i = 0; i < numfds; i++) {
1881                     __put_user(fd[i], target_fd + i);
1882                 }
1883                 break;
1884             }
1885             case SO_TIMESTAMP:
1886             {
1887                 struct timeval *tv = (struct timeval *)data;
1888                 struct target_timeval *target_tv =
1889                     (struct target_timeval *)target_data;
1890 
1891                 if (len != sizeof(struct timeval) ||
1892                     tgt_len != sizeof(struct target_timeval)) {
1893                     goto unimplemented;
1894                 }
1895 
1896                 /* copy struct timeval to target */
1897                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1898                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1899                 break;
1900             }
1901             case SCM_CREDENTIALS:
1902             {
1903                 struct ucred *cred = (struct ucred *)data;
1904                 struct target_ucred *target_cred =
1905                     (struct target_ucred *)target_data;
1906 
1907                 __put_user(cred->pid, &target_cred->pid);
1908                 __put_user(cred->uid, &target_cred->uid);
1909                 __put_user(cred->gid, &target_cred->gid);
1910                 break;
1911             }
1912             default:
1913                 goto unimplemented;
1914             }
1915             break;
1916 
1917         case SOL_IP:
1918             switch (cmsg->cmsg_type) {
1919             case IP_TTL:
1920             {
1921                 uint32_t *v = (uint32_t *)data;
1922                 uint32_t *t_int = (uint32_t *)target_data;
1923 
1924                 if (len != sizeof(uint32_t) ||
1925                     tgt_len != sizeof(uint32_t)) {
1926                     goto unimplemented;
1927                 }
1928                 __put_user(*v, t_int);
1929                 break;
1930             }
1931             case IP_RECVERR:
1932             {
1933                 struct errhdr_t {
1934                    struct sock_extended_err ee;
1935                    struct sockaddr_in offender;
1936                 };
1937                 struct errhdr_t *errh = (struct errhdr_t *)data;
1938                 struct errhdr_t *target_errh =
1939                     (struct errhdr_t *)target_data;
1940 
1941                 if (len != sizeof(struct errhdr_t) ||
1942                     tgt_len != sizeof(struct errhdr_t)) {
1943                     goto unimplemented;
1944                 }
1945                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1946                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1947                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1948                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1949                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1950                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1951                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1952                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1953                     (void *) &errh->offender, sizeof(errh->offender));
1954                 break;
1955             }
1956             default:
1957                 goto unimplemented;
1958             }
1959             break;
1960 
1961         case SOL_IPV6:
1962             switch (cmsg->cmsg_type) {
1963             case IPV6_HOPLIMIT:
1964             {
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1967 
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1971                 }
1972                 __put_user(*v, t_int);
1973                 break;
1974             }
1975             case IPV6_RECVERR:
1976             {
1977                 struct errhdr6_t {
1978                    struct sock_extended_err ee;
1979                    struct sockaddr_in6 offender;
1980                 };
1981                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1982                 struct errhdr6_t *target_errh =
1983                     (struct errhdr6_t *)target_data;
1984 
1985                 if (len != sizeof(struct errhdr6_t) ||
1986                     tgt_len != sizeof(struct errhdr6_t)) {
1987                     goto unimplemented;
1988                 }
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
1999             }
2000             default:
2001                 goto unimplemented;
2002             }
2003             break;
2004 
2005         default:
2006         unimplemented:
2007             gemu_log("Unsupported ancillary data: %d/%d\n",
2008                                         cmsg->cmsg_level, cmsg->cmsg_type);
2009             memcpy(target_data, data, MIN(len, tgt_len));
2010             if (tgt_len > len) {
2011                 memset(target_data + len, 0, tgt_len - len);
2012             }
2013         }
2014 
2015         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2016         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2017         if (msg_controllen < tgt_space) {
2018             tgt_space = msg_controllen;
2019         }
2020         msg_controllen -= tgt_space;
2021         space += tgt_space;
2022         cmsg = CMSG_NXTHDR(msgh, cmsg);
2023         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2024                                          target_cmsg_start);
2025     }
2026     unlock_user(target_cmsg, target_cmsg_addr, space);
2027  the_end:
2028     target_msgh->msg_controllen = tswapal(space);
2029     return 0;
2030 }
2031 
2032 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
2033 {
2034     nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
2035     nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
2036     nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
2037     nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
2038     nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
2039 }
2040 
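/* Walk a buffer of host netlink messages: NLMSG_DONE, NLMSG_NOOP and
 * NLMSG_ERROR are handled here, anything else is passed to the
 * supplied per-message converter; the nlmsghdr itself is byteswapped
 * afterwards.
 */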
2041 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
2042                                               size_t len,
2043                                               abi_long (*host_to_target_nlmsg)
2044                                                        (struct nlmsghdr *))
2045 {
2046     uint32_t nlmsg_len;
2047     abi_long ret;
2048 
2049     while (len > sizeof(struct nlmsghdr)) {
2050 
2051         nlmsg_len = nlh->nlmsg_len;
2052         if (nlmsg_len < sizeof(struct nlmsghdr) ||
2053             nlmsg_len > len) {
2054             break;
2055         }
2056 
2057         switch (nlh->nlmsg_type) {
2058         case NLMSG_DONE:
2059             tswap_nlmsghdr(nlh);
2060             return 0;
2061         case NLMSG_NOOP:
2062             break;
2063         case NLMSG_ERROR:
2064         {
2065             struct nlmsgerr *e = NLMSG_DATA(nlh);
2066             e->error = tswap32(e->error);
2067             tswap_nlmsghdr(&e->msg);
2068             tswap_nlmsghdr(nlh);
2069             return 0;
2070         }
2071         default:
2072             ret = host_to_target_nlmsg(nlh);
2073             if (ret < 0) {
2074                 tswap_nlmsghdr(nlh);
2075                 return ret;
2076             }
2077             break;
2078         }
2079         tswap_nlmsghdr(nlh);
2080         len -= NLMSG_ALIGN(nlmsg_len);
2081         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlmsg_len));
2082     }
2083     return 0;
2084 }
2085 
2086 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2087                                               size_t len,
2088                                               abi_long (*target_to_host_nlmsg)
2089                                                        (struct nlmsghdr *))
2090 {
2091     int ret;
2092 
2093     while (len > sizeof(struct nlmsghdr)) {
2094         if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2095             tswap32(nlh->nlmsg_len) > len) {
2096             break;
2097         }
2098         tswap_nlmsghdr(nlh);
2099         switch (nlh->nlmsg_type) {
2100         case NLMSG_DONE:
2101             return 0;
2102         case NLMSG_NOOP:
2103             break;
2104         case NLMSG_ERROR:
2105         {
2106             struct nlmsgerr *e = NLMSG_DATA(nlh);
2107             e->error = tswap32(e->error);
2108             tswap_nlmsghdr(&e->msg);
2109             return 0;
2110         }
2111         default:
2112             ret = target_to_host_nlmsg(nlh);
2113             if (ret < 0) {
2114                 return ret;
2115             }
2116         }
2117         len -= NLMSG_ALIGN(nlh->nlmsg_len);
2118         nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2119     }
2120     return 0;
2121 }
2122 
2123 #ifdef CONFIG_RTNETLINK
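/* Iterate over a run of netlink attributes in host byte order: the
 * payload converter runs first, while nla_len/nla_type are still in
 * host order, and the attribute header is byteswapped afterwards.
 */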
2124 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2125                                                size_t len, void *context,
2126                                                abi_long (*host_to_target_nlattr)
2127                                                         (struct nlattr *,
2128                                                          void *context))
2129 {
2130     unsigned short nla_len;
2131     abi_long ret;
2132 
2133     while (len > sizeof(struct nlattr)) {
2134         nla_len = nlattr->nla_len;
2135         if (nla_len < sizeof(struct nlattr) ||
2136             nla_len > len) {
2137             break;
2138         }
2139         ret = host_to_target_nlattr(nlattr, context);
2140         nlattr->nla_len = tswap16(nlattr->nla_len);
2141         nlattr->nla_type = tswap16(nlattr->nla_type);
2142         if (ret < 0) {
2143             return ret;
2144         }
2145         len -= NLA_ALIGN(nla_len);
2146         nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2147     }
2148     return 0;
2149 }
2150 
2151 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2152                                                size_t len,
2153                                                abi_long (*host_to_target_rtattr)
2154                                                         (struct rtattr *))
2155 {
2156     unsigned short rta_len;
2157     abi_long ret;
2158 
2159     while (len > sizeof(struct rtattr)) {
2160         rta_len = rtattr->rta_len;
2161         if (rta_len < sizeof(struct rtattr) ||
2162             rta_len > len) {
2163             break;
2164         }
2165         ret = host_to_target_rtattr(rtattr);
2166         rtattr->rta_len = tswap16(rtattr->rta_len);
2167         rtattr->rta_type = tswap16(rtattr->rta_type);
2168         if (ret < 0) {
2169             return ret;
2170         }
2171         len -= RTA_ALIGN(rta_len);
2172         rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2173     }
2174     return 0;
2175 }
2176 
2177 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2178 
2179 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2180                                                   void *context)
2181 {
2182     uint16_t *u16;
2183     uint32_t *u32;
2184     uint64_t *u64;
2185 
2186     switch (nlattr->nla_type) {
2187     /* no data */
2188     case QEMU_IFLA_BR_FDB_FLUSH:
2189         break;
2190     /* binary */
2191     case QEMU_IFLA_BR_GROUP_ADDR:
2192         break;
2193     /* uint8_t */
2194     case QEMU_IFLA_BR_VLAN_FILTERING:
2195     case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2196     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2197     case QEMU_IFLA_BR_MCAST_ROUTER:
2198     case QEMU_IFLA_BR_MCAST_SNOOPING:
2199     case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2200     case QEMU_IFLA_BR_MCAST_QUERIER:
2201     case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2202     case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2203     case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2204     case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
2205     case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
2206     case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
2207     case QEMU_IFLA_BR_MCAST_MLD_VERSION:
2208         break;
2209     /* uint16_t */
2210     case QEMU_IFLA_BR_PRIORITY:
2211     case QEMU_IFLA_BR_VLAN_PROTOCOL:
2212     case QEMU_IFLA_BR_GROUP_FWD_MASK:
2213     case QEMU_IFLA_BR_ROOT_PORT:
2214     case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2215         u16 = NLA_DATA(nlattr);
2216         *u16 = tswap16(*u16);
2217         break;
2218     /* uint32_t */
2219     case QEMU_IFLA_BR_FORWARD_DELAY:
2220     case QEMU_IFLA_BR_HELLO_TIME:
2221     case QEMU_IFLA_BR_MAX_AGE:
2222     case QEMU_IFLA_BR_AGEING_TIME:
2223     case QEMU_IFLA_BR_STP_STATE:
2224     case QEMU_IFLA_BR_ROOT_PATH_COST:
2225     case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2226     case QEMU_IFLA_BR_MCAST_HASH_MAX:
2227     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2228     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2229         u32 = NLA_DATA(nlattr);
2230         *u32 = tswap32(*u32);
2231         break;
2232     /* uint64_t */
2233     case QEMU_IFLA_BR_HELLO_TIMER:
2234     case QEMU_IFLA_BR_TCN_TIMER:
2235     case QEMU_IFLA_BR_GC_TIMER:
2236     case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2237     case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2238     case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2239     case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2240     case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2241     case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2242     case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2243         u64 = NLA_DATA(nlattr);
2244         *u64 = tswap64(*u64);
2245         break;
2246     /* ifla_bridge_id: uint8_t[] */
2247     case QEMU_IFLA_BR_ROOT_ID:
2248     case QEMU_IFLA_BR_BRIDGE_ID:
2249         break;
2250     default:
2251         gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2252         break;
2253     }
2254     return 0;
2255 }
2256 
2257 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2258                                                         void *context)
2259 {
2260     uint16_t *u16;
2261     uint32_t *u32;
2262     uint64_t *u64;
2263 
2264     switch (nlattr->nla_type) {
2265     /* uint8_t */
2266     case QEMU_IFLA_BRPORT_STATE:
2267     case QEMU_IFLA_BRPORT_MODE:
2268     case QEMU_IFLA_BRPORT_GUARD:
2269     case QEMU_IFLA_BRPORT_PROTECT:
2270     case QEMU_IFLA_BRPORT_FAST_LEAVE:
2271     case QEMU_IFLA_BRPORT_LEARNING:
2272     case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2273     case QEMU_IFLA_BRPORT_PROXYARP:
2274     case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2275     case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2276     case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2277     case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2278     case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2279     case QEMU_IFLA_BRPORT_MCAST_FLOOD:
2280     case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
2281     case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
2282     case QEMU_IFLA_BRPORT_BCAST_FLOOD:
2283     case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
2284         break;
2285     /* uint16_t */
2286     case QEMU_IFLA_BRPORT_PRIORITY:
2287     case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2288     case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2289     case QEMU_IFLA_BRPORT_ID:
2290     case QEMU_IFLA_BRPORT_NO:
2291     case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
2292         u16 = NLA_DATA(nlattr);
2293         *u16 = tswap16(*u16);
2294         break;
2295     /* uint32_t */
2296     case QEMU_IFLA_BRPORT_COST:
2297         u32 = NLA_DATA(nlattr);
2298         *u32 = tswap32(*u32);
2299         break;
2300     /* uint64_t */
2301     case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2302     case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2303     case QEMU_IFLA_BRPORT_HOLD_TIMER:
2304         u64 = NLA_DATA(nlattr);
2305         *u64 = tswap64(*u64);
2306         break;
2307     /* ifla_bridge_id: uint8_t[] */
2308     case QEMU_IFLA_BRPORT_ROOT_ID:
2309     case QEMU_IFLA_BRPORT_BRIDGE_ID:
2310         break;
2311     default:
2312         gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2313         break;
2314     }
2315     return 0;
2316 }
2317 
2318 struct linkinfo_context {
2319     int len;
2320     char *name;
2321     int slave_len;
2322     char *slave_name;
2323 };
2324 
2325 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2326                                                     void *context)
2327 {
2328     struct linkinfo_context *li_context = context;
2329 
2330     switch (nlattr->nla_type) {
2331     /* string */
2332     case QEMU_IFLA_INFO_KIND:
2333         li_context->name = NLA_DATA(nlattr);
2334         li_context->len = nlattr->nla_len - NLA_HDRLEN;
2335         break;
2336     case QEMU_IFLA_INFO_SLAVE_KIND:
2337         li_context->slave_name = NLA_DATA(nlattr);
2338         li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2339         break;
2340     /* stats */
2341     case QEMU_IFLA_INFO_XSTATS:
2342         /* FIXME: only used by CAN */
2343         break;
2344     /* nested */
2345     case QEMU_IFLA_INFO_DATA:
2346         if (strncmp(li_context->name, "bridge",
2347                     li_context->len) == 0) {
2348             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2349                                                   nlattr->nla_len,
2350                                                   NULL,
2351                                              host_to_target_data_bridge_nlattr);
2352         } else {
2353             gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2354         }
2355         break;
2356     case QEMU_IFLA_INFO_SLAVE_DATA:
2357         if (strncmp(li_context->slave_name, "bridge",
2358                     li_context->slave_len) == 0) {
2359             return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2360                                                   nlattr->nla_len,
2361                                                   NULL,
2362                                        host_to_target_slave_data_bridge_nlattr);
2363         } else {
2364             gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365                      li_context->slave_name);
2366         }
2367         break;
2368     default:
2369         gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2370         break;
2371     }
2372 
2373     return 0;
2374 }
2375 
2376 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2377                                                 void *context)
2378 {
2379     uint32_t *u32;
2380     int i;
2381 
2382     switch (nlattr->nla_type) {
2383     case QEMU_IFLA_INET_CONF:
2384         u32 = NLA_DATA(nlattr);
2385         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2386              i++) {
2387             u32[i] = tswap32(u32[i]);
2388         }
2389         break;
2390     default:
2391         gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2392     }
2393     return 0;
2394 }
2395 
2396 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2397                                                 void *context)
2398 {
2399     uint32_t *u32;
2400     uint64_t *u64;
2401     struct ifla_cacheinfo *ci;
2402     int i;
2403 
2404     switch (nlattr->nla_type) {
2405     /* binaries */
2406     case QEMU_IFLA_INET6_TOKEN:
2407         break;
2408     /* uint8_t */
2409     case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2410         break;
2411     /* uint32_t */
2412     case QEMU_IFLA_INET6_FLAGS:
2413         u32 = NLA_DATA(nlattr);
2414         *u32 = tswap32(*u32);
2415         break;
2416     /* uint32_t[] */
2417     case QEMU_IFLA_INET6_CONF:
2418         u32 = NLA_DATA(nlattr);
2419         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2420              i++) {
2421             u32[i] = tswap32(u32[i]);
2422         }
2423         break;
2424     /* ifla_cacheinfo */
2425     case QEMU_IFLA_INET6_CACHEINFO:
2426         ci = NLA_DATA(nlattr);
2427         ci->max_reasm_len = tswap32(ci->max_reasm_len);
2428         ci->tstamp = tswap32(ci->tstamp);
2429         ci->reachable_time = tswap32(ci->reachable_time);
2430         ci->retrans_time = tswap32(ci->retrans_time);
2431         break;
2432     /* uint64_t[] */
2433     case QEMU_IFLA_INET6_STATS:
2434     case QEMU_IFLA_INET6_ICMP6STATS:
2435         u64 = NLA_DATA(nlattr);
2436         for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2437              i++) {
2438             u64[i] = tswap64(u64[i]);
2439         }
2440         break;
2441     default:
2442         gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2443     }
2444     return 0;
2445 }
2446 
2447 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2448                                                     void *context)
2449 {
2450     switch (nlattr->nla_type) {
2451     case AF_INET:
2452         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2453                                               NULL,
2454                                              host_to_target_data_inet_nlattr);
2455     case AF_INET6:
2456         return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2457                                               NULL,
2458                                              host_to_target_data_inet6_nlattr);
2459     default:
2460         gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2461         break;
2462     }
2463     return 0;
2464 }
2465 
2466 static abi_long host_to_target_data_xdp_nlattr(struct nlattr *nlattr,
2467                                                void *context)
2468 {
2469     uint32_t *u32;
2470 
2471     switch (nlattr->nla_type) {
2472     /* uint8_t */
2473     case QEMU_IFLA_XDP_ATTACHED:
2474         break;
2475     /* uint32_t */
2476     case QEMU_IFLA_XDP_PROG_ID:
2477         u32 = NLA_DATA(nlattr);
2478         *u32 = tswap32(*u32);
2479         break;
2480     default:
2481         gemu_log("Unknown host XDP type: %d\n", nlattr->nla_type);
2482         break;
2483     }
2484     return 0;
2485 }
2486 
2487 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
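/* Byteswap the payload of a single IFLA_* link attribute according to
 * its type; nested attributes (LINKINFO, AF_SPEC, XDP) recurse via
 * host_to_target_for_each_nlattr().
 */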
2488 {
2489     uint32_t *u32;
2490     struct rtnl_link_stats *st;
2491     struct rtnl_link_stats64 *st64;
2492     struct rtnl_link_ifmap *map;
2493     struct linkinfo_context li_context;
2494 
2495     switch (rtattr->rta_type) {
2496     /* binary stream */
2497     case QEMU_IFLA_ADDRESS:
2498     case QEMU_IFLA_BROADCAST:
2499     /* string */
2500     case QEMU_IFLA_IFNAME:
2501     case QEMU_IFLA_QDISC:
2502         break;
2503     /* uint8_t */
2504     case QEMU_IFLA_OPERSTATE:
2505     case QEMU_IFLA_LINKMODE:
2506     case QEMU_IFLA_CARRIER:
2507     case QEMU_IFLA_PROTO_DOWN:
2508         break;
2509     /* uint32_t */
2510     case QEMU_IFLA_MTU:
2511     case QEMU_IFLA_LINK:
2512     case QEMU_IFLA_WEIGHT:
2513     case QEMU_IFLA_TXQLEN:
2514     case QEMU_IFLA_CARRIER_CHANGES:
2515     case QEMU_IFLA_NUM_RX_QUEUES:
2516     case QEMU_IFLA_NUM_TX_QUEUES:
2517     case QEMU_IFLA_PROMISCUITY:
2518     case QEMU_IFLA_EXT_MASK:
2519     case QEMU_IFLA_LINK_NETNSID:
2520     case QEMU_IFLA_GROUP:
2521     case QEMU_IFLA_MASTER:
2522     case QEMU_IFLA_NUM_VF:
2523     case QEMU_IFLA_GSO_MAX_SEGS:
2524     case QEMU_IFLA_GSO_MAX_SIZE:
2525     case QEMU_IFLA_CARRIER_UP_COUNT:
2526     case QEMU_IFLA_CARRIER_DOWN_COUNT:
2527         u32 = RTA_DATA(rtattr);
2528         *u32 = tswap32(*u32);
2529         break;
2530     /* struct rtnl_link_stats */
2531     case QEMU_IFLA_STATS:
2532         st = RTA_DATA(rtattr);
2533         st->rx_packets = tswap32(st->rx_packets);
2534         st->tx_packets = tswap32(st->tx_packets);
2535         st->rx_bytes = tswap32(st->rx_bytes);
2536         st->tx_bytes = tswap32(st->tx_bytes);
2537         st->rx_errors = tswap32(st->rx_errors);
2538         st->tx_errors = tswap32(st->tx_errors);
2539         st->rx_dropped = tswap32(st->rx_dropped);
2540         st->tx_dropped = tswap32(st->tx_dropped);
2541         st->multicast = tswap32(st->multicast);
2542         st->collisions = tswap32(st->collisions);
2543 
2544         /* detailed rx_errors: */
2545         st->rx_length_errors = tswap32(st->rx_length_errors);
2546         st->rx_over_errors = tswap32(st->rx_over_errors);
2547         st->rx_crc_errors = tswap32(st->rx_crc_errors);
2548         st->rx_frame_errors = tswap32(st->rx_frame_errors);
2549         st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2550         st->rx_missed_errors = tswap32(st->rx_missed_errors);
2551 
2552         /* detailed tx_errors */
2553         st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2554         st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2555         st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2556         st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2557         st->tx_window_errors = tswap32(st->tx_window_errors);
2558 
2559         /* for cslip etc */
2560         st->rx_compressed = tswap32(st->rx_compressed);
2561         st->tx_compressed = tswap32(st->tx_compressed);
2562         break;
2563     /* struct rtnl_link_stats64 */
2564     case QEMU_IFLA_STATS64:
2565         st64 = RTA_DATA(rtattr);
2566         st64->rx_packets = tswap64(st64->rx_packets);
2567         st64->tx_packets = tswap64(st64->tx_packets);
2568         st64->rx_bytes = tswap64(st64->rx_bytes);
2569         st64->tx_bytes = tswap64(st64->tx_bytes);
2570         st64->rx_errors = tswap64(st64->rx_errors);
2571         st64->tx_errors = tswap64(st64->tx_errors);
2572         st64->rx_dropped = tswap64(st64->rx_dropped);
2573         st64->tx_dropped = tswap64(st64->tx_dropped);
2574         st64->multicast = tswap64(st64->multicast);
2575         st64->collisions = tswap64(st64->collisions);
2576 
2577         /* detailed rx_errors: */
2578         st64->rx_length_errors = tswap64(st64->rx_length_errors);
2579         st64->rx_over_errors = tswap64(st64->rx_over_errors);
2580         st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2581         st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2582         st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2583         st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2584 
2585         /* detailed tx_errors */
2586         st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2587         st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2588         st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2589         st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2590         st64->tx_window_errors = tswap64(st64->tx_window_errors);
2591 
2592         /* for cslip etc */
2593         st64->rx_compressed = tswap64(st64->rx_compressed);
2594         st64->tx_compressed = tswap64(st64->tx_compressed);
2595         break;
2596     /* struct rtnl_link_ifmap */
2597     case QEMU_IFLA_MAP:
2598         map = RTA_DATA(rtattr);
2599         map->mem_start = tswap64(map->mem_start);
2600         map->mem_end = tswap64(map->mem_end);
2601         map->base_addr = tswap64(map->base_addr);
2602         map->irq = tswap16(map->irq);
2603         break;
2604     /* nested */
2605     case QEMU_IFLA_LINKINFO:
2606         memset(&li_context, 0, sizeof(li_context));
2607         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2608                                               &li_context,
2609                                            host_to_target_data_linkinfo_nlattr);
2610     case QEMU_IFLA_AF_SPEC:
2611         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2612                                               NULL,
2613                                              host_to_target_data_spec_nlattr);
2614     case QEMU_IFLA_XDP:
2615         return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2616                                               NULL,
2617                                                 host_to_target_data_xdp_nlattr);
2618     default:
2619         gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2620         break;
2621     }
2622     return 0;
2623 }
2624 
2625 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2626 {
2627     uint32_t *u32;
2628     struct ifa_cacheinfo *ci;
2629 
2630     switch (rtattr->rta_type) {
2631     /* binary: depends on family type */
2632     case IFA_ADDRESS:
2633     case IFA_LOCAL:
2634         break;
2635     /* string */
2636     case IFA_LABEL:
2637         break;
2638     /* u32 */
2639     case IFA_FLAGS:
2640     case IFA_BROADCAST:
2641         u32 = RTA_DATA(rtattr);
2642         *u32 = tswap32(*u32);
2643         break;
2644     /* struct ifa_cacheinfo */
2645     case IFA_CACHEINFO:
2646         ci = RTA_DATA(rtattr);
2647         ci->ifa_prefered = tswap32(ci->ifa_prefered);
2648         ci->ifa_valid = tswap32(ci->ifa_valid);
2649         ci->cstamp = tswap32(ci->cstamp);
2650         ci->tstamp = tswap32(ci->tstamp);
2651         break;
2652     default:
2653         gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2654         break;
2655     }
2656     return 0;
2657 }
2658 
2659 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2660 {
2661     uint32_t *u32;
2662     switch (rtattr->rta_type) {
2663     /* binary: depends on family type */
2664     case RTA_GATEWAY:
2665     case RTA_DST:
2666     case RTA_PREFSRC:
2667         break;
2668     /* u32 */
2669     case RTA_PRIORITY:
2670     case RTA_TABLE:
2671     case RTA_OIF:
2672         u32 = RTA_DATA(rtattr);
2673         *u32 = tswap32(*u32);
2674         break;
2675     default:
2676         gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2677         break;
2678     }
2679     return 0;
2680 }
2681 
2682 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2683                                          uint32_t rtattr_len)
2684 {
2685     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2686                                           host_to_target_data_link_rtattr);
2687 }
2688 
2689 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2690                                          uint32_t rtattr_len)
2691 {
2692     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2693                                           host_to_target_data_addr_rtattr);
2694 }
2695 
2696 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2697                                          uint32_t rtattr_len)
2698 {
2699     return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2700                                           host_to_target_data_route_rtattr);
2701 }
2702 
2703 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2704 {
2705     uint32_t nlmsg_len;
2706     struct ifinfomsg *ifi;
2707     struct ifaddrmsg *ifa;
2708     struct rtmsg *rtm;
2709 
2710     nlmsg_len = nlh->nlmsg_len;
2711     switch (nlh->nlmsg_type) {
2712     case RTM_NEWLINK:
2713     case RTM_DELLINK:
2714     case RTM_GETLINK:
2715         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2716             ifi = NLMSG_DATA(nlh);
2717             ifi->ifi_type = tswap16(ifi->ifi_type);
2718             ifi->ifi_index = tswap32(ifi->ifi_index);
2719             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2720             ifi->ifi_change = tswap32(ifi->ifi_change);
2721             host_to_target_link_rtattr(IFLA_RTA(ifi),
2722                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2723         }
2724         break;
2725     case RTM_NEWADDR:
2726     case RTM_DELADDR:
2727     case RTM_GETADDR:
2728         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2729             ifa = NLMSG_DATA(nlh);
2730             ifa->ifa_index = tswap32(ifa->ifa_index);
2731             host_to_target_addr_rtattr(IFA_RTA(ifa),
2732                                        nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2733         }
2734         break;
2735     case RTM_NEWROUTE:
2736     case RTM_DELROUTE:
2737     case RTM_GETROUTE:
2738         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2739             rtm = NLMSG_DATA(nlh);
2740             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2741             host_to_target_route_rtattr(RTM_RTA(rtm),
2742                                         nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2743         }
2744         break;
2745     default:
2746         return -TARGET_EINVAL;
2747     }
2748     return 0;
2749 }
2750 
2751 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2752                                                   size_t len)
2753 {
2754     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2755 }
2756 
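/* Guest-to-host direction: the rtattr header is byteswapped to host
 * order first, since its length is needed to advance through the
 * buffer, and the payload converter then sees host-order headers.
 */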
2757 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2758                                                size_t len,
2759                                                abi_long (*target_to_host_rtattr)
2760                                                         (struct rtattr *))
2761 {
2762     abi_long ret;
2763 
2764     while (len >= sizeof(struct rtattr)) {
2765         if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2766             tswap16(rtattr->rta_len) > len) {
2767             break;
2768         }
2769         rtattr->rta_len = tswap16(rtattr->rta_len);
2770         rtattr->rta_type = tswap16(rtattr->rta_type);
2771         ret = target_to_host_rtattr(rtattr);
2772         if (ret < 0) {
2773             return ret;
2774         }
2775         len -= RTA_ALIGN(rtattr->rta_len);
2776         rtattr = (struct rtattr *)(((char *)rtattr) +
2777                  RTA_ALIGN(rtattr->rta_len));
2778     }
2779     return 0;
2780 }
2781 
2782 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2783 {
2784     switch (rtattr->rta_type) {
2785     default:
2786         gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2787         break;
2788     }
2789     return 0;
2790 }
2791 
2792 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2793 {
2794     switch (rtattr->rta_type) {
2795     /* binary: depends on family type */
2796     case IFA_LOCAL:
2797     case IFA_ADDRESS:
2798         break;
2799     default:
2800         gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2801         break;
2802     }
2803     return 0;
2804 }
2805 
2806 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2807 {
2808     uint32_t *u32;
2809     switch (rtattr->rta_type) {
2810     /* binary: depends on family type */
2811     case RTA_DST:
2812     case RTA_SRC:
2813     case RTA_GATEWAY:
2814         break;
2815     /* u32 */
2816     case RTA_PRIORITY:
2817     case RTA_OIF:
2818         u32 = RTA_DATA(rtattr);
2819         *u32 = tswap32(*u32);
2820         break;
2821     default:
2822         gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2823         break;
2824     }
2825     return 0;
2826 }
2827 
2828 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2829                                        uint32_t rtattr_len)
2830 {
2831     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2832                                    target_to_host_data_link_rtattr);
2833 }
2834 
2835 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2836                                      uint32_t rtattr_len)
2837 {
2838     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2839                                    target_to_host_data_addr_rtattr);
2840 }
2841 
2842 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2843                                      uint32_t rtattr_len)
2844 {
2845     target_to_host_for_each_rtattr(rtattr, rtattr_len,
2846                                    target_to_host_data_route_rtattr);
2847 }
2848 
2849 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2850 {
2851     struct ifinfomsg *ifi;
2852     struct ifaddrmsg *ifa;
2853     struct rtmsg *rtm;
2854 
2855     switch (nlh->nlmsg_type) {
2856     case RTM_GETLINK:
2857         break;
2858     case RTM_NEWLINK:
2859     case RTM_DELLINK:
2860         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2861             ifi = NLMSG_DATA(nlh);
2862             ifi->ifi_type = tswap16(ifi->ifi_type);
2863             ifi->ifi_index = tswap32(ifi->ifi_index);
2864             ifi->ifi_flags = tswap32(ifi->ifi_flags);
2865             ifi->ifi_change = tswap32(ifi->ifi_change);
2866             target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2867                                        NLMSG_LENGTH(sizeof(*ifi)));
2868         }
2869         break;
2870     case RTM_GETADDR:
2871     case RTM_NEWADDR:
2872     case RTM_DELADDR:
2873         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2874             ifa = NLMSG_DATA(nlh);
2875             ifa->ifa_index = tswap32(ifa->ifa_index);
2876             target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2877                                        NLMSG_LENGTH(sizeof(*ifa)));
2878         }
2879         break;
2880     case RTM_GETROUTE:
2881         break;
2882     case RTM_NEWROUTE:
2883     case RTM_DELROUTE:
2884         if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2885             rtm = NLMSG_DATA(nlh);
2886             rtm->rtm_flags = tswap32(rtm->rtm_flags);
2887             target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2888                                         NLMSG_LENGTH(sizeof(*rtm)));
2889         }
2890         break;
2891     default:
2892         return -TARGET_EOPNOTSUPP;
2893     }
2894     return 0;
2895 }
2896 
2897 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2898 {
2899     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2900 }
2901 #endif /* CONFIG_RTNETLINK */
2902 
2903 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2904 {
2905     switch (nlh->nlmsg_type) {
2906     default:
2907         gemu_log("Unknown host audit message type %d\n",
2908                  nlh->nlmsg_type);
2909         return -TARGET_EINVAL;
2910     }
2911     return 0;
2912 }
2913 
2914 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2915                                                   size_t len)
2916 {
2917     return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2918 }
2919 
2920 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2921 {
2922     switch (nlh->nlmsg_type) {
2923     case AUDIT_USER:
2924     case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2925     case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2926         break;
2927     default:
2928         gemu_log("Unknown target audit message type %d\n",
2929                  nlh->nlmsg_type);
2930         return -TARGET_EINVAL;
2931     }
2932 
2933     return 0;
2934 }
2935 
2936 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2937 {
2938     return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2939 }
2940 
2941 /* do_setsockopt() must return target values and target errnos. */
2942 static abi_long do_setsockopt(int sockfd, int level, int optname,
2943                               abi_ulong optval_addr, socklen_t optlen)
2944 {
2945     abi_long ret;
2946     int val;
2947     struct ip_mreqn *ip_mreq;
2948     struct ip_mreq_source *ip_mreq_source;
2949 
2950     switch(level) {
2951     case SOL_TCP:
2952         /* TCP options all take an 'int' value.  */
2953         if (optlen < sizeof(uint32_t))
2954             return -TARGET_EINVAL;
2955 
2956         if (get_user_u32(val, optval_addr))
2957             return -TARGET_EFAULT;
2958         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2959         break;
2960     case SOL_IP:
2961         switch(optname) {
2962         case IP_TOS:
2963         case IP_TTL:
2964         case IP_HDRINCL:
2965         case IP_ROUTER_ALERT:
2966         case IP_RECVOPTS:
2967         case IP_RETOPTS:
2968         case IP_PKTINFO:
2969         case IP_MTU_DISCOVER:
2970         case IP_RECVERR:
2971         case IP_RECVTTL:
2972         case IP_RECVTOS:
2973 #ifdef IP_FREEBIND
2974         case IP_FREEBIND:
2975 #endif
2976         case IP_MULTICAST_TTL:
2977         case IP_MULTICAST_LOOP:
2978             val = 0;
2979             if (optlen >= sizeof(uint32_t)) {
2980                 if (get_user_u32(val, optval_addr))
2981                     return -TARGET_EFAULT;
2982             } else if (optlen >= 1) {
2983                 if (get_user_u8(val, optval_addr))
2984                     return -TARGET_EFAULT;
2985             }
2986             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2987             break;
2988         case IP_ADD_MEMBERSHIP:
2989         case IP_DROP_MEMBERSHIP:
2990             if (optlen < sizeof (struct target_ip_mreq) ||
2991                 optlen > sizeof (struct target_ip_mreqn))
2992                 return -TARGET_EINVAL;
2993 
2994             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2995             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2996             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2997             break;
2998 
2999         case IP_BLOCK_SOURCE:
3000         case IP_UNBLOCK_SOURCE:
3001         case IP_ADD_SOURCE_MEMBERSHIP:
3002         case IP_DROP_SOURCE_MEMBERSHIP:
3003             if (optlen != sizeof (struct target_ip_mreq_source))
3004                 return -TARGET_EINVAL;
3005 
3006             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3007             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
3008             unlock_user(ip_mreq_source, optval_addr, 0);
3009             break;
3010 
3011         default:
3012             goto unimplemented;
3013         }
3014         break;
3015     case SOL_IPV6:
3016         switch (optname) {
3017         case IPV6_MTU_DISCOVER:
3018         case IPV6_MTU:
3019         case IPV6_V6ONLY:
3020         case IPV6_RECVPKTINFO:
3021         case IPV6_UNICAST_HOPS:
3022         case IPV6_MULTICAST_HOPS:
3023         case IPV6_MULTICAST_LOOP:
3024         case IPV6_RECVERR:
3025         case IPV6_RECVHOPLIMIT:
3026         case IPV6_2292HOPLIMIT:
3027         case IPV6_CHECKSUM:
3028             val = 0;
3029             if (optlen < sizeof(uint32_t)) {
3030                 return -TARGET_EINVAL;
3031             }
3032             if (get_user_u32(val, optval_addr)) {
3033                 return -TARGET_EFAULT;
3034             }
3035             ret = get_errno(setsockopt(sockfd, level, optname,
3036                                        &val, sizeof(val)));
3037             break;
3038         case IPV6_PKTINFO:
3039         {
3040             struct in6_pktinfo pki;
3041 
3042             if (optlen < sizeof(pki)) {
3043                 return -TARGET_EINVAL;
3044             }
3045 
3046             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
3047                 return -TARGET_EFAULT;
3048             }
3049 
3050             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
3051 
3052             ret = get_errno(setsockopt(sockfd, level, optname,
3053                                        &pki, sizeof(pki)));
3054             break;
3055         }
3056         default:
3057             goto unimplemented;
3058         }
3059         break;
3060     case SOL_ICMPV6:
3061         switch (optname) {
3062         case ICMPV6_FILTER:
3063         {
3064             struct icmp6_filter icmp6f;
3065 
3066             if (optlen > sizeof(icmp6f)) {
3067                 optlen = sizeof(icmp6f);
3068             }
3069 
3070             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3071                 return -TARGET_EFAULT;
3072             }
3073 
3074             for (val = 0; val < 8; val++) {
3075                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3076             }
3077 
3078             ret = get_errno(setsockopt(sockfd, level, optname,
3079                                        &icmp6f, optlen));
3080             break;
3081         }
3082         default:
3083             goto unimplemented;
3084         }
3085         break;
3086     case SOL_RAW:
3087         switch (optname) {
3088         case ICMP_FILTER:
3089         case IPV6_CHECKSUM:
3090             /* these take a u32 value */
3091             if (optlen < sizeof(uint32_t)) {
3092                 return -TARGET_EINVAL;
3093             }
3094 
3095             if (get_user_u32(val, optval_addr)) {
3096                 return -TARGET_EFAULT;
3097             }
3098             ret = get_errno(setsockopt(sockfd, level, optname,
3099                                        &val, sizeof(val)));
3100             break;
3101 
3102         default:
3103             goto unimplemented;
3104         }
3105         break;
3106     case TARGET_SOL_SOCKET:
3107         switch (optname) {
3108         case TARGET_SO_RCVTIMEO:
3109         {
3110                 struct timeval tv;
3111 
3112                 optname = SO_RCVTIMEO;
3113 
3114 set_timeout:
3115                 if (optlen != sizeof(struct target_timeval)) {
3116                     return -TARGET_EINVAL;
3117                 }
3118 
3119                 if (copy_from_user_timeval(&tv, optval_addr)) {
3120                     return -TARGET_EFAULT;
3121                 }
3122 
3123                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3124                                 &tv, sizeof(tv)));
3125                 return ret;
3126         }
3127         case TARGET_SO_SNDTIMEO:
3128                 optname = SO_SNDTIMEO;
3129                 goto set_timeout;
3130         case TARGET_SO_ATTACH_FILTER:
3131         {
3132                 struct target_sock_fprog *tfprog;
3133                 struct target_sock_filter *tfilter;
3134                 struct sock_fprog fprog;
3135                 struct sock_filter *filter;
3136                 int i;
3137 
3138                 if (optlen != sizeof(*tfprog)) {
3139                     return -TARGET_EINVAL;
3140                 }
3141                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3142                     return -TARGET_EFAULT;
3143                 }
3144                 if (!lock_user_struct(VERIFY_READ, tfilter,
3145                                       tswapal(tfprog->filter), 0)) {
3146                     unlock_user_struct(tfprog, optval_addr, 1);
3147                     return -TARGET_EFAULT;
3148                 }
3149 
3150                 fprog.len = tswap16(tfprog->len);
3151                 filter = g_try_new(struct sock_filter, fprog.len);
3152                 if (filter == NULL) {
3153                     unlock_user_struct(tfilter, tfprog->filter, 1);
3154                     unlock_user_struct(tfprog, optval_addr, 1);
3155                     return -TARGET_ENOMEM;
3156                 }
3157                 for (i = 0; i < fprog.len; i++) {
3158                     filter[i].code = tswap16(tfilter[i].code);
3159                     filter[i].jt = tfilter[i].jt;
3160                     filter[i].jf = tfilter[i].jf;
3161                     filter[i].k = tswap32(tfilter[i].k);
3162                 }
3163                 fprog.filter = filter;
3164 
3165                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3166                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3167                 g_free(filter);
3168 
3169                 unlock_user_struct(tfilter, tfprog->filter, 1);
3170                 unlock_user_struct(tfprog, optval_addr, 1);
3171                 return ret;
3172         }
3173 	case TARGET_SO_BINDTODEVICE:
3174 	{
3175 		char *dev_ifname, *addr_ifname;
3176 
3177 		if (optlen > IFNAMSIZ - 1) {
3178 		    optlen = IFNAMSIZ - 1;
3179 		}
3180 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3181 		if (!dev_ifname) {
3182 		    return -TARGET_EFAULT;
3183 		}
3184 		optname = SO_BINDTODEVICE;
3185 		addr_ifname = alloca(IFNAMSIZ);
3186 		memcpy(addr_ifname, dev_ifname, optlen);
3187 		addr_ifname[optlen] = 0;
3188 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3189                                            addr_ifname, optlen));
3190 		unlock_user (dev_ifname, optval_addr, 0);
3191 		return ret;
3192 	}
3193             /* Options with 'int' argument.  */
3194         case TARGET_SO_DEBUG:
3195 		optname = SO_DEBUG;
3196 		break;
3197         case TARGET_SO_REUSEADDR:
3198 		optname = SO_REUSEADDR;
3199 		break;
3200         case TARGET_SO_TYPE:
3201 		optname = SO_TYPE;
3202 		break;
3203         case TARGET_SO_ERROR:
3204 		optname = SO_ERROR;
3205 		break;
3206         case TARGET_SO_DONTROUTE:
3207 		optname = SO_DONTROUTE;
3208 		break;
3209         case TARGET_SO_BROADCAST:
3210 		optname = SO_BROADCAST;
3211 		break;
3212         case TARGET_SO_SNDBUF:
3213 		optname = SO_SNDBUF;
3214 		break;
3215         case TARGET_SO_SNDBUFFORCE:
3216                 optname = SO_SNDBUFFORCE;
3217                 break;
3218         case TARGET_SO_RCVBUF:
3219 		optname = SO_RCVBUF;
3220 		break;
3221         case TARGET_SO_RCVBUFFORCE:
3222                 optname = SO_RCVBUFFORCE;
3223                 break;
3224         case TARGET_SO_KEEPALIVE:
3225 		optname = SO_KEEPALIVE;
3226 		break;
3227         case TARGET_SO_OOBINLINE:
3228 		optname = SO_OOBINLINE;
3229 		break;
3230         case TARGET_SO_NO_CHECK:
3231 		optname = SO_NO_CHECK;
3232 		break;
3233         case TARGET_SO_PRIORITY:
3234 		optname = SO_PRIORITY;
3235 		break;
3236 #ifdef SO_BSDCOMPAT
3237         case TARGET_SO_BSDCOMPAT:
3238 		optname = SO_BSDCOMPAT;
3239 		break;
3240 #endif
3241         case TARGET_SO_PASSCRED:
3242 		optname = SO_PASSCRED;
3243 		break;
3244         case TARGET_SO_PASSSEC:
3245                 optname = SO_PASSSEC;
3246                 break;
3247         case TARGET_SO_TIMESTAMP:
3248 		optname = SO_TIMESTAMP;
3249 		break;
3250         case TARGET_SO_RCVLOWAT:
3251 		optname = SO_RCVLOWAT;
3252 		break;
3253         default:
3254             goto unimplemented;
3255         }
3256 	if (optlen < sizeof(uint32_t))
3257             return -TARGET_EINVAL;
3258 
3259 	if (get_user_u32(val, optval_addr))
3260             return -TARGET_EFAULT;
3261 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3262         break;
3263     default:
3264     unimplemented:
3265         gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3266         ret = -TARGET_ENOPROTOOPT;
3267     }
3268     return ret;
3269 }
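
/* For illustration, a minimal guest-side call that exercises the
 * TARGET_SO_RCVTIMEO path above:
 *
 *     struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * The guest's target_timeval is converted by copy_from_user_timeval() and
 * the host setsockopt() is issued with a native struct timeval, so the host
 * kernel never sees the target layout or byte order.
 */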
3270 
3271 /* do_getsockopt() Must return target values and target errnos. */
3272 static abi_long do_getsockopt(int sockfd, int level, int optname,
3273                               abi_ulong optval_addr, abi_ulong optlen)
3274 {
3275     abi_long ret;
3276     int len, val;
3277     socklen_t lv;
3278 
3279     switch(level) {
3280     case TARGET_SOL_SOCKET:
3281         level = SOL_SOCKET;
3282         switch (optname) {
3283         /* These don't just return a single integer */
3284         case TARGET_SO_LINGER:
3285         case TARGET_SO_RCVTIMEO:
3286         case TARGET_SO_SNDTIMEO:
3287         case TARGET_SO_PEERNAME:
3288             goto unimplemented;
3289         case TARGET_SO_PEERCRED: {
3290             struct ucred cr;
3291             socklen_t crlen;
3292             struct target_ucred *tcr;
3293 
3294             if (get_user_u32(len, optlen)) {
3295                 return -TARGET_EFAULT;
3296             }
3297             if (len < 0) {
3298                 return -TARGET_EINVAL;
3299             }
3300 
3301             crlen = sizeof(cr);
3302             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3303                                        &cr, &crlen));
3304             if (ret < 0) {
3305                 return ret;
3306             }
3307             if (len > crlen) {
3308                 len = crlen;
3309             }
3310             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3311                 return -TARGET_EFAULT;
3312             }
3313             __put_user(cr.pid, &tcr->pid);
3314             __put_user(cr.uid, &tcr->uid);
3315             __put_user(cr.gid, &tcr->gid);
3316             unlock_user_struct(tcr, optval_addr, 1);
3317             if (put_user_u32(len, optlen)) {
3318                 return -TARGET_EFAULT;
3319             }
3320             break;
3321         }
3322         /* Options with 'int' argument.  */
3323         case TARGET_SO_DEBUG:
3324             optname = SO_DEBUG;
3325             goto int_case;
3326         case TARGET_SO_REUSEADDR:
3327             optname = SO_REUSEADDR;
3328             goto int_case;
3329         case TARGET_SO_TYPE:
3330             optname = SO_TYPE;
3331             goto int_case;
3332         case TARGET_SO_ERROR:
3333             optname = SO_ERROR;
3334             goto int_case;
3335         case TARGET_SO_DONTROUTE:
3336             optname = SO_DONTROUTE;
3337             goto int_case;
3338         case TARGET_SO_BROADCAST:
3339             optname = SO_BROADCAST;
3340             goto int_case;
3341         case TARGET_SO_SNDBUF:
3342             optname = SO_SNDBUF;
3343             goto int_case;
3344         case TARGET_SO_RCVBUF:
3345             optname = SO_RCVBUF;
3346             goto int_case;
3347         case TARGET_SO_KEEPALIVE:
3348             optname = SO_KEEPALIVE;
3349             goto int_case;
3350         case TARGET_SO_OOBINLINE:
3351             optname = SO_OOBINLINE;
3352             goto int_case;
3353         case TARGET_SO_NO_CHECK:
3354             optname = SO_NO_CHECK;
3355             goto int_case;
3356         case TARGET_SO_PRIORITY:
3357             optname = SO_PRIORITY;
3358             goto int_case;
3359 #ifdef SO_BSDCOMPAT
3360         case TARGET_SO_BSDCOMPAT:
3361             optname = SO_BSDCOMPAT;
3362             goto int_case;
3363 #endif
3364         case TARGET_SO_PASSCRED:
3365             optname = SO_PASSCRED;
3366             goto int_case;
3367         case TARGET_SO_TIMESTAMP:
3368             optname = SO_TIMESTAMP;
3369             goto int_case;
3370         case TARGET_SO_RCVLOWAT:
3371             optname = SO_RCVLOWAT;
3372             goto int_case;
3373         case TARGET_SO_ACCEPTCONN:
3374             optname = SO_ACCEPTCONN;
3375             goto int_case;
3376         default:
3377             goto int_case;
3378         }
3379         break;
3380     case SOL_TCP:
3381         /* TCP options all take an 'int' value.  */
3382     int_case:
3383         if (get_user_u32(len, optlen))
3384             return -TARGET_EFAULT;
3385         if (len < 0)
3386             return -TARGET_EINVAL;
3387         lv = sizeof(lv);
3388         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3389         if (ret < 0)
3390             return ret;
3391         if (optname == SO_TYPE) {
3392             val = host_to_target_sock_type(val);
3393         }
3394         if (len > lv)
3395             len = lv;
3396         if (len == 4) {
3397             if (put_user_u32(val, optval_addr))
3398                 return -TARGET_EFAULT;
3399         } else {
3400             if (put_user_u8(val, optval_addr))
3401                 return -TARGET_EFAULT;
3402         }
3403         if (put_user_u32(len, optlen))
3404             return -TARGET_EFAULT;
3405         break;
3406     case SOL_IP:
3407         switch(optname) {
3408         case IP_TOS:
3409         case IP_TTL:
3410         case IP_HDRINCL:
3411         case IP_ROUTER_ALERT:
3412         case IP_RECVOPTS:
3413         case IP_RETOPTS:
3414         case IP_PKTINFO:
3415         case IP_MTU_DISCOVER:
3416         case IP_RECVERR:
3417         case IP_RECVTOS:
3418 #ifdef IP_FREEBIND
3419         case IP_FREEBIND:
3420 #endif
3421         case IP_MULTICAST_TTL:
3422         case IP_MULTICAST_LOOP:
3423             if (get_user_u32(len, optlen))
3424                 return -TARGET_EFAULT;
3425             if (len < 0)
3426                 return -TARGET_EINVAL;
3427             lv = sizeof(lv);
3428             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3429             if (ret < 0)
3430                 return ret;
3431             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3432                 len = 1;
3433                 if (put_user_u32(len, optlen)
3434                     || put_user_u8(val, optval_addr))
3435                     return -TARGET_EFAULT;
3436             } else {
3437                 if (len > sizeof(int))
3438                     len = sizeof(int);
3439                 if (put_user_u32(len, optlen)
3440                     || put_user_u32(val, optval_addr))
3441                     return -TARGET_EFAULT;
3442             }
3443             break;
3444         default:
3445             ret = -TARGET_ENOPROTOOPT;
3446             break;
3447         }
3448         break;
3449     default:
3450     unimplemented:
3451         gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3452                  level, optname);
3453         ret = -TARGET_EOPNOTSUPP;
3454         break;
3455     }
3456     return ret;
3457 }
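
/* For illustration, a guest
 *
 *     getsockopt(fd, SOL_SOCKET, SO_TYPE, &val, &len);
 *
 * goes through the int_case path above: the host socket is queried, the
 * host type is mapped back with host_to_target_sock_type(), the returned
 * length is clamped to what the guest asked for, and both the value and
 * the updated length are written back to guest memory.
 */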
3458 
3459 /* Convert target low/high pair representing file offset into the host
3460  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3461  * as the kernel doesn't handle them either.
3462  */
3463 static void target_to_host_low_high(abi_ulong tlow,
3464                                     abi_ulong thigh,
3465                                     unsigned long *hlow,
3466                                     unsigned long *hhigh)
3467 {
3468     uint64_t off = tlow |
3469         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3470         TARGET_LONG_BITS / 2;
3471 
3472     *hlow = off;
3473     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3474 }
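
/* Worked example, assuming a 32-bit target on a 64-bit host: with
 * tlow = 0x89abcdef and thigh = 0x01234567, the two shifts by
 * TARGET_LONG_BITS / 2 (16 + 16 = 32 bits) give
 *
 *     off    = 0x0123456789abcdef
 *     *hlow  = 0x0123456789abcdef   (a 64-bit unsigned long holds it all)
 *     *hhigh = 0                    ((off >> 32) >> 32)
 *
 * The shift is split in two so it stays well defined when the shift count
 * equals the width of the operand (the 64-bit target case).
 */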
3475 
3476 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3477                                 abi_ulong count, int copy)
3478 {
3479     struct target_iovec *target_vec;
3480     struct iovec *vec;
3481     abi_ulong total_len, max_len;
3482     int i;
3483     int err = 0;
3484     bool bad_address = false;
3485 
3486     if (count == 0) {
3487         errno = 0;
3488         return NULL;
3489     }
3490     if (count > IOV_MAX) {
3491         errno = EINVAL;
3492         return NULL;
3493     }
3494 
3495     vec = g_try_new0(struct iovec, count);
3496     if (vec == NULL) {
3497         errno = ENOMEM;
3498         return NULL;
3499     }
3500 
3501     target_vec = lock_user(VERIFY_READ, target_addr,
3502                            count * sizeof(struct target_iovec), 1);
3503     if (target_vec == NULL) {
3504         err = EFAULT;
3505         goto fail2;
3506     }
3507 
3508     /* ??? If host page size > target page size, this will result in a
3509        value larger than what we can actually support.  */
3510     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3511     total_len = 0;
3512 
3513     for (i = 0; i < count; i++) {
3514         abi_ulong base = tswapal(target_vec[i].iov_base);
3515         abi_long len = tswapal(target_vec[i].iov_len);
3516 
3517         if (len < 0) {
3518             err = EINVAL;
3519             goto fail;
3520         } else if (len == 0) {
3521             /* Zero length pointer is ignored.  */
3522             vec[i].iov_base = 0;
3523         } else {
3524             vec[i].iov_base = lock_user(type, base, len, copy);
3525             /* If the first buffer pointer is bad, this is a fault.  But
3526              * subsequent bad buffers will result in a partial write; this
3527              * is realized by filling the vector with null pointers and
3528              * zero lengths. */
3529             if (!vec[i].iov_base) {
3530                 if (i == 0) {
3531                     err = EFAULT;
3532                     goto fail;
3533                 } else {
3534                     bad_address = true;
3535                 }
3536             }
3537             if (bad_address) {
3538                 len = 0;
3539             }
3540             if (len > max_len - total_len) {
3541                 len = max_len - total_len;
3542             }
3543         }
3544         vec[i].iov_len = len;
3545         total_len += len;
3546     }
3547 
3548     unlock_user(target_vec, target_addr, 0);
3549     return vec;
3550 
3551  fail:
3552     while (--i >= 0) {
3553         if (tswapal(target_vec[i].iov_len) > 0) {
3554             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3555         }
3556     }
3557     unlock_user(target_vec, target_addr, 0);
3558  fail2:
3559     g_free(vec);
3560     errno = err;
3561     return NULL;
3562 }
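
/* A concrete case of the partial-write rule above: for a guest writev()
 * with three iovecs where only the second points at unmapped memory,
 * lock_iovec() still succeeds.  The bad entry and everything after it are
 * turned into zero-length slots, so the host writev() performs a short
 * write covering the first buffer only, which is the behaviour the comment
 * in the loop is preserving.
 */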
3563 
3564 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3565                          abi_ulong count, int copy)
3566 {
3567     struct target_iovec *target_vec;
3568     int i;
3569 
3570     target_vec = lock_user(VERIFY_READ, target_addr,
3571                            count * sizeof(struct target_iovec), 1);
3572     if (target_vec) {
3573         for (i = 0; i < count; i++) {
3574             abi_ulong base = tswapal(target_vec[i].iov_base);
3575             abi_long len = tswapal(target_vec[i].iov_len);
3576             if (len < 0) {
3577                 break;
3578             }
3579             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3580         }
3581         unlock_user(target_vec, target_addr, 0);
3582     }
3583 
3584     g_free(vec);
3585 }
3586 
3587 static inline int target_to_host_sock_type(int *type)
3588 {
3589     int host_type = 0;
3590     int target_type = *type;
3591 
3592     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3593     case TARGET_SOCK_DGRAM:
3594         host_type = SOCK_DGRAM;
3595         break;
3596     case TARGET_SOCK_STREAM:
3597         host_type = SOCK_STREAM;
3598         break;
3599     default:
3600         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3601         break;
3602     }
3603     if (target_type & TARGET_SOCK_CLOEXEC) {
3604 #if defined(SOCK_CLOEXEC)
3605         host_type |= SOCK_CLOEXEC;
3606 #else
3607         return -TARGET_EINVAL;
3608 #endif
3609     }
3610     if (target_type & TARGET_SOCK_NONBLOCK) {
3611 #if defined(SOCK_NONBLOCK)
3612         host_type |= SOCK_NONBLOCK;
3613 #elif !defined(O_NONBLOCK)
3614         return -TARGET_EINVAL;
3615 #endif
3616     }
3617     *type = host_type;
3618     return 0;
3619 }
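
/* For example, a guest request of
 * TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK is
 * rewritten to SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK when the host
 * defines both flags; if the host lacks SOCK_NONBLOCK but has O_NONBLOCK,
 * the flag is simply dropped here and emulated afterwards by
 * sock_flags_fixup() below.
 */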
3620 
3621 /* Try to emulate socket type flags after socket creation.  */
3622 static int sock_flags_fixup(int fd, int target_type)
3623 {
3624 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3625     if (target_type & TARGET_SOCK_NONBLOCK) {
3626         int flags = fcntl(fd, F_GETFL);
3627         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3628             close(fd);
3629             return -TARGET_EINVAL;
3630         }
3631     }
3632 #endif
3633     return fd;
3634 }
3635 
3636 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3637                                                abi_ulong target_addr,
3638                                                socklen_t len)
3639 {
3640     struct sockaddr *addr = host_addr;
3641     struct target_sockaddr *target_saddr;
3642 
3643     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3644     if (!target_saddr) {
3645         return -TARGET_EFAULT;
3646     }
3647 
3648     memcpy(addr, target_saddr, len);
3649     addr->sa_family = tswap16(target_saddr->sa_family);
3650     /* spkt_protocol is big-endian */
3651 
3652     unlock_user(target_saddr, target_addr, 0);
3653     return 0;
3654 }
3655 
3656 static TargetFdTrans target_packet_trans = {
3657     .target_to_host_addr = packet_target_to_host_sockaddr,
3658 };
3659 
3660 #ifdef CONFIG_RTNETLINK
3661 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3662 {
3663     abi_long ret;
3664 
3665     ret = target_to_host_nlmsg_route(buf, len);
3666     if (ret < 0) {
3667         return ret;
3668     }
3669 
3670     return len;
3671 }
3672 
3673 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3674 {
3675     abi_long ret;
3676 
3677     ret = host_to_target_nlmsg_route(buf, len);
3678     if (ret < 0) {
3679         return ret;
3680     }
3681 
3682     return len;
3683 }
3684 
3685 static TargetFdTrans target_netlink_route_trans = {
3686     .target_to_host_data = netlink_route_target_to_host,
3687     .host_to_target_data = netlink_route_host_to_target,
3688 };
3689 #endif /* CONFIG_RTNETLINK */
3690 
3691 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3692 {
3693     abi_long ret;
3694 
3695     ret = target_to_host_nlmsg_audit(buf, len);
3696     if (ret < 0) {
3697         return ret;
3698     }
3699 
3700     return len;
3701 }
3702 
3703 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3704 {
3705     abi_long ret;
3706 
3707     ret = host_to_target_nlmsg_audit(buf, len);
3708     if (ret < 0) {
3709         return ret;
3710     }
3711 
3712     return len;
3713 }
3714 
3715 static TargetFdTrans target_netlink_audit_trans = {
3716     .target_to_host_data = netlink_audit_target_to_host,
3717     .host_to_target_data = netlink_audit_host_to_target,
3718 };
3719 
3720 /* do_socket() Must return target values and target errnos. */
3721 static abi_long do_socket(int domain, int type, int protocol)
3722 {
3723     int target_type = type;
3724     int ret;
3725 
3726     ret = target_to_host_sock_type(&type);
3727     if (ret) {
3728         return ret;
3729     }
3730 
3731     if (domain == PF_NETLINK && !(
3732 #ifdef CONFIG_RTNETLINK
3733          protocol == NETLINK_ROUTE ||
3734 #endif
3735          protocol == NETLINK_KOBJECT_UEVENT ||
3736          protocol == NETLINK_AUDIT)) {
3737         return -EPFNOSUPPORT;
3738     }
3739 
3740     if (domain == AF_PACKET ||
3741         (domain == AF_INET && type == SOCK_PACKET)) {
3742         protocol = tswap16(protocol);
3743     }
3744 
3745     ret = get_errno(socket(domain, type, protocol));
3746     if (ret >= 0) {
3747         ret = sock_flags_fixup(ret, target_type);
3748         if (type == SOCK_PACKET) {
3749             /* Handle the obsolete SOCK_PACKET case:
3750              * such sockets bind by device name
3751              */
3752             fd_trans_register(ret, &target_packet_trans);
3753         } else if (domain == PF_NETLINK) {
3754             switch (protocol) {
3755 #ifdef CONFIG_RTNETLINK
3756             case NETLINK_ROUTE:
3757                 fd_trans_register(ret, &target_netlink_route_trans);
3758                 break;
3759 #endif
3760             case NETLINK_KOBJECT_UEVENT:
3761                 /* nothing to do: messages are strings */
3762                 break;
3763             case NETLINK_AUDIT:
3764                 fd_trans_register(ret, &target_netlink_audit_trans);
3765                 break;
3766             default:
3767                 g_assert_not_reached();
3768             }
3769         }
3770     }
3771     return ret;
3772 }
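
/* For illustration, a guest
 *
 *     socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 * is accepted only because NETLINK_ROUTE is whitelisted above (when
 * CONFIG_RTNETLINK is set); the resulting host fd is then tagged with
 * target_netlink_route_trans so that later send/recv calls on it have
 * their netlink messages converted by the fd_trans hooks rather than here.
 */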
3773 
3774 /* do_bind() Must return target values and target errnos. */
3775 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3776                         socklen_t addrlen)
3777 {
3778     void *addr;
3779     abi_long ret;
3780 
3781     if ((int)addrlen < 0) {
3782         return -TARGET_EINVAL;
3783     }
3784 
3785     addr = alloca(addrlen+1);
3786 
3787     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3788     if (ret)
3789         return ret;
3790 
3791     return get_errno(bind(sockfd, addr, addrlen));
3792 }
3793 
3794 /* do_connect() Must return target values and target errnos. */
3795 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3796                            socklen_t addrlen)
3797 {
3798     void *addr;
3799     abi_long ret;
3800 
3801     if ((int)addrlen < 0) {
3802         return -TARGET_EINVAL;
3803     }
3804 
3805     addr = alloca(addrlen+1);
3806 
3807     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3808     if (ret)
3809         return ret;
3810 
3811     return get_errno(safe_connect(sockfd, addr, addrlen));
3812 }
3813 
3814 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3815 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3816                                       int flags, int send)
3817 {
3818     abi_long ret, len;
3819     struct msghdr msg;
3820     abi_ulong count;
3821     struct iovec *vec;
3822     abi_ulong target_vec;
3823 
3824     if (msgp->msg_name) {
3825         msg.msg_namelen = tswap32(msgp->msg_namelen);
3826         msg.msg_name = alloca(msg.msg_namelen+1);
3827         ret = target_to_host_sockaddr(fd, msg.msg_name,
3828                                       tswapal(msgp->msg_name),
3829                                       msg.msg_namelen);
3830         if (ret == -TARGET_EFAULT) {
3831             /* For connected sockets msg_name and msg_namelen must
3832              * be ignored, so returning EFAULT immediately is wrong.
3833              * Instead, pass a bad msg_name to the host kernel, and
3834              * let it decide whether to return EFAULT or not.
3835              */
3836             msg.msg_name = (void *)-1;
3837         } else if (ret) {
3838             goto out2;
3839         }
3840     } else {
3841         msg.msg_name = NULL;
3842         msg.msg_namelen = 0;
3843     }
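    /* The control buffer is sized at twice the guest's msg_controllen,
     * presumably to leave room for host cmsg headers and alignment that can
     * be larger than the target's layout once the conversion helpers repack
     * the messages.
     */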
3844     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3845     msg.msg_control = alloca(msg.msg_controllen);
3846     memset(msg.msg_control, 0, msg.msg_controllen);
3847 
3848     msg.msg_flags = tswap32(msgp->msg_flags);
3849 
3850     count = tswapal(msgp->msg_iovlen);
3851     target_vec = tswapal(msgp->msg_iov);
3852 
3853     if (count > IOV_MAX) {
3854         /* sendmsg/recvmsg return a different errno for this condition than
3855          * readv/writev, so we must catch it here before lock_iovec() does.
3856          */
3857         ret = -TARGET_EMSGSIZE;
3858         goto out2;
3859     }
3860 
3861     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3862                      target_vec, count, send);
3863     if (vec == NULL) {
3864         ret = -host_to_target_errno(errno);
3865         goto out2;
3866     }
3867     msg.msg_iovlen = count;
3868     msg.msg_iov = vec;
3869 
3870     if (send) {
3871         if (fd_trans_target_to_host_data(fd)) {
3872             void *host_msg;
3873 
3874             host_msg = g_malloc(msg.msg_iov->iov_len);
3875             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3876             ret = fd_trans_target_to_host_data(fd)(host_msg,
3877                                                    msg.msg_iov->iov_len);
3878             if (ret >= 0) {
3879                 msg.msg_iov->iov_base = host_msg;
3880                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3881             }
3882             g_free(host_msg);
3883         } else {
3884             ret = target_to_host_cmsg(&msg, msgp);
3885             if (ret == 0) {
3886                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3887             }
3888         }
3889     } else {
3890         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3891         if (!is_error(ret)) {
3892             len = ret;
3893             if (fd_trans_host_to_target_data(fd)) {
3894                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3895                                                        len);
3896             } else {
3897                 ret = host_to_target_cmsg(msgp, &msg);
3898             }
3899             if (!is_error(ret)) {
3900                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3901                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3902                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3903                                     msg.msg_name, msg.msg_namelen);
3904                     if (ret) {
3905                         goto out;
3906                     }
3907                 }
3908 
3909                 ret = len;
3910             }
3911         }
3912     }
3913 
3914 out:
3915     unlock_iovec(vec, target_vec, count, !send);
3916 out2:
3917     return ret;
3918 }
3919 
3920 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3921                                int flags, int send)
3922 {
3923     abi_long ret;
3924     struct target_msghdr *msgp;
3925 
3926     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3927                           msgp,
3928                           target_msg,
3929                           send ? 1 : 0)) {
3930         return -TARGET_EFAULT;
3931     }
3932     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3933     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3934     return ret;
3935 }
3936 
3937 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3938  * so it might not have this *mmsg-specific flag either.
3939  */
3940 #ifndef MSG_WAITFORONE
3941 #define MSG_WAITFORONE 0x10000
3942 #endif
3943 
3944 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3945                                 unsigned int vlen, unsigned int flags,
3946                                 int send)
3947 {
3948     struct target_mmsghdr *mmsgp;
3949     abi_long ret = 0;
3950     int i;
3951 
3952     if (vlen > UIO_MAXIOV) {
3953         vlen = UIO_MAXIOV;
3954     }
3955 
3956     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3957     if (!mmsgp) {
3958         return -TARGET_EFAULT;
3959     }
3960 
3961     for (i = 0; i < vlen; i++) {
3962         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3963         if (is_error(ret)) {
3964             break;
3965         }
3966         mmsgp[i].msg_len = tswap32(ret);
3967         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3968         if (flags & MSG_WAITFORONE) {
3969             flags |= MSG_DONTWAIT;
3970         }
3971     }
3972 
3973     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3974 
3975     /* Return the number of datagrams sent or received if we handled any
3976      * at all; otherwise return the error.
3977      */
3978     if (i) {
3979         return i;
3980     }
3981     return ret;
3982 }
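
/* For illustration, a guest
 *
 *     recvmmsg(fd, vec, 4, MSG_WAITFORONE, NULL);
 *
 * blocks only in the first do_sendrecvmsg_locked() call; once one datagram
 * has arrived the loop above ORs in MSG_DONTWAIT, so the remaining slots
 * are filled only if data is already queued, and the call returns the
 * number of messages actually received.
 */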
3983 
3984 /* do_accept4() Must return target values and target errnos. */
3985 static abi_long do_accept4(int fd, abi_ulong target_addr,
3986                            abi_ulong target_addrlen_addr, int flags)
3987 {
3988     socklen_t addrlen;
3989     void *addr;
3990     abi_long ret;
3991     int host_flags;
3992 
3993     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3994 
3995     if (target_addr == 0) {
3996         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3997     }
3998 
3999     /* Linux returns EINVAL if the addrlen pointer is invalid */
4000     if (get_user_u32(addrlen, target_addrlen_addr))
4001         return -TARGET_EINVAL;
4002 
4003     if ((int)addrlen < 0) {
4004         return -TARGET_EINVAL;
4005     }
4006 
4007     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4008         return -TARGET_EINVAL;
4009 
4010     addr = alloca(addrlen);
4011 
4012     ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
4013     if (!is_error(ret)) {
4014         host_to_target_sockaddr(target_addr, addr, addrlen);
4015         if (put_user_u32(addrlen, target_addrlen_addr))
4016             ret = -TARGET_EFAULT;
4017     }
4018     return ret;
4019 }
4020 
4021 /* do_getpeername() Must return target values and target errnos. */
4022 static abi_long do_getpeername(int fd, abi_ulong target_addr,
4023                                abi_ulong target_addrlen_addr)
4024 {
4025     socklen_t addrlen;
4026     void *addr;
4027     abi_long ret;
4028 
4029     if (get_user_u32(addrlen, target_addrlen_addr))
4030         return -TARGET_EFAULT;
4031 
4032     if ((int)addrlen < 0) {
4033         return -TARGET_EINVAL;
4034     }
4035 
4036     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4037         return -TARGET_EFAULT;
4038 
4039     addr = alloca(addrlen);
4040 
4041     ret = get_errno(getpeername(fd, addr, &addrlen));
4042     if (!is_error(ret)) {
4043         host_to_target_sockaddr(target_addr, addr, addrlen);
4044         if (put_user_u32(addrlen, target_addrlen_addr))
4045             ret = -TARGET_EFAULT;
4046     }
4047     return ret;
4048 }
4049 
4050 /* do_getsockname() Must return target values and target errnos. */
4051 static abi_long do_getsockname(int fd, abi_ulong target_addr,
4052                                abi_ulong target_addrlen_addr)
4053 {
4054     socklen_t addrlen;
4055     void *addr;
4056     abi_long ret;
4057 
4058     if (get_user_u32(addrlen, target_addrlen_addr))
4059         return -TARGET_EFAULT;
4060 
4061     if ((int)addrlen < 0) {
4062         return -TARGET_EINVAL;
4063     }
4064 
4065     if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
4066         return -TARGET_EFAULT;
4067 
4068     addr = alloca(addrlen);
4069 
4070     ret = get_errno(getsockname(fd, addr, &addrlen));
4071     if (!is_error(ret)) {
4072         host_to_target_sockaddr(target_addr, addr, addrlen);
4073         if (put_user_u32(addrlen, target_addrlen_addr))
4074             ret = -TARGET_EFAULT;
4075     }
4076     return ret;
4077 }
4078 
4079 /* do_socketpair() Must return target values and target errnos. */
4080 static abi_long do_socketpair(int domain, int type, int protocol,
4081                               abi_ulong target_tab_addr)
4082 {
4083     int tab[2];
4084     abi_long ret;
4085 
4086     ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }
4087 
4088     ret = get_errno(socketpair(domain, type, protocol, tab));
4089     if (!is_error(ret)) {
4090         if (put_user_s32(tab[0], target_tab_addr)
4091             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4092             ret = -TARGET_EFAULT;
4093     }
4094     return ret;
4095 }
4096 
4097 /* do_sendto() Must return target values and target errnos. */
4098 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
4099                           abi_ulong target_addr, socklen_t addrlen)
4100 {
4101     void *addr;
4102     void *host_msg;
4103     void *copy_msg = NULL;
4104     abi_long ret;
4105 
4106     if ((int)addrlen < 0) {
4107         return -TARGET_EINVAL;
4108     }
4109 
4110     host_msg = lock_user(VERIFY_READ, msg, len, 1);
4111     if (!host_msg)
4112         return -TARGET_EFAULT;
4113     if (fd_trans_target_to_host_data(fd)) {
4114         copy_msg = host_msg;
4115         host_msg = g_malloc(len);
4116         memcpy(host_msg, copy_msg, len);
4117         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
4118         if (ret < 0) {
4119             goto fail;
4120         }
4121     }
4122     if (target_addr) {
4123         addr = alloca(addrlen+1);
4124         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
4125         if (ret) {
4126             goto fail;
4127         }
4128         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
4129     } else {
4130         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
4131     }
4132 fail:
4133     if (copy_msg) {
4134         g_free(host_msg);
4135         host_msg = copy_msg;
4136     }
4137     unlock_user(host_msg, msg, 0);
4138     return ret;
4139 }
4140 
4141 /* do_recvfrom() Must return target values and target errnos. */
4142 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
4143                             abi_ulong target_addr,
4144                             abi_ulong target_addrlen)
4145 {
4146     socklen_t addrlen;
4147     void *addr;
4148     void *host_msg;
4149     abi_long ret;
4150 
4151     host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
4152     if (!host_msg)
4153         return -TARGET_EFAULT;
4154     if (target_addr) {
4155         if (get_user_u32(addrlen, target_addrlen)) {
4156             ret = -TARGET_EFAULT;
4157             goto fail;
4158         }
4159         if ((int)addrlen < 0) {
4160             ret = -TARGET_EINVAL;
4161             goto fail;
4162         }
4163         addr = alloca(addrlen);
4164         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
4165                                       addr, &addrlen));
4166     } else {
4167         addr = NULL; /* To keep compiler quiet.  */
4168         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
4169     }
4170     if (!is_error(ret)) {
4171         if (fd_trans_host_to_target_data(fd)) {
4172             ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
4173         }
4174         if (target_addr) {
4175             host_to_target_sockaddr(target_addr, addr, addrlen);
4176             if (put_user_u32(addrlen, target_addrlen)) {
4177                 ret = -TARGET_EFAULT;
4178                 goto fail;
4179             }
4180         }
4181         unlock_user(host_msg, msg, len);
4182     } else {
4183 fail:
4184         unlock_user(host_msg, msg, 0);
4185     }
4186     return ret;
4187 }
4188 
4189 #ifdef TARGET_NR_socketcall
4190 /* do_socketcall() must return target values and target errnos. */
4191 static abi_long do_socketcall(int num, abi_ulong vptr)
4192 {
4193     static const unsigned nargs[] = { /* number of arguments per operation */
4194         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
4195         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
4196         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
4197         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
4198         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
4199         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
4200         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
4201         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
4202         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
4203         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
4204         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
4205         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
4206         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
4207         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4208         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
4209         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
4210         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
4211         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
4212         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
4213         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
4214     };
4215     abi_long a[6]; /* max 6 args */
4216     unsigned i;
4217 
4218     /* check the range of the first argument num */
4219     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4220     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
4221         return -TARGET_EINVAL;
4222     }
4223     /* ensure we have space for args */
4224     if (nargs[num] > ARRAY_SIZE(a)) {
4225         return -TARGET_EINVAL;
4226     }
4227     /* collect the arguments in a[] according to nargs[] */
4228     for (i = 0; i < nargs[num]; ++i) {
4229         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
4230             return -TARGET_EFAULT;
4231         }
4232     }
4233     /* now when we have the args, invoke the appropriate underlying function */
4234     switch (num) {
4235     case TARGET_SYS_SOCKET: /* domain, type, protocol */
4236         return do_socket(a[0], a[1], a[2]);
4237     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
4238         return do_bind(a[0], a[1], a[2]);
4239     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
4240         return do_connect(a[0], a[1], a[2]);
4241     case TARGET_SYS_LISTEN: /* sockfd, backlog */
4242         return get_errno(listen(a[0], a[1]));
4243     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
4244         return do_accept4(a[0], a[1], a[2], 0);
4245     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
4246         return do_getsockname(a[0], a[1], a[2]);
4247     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
4248         return do_getpeername(a[0], a[1], a[2]);
4249     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
4250         return do_socketpair(a[0], a[1], a[2], a[3]);
4251     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
4252         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
4253     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
4254         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
4255     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
4256         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
4257     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
4258         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
4259     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
4260         return get_errno(shutdown(a[0], a[1]));
4261     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4262         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
4263     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
4264         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
4265     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
4266         return do_sendrecvmsg(a[0], a[1], a[2], 1);
4267     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
4268         return do_sendrecvmsg(a[0], a[1], a[2], 0);
4269     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
4270         return do_accept4(a[0], a[1], a[2], a[3]);
4271     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
4272         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
4273     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
4274         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
4275     default:
4276         gemu_log("Unsupported socketcall: %d\n", num);
4277         return -TARGET_EINVAL;
4278     }
4279 }
4280 #endif
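
/* For illustration, on a target that only provides the multiplexed
 * socketcall syscall, a guest connect() arrives as
 *
 *     socketcall(TARGET_SYS_CONNECT, args);
 *
 * where args points at { sockfd, addr, addrlen } in guest memory.  The
 * dispatcher above fetches nargs[TARGET_SYS_CONNECT] == 3 words with
 * get_user_ual() and hands them to do_connect(), exactly as if the target
 * had a separate connect syscall.
 */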
4281 
4282 #define N_SHM_REGIONS	32
4283 
4284 static struct shm_region {
4285     abi_ulong start;
4286     abi_ulong size;
4287     bool in_use;
4288 } shm_regions[N_SHM_REGIONS];
4289 
4290 #ifndef TARGET_SEMID64_DS
4291 /* asm-generic version of this struct */
4292 struct target_semid64_ds
4293 {
4294   struct target_ipc_perm sem_perm;
4295   abi_ulong sem_otime;
4296 #if TARGET_ABI_BITS == 32
4297   abi_ulong __unused1;
4298 #endif
4299   abi_ulong sem_ctime;
4300 #if TARGET_ABI_BITS == 32
4301   abi_ulong __unused2;
4302 #endif
4303   abi_ulong sem_nsems;
4304   abi_ulong __unused3;
4305   abi_ulong __unused4;
4306 };
4307 #endif
4308 
4309 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4310                                                abi_ulong target_addr)
4311 {
4312     struct target_ipc_perm *target_ip;
4313     struct target_semid64_ds *target_sd;
4314 
4315     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4316         return -TARGET_EFAULT;
4317     target_ip = &(target_sd->sem_perm);
4318     host_ip->__key = tswap32(target_ip->__key);
4319     host_ip->uid = tswap32(target_ip->uid);
4320     host_ip->gid = tswap32(target_ip->gid);
4321     host_ip->cuid = tswap32(target_ip->cuid);
4322     host_ip->cgid = tswap32(target_ip->cgid);
4323 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4324     host_ip->mode = tswap32(target_ip->mode);
4325 #else
4326     host_ip->mode = tswap16(target_ip->mode);
4327 #endif
4328 #if defined(TARGET_PPC)
4329     host_ip->__seq = tswap32(target_ip->__seq);
4330 #else
4331     host_ip->__seq = tswap16(target_ip->__seq);
4332 #endif
4333     unlock_user_struct(target_sd, target_addr, 0);
4334     return 0;
4335 }
4336 
4337 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4338                                                struct ipc_perm *host_ip)
4339 {
4340     struct target_ipc_perm *target_ip;
4341     struct target_semid64_ds *target_sd;
4342 
4343     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4344         return -TARGET_EFAULT;
4345     target_ip = &(target_sd->sem_perm);
4346     target_ip->__key = tswap32(host_ip->__key);
4347     target_ip->uid = tswap32(host_ip->uid);
4348     target_ip->gid = tswap32(host_ip->gid);
4349     target_ip->cuid = tswap32(host_ip->cuid);
4350     target_ip->cgid = tswap32(host_ip->cgid);
4351 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4352     target_ip->mode = tswap32(host_ip->mode);
4353 #else
4354     target_ip->mode = tswap16(host_ip->mode);
4355 #endif
4356 #if defined(TARGET_PPC)
4357     target_ip->__seq = tswap32(host_ip->__seq);
4358 #else
4359     target_ip->__seq = tswap16(host_ip->__seq);
4360 #endif
4361     unlock_user_struct(target_sd, target_addr, 1);
4362     return 0;
4363 }
4364 
4365 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4366                                                abi_ulong target_addr)
4367 {
4368     struct target_semid64_ds *target_sd;
4369 
4370     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4371         return -TARGET_EFAULT;
4372     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4373         return -TARGET_EFAULT;
4374     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4375     host_sd->sem_otime = tswapal(target_sd->sem_otime);
4376     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4377     unlock_user_struct(target_sd, target_addr, 0);
4378     return 0;
4379 }
4380 
4381 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4382                                                struct semid_ds *host_sd)
4383 {
4384     struct target_semid64_ds *target_sd;
4385 
4386     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4387         return -TARGET_EFAULT;
4388     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4389         return -TARGET_EFAULT;
4390     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4391     target_sd->sem_otime = tswapal(host_sd->sem_otime);
4392     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4393     unlock_user_struct(target_sd, target_addr, 1);
4394     return 0;
4395 }
4396 
4397 struct target_seminfo {
4398     int semmap;
4399     int semmni;
4400     int semmns;
4401     int semmnu;
4402     int semmsl;
4403     int semopm;
4404     int semume;
4405     int semusz;
4406     int semvmx;
4407     int semaem;
4408 };
4409 
4410 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4411                                               struct seminfo *host_seminfo)
4412 {
4413     struct target_seminfo *target_seminfo;
4414     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4415         return -TARGET_EFAULT;
4416     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4417     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4418     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4419     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4420     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4421     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4422     __put_user(host_seminfo->semume, &target_seminfo->semume);
4423     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4424     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4425     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4426     unlock_user_struct(target_seminfo, target_addr, 1);
4427     return 0;
4428 }
4429 
4430 union semun {
4431 	int val;
4432 	struct semid_ds *buf;
4433 	unsigned short *array;
4434 	struct seminfo *__buf;
4435 };
4436 
4437 union target_semun {
4438 	int val;
4439 	abi_ulong buf;
4440 	abi_ulong array;
4441 	abi_ulong __buf;
4442 };
4443 
4444 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4445                                                abi_ulong target_addr)
4446 {
4447     int nsems;
4448     unsigned short *array;
4449     union semun semun;
4450     struct semid_ds semid_ds;
4451     int i, ret;
4452 
4453     semun.buf = &semid_ds;
4454 
4455     ret = semctl(semid, 0, IPC_STAT, semun);
4456     if (ret == -1)
4457         return get_errno(ret);
4458 
4459     nsems = semid_ds.sem_nsems;
4460 
4461     *host_array = g_try_new(unsigned short, nsems);
4462     if (!*host_array) {
4463         return -TARGET_ENOMEM;
4464     }
4465     array = lock_user(VERIFY_READ, target_addr,
4466                       nsems*sizeof(unsigned short), 1);
4467     if (!array) {
4468         g_free(*host_array);
4469         return -TARGET_EFAULT;
4470     }
4471 
4472     for(i=0; i<nsems; i++) {
4473         __get_user((*host_array)[i], &array[i]);
4474     }
4475     unlock_user(array, target_addr, 0);
4476 
4477     return 0;
4478 }
4479 
4480 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4481                                                unsigned short **host_array)
4482 {
4483     int nsems;
4484     unsigned short *array;
4485     union semun semun;
4486     struct semid_ds semid_ds;
4487     int i, ret;
4488 
4489     semun.buf = &semid_ds;
4490 
4491     ret = semctl(semid, 0, IPC_STAT, semun);
4492     if (ret == -1)
4493         return get_errno(ret);
4494 
4495     nsems = semid_ds.sem_nsems;
4496 
4497     array = lock_user(VERIFY_WRITE, target_addr,
4498                       nsems*sizeof(unsigned short), 0);
4499     if (!array)
4500         return -TARGET_EFAULT;
4501 
4502     for(i=0; i<nsems; i++) {
4503         __put_user((*host_array)[i], &array[i]);
4504     }
4505     g_free(*host_array);
4506     unlock_user(array, target_addr, 1);
4507 
4508     return 0;
4509 }
4510 
4511 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4512                                  abi_ulong target_arg)
4513 {
4514     union target_semun target_su = { .buf = target_arg };
4515     union semun arg;
4516     struct semid_ds dsarg;
4517     unsigned short *array = NULL;
4518     struct seminfo seminfo;
4519     abi_long ret = -TARGET_EINVAL;
4520     abi_long err;
4521     cmd &= 0xff;
4522 
4523     switch( cmd ) {
4524 	case GETVAL:
4525 	case SETVAL:
4526             /* In 64-bit cross-endian situations, we will erroneously pick up
4527              * the wrong half of the union for the "val" element.  To rectify
4528              * this, the entire 8-byte structure is byteswapped, followed by
4529              * a swap of the 4-byte val field.  In other cases, the data is
4530              * already in proper host byte order. */
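            /* Worked example, assuming a 64-bit big-endian guest on a
             * little-endian host: the guest's 32-bit "val" ends up in the
             * numerically high half of the 8-byte argument, so reading
             * target_su.val directly would pick up the junk half.  tswapal()
             * byte-reverses the whole union, which moves the payload bytes
             * into the val field, and tswap32() then restores that field's
             * own byte order, recovering the value the guest passed.
             */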
4531 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4532 		target_su.buf = tswapal(target_su.buf);
4533 		arg.val = tswap32(target_su.val);
4534 	    } else {
4535 		arg.val = target_su.val;
4536 	    }
4537             ret = get_errno(semctl(semid, semnum, cmd, arg));
4538             break;
4539 	case GETALL:
4540 	case SETALL:
4541             err = target_to_host_semarray(semid, &array, target_su.array);
4542             if (err)
4543                 return err;
4544             arg.array = array;
4545             ret = get_errno(semctl(semid, semnum, cmd, arg));
4546             err = host_to_target_semarray(semid, target_su.array, &array);
4547             if (err)
4548                 return err;
4549             break;
4550 	case IPC_STAT:
4551 	case IPC_SET:
4552 	case SEM_STAT:
4553             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4554             if (err)
4555                 return err;
4556             arg.buf = &dsarg;
4557             ret = get_errno(semctl(semid, semnum, cmd, arg));
4558             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4559             if (err)
4560                 return err;
4561             break;
4562 	case IPC_INFO:
4563 	case SEM_INFO:
4564             arg.__buf = &seminfo;
4565             ret = get_errno(semctl(semid, semnum, cmd, arg));
4566             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4567             if (err)
4568                 return err;
4569             break;
4570 	case IPC_RMID:
4571 	case GETPID:
4572 	case GETNCNT:
4573 	case GETZCNT:
4574             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4575             break;
4576     }
4577 
4578     return ret;
4579 }
4580 
4581 struct target_sembuf {
4582     unsigned short sem_num;
4583     short sem_op;
4584     short sem_flg;
4585 };
4586 
4587 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4588                                              abi_ulong target_addr,
4589                                              unsigned nsops)
4590 {
4591     struct target_sembuf *target_sembuf;
4592     int i;
4593 
4594     target_sembuf = lock_user(VERIFY_READ, target_addr,
4595                               nsops*sizeof(struct target_sembuf), 1);
4596     if (!target_sembuf)
4597         return -TARGET_EFAULT;
4598 
4599     for(i=0; i<nsops; i++) {
4600         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4601         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4602         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4603     }
4604 
4605     unlock_user(target_sembuf, target_addr, 0);
4606 
4607     return 0;
4608 }
4609 
4610 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4611 {
4612     struct sembuf sops[nsops];
4613 
4614     if (target_to_host_sembuf(sops, ptr, nsops))
4615         return -TARGET_EFAULT;
4616 
4617     return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4618 }
4619 
4620 struct target_msqid_ds
4621 {
4622     struct target_ipc_perm msg_perm;
4623     abi_ulong msg_stime;
4624 #if TARGET_ABI_BITS == 32
4625     abi_ulong __unused1;
4626 #endif
4627     abi_ulong msg_rtime;
4628 #if TARGET_ABI_BITS == 32
4629     abi_ulong __unused2;
4630 #endif
4631     abi_ulong msg_ctime;
4632 #if TARGET_ABI_BITS == 32
4633     abi_ulong __unused3;
4634 #endif
4635     abi_ulong __msg_cbytes;
4636     abi_ulong msg_qnum;
4637     abi_ulong msg_qbytes;
4638     abi_ulong msg_lspid;
4639     abi_ulong msg_lrpid;
4640     abi_ulong __unused4;
4641     abi_ulong __unused5;
4642 };
4643 
4644 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4645                                                abi_ulong target_addr)
4646 {
4647     struct target_msqid_ds *target_md;
4648 
4649     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4650         return -TARGET_EFAULT;
4651     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4652         return -TARGET_EFAULT;
4653     host_md->msg_stime = tswapal(target_md->msg_stime);
4654     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4655     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4656     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4657     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4658     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4659     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4660     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4661     unlock_user_struct(target_md, target_addr, 0);
4662     return 0;
4663 }
4664 
4665 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4666                                                struct msqid_ds *host_md)
4667 {
4668     struct target_msqid_ds *target_md;
4669 
4670     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4671         return -TARGET_EFAULT;
4672     if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
4673         return -TARGET_EFAULT;
4674     target_md->msg_stime = tswapal(host_md->msg_stime);
4675     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4676     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4677     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4678     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4679     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4680     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4681     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4682     unlock_user_struct(target_md, target_addr, 1);
4683     return 0;
4684 }
4685 
4686 struct target_msginfo {
4687     int msgpool;
4688     int msgmap;
4689     int msgmax;
4690     int msgmnb;
4691     int msgmni;
4692     int msgssz;
4693     int msgtql;
4694     unsigned short int msgseg;
4695 };
4696 
4697 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4698                                               struct msginfo *host_msginfo)
4699 {
4700     struct target_msginfo *target_msginfo;
4701     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4702         return -TARGET_EFAULT;
4703     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4704     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4705     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4706     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4707     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4708     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4709     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4710     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4711     unlock_user_struct(target_msginfo, target_addr, 1);
4712     return 0;
4713 }
4714 
4715 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4716 {
4717     struct msqid_ds dsarg;
4718     struct msginfo msginfo;
4719     abi_long ret = -TARGET_EINVAL;
4720 
4721     cmd &= 0xff;
4722 
4723     switch (cmd) {
4724     case IPC_STAT:
4725     case IPC_SET:
4726     case MSG_STAT:
4727         if (target_to_host_msqid_ds(&dsarg, ptr))
4728             return -TARGET_EFAULT;
4729         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4730         if (host_to_target_msqid_ds(ptr, &dsarg))
4731             return -TARGET_EFAULT;
4732         break;
4733     case IPC_RMID:
4734         ret = get_errno(msgctl(msgid, cmd, NULL));
4735         break;
4736     case IPC_INFO:
4737     case MSG_INFO:
4738         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4739         if (host_to_target_msginfo(ptr, &msginfo))
4740             return -TARGET_EFAULT;
4741         break;
4742     }
4743 
4744     return ret;
4745 }
4746 
4747 struct target_msgbuf {
4748     abi_long mtype;
4749     char mtext[1];
4750 };
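/* The host struct msgbuf used below has the same shape: a long mtype
 * followed by the message text.  That is why do_msgsnd()/do_msgrcv()
 * allocate msgsz + sizeof(long) bytes for the host copy -- sizeof(long)
 * covers the mtype header and msgsz covers the payload.
 */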
4751 
4752 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4753                                  ssize_t msgsz, int msgflg)
4754 {
4755     struct target_msgbuf *target_mb;
4756     struct msgbuf *host_mb;
4757     abi_long ret = 0;
4758 
4759     if (msgsz < 0) {
4760         return -TARGET_EINVAL;
4761     }
4762 
4763     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4764         return -TARGET_EFAULT;
4765     host_mb = g_try_malloc(msgsz + sizeof(long));
4766     if (!host_mb) {
4767         unlock_user_struct(target_mb, msgp, 0);
4768         return -TARGET_ENOMEM;
4769     }
4770     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4771     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4772     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4773     g_free(host_mb);
4774     unlock_user_struct(target_mb, msgp, 0);
4775 
4776     return ret;
4777 }
4778 
4779 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4780                                  ssize_t msgsz, abi_long msgtyp,
4781                                  int msgflg)
4782 {
4783     struct target_msgbuf *target_mb;
4784     char *target_mtext;
4785     struct msgbuf *host_mb;
4786     abi_long ret = 0;
4787 
4788     if (msgsz < 0) {
4789         return -TARGET_EINVAL;
4790     }
4791 
4792     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4793         return -TARGET_EFAULT;
4794 
4795     host_mb = g_try_malloc(msgsz + sizeof(long));
4796     if (!host_mb) {
4797         ret = -TARGET_ENOMEM;
4798         goto end;
4799     }
4800     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4801 
4802     if (ret > 0) {
4803         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4804         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4805         if (!target_mtext) {
4806             ret = -TARGET_EFAULT;
4807             goto end;
4808         }
4809         memcpy(target_mb->mtext, host_mb->mtext, ret);
4810         unlock_user(target_mtext, target_mtext_addr, ret);
4811     }
4812 
4813     target_mb->mtype = tswapal(host_mb->mtype);
4814 
4815 end:
4816     if (target_mb)
4817         unlock_user_struct(target_mb, msgp, 1);
4818     g_free(host_mb);
4819     return ret;
4820 }
4821 
4822 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4823                                                abi_ulong target_addr)
4824 {
4825     struct target_shmid_ds *target_sd;
4826 
4827     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4828         return -TARGET_EFAULT;
4829     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4830         return -TARGET_EFAULT;
4831     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4832     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4833     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4834     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4835     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4836     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4837     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4838     unlock_user_struct(target_sd, target_addr, 0);
4839     return 0;
4840 }
4841 
4842 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4843                                                struct shmid_ds *host_sd)
4844 {
4845     struct target_shmid_ds *target_sd;
4846 
4847     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4848         return -TARGET_EFAULT;
4849     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4850         return -TARGET_EFAULT;
4851     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4852     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4853     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4854     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4855     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4856     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4857     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4858     unlock_user_struct(target_sd, target_addr, 1);
4859     return 0;
4860 }
4861 
4862 struct  target_shminfo {
4863     abi_ulong shmmax;
4864     abi_ulong shmmin;
4865     abi_ulong shmmni;
4866     abi_ulong shmseg;
4867     abi_ulong shmall;
4868 };
4869 
4870 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4871                                               struct shminfo *host_shminfo)
4872 {
4873     struct target_shminfo *target_shminfo;
4874     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4875         return -TARGET_EFAULT;
4876     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4877     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4878     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4879     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4880     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4881     unlock_user_struct(target_shminfo, target_addr, 1);
4882     return 0;
4883 }
4884 
4885 struct target_shm_info {
4886     int used_ids;
4887     abi_ulong shm_tot;
4888     abi_ulong shm_rss;
4889     abi_ulong shm_swp;
4890     abi_ulong swap_attempts;
4891     abi_ulong swap_successes;
4892 };
4893 
4894 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4895                                                struct shm_info *host_shm_info)
4896 {
4897     struct target_shm_info *target_shm_info;
4898     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4899         return -TARGET_EFAULT;
4900     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4901     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4902     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4903     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4904     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4905     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4906     unlock_user_struct(target_shm_info, target_addr, 1);
4907     return 0;
4908 }
4909 
4910 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4911 {
4912     struct shmid_ds dsarg;
4913     struct shminfo shminfo;
4914     struct shm_info shm_info;
4915     abi_long ret = -TARGET_EINVAL;
4916 
4917     cmd &= 0xff;
4918 
4919     switch(cmd) {
4920     case IPC_STAT:
4921     case IPC_SET:
4922     case SHM_STAT:
4923         if (target_to_host_shmid_ds(&dsarg, buf))
4924             return -TARGET_EFAULT;
4925         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4926         if (host_to_target_shmid_ds(buf, &dsarg))
4927             return -TARGET_EFAULT;
4928         break;
4929     case IPC_INFO:
4930         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4931         if (host_to_target_shminfo(buf, &shminfo))
4932             return -TARGET_EFAULT;
4933         break;
4934     case SHM_INFO:
4935         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4936         if (host_to_target_shm_info(buf, &shm_info))
4937             return -TARGET_EFAULT;
4938         break;
4939     case IPC_RMID:
4940     case SHM_LOCK:
4941     case SHM_UNLOCK:
4942         ret = get_errno(shmctl(shmid, cmd, NULL));
4943         break;
4944     }
4945 
4946     return ret;
4947 }
4948 
4949 #ifndef TARGET_FORCE_SHMLBA
4950 /* For most architectures, SHMLBA is the same as the page size;
4951  * some architectures have larger values, in which case they should
4952  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4953  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4954  * and defining its own value for SHMLBA.
4955  *
4956  * The kernel also permits SHMLBA to be set by the architecture to a
4957  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4958  * this means that addresses are rounded to the large size if
4959  * SHM_RND is set but addresses not aligned to that size are not rejected
4960  * as long as they are at least page-aligned. Since the only architecture
4961  * which uses this is ia64 this code doesn't provide for that oddity.
4962  */
4963 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4964 {
4965     return TARGET_PAGE_SIZE;
4966 }
4967 #endif
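/* A target that does need a larger SHMLBA defines TARGET_FORCE_SHMLBA in its
 * target-specific headers and provides its own target_shmlba(), as described
 * above.  Purely illustrative sketch (the multiplier is made up):
 *
 *   #define TARGET_FORCE_SHMLBA 1
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;
 *   }
 */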
4968 
4969 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4970                                  int shmid, abi_ulong shmaddr, int shmflg)
4971 {
4972     abi_long raddr;
4973     void *host_raddr;
4974     struct shmid_ds shm_info;
4975     int i, ret;
4976     abi_ulong shmlba;
4977 
4978     /* find out the length of the shared memory segment */
4979     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4980     if (is_error(ret)) {
4981         /* can't get length, bail out */
4982         return ret;
4983     }
4984 
4985     shmlba = target_shmlba(cpu_env);
4986 
4987     if (shmaddr & (shmlba - 1)) {
4988         if (shmflg & SHM_RND) {
4989             shmaddr &= ~(shmlba - 1);
4990         } else {
4991             return -TARGET_EINVAL;
4992         }
4993     }
4994     if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4995         return -TARGET_EINVAL;
4996     }
4997 
4998     mmap_lock();
4999 
5000     if (shmaddr)
5001         host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
5002     else {
5003         abi_ulong mmap_start;
5004 
5005         mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
5006 
5007         if (mmap_start == -1) {
5008             errno = ENOMEM;
5009             host_raddr = (void *)-1;
5010         } else
5011             host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
5012     }
5013 
5014     if (host_raddr == (void *)-1) {
5015         mmap_unlock();
5016         return get_errno((long)host_raddr);
5017     }
5018     raddr = h2g((unsigned long)host_raddr);
5019 
5020     page_set_flags(raddr, raddr + shm_info.shm_segsz,
5021                    PAGE_VALID | PAGE_READ |
5022                    ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
5023 
5024     for (i = 0; i < N_SHM_REGIONS; i++) {
5025         if (!shm_regions[i].in_use) {
5026             shm_regions[i].in_use = true;
5027             shm_regions[i].start = raddr;
5028             shm_regions[i].size = shm_info.shm_segsz;
5029             break;
5030         }
5031     }
5032 
5033     mmap_unlock();
5034     return raddr;
5035 
5036 }
5037 
5038 static inline abi_long do_shmdt(abi_ulong shmaddr)
5039 {
5040     int i;
5041     abi_long rv;
5042 
5043     mmap_lock();
5044 
5045     for (i = 0; i < N_SHM_REGIONS; ++i) {
5046         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
5047             shm_regions[i].in_use = false;
5048             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
5049             break;
5050         }
5051     }
5052     rv = get_errno(shmdt(g2h(shmaddr)));
5053 
5054     mmap_unlock();
5055 
5056     return rv;
5057 }
5058 
5059 #ifdef TARGET_NR_ipc
5060 /* ??? This only works with linear mappings.  */
5061 /* do_ipc() must return target values and target errnos. */
5062 static abi_long do_ipc(CPUArchState *cpu_env,
5063                        unsigned int call, abi_long first,
5064                        abi_long second, abi_long third,
5065                        abi_long ptr, abi_long fifth)
5066 {
5067     int version;
5068     abi_long ret = 0;
5069 
5070     version = call >> 16;
5071     call &= 0xffff;
5072 
5073     switch (call) {
5074     case IPCOP_semop:
5075         ret = do_semop(first, ptr, second);
5076         break;
5077 
5078     case IPCOP_semget:
5079         ret = get_errno(semget(first, second, third));
5080         break;
5081 
5082     case IPCOP_semctl: {
5083         /* The semun argument to semctl is passed by value, so dereference the
5084          * ptr argument. */
5085         abi_ulong atptr;
5086         get_user_ual(atptr, ptr);
5087         ret = do_semctl(first, second, third, atptr);
5088         break;
5089     }
5090 
5091     case IPCOP_msgget:
5092         ret = get_errno(msgget(first, second));
5093         break;
5094 
5095     case IPCOP_msgsnd:
5096         ret = do_msgsnd(first, ptr, second, third);
5097         break;
5098 
5099     case IPCOP_msgctl:
5100         ret = do_msgctl(first, second, ptr);
5101         break;
5102 
5103     case IPCOP_msgrcv:
5104         switch (version) {
5105         case 0:
5106             {
5107                 struct target_ipc_kludge {
5108                     abi_long msgp;
5109                     abi_long msgtyp;
5110                 } *tmp;
5111 
5112                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
5113                     ret = -TARGET_EFAULT;
5114                     break;
5115                 }
5116 
5117                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
5118 
5119                 unlock_user_struct(tmp, ptr, 0);
5120                 break;
5121             }
5122         default:
5123             ret = do_msgrcv(first, ptr, second, fifth, third);
5124         }
5125         break;
5126 
5127     case IPCOP_shmat:
5128         switch (version) {
5129         default:
5130         {
5131             abi_ulong raddr;
5132             raddr = do_shmat(cpu_env, first, ptr, second);
5133             if (is_error(raddr))
5134                 return get_errno(raddr);
5135             if (put_user_ual(raddr, third))
5136                 return -TARGET_EFAULT;
5137             break;
5138         }
5139         case 1:
5140             ret = -TARGET_EINVAL;
5141             break;
5142         }
5143         break;
5144     case IPCOP_shmdt:
5145         ret = do_shmdt(ptr);
5146         break;
5147 
5148     case IPCOP_shmget:
5149         /* IPC_* flag values are the same on all Linux platforms */
5150         ret = get_errno(shmget(first, second, third));
5151         break;
5152 
5153     /* IPC_* and SHM_* command values are the same on all Linux platforms */
5154     case IPCOP_shmctl:
5155         ret = do_shmctl(first, second, ptr);
5156         break;
5157     default:
5158         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
5159         ret = -TARGET_ENOSYS;
5160         break;
5161     }
5162     return ret;
5163 }
5164 #endif
5165 
5166 /* kernel structure types definitions */
5167 
5168 #define STRUCT(name, ...) STRUCT_ ## name,
5169 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5170 enum {
5171 #include "syscall_types.h"
5172 STRUCT_MAX
5173 };
5174 #undef STRUCT
5175 #undef STRUCT_SPECIAL
5176 
5177 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
5178 #define STRUCT_SPECIAL(name)
5179 #include "syscall_types.h"
5180 #undef STRUCT
5181 #undef STRUCT_SPECIAL
5182 
5183 typedef struct IOCTLEntry IOCTLEntry;
5184 
5185 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
5186                              int fd, int cmd, abi_long arg);
5187 
5188 struct IOCTLEntry {
5189     int target_cmd;
5190     unsigned int host_cmd;
5191     const char *name;
5192     int access;
5193     do_ioctl_fn *do_ioctl;
5194     const argtype arg_type[5];
5195 };
5196 
5197 #define IOC_R 0x0001
5198 #define IOC_W 0x0002
5199 #define IOC_RW (IOC_R | IOC_W)
5200 
5201 #define MAX_STRUCT_SIZE 4096
5202 
5203 #ifdef CONFIG_FIEMAP
5204 /* So fiemap access checks don't overflow on 32 bit systems.
5205  * This is very slightly smaller than the limit imposed by
5206  * the underlying kernel.
5207  */
5208 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
5209                             / sizeof(struct fiemap_extent))
5210 
5211 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
5212                                        int fd, int cmd, abi_long arg)
5213 {
5214     /* The parameter for this ioctl is a struct fiemap followed
5215      * by an array of struct fiemap_extent whose size is set
5216      * in fiemap->fm_extent_count. The array is filled in by the
5217      * ioctl.
5218      */
5219     int target_size_in, target_size_out;
5220     struct fiemap *fm;
5221     const argtype *arg_type = ie->arg_type;
5222     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
5223     void *argptr, *p;
5224     abi_long ret;
5225     int i, extent_size = thunk_type_size(extent_arg_type, 0);
5226     uint32_t outbufsz;
5227     int free_fm = 0;
5228 
5229     assert(arg_type[0] == TYPE_PTR);
5230     assert(ie->access == IOC_RW);
5231     arg_type++;
5232     target_size_in = thunk_type_size(arg_type, 0);
5233     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
5234     if (!argptr) {
5235         return -TARGET_EFAULT;
5236     }
5237     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5238     unlock_user(argptr, arg, 0);
5239     fm = (struct fiemap *)buf_temp;
5240     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
5241         return -TARGET_EINVAL;
5242     }
5243 
5244     outbufsz = sizeof (*fm) +
5245         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
5246 
5247     if (outbufsz > MAX_STRUCT_SIZE) {
5248         /* We can't fit all the extents into the fixed size buffer.
5249          * Allocate one that is large enough and use it instead.
5250          */
5251         fm = g_try_malloc(outbufsz);
5252         if (!fm) {
5253             return -TARGET_ENOMEM;
5254         }
5255         memcpy(fm, buf_temp, sizeof(struct fiemap));
5256         free_fm = 1;
5257     }
5258     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
5259     if (!is_error(ret)) {
5260         target_size_out = target_size_in;
5261         /* An extent_count of 0 means we were only counting the extents
5262          * so there are no structs to copy
5263          */
5264         if (fm->fm_extent_count != 0) {
5265             target_size_out += fm->fm_mapped_extents * extent_size;
5266         }
5267         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
5268         if (!argptr) {
5269             ret = -TARGET_EFAULT;
5270         } else {
5271             /* Convert the struct fiemap */
5272             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
5273             if (fm->fm_extent_count != 0) {
5274                 p = argptr + target_size_in;
5275                 /* ...and then all the struct fiemap_extents */
5276                 for (i = 0; i < fm->fm_mapped_extents; i++) {
5277                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
5278                                   THUNK_TARGET);
5279                     p += extent_size;
5280                 }
5281             }
5282             unlock_user(argptr, arg, target_size_out);
5283         }
5284     }
5285     if (free_fm) {
5286         g_free(fm);
5287     }
5288     return ret;
5289 }
5290 #endif
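/* For reference, the guest-side argument that the handler above unpacks is
 * the usual FS_IOC_FIEMAP layout: a struct fiemap header immediately followed
 * by fm_extent_count struct fiemap_extent slots that the kernel fills in
 * (fm_mapped_extents reports how many were actually used).  Caller-side
 * sketch, illustrative only:
 *
 *   size_t sz = sizeof(struct fiemap) + n * sizeof(struct fiemap_extent);
 *   struct fiemap *fm = calloc(1, sz);
 *   fm->fm_length = FIEMAP_MAX_OFFSET;   // map the whole file from offset 0
 *   fm->fm_extent_count = n;
 *   ioctl(fd, FS_IOC_FIEMAP, fm);        // extents land in fm->fm_extents[]
 */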
5291 
5292 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5293                                 int fd, int cmd, abi_long arg)
5294 {
5295     const argtype *arg_type = ie->arg_type;
5296     int target_size;
5297     void *argptr;
5298     int ret;
5299     struct ifconf *host_ifconf;
5300     uint32_t outbufsz;
5301     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5302     int target_ifreq_size;
5303     int nb_ifreq;
5304     int free_buf = 0;
5305     int i;
5306     int target_ifc_len;
5307     abi_long target_ifc_buf;
5308     int host_ifc_len;
5309     char *host_ifc_buf;
5310 
5311     assert(arg_type[0] == TYPE_PTR);
5312     assert(ie->access == IOC_RW);
5313 
5314     arg_type++;
5315     target_size = thunk_type_size(arg_type, 0);
5316 
5317     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5318     if (!argptr)
5319         return -TARGET_EFAULT;
5320     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5321     unlock_user(argptr, arg, 0);
5322 
5323     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5324     target_ifc_len = host_ifconf->ifc_len;
5325     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5326 
5327     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5328     nb_ifreq = target_ifc_len / target_ifreq_size;
5329     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5330 
5331     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5332     if (outbufsz > MAX_STRUCT_SIZE) {
5333         /* We can't fit all the ifreq entries into the fixed size buffer.
5334          * Allocate one that is large enough and use it instead.
5335          */
5336         host_ifconf = malloc(outbufsz);
5337         if (!host_ifconf) {
5338             return -TARGET_ENOMEM;
5339         }
5340         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5341         free_buf = 1;
5342     }
5343     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5344 
5345     host_ifconf->ifc_len = host_ifc_len;
5346     host_ifconf->ifc_buf = host_ifc_buf;
5347 
5348     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5349     if (!is_error(ret)) {
5350         /* convert host ifc_len to target ifc_len */
5351 
5352         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5353         target_ifc_len = nb_ifreq * target_ifreq_size;
5354         host_ifconf->ifc_len = target_ifc_len;
5355 
5356         /* restore target ifc_buf */
5357 
5358         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5359 
5360         /* copy struct ifconf to target user */
5361 
5362         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5363         if (!argptr)
5364             return -TARGET_EFAULT;
5365         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5366         unlock_user(argptr, arg, target_size);
5367 
5368         /* copy ifreq[] to target user */
5369 
5370         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5371         for (i = 0; i < nb_ifreq ; i++) {
5372             thunk_convert(argptr + i * target_ifreq_size,
5373                           host_ifc_buf + i * sizeof(struct ifreq),
5374                           ifreq_arg_type, THUNK_TARGET);
5375         }
5376         unlock_user(argptr, target_ifc_buf, target_ifc_len);
5377     }
5378 
5379     if (free_buf) {
5380         free(host_ifconf);
5381     }
5382 
5383     return ret;
5384 }
5385 
5386 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5387                             int cmd, abi_long arg)
5388 {
5389     void *argptr;
5390     struct dm_ioctl *host_dm;
5391     abi_long guest_data;
5392     uint32_t guest_data_size;
5393     int target_size;
5394     const argtype *arg_type = ie->arg_type;
5395     abi_long ret;
5396     void *big_buf = NULL;
5397     char *host_data;
5398 
5399     arg_type++;
5400     target_size = thunk_type_size(arg_type, 0);
5401     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402     if (!argptr) {
5403         ret = -TARGET_EFAULT;
5404         goto out;
5405     }
5406     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5407     unlock_user(argptr, arg, 0);
5408 
5409     /* buf_temp is too small, so fetch things into a bigger buffer */
5410     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5411     memcpy(big_buf, buf_temp, target_size);
5412     buf_temp = big_buf;
5413     host_dm = big_buf;
5414 
5415     guest_data = arg + host_dm->data_start;
5416     if ((guest_data - arg) < 0) {
5417         ret = -TARGET_EINVAL;
5418         goto out;
5419     }
5420     guest_data_size = host_dm->data_size - host_dm->data_start;
5421     host_data = (char*)host_dm + host_dm->data_start;
5422 
5423     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5424     if (!argptr) {
5425         ret = -TARGET_EFAULT;
5426         goto out;
5427     }
5428 
5429     switch (ie->host_cmd) {
5430     case DM_REMOVE_ALL:
5431     case DM_LIST_DEVICES:
5432     case DM_DEV_CREATE:
5433     case DM_DEV_REMOVE:
5434     case DM_DEV_SUSPEND:
5435     case DM_DEV_STATUS:
5436     case DM_DEV_WAIT:
5437     case DM_TABLE_STATUS:
5438     case DM_TABLE_CLEAR:
5439     case DM_TABLE_DEPS:
5440     case DM_LIST_VERSIONS:
5441         /* no input data */
5442         break;
5443     case DM_DEV_RENAME:
5444     case DM_DEV_SET_GEOMETRY:
5445         /* data contains only strings */
5446         memcpy(host_data, argptr, guest_data_size);
5447         break;
5448     case DM_TARGET_MSG:
5449         memcpy(host_data, argptr, guest_data_size);
5450         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5451         break;
5452     case DM_TABLE_LOAD:
5453     {
5454         void *gspec = argptr;
5455         void *cur_data = host_data;
5456         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5457         int spec_size = thunk_type_size(arg_type, 0);
5458         int i;
5459 
5460         for (i = 0; i < host_dm->target_count; i++) {
5461             struct dm_target_spec *spec = cur_data;
5462             uint32_t next;
5463             int slen;
5464 
5465             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5466             slen = strlen((char*)gspec + spec_size) + 1;
5467             next = spec->next;
5468             spec->next = sizeof(*spec) + slen;
5469             strcpy((char*)&spec[1], gspec + spec_size);
5470             gspec += next;
5471             cur_data += spec->next;
5472         }
5473         break;
5474     }
5475     default:
5476         ret = -TARGET_EINVAL;
5477         unlock_user(argptr, guest_data, 0);
5478         goto out;
5479     }
5480     unlock_user(argptr, guest_data, 0);
5481 
5482     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5483     if (!is_error(ret)) {
5484         guest_data = arg + host_dm->data_start;
5485         guest_data_size = host_dm->data_size - host_dm->data_start;
5486         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5487         switch (ie->host_cmd) {
5488         case DM_REMOVE_ALL:
5489         case DM_DEV_CREATE:
5490         case DM_DEV_REMOVE:
5491         case DM_DEV_RENAME:
5492         case DM_DEV_SUSPEND:
5493         case DM_DEV_STATUS:
5494         case DM_TABLE_LOAD:
5495         case DM_TABLE_CLEAR:
5496         case DM_TARGET_MSG:
5497         case DM_DEV_SET_GEOMETRY:
5498             /* no return data */
5499             break;
5500         case DM_LIST_DEVICES:
5501         {
5502             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5503             uint32_t remaining_data = guest_data_size;
5504             void *cur_data = argptr;
5505             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5506             int nl_size = 12; /* can't use thunk_size due to alignment */
5507 
5508             while (1) {
5509                 uint32_t next = nl->next;
5510                 if (next) {
5511                     nl->next = nl_size + (strlen(nl->name) + 1);
5512                 }
5513                 if (remaining_data < nl->next) {
5514                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5515                     break;
5516                 }
5517                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5518                 strcpy(cur_data + nl_size, nl->name);
5519                 cur_data += nl->next;
5520                 remaining_data -= nl->next;
5521                 if (!next) {
5522                     break;
5523                 }
5524                 nl = (void*)nl + next;
5525             }
5526             break;
5527         }
5528         case DM_DEV_WAIT:
5529         case DM_TABLE_STATUS:
5530         {
5531             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5532             void *cur_data = argptr;
5533             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5534             int spec_size = thunk_type_size(arg_type, 0);
5535             int i;
5536 
5537             for (i = 0; i < host_dm->target_count; i++) {
5538                 uint32_t next = spec->next;
5539                 int slen = strlen((char*)&spec[1]) + 1;
5540                 spec->next = (cur_data - argptr) + spec_size + slen;
5541                 if (guest_data_size < spec->next) {
5542                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5543                     break;
5544                 }
5545                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5546                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5547                 cur_data = argptr + spec->next;
5548                 spec = (void*)host_dm + host_dm->data_start + next;
5549             }
5550             break;
5551         }
5552         case DM_TABLE_DEPS:
5553         {
5554             void *hdata = (void*)host_dm + host_dm->data_start;
5555             int count = *(uint32_t*)hdata;
5556             uint64_t *hdev = hdata + 8;
5557             uint64_t *gdev = argptr + 8;
5558             int i;
5559 
5560             *(uint32_t*)argptr = tswap32(count);
5561             for (i = 0; i < count; i++) {
5562                 *gdev = tswap64(*hdev);
5563                 gdev++;
5564                 hdev++;
5565             }
5566             break;
5567         }
5568         case DM_LIST_VERSIONS:
5569         {
5570             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5571             uint32_t remaining_data = guest_data_size;
5572             void *cur_data = argptr;
5573             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5574             int vers_size = thunk_type_size(arg_type, 0);
5575 
5576             while (1) {
5577                 uint32_t next = vers->next;
5578                 if (next) {
5579                     vers->next = vers_size + (strlen(vers->name) + 1);
5580                 }
5581                 if (remaining_data < vers->next) {
5582                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5583                     break;
5584                 }
5585                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5586                 strcpy(cur_data + vers_size, vers->name);
5587                 cur_data += vers->next;
5588                 remaining_data -= vers->next;
5589                 if (!next) {
5590                     break;
5591                 }
5592                 vers = (void*)vers + next;
5593             }
5594             break;
5595         }
5596         default:
5597             unlock_user(argptr, guest_data, 0);
5598             ret = -TARGET_EINVAL;
5599             goto out;
5600         }
5601         unlock_user(argptr, guest_data, guest_data_size);
5602 
5603         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5604         if (!argptr) {
5605             ret = -TARGET_EFAULT;
5606             goto out;
5607         }
5608         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5609         unlock_user(argptr, arg, target_size);
5610     }
5611 out:
5612     g_free(big_buf);
5613     return ret;
5614 }
5615 
5616 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5617                                int cmd, abi_long arg)
5618 {
5619     void *argptr;
5620     int target_size;
5621     const argtype *arg_type = ie->arg_type;
5622     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5623     abi_long ret;
5624 
5625     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5626     struct blkpg_partition host_part;
5627 
5628     /* Read and convert blkpg */
5629     arg_type++;
5630     target_size = thunk_type_size(arg_type, 0);
5631     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5632     if (!argptr) {
5633         ret = -TARGET_EFAULT;
5634         goto out;
5635     }
5636     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5637     unlock_user(argptr, arg, 0);
5638 
5639     switch (host_blkpg->op) {
5640     case BLKPG_ADD_PARTITION:
5641     case BLKPG_DEL_PARTITION:
5642         /* payload is struct blkpg_partition */
5643         break;
5644     default:
5645         /* Unknown opcode */
5646         ret = -TARGET_EINVAL;
5647         goto out;
5648     }
5649 
5650     /* Read and convert blkpg->data */
5651     arg = (abi_long)(uintptr_t)host_blkpg->data;
5652     target_size = thunk_type_size(part_arg_type, 0);
5653     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5654     if (!argptr) {
5655         ret = -TARGET_EFAULT;
5656         goto out;
5657     }
5658     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5659     unlock_user(argptr, arg, 0);
5660 
5661     /* Swizzle the data pointer to our local copy and call! */
5662     host_blkpg->data = &host_part;
5663     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5664 
5665 out:
5666     return ret;
5667 }
5668 
5669 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5670                                 int fd, int cmd, abi_long arg)
5671 {
5672     const argtype *arg_type = ie->arg_type;
5673     const StructEntry *se;
5674     const argtype *field_types;
5675     const int *dst_offsets, *src_offsets;
5676     int target_size;
5677     void *argptr;
5678     abi_ulong *target_rt_dev_ptr;
5679     unsigned long *host_rt_dev_ptr;
5680     abi_long ret;
5681     int i;
5682 
5683     assert(ie->access == IOC_W);
5684     assert(*arg_type == TYPE_PTR);
5685     arg_type++;
5686     assert(*arg_type == TYPE_STRUCT);
5687     target_size = thunk_type_size(arg_type, 0);
5688     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5689     if (!argptr) {
5690         return -TARGET_EFAULT;
5691     }
5692     arg_type++;
5693     assert(*arg_type == (int)STRUCT_rtentry);
5694     se = struct_entries + *arg_type++;
5695     assert(se->convert[0] == NULL);
5696     /* convert struct here to be able to catch rt_dev string */
5697     field_types = se->field_types;
5698     dst_offsets = se->field_offsets[THUNK_HOST];
5699     src_offsets = se->field_offsets[THUNK_TARGET];
5700     for (i = 0; i < se->nb_fields; i++) {
5701         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5702             assert(*field_types == TYPE_PTRVOID);
5703             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5704             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5705             if (*target_rt_dev_ptr != 0) {
5706                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5707                                                   tswapal(*target_rt_dev_ptr));
5708                 if (!*host_rt_dev_ptr) {
5709                     unlock_user(argptr, arg, 0);
5710                     return -TARGET_EFAULT;
5711                 }
5712             } else {
5713                 *host_rt_dev_ptr = 0;
5714             }
5715             field_types++;
5716             continue;
5717         }
5718         field_types = thunk_convert(buf_temp + dst_offsets[i],
5719                                     argptr + src_offsets[i],
5720                                     field_types, THUNK_HOST);
5721     }
5722     unlock_user(argptr, arg, 0);
5723 
5724     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5725     if (*host_rt_dev_ptr != 0) {
5726         unlock_user((void *)*host_rt_dev_ptr,
5727                     *target_rt_dev_ptr, 0);
5728     }
5729     return ret;
5730 }
5731 
5732 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5733                                      int fd, int cmd, abi_long arg)
5734 {
5735     int sig = target_to_host_signal(arg);
5736     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5737 }
5738 
5739 #ifdef TIOCGPTPEER
5740 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5741                                      int fd, int cmd, abi_long arg)
5742 {
5743     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5744     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5745 }
5746 #endif
5747 
5748 static IOCTLEntry ioctl_entries[] = {
5749 #define IOCTL(cmd, access, ...) \
5750     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5751 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5752     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5753 #define IOCTL_IGNORE(cmd) \
5754     { TARGET_ ## cmd, 0, #cmd },
5755 #include "ioctls.h"
5756     { 0, 0, },
5757 };
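/* Each IOCTL() line in ioctls.h expands to one entry of this table.  As an
 * illustration (hypothetical entry), IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 * would become
 *
 *   { TARGET_BLKROSET, BLKROSET, "BLKROSET", IOC_W, 0, { MK_PTR(TYPE_INT) } },
 *
 * i.e. target command number, host command number, a name for logging, the
 * access direction, no special do_ioctl_fn hook, and the thunk argument type.
 */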
5758 
5759 /* ??? Implement proper locking for ioctls.  */
5760 /* do_ioctl() must return target values and target errnos. */
5761 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5762 {
5763     const IOCTLEntry *ie;
5764     const argtype *arg_type;
5765     abi_long ret;
5766     uint8_t buf_temp[MAX_STRUCT_SIZE];
5767     int target_size;
5768     void *argptr;
5769 
5770     ie = ioctl_entries;
5771     for(;;) {
5772         if (ie->target_cmd == 0) {
5773             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5774             return -TARGET_ENOSYS;
5775         }
5776         if (ie->target_cmd == cmd)
5777             break;
5778         ie++;
5779     }
5780     arg_type = ie->arg_type;
5781 #if defined(DEBUG)
5782     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5783 #endif
5784     if (ie->do_ioctl) {
5785         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5786     } else if (!ie->host_cmd) {
5787         /* Some architectures define BSD ioctls in their headers
5788            that are not implemented in Linux.  */
5789         return -TARGET_ENOSYS;
5790     }
5791 
5792     switch(arg_type[0]) {
5793     case TYPE_NULL:
5794         /* no argument */
5795         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5796         break;
5797     case TYPE_PTRVOID:
5798     case TYPE_INT:
5799         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5800         break;
5801     case TYPE_PTR:
5802         arg_type++;
5803         target_size = thunk_type_size(arg_type, 0);
5804         switch(ie->access) {
5805         case IOC_R:
5806             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5807             if (!is_error(ret)) {
5808                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5809                 if (!argptr)
5810                     return -TARGET_EFAULT;
5811                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5812                 unlock_user(argptr, arg, target_size);
5813             }
5814             break;
5815         case IOC_W:
5816             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5817             if (!argptr)
5818                 return -TARGET_EFAULT;
5819             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5820             unlock_user(argptr, arg, 0);
5821             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5822             break;
5823         default:
5824         case IOC_RW:
5825             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5826             if (!argptr)
5827                 return -TARGET_EFAULT;
5828             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5829             unlock_user(argptr, arg, 0);
5830             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5831             if (!is_error(ret)) {
5832                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5833                 if (!argptr)
5834                     return -TARGET_EFAULT;
5835                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5836                 unlock_user(argptr, arg, target_size);
5837             }
5838             break;
5839         }
5840         break;
5841     default:
5842         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5843                  (long)cmd, arg_type[0]);
5844         ret = -TARGET_ENOSYS;
5845         break;
5846     }
5847     return ret;
5848 }
5849 
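/* Each row of the bitmask_transtbl tables below reads as
 * { target_mask, target_bits, host_mask, host_bits }: when the bits selected
 * by target_mask equal target_bits, host_bits is ORed into the result (and
 * symmetrically for the host-to-target direction).  Single-bit flags simply
 * repeat the same flag in all four fields; multi-bit fields such as CBAUD
 * and CSIZE get one row per legal value.
 */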
5850 static const bitmask_transtbl iflag_tbl[] = {
5851         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5852         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5853         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5854         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5855         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5856         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5857         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5858         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5859         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5860         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5861         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5862         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5863         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5864         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5865         { 0, 0, 0, 0 }
5866 };
5867 
5868 static const bitmask_transtbl oflag_tbl[] = {
5869 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5870 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5871 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5872 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5873 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5874 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5875 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5876 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5877 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5878 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5879 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5880 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5881 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5882 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5883 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5884 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5885 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5886 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5887 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5888 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5889 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5890 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5891 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5892 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5893 	{ 0, 0, 0, 0 }
5894 };
5895 
5896 static const bitmask_transtbl cflag_tbl[] = {
5897 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5898 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5899 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5900 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5901 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5902 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5903 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5904 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5905 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5906 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5907 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5908 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5909 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5910 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5911 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5912 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5913 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5914 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5915 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5916 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5917 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5918 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5919 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5920 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5921 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5922 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5923 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5924 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5925 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5926 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5927 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5928 	{ 0, 0, 0, 0 }
5929 };
5930 
5931 static const bitmask_transtbl lflag_tbl[] = {
5932 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5933 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5934 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5935 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5936 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5937 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5938 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5939 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5940 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5941 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5942 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5943 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5944 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5945 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5946 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5947 	{ 0, 0, 0, 0 }
5948 };
5949 
5950 static void target_to_host_termios (void *dst, const void *src)
5951 {
5952     struct host_termios *host = dst;
5953     const struct target_termios *target = src;
5954 
5955     host->c_iflag =
5956         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5957     host->c_oflag =
5958         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5959     host->c_cflag =
5960         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5961     host->c_lflag =
5962         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5963     host->c_line = target->c_line;
5964 
5965     memset(host->c_cc, 0, sizeof(host->c_cc));
5966     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5967     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5968     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5969     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5970     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5971     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5972     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5973     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5974     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5975     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5976     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5977     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5978     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5979     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5980     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5981     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5982     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5983 }
5984 
5985 static void host_to_target_termios (void *dst, const void *src)
5986 {
5987     struct target_termios *target = dst;
5988     const struct host_termios *host = src;
5989 
5990     target->c_iflag =
5991         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5992     target->c_oflag =
5993         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5994     target->c_cflag =
5995         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5996     target->c_lflag =
5997         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5998     target->c_line = host->c_line;
5999 
6000     memset(target->c_cc, 0, sizeof(target->c_cc));
6001     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
6002     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
6003     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
6004     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
6005     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
6006     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6007     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6008     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6009     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6010     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6011     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6012     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6013     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6014     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6015     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6016     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6017     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6018 }
6019 
6020 static const StructEntry struct_termios_def = {
6021     .convert = { host_to_target_termios, target_to_host_termios },
6022     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6023     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6024 };
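/* termios is listed with STRUCT_SPECIAL() in syscall_types.h, so no field
 * description is generated for it; this hand-written StructEntry supplies
 * .convert callbacks instead, letting the thunk layer translate the flag
 * encodings through the tables above rather than copying fields verbatim.
 */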
6025 
6026 static bitmask_transtbl mmap_flags_tbl[] = {
6027     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6028     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6029     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6030     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6031       MAP_ANONYMOUS, MAP_ANONYMOUS },
6032     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6033       MAP_GROWSDOWN, MAP_GROWSDOWN },
6034     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6035       MAP_DENYWRITE, MAP_DENYWRITE },
6036     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6037       MAP_EXECUTABLE, MAP_EXECUTABLE },
6038     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6039     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6040       MAP_NORESERVE, MAP_NORESERVE },
6041     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6042     /* MAP_STACK had been ignored by the kernel for quite some time.
6043        Recognize it for the target insofar as we do not want to pass
6044        it through to the host.  */
6045     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6046     { 0, 0, 0, 0 }
6047 };
6048 
6049 #if defined(TARGET_I386)
6050 
6051 /* NOTE: there is really only one LDT, shared by all the threads */
6052 static uint8_t *ldt_table;
6053 
6054 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6055 {
6056     int size;
6057     void *p;
6058 
6059     if (!ldt_table)
6060         return 0;
6061     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6062     if (size > bytecount)
6063         size = bytecount;
6064     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6065     if (!p)
6066         return -TARGET_EFAULT;
6067     /* ??? Should this be byteswapped?  */
6068     memcpy(p, ldt_table, size);
6069     unlock_user(p, ptr, size);
6070     return size;
6071 }
6072 
6073 /* XXX: add locking support */
6074 static abi_long write_ldt(CPUX86State *env,
6075                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6076 {
6077     struct target_modify_ldt_ldt_s ldt_info;
6078     struct target_modify_ldt_ldt_s *target_ldt_info;
6079     int seg_32bit, contents, read_exec_only, limit_in_pages;
6080     int seg_not_present, useable, lm;
6081     uint32_t *lp, entry_1, entry_2;
6082 
6083     if (bytecount != sizeof(ldt_info))
6084         return -TARGET_EINVAL;
6085     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6086         return -TARGET_EFAULT;
6087     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6088     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6089     ldt_info.limit = tswap32(target_ldt_info->limit);
6090     ldt_info.flags = tswap32(target_ldt_info->flags);
6091     unlock_user_struct(target_ldt_info, ptr, 0);
6092 
6093     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6094         return -TARGET_EINVAL;
6095     seg_32bit = ldt_info.flags & 1;
6096     contents = (ldt_info.flags >> 1) & 3;
6097     read_exec_only = (ldt_info.flags >> 3) & 1;
6098     limit_in_pages = (ldt_info.flags >> 4) & 1;
6099     seg_not_present = (ldt_info.flags >> 5) & 1;
6100     useable = (ldt_info.flags >> 6) & 1;
6101 #ifdef TARGET_ABI32
6102     lm = 0;
6103 #else
6104     lm = (ldt_info.flags >> 7) & 1;
6105 #endif
6106     if (contents == 3) {
6107         if (oldmode)
6108             return -TARGET_EINVAL;
6109         if (seg_not_present == 0)
6110             return -TARGET_EINVAL;
6111     }
6112     /* allocate the LDT */
6113     if (!ldt_table) {
6114         env->ldt.base = target_mmap(0,
6115                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6116                                     PROT_READ|PROT_WRITE,
6117                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6118         if (env->ldt.base == -1)
6119             return -TARGET_ENOMEM;
6120         memset(g2h(env->ldt.base), 0,
6121                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6122         env->ldt.limit = 0xffff;
6123         ldt_table = g2h(env->ldt.base);
6124     }
6125 
6126     /* NOTE: same code as Linux kernel */
6127     /* Allow LDTs to be cleared by the user. */
6128     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6129         if (oldmode ||
6130             (contents == 0		&&
6131              read_exec_only == 1	&&
6132              seg_32bit == 0		&&
6133              limit_in_pages == 0	&&
6134              seg_not_present == 1	&&
6135              useable == 0 )) {
6136             entry_1 = 0;
6137             entry_2 = 0;
6138             goto install;
6139         }
6140     }
6141 
6142     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6143         (ldt_info.limit & 0x0ffff);
6144     entry_2 = (ldt_info.base_addr & 0xff000000) |
6145         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6146         (ldt_info.limit & 0xf0000) |
6147         ((read_exec_only ^ 1) << 9) |
6148         (contents << 10) |
6149         ((seg_not_present ^ 1) << 15) |
6150         (seg_32bit << 22) |
6151         (limit_in_pages << 23) |
6152         (lm << 21) |
6153         0x7000;
6154     if (!oldmode)
6155         entry_2 |= (useable << 20);
6156 
6157     /* Install the new entry ...  */
6158 install:
6159     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6160     lp[0] = tswap32(entry_1);
6161     lp[1] = tswap32(entry_2);
6162     return 0;
6163 }
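
/*
 * Added illustration (not from the original file, kept under "#if 0" so it
 * does not affect the build): a minimal sketch that decodes the two 32-bit
 * descriptor words produced by write_ldt() above back into base, limit and
 * flag fields, mirroring the bit layout that do_get_thread_area() below
 * also relies on.  All "example_*" names are hypothetical.
 */
#if 0
#include <stdint.h>

struct example_seg_desc {
    uint32_t base;
    uint32_t limit;
    int seg_32bit, contents, read_exec_only;
    int limit_in_pages, seg_not_present, useable;
};

static void example_decode_descriptor(uint32_t entry_1, uint32_t entry_2,
                                      struct example_seg_desc *d)
{
    d->base = (entry_1 >> 16)
              | ((entry_2 & 0x000000ff) << 16)
              | (entry_2 & 0xff000000);
    d->limit = (entry_1 & 0x0ffff) | (entry_2 & 0xf0000);
    d->seg_32bit = (entry_2 >> 22) & 1;
    d->contents = (entry_2 >> 10) & 3;
    d->read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    d->limit_in_pages = (entry_2 >> 23) & 1;
    d->seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    d->useable = (entry_2 >> 20) & 1;
}
#endif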
6164 
6165 /* specific and weird i386 syscalls */
6166 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6167                               unsigned long bytecount)
6168 {
6169     abi_long ret;
6170 
6171     switch (func) {
6172     case 0:
6173         ret = read_ldt(ptr, bytecount);
6174         break;
6175     case 1:
6176         ret = write_ldt(env, ptr, bytecount, 1);
6177         break;
6178     case 0x11:
6179         ret = write_ldt(env, ptr, bytecount, 0);
6180         break;
6181     default:
6182         ret = -TARGET_ENOSYS;
6183         break;
6184     }
6185     return ret;
6186 }
6187 
6188 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6189 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6190 {
6191     uint64_t *gdt_table = g2h(env->gdt.base);
6192     struct target_modify_ldt_ldt_s ldt_info;
6193     struct target_modify_ldt_ldt_s *target_ldt_info;
6194     int seg_32bit, contents, read_exec_only, limit_in_pages;
6195     int seg_not_present, useable, lm;
6196     uint32_t *lp, entry_1, entry_2;
6197     int i;
6198 
6199     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6200     if (!target_ldt_info)
6201         return -TARGET_EFAULT;
6202     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6203     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6204     ldt_info.limit = tswap32(target_ldt_info->limit);
6205     ldt_info.flags = tswap32(target_ldt_info->flags);
6206     if (ldt_info.entry_number == -1) {
6207         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6208             if (gdt_table[i] == 0) {
6209                 ldt_info.entry_number = i;
6210                 target_ldt_info->entry_number = tswap32(i);
6211                 break;
6212             }
6213         }
6214     }
6215     unlock_user_struct(target_ldt_info, ptr, 1);
6216 
6217     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6218         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6219            return -TARGET_EINVAL;
6220     seg_32bit = ldt_info.flags & 1;
6221     contents = (ldt_info.flags >> 1) & 3;
6222     read_exec_only = (ldt_info.flags >> 3) & 1;
6223     limit_in_pages = (ldt_info.flags >> 4) & 1;
6224     seg_not_present = (ldt_info.flags >> 5) & 1;
6225     useable = (ldt_info.flags >> 6) & 1;
6226 #ifdef TARGET_ABI32
6227     lm = 0;
6228 #else
6229     lm = (ldt_info.flags >> 7) & 1;
6230 #endif
6231 
6232     if (contents == 3) {
6233         if (seg_not_present == 0)
6234             return -TARGET_EINVAL;
6235     }
6236 
6237     /* NOTE: same code as Linux kernel */
6238     /* Allow LDTs to be cleared by the user. */
6239     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6240         if ((contents == 0             &&
6241              read_exec_only == 1       &&
6242              seg_32bit == 0            &&
6243              limit_in_pages == 0       &&
6244              seg_not_present == 1      &&
6245              useable == 0 )) {
6246             entry_1 = 0;
6247             entry_2 = 0;
6248             goto install;
6249         }
6250     }
6251 
6252     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6253         (ldt_info.limit & 0x0ffff);
6254     entry_2 = (ldt_info.base_addr & 0xff000000) |
6255         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6256         (ldt_info.limit & 0xf0000) |
6257         ((read_exec_only ^ 1) << 9) |
6258         (contents << 10) |
6259         ((seg_not_present ^ 1) << 15) |
6260         (seg_32bit << 22) |
6261         (limit_in_pages << 23) |
6262         (useable << 20) |
6263         (lm << 21) |
6264         0x7000;
6265 
6266     /* Install the new entry ...  */
6267 install:
6268     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6269     lp[0] = tswap32(entry_1);
6270     lp[1] = tswap32(entry_2);
6271     return 0;
6272 }
6273 
6274 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6275 {
6276     struct target_modify_ldt_ldt_s *target_ldt_info;
6277     uint64_t *gdt_table = g2h(env->gdt.base);
6278     uint32_t base_addr, limit, flags;
6279     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6280     int seg_not_present, useable, lm;
6281     uint32_t *lp, entry_1, entry_2;
6282 
6283     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6284     if (!target_ldt_info)
6285         return -TARGET_EFAULT;
6286     idx = tswap32(target_ldt_info->entry_number);
6287     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6288         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6289         unlock_user_struct(target_ldt_info, ptr, 1);
6290         return -TARGET_EINVAL;
6291     }
6292     lp = (uint32_t *)(gdt_table + idx);
6293     entry_1 = tswap32(lp[0]);
6294     entry_2 = tswap32(lp[1]);
6295 
6296     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6297     contents = (entry_2 >> 10) & 3;
6298     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6299     seg_32bit = (entry_2 >> 22) & 1;
6300     limit_in_pages = (entry_2 >> 23) & 1;
6301     useable = (entry_2 >> 20) & 1;
6302 #ifdef TARGET_ABI32
6303     lm = 0;
6304 #else
6305     lm = (entry_2 >> 21) & 1;
6306 #endif
6307     flags = (seg_32bit << 0) | (contents << 1) |
6308         (read_exec_only << 3) | (limit_in_pages << 4) |
6309         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6310     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6311     base_addr = (entry_1 >> 16) |
6312         (entry_2 & 0xff000000) |
6313         ((entry_2 & 0xff) << 16);
6314     target_ldt_info->base_addr = tswapal(base_addr);
6315     target_ldt_info->limit = tswap32(limit);
6316     target_ldt_info->flags = tswap32(flags);
6317     unlock_user_struct(target_ldt_info, ptr, 1);
6318     return 0;
6319 }
6320 #endif /* TARGET_I386 && TARGET_ABI32 */
6321 
6322 #ifndef TARGET_ABI32
6323 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6324 {
6325     abi_long ret = 0;
6326     abi_ulong val;
6327     int idx;
6328 
6329     switch(code) {
6330     case TARGET_ARCH_SET_GS:
6331     case TARGET_ARCH_SET_FS:
6332         if (code == TARGET_ARCH_SET_GS)
6333             idx = R_GS;
6334         else
6335             idx = R_FS;
6336         cpu_x86_load_seg(env, idx, 0);
6337         env->segs[idx].base = addr;
6338         break;
6339     case TARGET_ARCH_GET_GS:
6340     case TARGET_ARCH_GET_FS:
6341         if (code == TARGET_ARCH_GET_GS)
6342             idx = R_GS;
6343         else
6344             idx = R_FS;
6345         val = env->segs[idx].base;
6346         if (put_user(val, addr, abi_ulong))
6347             ret = -TARGET_EFAULT;
6348         break;
6349     default:
6350         ret = -TARGET_EINVAL;
6351         break;
6352     }
6353     return ret;
6354 }
6355 #endif
6356 
6357 #endif /* defined(TARGET_I386) */
6358 
6359 #define NEW_STACK_SIZE 0x40000
6360 
6361 
6362 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6363 typedef struct {
6364     CPUArchState *env;
6365     pthread_mutex_t mutex;
6366     pthread_cond_t cond;
6367     pthread_t thread;
6368     uint32_t tid;
6369     abi_ulong child_tidptr;
6370     abi_ulong parent_tidptr;
6371     sigset_t sigmask;
6372 } new_thread_info;
6373 
6374 static void *clone_func(void *arg)
6375 {
6376     new_thread_info *info = arg;
6377     CPUArchState *env;
6378     CPUState *cpu;
6379     TaskState *ts;
6380 
6381     rcu_register_thread();
6382     tcg_register_thread();
6383     env = info->env;
6384     cpu = ENV_GET_CPU(env);
6385     thread_cpu = cpu;
6386     ts = (TaskState *)cpu->opaque;
6387     info->tid = gettid();
6388     task_settid(ts);
6389     if (info->child_tidptr)
6390         put_user_u32(info->tid, info->child_tidptr);
6391     if (info->parent_tidptr)
6392         put_user_u32(info->tid, info->parent_tidptr);
6393     /* Enable signals.  */
6394     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6395     /* Signal to the parent that we're ready.  */
6396     pthread_mutex_lock(&info->mutex);
6397     pthread_cond_broadcast(&info->cond);
6398     pthread_mutex_unlock(&info->mutex);
6399     /* Wait until the parent has finished initializing the tls state.  */
6400     pthread_mutex_lock(&clone_lock);
6401     pthread_mutex_unlock(&clone_lock);
6402     cpu_loop(env);
6403     /* never exits */
6404     return NULL;
6405 }
6406 
6407 /* do_fork() must return host values and target errnos (unlike most
6408    do_*() functions). */
6409 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6410                    abi_ulong parent_tidptr, target_ulong newtls,
6411                    abi_ulong child_tidptr)
6412 {
6413     CPUState *cpu = ENV_GET_CPU(env);
6414     int ret;
6415     TaskState *ts;
6416     CPUState *new_cpu;
6417     CPUArchState *new_env;
6418     sigset_t sigmask;
6419 
6420     flags &= ~CLONE_IGNORED_FLAGS;
6421 
6422     /* Emulate vfork() with fork() */
6423     if (flags & CLONE_VFORK)
6424         flags &= ~(CLONE_VFORK | CLONE_VM);
6425 
6426     if (flags & CLONE_VM) {
6427         TaskState *parent_ts = (TaskState *)cpu->opaque;
6428         new_thread_info info;
6429         pthread_attr_t attr;
6430 
6431         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6432             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6433             return -TARGET_EINVAL;
6434         }
6435 
6436         ts = g_new0(TaskState, 1);
6437         init_task_state(ts);
6438 
6439         /* Grab a mutex so that thread setup appears atomic.  */
6440         pthread_mutex_lock(&clone_lock);
6441 
6442         /* we create a new CPU instance. */
6443         new_env = cpu_copy(env);
6444         /* Init regs that differ from the parent.  */
6445         cpu_clone_regs(new_env, newsp);
6446         new_cpu = ENV_GET_CPU(new_env);
6447         new_cpu->opaque = ts;
6448         ts->bprm = parent_ts->bprm;
6449         ts->info = parent_ts->info;
6450         ts->signal_mask = parent_ts->signal_mask;
6451 
6452         if (flags & CLONE_CHILD_CLEARTID) {
6453             ts->child_tidptr = child_tidptr;
6454         }
6455 
6456         if (flags & CLONE_SETTLS) {
6457             cpu_set_tls (new_env, newtls);
6458         }
6459 
6460         memset(&info, 0, sizeof(info));
6461         pthread_mutex_init(&info.mutex, NULL);
6462         pthread_mutex_lock(&info.mutex);
6463         pthread_cond_init(&info.cond, NULL);
6464         info.env = new_env;
6465         if (flags & CLONE_CHILD_SETTID) {
6466             info.child_tidptr = child_tidptr;
6467         }
6468         if (flags & CLONE_PARENT_SETTID) {
6469             info.parent_tidptr = parent_tidptr;
6470         }
6471 
6472         ret = pthread_attr_init(&attr);
6473         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6474         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6475         /* It is not safe to deliver signals until the child has finished
6476            initializing, so temporarily block all signals.  */
6477         sigfillset(&sigmask);
6478         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6479 
6480         /* If this is our first additional thread, we need to ensure we
6481          * generate code for parallel execution and flush old translations.
6482          */
6483         if (!parallel_cpus) {
6484             parallel_cpus = true;
6485             tb_flush(cpu);
6486         }
6487 
6488         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6489         /* TODO: Free new CPU state if thread creation failed.  */
6490 
6491         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6492         pthread_attr_destroy(&attr);
6493         if (ret == 0) {
6494             /* Wait for the child to initialize.  */
6495             pthread_cond_wait(&info.cond, &info.mutex);
6496             ret = info.tid;
6497         } else {
6498             ret = -1;
6499         }
6500         pthread_mutex_unlock(&info.mutex);
6501         pthread_cond_destroy(&info.cond);
6502         pthread_mutex_destroy(&info.mutex);
6503         pthread_mutex_unlock(&clone_lock);
6504     } else {
6505         /* if there is no CLONE_VM, we consider it a fork */
6506         if (flags & CLONE_INVALID_FORK_FLAGS) {
6507             return -TARGET_EINVAL;
6508         }
6509 
6510         /* We can't support custom termination signals */
6511         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6512             return -TARGET_EINVAL;
6513         }
6514 
6515         if (block_signals()) {
6516             return -TARGET_ERESTARTSYS;
6517         }
6518 
6519         fork_start();
6520         ret = fork();
6521         if (ret == 0) {
6522             /* Child Process.  */
6523             cpu_clone_regs(env, newsp);
6524             fork_end(1);
6525             /* There is a race condition here.  The parent process could
6526                theoretically read the TID in the child process before the child
6527                tid is set.  This would require using either ptrace
6528                (not implemented) or having *_tidptr point at a shared memory
6529                mapping.  We can't repeat the spinlock hack used above because
6530                the child process gets its own copy of the lock.  */
6531             if (flags & CLONE_CHILD_SETTID)
6532                 put_user_u32(gettid(), child_tidptr);
6533             if (flags & CLONE_PARENT_SETTID)
6534                 put_user_u32(gettid(), parent_tidptr);
6535             ts = (TaskState *)cpu->opaque;
6536             if (flags & CLONE_SETTLS)
6537                 cpu_set_tls (env, newtls);
6538             if (flags & CLONE_CHILD_CLEARTID)
6539                 ts->child_tidptr = child_tidptr;
6540         } else {
6541             fork_end(0);
6542         }
6543     }
6544     return ret;
6545 }
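
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * the parent/child startup handshake used by do_fork() and clone_func()
 * above, reduced to plain pthreads.  The parent holds the mutex across
 * pthread_create() and only releases it inside pthread_cond_wait(), so
 * the child's broadcast cannot be lost.  All "example_*" names are
 * hypothetical; error handling is abbreviated.
 */
#if 0
#include <pthread.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pid_t tid;
} example_thread_info;

static void *example_child(void *arg)
{
    example_thread_info *info = arg;

    info->tid = syscall(SYS_gettid);      /* publish the kernel TID */
    pthread_mutex_lock(&info->mutex);     /* parent is already in cond_wait */
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* ... thread body ... */
    return NULL;
}

static pid_t example_spawn(void)
{
    example_thread_info info = { .tid = -1 };
    pthread_t thr;

    pthread_mutex_init(&info.mutex, NULL);
    pthread_cond_init(&info.cond, NULL);
    pthread_mutex_lock(&info.mutex);
    if (pthread_create(&thr, NULL, example_child, &info) == 0) {
        pthread_cond_wait(&info.cond, &info.mutex); /* drops the mutex while waiting */
    }
    pthread_mutex_unlock(&info.mutex);
    pthread_cond_destroy(&info.cond);
    pthread_mutex_destroy(&info.mutex);
    return info.tid;                                /* -1 if creation failed */
}
#endif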
6546 
6547 /* warning: doesn't handle Linux-specific flags... */
6548 static int target_to_host_fcntl_cmd(int cmd)
6549 {
6550     int ret;
6551 
6552     switch(cmd) {
6553     case TARGET_F_DUPFD:
6554     case TARGET_F_GETFD:
6555     case TARGET_F_SETFD:
6556     case TARGET_F_GETFL:
6557     case TARGET_F_SETFL:
6558         ret = cmd;
6559         break;
6560     case TARGET_F_GETLK:
6561         ret = F_GETLK64;
6562         break;
6563     case TARGET_F_SETLK:
6564         ret = F_SETLK64;
6565         break;
6566     case TARGET_F_SETLKW:
6567         ret = F_SETLKW64;
6568         break;
6569     case TARGET_F_GETOWN:
6570         ret = F_GETOWN;
6571         break;
6572     case TARGET_F_SETOWN:
6573         ret = F_SETOWN;
6574         break;
6575     case TARGET_F_GETSIG:
6576         ret = F_GETSIG;
6577         break;
6578     case TARGET_F_SETSIG:
6579         ret = F_SETSIG;
6580         break;
6581 #if TARGET_ABI_BITS == 32
6582     case TARGET_F_GETLK64:
6583         ret = F_GETLK64;
6584         break;
6585     case TARGET_F_SETLK64:
6586         ret = F_SETLK64;
6587         break;
6588     case TARGET_F_SETLKW64:
6589         ret = F_SETLKW64;
6590         break;
6591 #endif
6592     case TARGET_F_SETLEASE:
6593         ret = F_SETLEASE;
6594         break;
6595     case TARGET_F_GETLEASE:
6596         ret = F_GETLEASE;
6597         break;
6598 #ifdef F_DUPFD_CLOEXEC
6599     case TARGET_F_DUPFD_CLOEXEC:
6600         ret = F_DUPFD_CLOEXEC;
6601         break;
6602 #endif
6603     case TARGET_F_NOTIFY:
6604         ret = F_NOTIFY;
6605         break;
6606 #ifdef F_GETOWN_EX
6607     case TARGET_F_GETOWN_EX:
6608         ret = F_GETOWN_EX;
6609         break;
6610 #endif
6611 #ifdef F_SETOWN_EX
6612     case TARGET_F_SETOWN_EX:
6613         ret = F_SETOWN_EX;
6614         break;
6615 #endif
6616 #ifdef F_SETPIPE_SZ
6617     case TARGET_F_SETPIPE_SZ:
6618         ret = F_SETPIPE_SZ;
6619         break;
6620     case TARGET_F_GETPIPE_SZ:
6621         ret = F_GETPIPE_SZ;
6622         break;
6623 #endif
6624     default:
6625         ret = -TARGET_EINVAL;
6626         break;
6627     }
6628 
6629 #if defined(__powerpc64__)
6630     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6631      * are not supported by the kernel. The glibc fcntl call actually adjusts
6632      * them to 5, 6 and 7 before making the syscall(). Since we make the
6633      * syscall directly, adjust to what is supported by the kernel.
6634      */
6635     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6636         ret -= F_GETLK64 - 5;
6637     }
6638 #endif
6639 
6640     return ret;
6641 }
6642 
6643 #define FLOCK_TRANSTBL \
6644     switch (type) { \
6645     TRANSTBL_CONVERT(F_RDLCK); \
6646     TRANSTBL_CONVERT(F_WRLCK); \
6647     TRANSTBL_CONVERT(F_UNLCK); \
6648     TRANSTBL_CONVERT(F_EXLCK); \
6649     TRANSTBL_CONVERT(F_SHLCK); \
6650     }
6651 
6652 static int target_to_host_flock(int type)
6653 {
6654 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6655     FLOCK_TRANSTBL
6656 #undef  TRANSTBL_CONVERT
6657     return -TARGET_EINVAL;
6658 }
6659 
6660 static int host_to_target_flock(int type)
6661 {
6662 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6663     FLOCK_TRANSTBL
6664 #undef  TRANSTBL_CONVERT
6665     /* if we don't know how to convert the value coming
6666      * from the host, we copy it to the target field as-is
6667      */
6668     return type;
6669 }
6670 
6671 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6672                                             abi_ulong target_flock_addr)
6673 {
6674     struct target_flock *target_fl;
6675     int l_type;
6676 
6677     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6678         return -TARGET_EFAULT;
6679     }
6680 
6681     __get_user(l_type, &target_fl->l_type);
6682     l_type = target_to_host_flock(l_type);
6683     if (l_type < 0) {
6684         return l_type;
6685     }
6686     fl->l_type = l_type;
6687     __get_user(fl->l_whence, &target_fl->l_whence);
6688     __get_user(fl->l_start, &target_fl->l_start);
6689     __get_user(fl->l_len, &target_fl->l_len);
6690     __get_user(fl->l_pid, &target_fl->l_pid);
6691     unlock_user_struct(target_fl, target_flock_addr, 0);
6692     return 0;
6693 }
6694 
6695 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6696                                           const struct flock64 *fl)
6697 {
6698     struct target_flock *target_fl;
6699     short l_type;
6700 
6701     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6702         return -TARGET_EFAULT;
6703     }
6704 
6705     l_type = host_to_target_flock(fl->l_type);
6706     __put_user(l_type, &target_fl->l_type);
6707     __put_user(fl->l_whence, &target_fl->l_whence);
6708     __put_user(fl->l_start, &target_fl->l_start);
6709     __put_user(fl->l_len, &target_fl->l_len);
6710     __put_user(fl->l_pid, &target_fl->l_pid);
6711     unlock_user_struct(target_fl, target_flock_addr, 1);
6712     return 0;
6713 }
6714 
6715 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6716 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6717 
6718 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6719 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6720                                                    abi_ulong target_flock_addr)
6721 {
6722     struct target_oabi_flock64 *target_fl;
6723     int l_type;
6724 
6725     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6726         return -TARGET_EFAULT;
6727     }
6728 
6729     __get_user(l_type, &target_fl->l_type);
6730     l_type = target_to_host_flock(l_type);
6731     if (l_type < 0) {
6732         return l_type;
6733     }
6734     fl->l_type = l_type;
6735     __get_user(fl->l_whence, &target_fl->l_whence);
6736     __get_user(fl->l_start, &target_fl->l_start);
6737     __get_user(fl->l_len, &target_fl->l_len);
6738     __get_user(fl->l_pid, &target_fl->l_pid);
6739     unlock_user_struct(target_fl, target_flock_addr, 0);
6740     return 0;
6741 }
6742 
6743 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6744                                                  const struct flock64 *fl)
6745 {
6746     struct target_oabi_flock64 *target_fl;
6747     short l_type;
6748 
6749     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6750         return -TARGET_EFAULT;
6751     }
6752 
6753     l_type = host_to_target_flock(fl->l_type);
6754     __put_user(l_type, &target_fl->l_type);
6755     __put_user(fl->l_whence, &target_fl->l_whence);
6756     __put_user(fl->l_start, &target_fl->l_start);
6757     __put_user(fl->l_len, &target_fl->l_len);
6758     __put_user(fl->l_pid, &target_fl->l_pid);
6759     unlock_user_struct(target_fl, target_flock_addr, 1);
6760     return 0;
6761 }
6762 #endif
6763 
6764 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6765                                               abi_ulong target_flock_addr)
6766 {
6767     struct target_flock64 *target_fl;
6768     int l_type;
6769 
6770     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6771         return -TARGET_EFAULT;
6772     }
6773 
6774     __get_user(l_type, &target_fl->l_type);
6775     l_type = target_to_host_flock(l_type);
6776     if (l_type < 0) {
6777         return l_type;
6778     }
6779     fl->l_type = l_type;
6780     __get_user(fl->l_whence, &target_fl->l_whence);
6781     __get_user(fl->l_start, &target_fl->l_start);
6782     __get_user(fl->l_len, &target_fl->l_len);
6783     __get_user(fl->l_pid, &target_fl->l_pid);
6784     unlock_user_struct(target_fl, target_flock_addr, 0);
6785     return 0;
6786 }
6787 
6788 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6789                                             const struct flock64 *fl)
6790 {
6791     struct target_flock64 *target_fl;
6792     short l_type;
6793 
6794     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6795         return -TARGET_EFAULT;
6796     }
6797 
6798     l_type = host_to_target_flock(fl->l_type);
6799     __put_user(l_type, &target_fl->l_type);
6800     __put_user(fl->l_whence, &target_fl->l_whence);
6801     __put_user(fl->l_start, &target_fl->l_start);
6802     __put_user(fl->l_len, &target_fl->l_len);
6803     __put_user(fl->l_pid, &target_fl->l_pid);
6804     unlock_user_struct(target_fl, target_flock_addr, 1);
6805     return 0;
6806 }
6807 
6808 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6809 {
6810     struct flock64 fl64;
6811 #ifdef F_GETOWN_EX
6812     struct f_owner_ex fox;
6813     struct target_f_owner_ex *target_fox;
6814 #endif
6815     abi_long ret;
6816     int host_cmd = target_to_host_fcntl_cmd(cmd);
6817 
6818     if (host_cmd == -TARGET_EINVAL)
6819         return host_cmd;
6820 
6821     switch(cmd) {
6822     case TARGET_F_GETLK:
6823         ret = copy_from_user_flock(&fl64, arg);
6824         if (ret) {
6825             return ret;
6826         }
6827         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6828         if (ret == 0) {
6829             ret = copy_to_user_flock(arg, &fl64);
6830         }
6831         break;
6832 
6833     case TARGET_F_SETLK:
6834     case TARGET_F_SETLKW:
6835         ret = copy_from_user_flock(&fl64, arg);
6836         if (ret) {
6837             return ret;
6838         }
6839         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6840         break;
6841 
6842     case TARGET_F_GETLK64:
6843         ret = copy_from_user_flock64(&fl64, arg);
6844         if (ret) {
6845             return ret;
6846         }
6847         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6848         if (ret == 0) {
6849             ret = copy_to_user_flock64(arg, &fl64);
6850         }
6851         break;
6852     case TARGET_F_SETLK64:
6853     case TARGET_F_SETLKW64:
6854         ret = copy_from_user_flock64(&fl64, arg);
6855         if (ret) {
6856             return ret;
6857         }
6858         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6859         break;
6860 
6861     case TARGET_F_GETFL:
6862         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6863         if (ret >= 0) {
6864             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6865         }
6866         break;
6867 
6868     case TARGET_F_SETFL:
6869         ret = get_errno(safe_fcntl(fd, host_cmd,
6870                                    target_to_host_bitmask(arg,
6871                                                           fcntl_flags_tbl)));
6872         break;
6873 
6874 #ifdef F_GETOWN_EX
6875     case TARGET_F_GETOWN_EX:
6876         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6877         if (ret >= 0) {
6878             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6879                 return -TARGET_EFAULT;
6880             target_fox->type = tswap32(fox.type);
6881             target_fox->pid = tswap32(fox.pid);
6882             unlock_user_struct(target_fox, arg, 1);
6883         }
6884         break;
6885 #endif
6886 
6887 #ifdef F_SETOWN_EX
6888     case TARGET_F_SETOWN_EX:
6889         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6890             return -TARGET_EFAULT;
6891         fox.type = tswap32(target_fox->type);
6892         fox.pid = tswap32(target_fox->pid);
6893         unlock_user_struct(target_fox, arg, 0);
6894         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6895         break;
6896 #endif
6897 
6898     case TARGET_F_SETOWN:
6899     case TARGET_F_GETOWN:
6900     case TARGET_F_SETSIG:
6901     case TARGET_F_GETSIG:
6902     case TARGET_F_SETLEASE:
6903     case TARGET_F_GETLEASE:
6904     case TARGET_F_SETPIPE_SZ:
6905     case TARGET_F_GETPIPE_SZ:
6906         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6907         break;
6908 
6909     default:
6910         ret = get_errno(safe_fcntl(fd, cmd, arg));
6911         break;
6912     }
6913     return ret;
6914 }
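
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * the guest-visible operation that the flock conversion helpers and
 * do_fcntl() above service -- an ordinary POSIX record lock.
 * "example_lock_range" is a hypothetical name; error handling omitted.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_lock_range(int fd)
{
    struct flock fl = {
        .l_type = F_WRLCK,     /* the field translated by target_to_host_flock() */
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 100,          /* lock the first 100 bytes */
    };

    /* Returns 0 on success, -1 with errno EACCES/EAGAIN if already held. */
    return fcntl(fd, F_SETLK, &fl);
}
#endif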
6915 
6916 #ifdef USE_UID16
6917 
6918 static inline int high2lowuid(int uid)
6919 {
6920     if (uid > 65535)
6921         return 65534;
6922     else
6923         return uid;
6924 }
6925 
6926 static inline int high2lowgid(int gid)
6927 {
6928     if (gid > 65535)
6929         return 65534;
6930     else
6931         return gid;
6932 }
6933 
6934 static inline int low2highuid(int uid)
6935 {
6936     if ((int16_t)uid == -1)
6937         return -1;
6938     else
6939         return uid;
6940 }
6941 
6942 static inline int low2highgid(int gid)
6943 {
6944     if ((int16_t)gid == -1)
6945         return -1;
6946     else
6947         return gid;
6948 }
6949 static inline int tswapid(int id)
6950 {
6951     return tswap16(id);
6952 }
6953 
6954 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6955 
6956 #else /* !USE_UID16 */
6957 static inline int high2lowuid(int uid)
6958 {
6959     return uid;
6960 }
6961 static inline int high2lowgid(int gid)
6962 {
6963     return gid;
6964 }
6965 static inline int low2highuid(int uid)
6966 {
6967     return uid;
6968 }
6969 static inline int low2highgid(int gid)
6970 {
6971     return gid;
6972 }
6973 static inline int tswapid(int id)
6974 {
6975     return tswap32(id);
6976 }
6977 
6978 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6979 
6980 #endif /* USE_UID16 */
6981 
6982 /* We must do direct syscalls for setting UID/GID, because we want to
6983  * implement the Linux system call semantics of "change only for this thread",
6984  * not the libc/POSIX semantics of "change for all threads in process".
6985  * (See http://ewontfix.com/17/ for more details.)
6986  * We use the 32-bit version of the syscalls if present; if it is not
6987  * then either the host architecture supports 32-bit UIDs natively with
6988  * the standard syscall, or the 16-bit UID is the best we can do.
6989  */
6990 #ifdef __NR_setuid32
6991 #define __NR_sys_setuid __NR_setuid32
6992 #else
6993 #define __NR_sys_setuid __NR_setuid
6994 #endif
6995 #ifdef __NR_setgid32
6996 #define __NR_sys_setgid __NR_setgid32
6997 #else
6998 #define __NR_sys_setgid __NR_setgid
6999 #endif
7000 #ifdef __NR_setresuid32
7001 #define __NR_sys_setresuid __NR_setresuid32
7002 #else
7003 #define __NR_sys_setresuid __NR_setresuid
7004 #endif
7005 #ifdef __NR_setresgid32
7006 #define __NR_sys_setresgid __NR_setresgid32
7007 #else
7008 #define __NR_sys_setresgid __NR_setresgid
7009 #endif
7010 
7011 _syscall1(int, sys_setuid, uid_t, uid)
7012 _syscall1(int, sys_setgid, gid_t, gid)
7013 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7014 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
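
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * the difference the comment above describes.  The raw syscall changes
 * credentials for the calling thread only, while the glibc wrapper
 * broadcasts the change to every thread in the process.  Hypothetical
 * example; run with appropriate privileges to observe the effect.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static long example_setuid_this_thread_only(uid_t uid)
{
    /* Linux per-thread semantics, as emulated for the guest above. */
    return syscall(SYS_setuid, uid);
}

static int example_setuid_whole_process(uid_t uid)
{
    /* POSIX semantics: glibc signals all threads to apply the change. */
    return setuid(uid);
}
#endif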
7015 
7016 void syscall_init(void)
7017 {
7018     IOCTLEntry *ie;
7019     const argtype *arg_type;
7020     int size;
7021     int i;
7022 
7023     thunk_init(STRUCT_MAX);
7024 
7025 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7026 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7027 #include "syscall_types.h"
7028 #undef STRUCT
7029 #undef STRUCT_SPECIAL
7030 
7031     /* Build target_to_host_errno_table[] table from
7032      * host_to_target_errno_table[]. */
7033     for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
7034         target_to_host_errno_table[host_to_target_errno_table[i]] = i;
7035     }
7036 
7037     /* we patch the ioctl size if necessary. We rely on the fact that
7038        no ioctl has all bits set to '1' in the size field */
7039     ie = ioctl_entries;
7040     while (ie->target_cmd != 0) {
7041         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7042             TARGET_IOC_SIZEMASK) {
7043             arg_type = ie->arg_type;
7044             if (arg_type[0] != TYPE_PTR) {
7045                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7046                         ie->target_cmd);
7047                 exit(1);
7048             }
7049             arg_type++;
7050             size = thunk_type_size(arg_type, 0);
7051             ie->target_cmd = (ie->target_cmd &
7052                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7053                 (size << TARGET_IOC_SIZESHIFT);
7054         }
7055 
7056         /* automatic consistency check if same arch */
7057 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7058     (defined(__x86_64__) && defined(TARGET_X86_64))
7059         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7060             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7061                     ie->name, ie->target_cmd, ie->host_cmd);
7062         }
7063 #endif
7064         ie++;
7065     }
7066 }
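
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * the ioctl request-number layout that the size-patching loop in
 * syscall_init() manipulates.  On Linux the request packs direction,
 * size, type and number into one word, and the standard _IOC_* macros
 * pick the fields apart.  "example_show_ioctl_fields" is hypothetical.
 */
#if 0
#include <linux/ioctl.h>
#include <stdio.h>

static void example_show_ioctl_fields(unsigned int req)
{
    printf("dir=%u size=%u type=%u nr=%u\n",
           _IOC_DIR(req), _IOC_SIZE(req), _IOC_TYPE(req), _IOC_NR(req));
}
#endif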
7067 
7068 #if TARGET_ABI_BITS == 32
7069 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
7070 {
7071 #ifdef TARGET_WORDS_BIGENDIAN
7072     return ((uint64_t)word0 << 32) | word1;
7073 #else
7074     return ((uint64_t)word1 << 32) | word0;
7075 #endif
7076 }
7077 #else /* TARGET_ABI_BITS == 32 */
7078 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
7079 {
7080     return word0;
7081 }
7082 #endif /* TARGET_ABI_BITS != 32 */
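
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * how a 64-bit file offset travels as two 32-bit syscall arguments on a
 * 32-bit ABI, which is exactly what target_offset64() above undoes.
 * The guest ABI's endianness decides which word carries the high half.
 * "example_*" names are hypothetical.
 */
#if 0
#include <stdint.h>

static void example_split_offset(uint64_t off, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)off;
    *hi = (uint32_t)(off >> 32);
}

static uint64_t example_join_offset(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}
#endif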
7083 
7084 #ifdef TARGET_NR_truncate64
7085 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7086                                          abi_long arg2,
7087                                          abi_long arg3,
7088                                          abi_long arg4)
7089 {
7090     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7091         arg2 = arg3;
7092         arg3 = arg4;
7093     }
7094     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7095 }
7096 #endif
7097 
7098 #ifdef TARGET_NR_ftruncate64
7099 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7100                                           abi_long arg2,
7101                                           abi_long arg3,
7102                                           abi_long arg4)
7103 {
7104     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7105         arg2 = arg3;
7106         arg3 = arg4;
7107     }
7108     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7109 }
7110 #endif
7111 
7112 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
7113                                                abi_ulong target_addr)
7114 {
7115     struct target_timespec *target_ts;
7116 
7117     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
7118         return -TARGET_EFAULT;
7119     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
7120     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7121     unlock_user_struct(target_ts, target_addr, 0);
7122     return 0;
7123 }
7124 
7125 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
7126                                                struct timespec *host_ts)
7127 {
7128     struct target_timespec *target_ts;
7129 
7130     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
7131         return -TARGET_EFAULT;
7132     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
7133     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
7134     unlock_user_struct(target_ts, target_addr, 1);
7135     return 0;
7136 }
7137 
7138 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7139                                                  abi_ulong target_addr)
7140 {
7141     struct target_itimerspec *target_itspec;
7142 
7143     if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7144         return -TARGET_EFAULT;
7145     }
7146 
7147     host_itspec->it_interval.tv_sec =
7148                             tswapal(target_itspec->it_interval.tv_sec);
7149     host_itspec->it_interval.tv_nsec =
7150                             tswapal(target_itspec->it_interval.tv_nsec);
7151     host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7152     host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7153 
7154     unlock_user_struct(target_itspec, target_addr, 1);
7155     return 0;
7156 }
7157 
7158 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7159                                                struct itimerspec *host_its)
7160 {
7161     struct target_itimerspec *target_itspec;
7162 
7163     if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7164         return -TARGET_EFAULT;
7165     }
7166 
7167     target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7168     target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7169 
7170     target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7171     target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7172 
7173     unlock_user_struct(target_itspec, target_addr, 0);
7174     return 0;
7175 }
7176 
7177 static inline abi_long target_to_host_timex(struct timex *host_tx,
7178                                             abi_long target_addr)
7179 {
7180     struct target_timex *target_tx;
7181 
7182     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7183         return -TARGET_EFAULT;
7184     }
7185 
7186     __get_user(host_tx->modes, &target_tx->modes);
7187     __get_user(host_tx->offset, &target_tx->offset);
7188     __get_user(host_tx->freq, &target_tx->freq);
7189     __get_user(host_tx->maxerror, &target_tx->maxerror);
7190     __get_user(host_tx->esterror, &target_tx->esterror);
7191     __get_user(host_tx->status, &target_tx->status);
7192     __get_user(host_tx->constant, &target_tx->constant);
7193     __get_user(host_tx->precision, &target_tx->precision);
7194     __get_user(host_tx->tolerance, &target_tx->tolerance);
7195     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7196     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7197     __get_user(host_tx->tick, &target_tx->tick);
7198     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7199     __get_user(host_tx->jitter, &target_tx->jitter);
7200     __get_user(host_tx->shift, &target_tx->shift);
7201     __get_user(host_tx->stabil, &target_tx->stabil);
7202     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7203     __get_user(host_tx->calcnt, &target_tx->calcnt);
7204     __get_user(host_tx->errcnt, &target_tx->errcnt);
7205     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7206     __get_user(host_tx->tai, &target_tx->tai);
7207 
7208     unlock_user_struct(target_tx, target_addr, 0);
7209     return 0;
7210 }
7211 
7212 static inline abi_long host_to_target_timex(abi_long target_addr,
7213                                             struct timex *host_tx)
7214 {
7215     struct target_timex *target_tx;
7216 
7217     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7218         return -TARGET_EFAULT;
7219     }
7220 
7221     __put_user(host_tx->modes, &target_tx->modes);
7222     __put_user(host_tx->offset, &target_tx->offset);
7223     __put_user(host_tx->freq, &target_tx->freq);
7224     __put_user(host_tx->maxerror, &target_tx->maxerror);
7225     __put_user(host_tx->esterror, &target_tx->esterror);
7226     __put_user(host_tx->status, &target_tx->status);
7227     __put_user(host_tx->constant, &target_tx->constant);
7228     __put_user(host_tx->precision, &target_tx->precision);
7229     __put_user(host_tx->tolerance, &target_tx->tolerance);
7230     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7231     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7232     __put_user(host_tx->tick, &target_tx->tick);
7233     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7234     __put_user(host_tx->jitter, &target_tx->jitter);
7235     __put_user(host_tx->shift, &target_tx->shift);
7236     __put_user(host_tx->stabil, &target_tx->stabil);
7237     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7238     __put_user(host_tx->calcnt, &target_tx->calcnt);
7239     __put_user(host_tx->errcnt, &target_tx->errcnt);
7240     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7241     __put_user(host_tx->tai, &target_tx->tai);
7242 
7243     unlock_user_struct(target_tx, target_addr, 1);
7244     return 0;
7245 }
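
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * a read-only adjtimex() query, i.e. the host call whose argument the
 * two struct timex converters above marshal for the guest.  With
 * modes == 0 nothing is adjusted and the kernel just reports the
 * current clock discipline.  "example_query_clock" is hypothetical.
 */
#if 0
#include <sys/timex.h>
#include <stdio.h>

static void example_query_clock(void)
{
    struct timex tx = { .modes = 0 };   /* query only */
    int state = adjtimex(&tx);          /* TIME_OK, TIME_INS, ... or -1 */

    printf("state=%d offset=%ld freq=%ld\n", state, tx.offset, tx.freq);
}
#endif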
7246 
7247 
7248 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7249                                                abi_ulong target_addr)
7250 {
7251     struct target_sigevent *target_sevp;
7252 
7253     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7254         return -TARGET_EFAULT;
7255     }
7256 
7257     /* This union is awkward on 64 bit systems because it has a 32 bit
7258      * integer and a pointer in it; we follow the conversion approach
7259      * used for handling sigval types in signal.c so the guest should get
7260      * the correct value back even if we did a 64 bit byteswap and it's
7261      * using the 32 bit integer.
7262      */
7263     host_sevp->sigev_value.sival_ptr =
7264         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7265     host_sevp->sigev_signo =
7266         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7267     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7268     host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7269 
7270     unlock_user_struct(target_sevp, target_addr, 1);
7271     return 0;
7272 }
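
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * a typical producer of the struct sigevent converted above -- a POSIX
 * timer that delivers SIGALRM carrying a caller-chosen sigval.
 * Hypothetical standalone code; older glibc needs -lrt for timer_create.
 */
#if 0
#include <signal.h>
#include <time.h>

static int example_make_timer(timer_t *out, int payload)
{
    struct sigevent sev = {
        .sigev_notify = SIGEV_SIGNAL,
        .sigev_signo = SIGALRM,
        .sigev_value.sival_int = payload,   /* shows up in siginfo si_value */
    };

    return timer_create(CLOCK_MONOTONIC, &sev, out);
}
#endif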
7273 
7274 #if defined(TARGET_NR_mlockall)
7275 static inline int target_to_host_mlockall_arg(int arg)
7276 {
7277     int result = 0;
7278 
7279     if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7280         result |= MCL_CURRENT;
7281     }
7282     if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7283         result |= MCL_FUTURE;
7284     }
7285     return result;
7286 }
7287 #endif
7288 
7289 static inline abi_long host_to_target_stat64(void *cpu_env,
7290                                              abi_ulong target_addr,
7291                                              struct stat *host_st)
7292 {
7293 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7294     if (((CPUARMState *)cpu_env)->eabi) {
7295         struct target_eabi_stat64 *target_st;
7296 
7297         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7298             return -TARGET_EFAULT;
7299         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7300         __put_user(host_st->st_dev, &target_st->st_dev);
7301         __put_user(host_st->st_ino, &target_st->st_ino);
7302 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7303         __put_user(host_st->st_ino, &target_st->__st_ino);
7304 #endif
7305         __put_user(host_st->st_mode, &target_st->st_mode);
7306         __put_user(host_st->st_nlink, &target_st->st_nlink);
7307         __put_user(host_st->st_uid, &target_st->st_uid);
7308         __put_user(host_st->st_gid, &target_st->st_gid);
7309         __put_user(host_st->st_rdev, &target_st->st_rdev);
7310         __put_user(host_st->st_size, &target_st->st_size);
7311         __put_user(host_st->st_blksize, &target_st->st_blksize);
7312         __put_user(host_st->st_blocks, &target_st->st_blocks);
7313         __put_user(host_st->st_atime, &target_st->target_st_atime);
7314         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7315         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7316         unlock_user_struct(target_st, target_addr, 1);
7317     } else
7318 #endif
7319     {
7320 #if defined(TARGET_HAS_STRUCT_STAT64)
7321         struct target_stat64 *target_st;
7322 #else
7323         struct target_stat *target_st;
7324 #endif
7325 
7326         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7327             return -TARGET_EFAULT;
7328         memset(target_st, 0, sizeof(*target_st));
7329         __put_user(host_st->st_dev, &target_st->st_dev);
7330         __put_user(host_st->st_ino, &target_st->st_ino);
7331 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7332         __put_user(host_st->st_ino, &target_st->__st_ino);
7333 #endif
7334         __put_user(host_st->st_mode, &target_st->st_mode);
7335         __put_user(host_st->st_nlink, &target_st->st_nlink);
7336         __put_user(host_st->st_uid, &target_st->st_uid);
7337         __put_user(host_st->st_gid, &target_st->st_gid);
7338         __put_user(host_st->st_rdev, &target_st->st_rdev);
7339         /* XXX: better use of kernel struct */
7340         __put_user(host_st->st_size, &target_st->st_size);
7341         __put_user(host_st->st_blksize, &target_st->st_blksize);
7342         __put_user(host_st->st_blocks, &target_st->st_blocks);
7343         __put_user(host_st->st_atime, &target_st->target_st_atime);
7344         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7345         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7346         unlock_user_struct(target_st, target_addr, 1);
7347     }
7348 
7349     return 0;
7350 }
7351 
7352 /* ??? Using host futex calls even when target atomic operations
7353    are not really atomic probably breaks things.  However, implementing
7354    futexes locally would make futexes shared between multiple processes
7355    tricky, and they are probably useless anyway because guest atomic
7356    operations won't work either.  */
7357 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7358                     target_ulong uaddr2, int val3)
7359 {
7360     struct timespec ts, *pts;
7361     int base_op;
7362 
7363     /* ??? We assume FUTEX_* constants are the same on both host
7364        and target.  */
7365 #ifdef FUTEX_CMD_MASK
7366     base_op = op & FUTEX_CMD_MASK;
7367 #else
7368     base_op = op;
7369 #endif
7370     switch (base_op) {
7371     case FUTEX_WAIT:
7372     case FUTEX_WAIT_BITSET:
7373         if (timeout) {
7374             pts = &ts;
7375             target_to_host_timespec(pts, timeout);
7376         } else {
7377             pts = NULL;
7378         }
7379         return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7380                          pts, NULL, val3));
7381     case FUTEX_WAKE:
7382         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7383     case FUTEX_FD:
7384         return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7385     case FUTEX_REQUEUE:
7386     case FUTEX_CMP_REQUEUE:
7387     case FUTEX_WAKE_OP:
7388         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7389            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7390            But the prototype takes a `struct timespec *'; insert casts
7391            to satisfy the compiler.  We do not need to tswap TIMEOUT
7392            since it's not compared to guest memory.  */
7393         pts = (struct timespec *)(uintptr_t) timeout;
7394         return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7395                                     g2h(uaddr2),
7396                                     (base_op == FUTEX_CMP_REQUEUE
7397                                      ? tswap32(val3)
7398                                      : val3)));
7399     default:
7400         return -TARGET_ENOSYS;
7401     }
7402 }
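
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * a minimal wait/wake pair on the raw futex syscall, the primitive that
 * do_futex() above forwards guest requests to.  "example_*" names are
 * hypothetical; no timeout handling.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

static long example_futex_wait(uint32_t *uaddr, uint32_t expected)
{
    /* Sleeps only if *uaddr still equals 'expected'. */
    return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static long example_futex_wake_one(uint32_t *uaddr)
{
    /* Wakes at most one waiter; returns the number woken. */
    return syscall(SYS_futex, uaddr, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif
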
7403 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7404 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7405                                      abi_long handle, abi_long mount_id,
7406                                      abi_long flags)
7407 {
7408     struct file_handle *target_fh;
7409     struct file_handle *fh;
7410     int mid = 0;
7411     abi_long ret;
7412     char *name;
7413     unsigned int size, total_size;
7414 
7415     if (get_user_s32(size, handle)) {
7416         return -TARGET_EFAULT;
7417     }
7418 
7419     name = lock_user_string(pathname);
7420     if (!name) {
7421         return -TARGET_EFAULT;
7422     }
7423 
7424     total_size = sizeof(struct file_handle) + size;
7425     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7426     if (!target_fh) {
7427         unlock_user(name, pathname, 0);
7428         return -TARGET_EFAULT;
7429     }
7430 
7431     fh = g_malloc0(total_size);
7432     fh->handle_bytes = size;
7433 
7434     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7435     unlock_user(name, pathname, 0);
7436 
7437     /* man name_to_handle_at(2):
7438      * Other than the use of the handle_bytes field, the caller should treat
7439      * the file_handle structure as an opaque data type
7440      */
7441 
7442     memcpy(target_fh, fh, total_size);
7443     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7444     target_fh->handle_type = tswap32(fh->handle_type);
7445     g_free(fh);
7446     unlock_user(target_fh, handle, total_size);
7447 
7448     if (put_user_s32(mid, mount_id)) {
7449         return -TARGET_EFAULT;
7450     }
7451 
7452     return ret;
7453 
7454 }
7455 #endif
7456 
7457 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7458 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7459                                      abi_long flags)
7460 {
7461     struct file_handle *target_fh;
7462     struct file_handle *fh;
7463     unsigned int size, total_size;
7464     abi_long ret;
7465 
7466     if (get_user_s32(size, handle)) {
7467         return -TARGET_EFAULT;
7468     }
7469 
7470     total_size = sizeof(struct file_handle) + size;
7471     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7472     if (!target_fh) {
7473         return -TARGET_EFAULT;
7474     }
7475 
7476     fh = g_memdup(target_fh, total_size);
7477     fh->handle_bytes = size;
7478     fh->handle_type = tswap32(target_fh->handle_type);
7479 
7480     ret = get_errno(open_by_handle_at(mount_fd, fh,
7481                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7482 
7483     g_free(fh);
7484 
7485     unlock_user(target_fh, handle, total_size);
7486 
7487     return ret;
7488 }
7489 #endif
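
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * the usual two-call pattern for name_to_handle_at() that the wrappers
 * above translate for the guest.  A probe call with handle_bytes == 0
 * fails with EOVERFLOW but reports the size needed.  Hypothetical
 * standalone code (needs _GNU_SOURCE); error handling abbreviated.
 */
#if 0
#include <fcntl.h>
#include <errno.h>
#include <stdlib.h>

static struct file_handle *example_get_handle(const char *path, int *mount_id)
{
    struct file_handle probe = { .handle_bytes = 0 };
    struct file_handle *fh;

    if (name_to_handle_at(AT_FDCWD, path, &probe, mount_id, 0) != -1 ||
        errno != EOVERFLOW) {
        return NULL;
    }
    fh = malloc(sizeof(*fh) + probe.handle_bytes);
    fh->handle_bytes = probe.handle_bytes;
    if (name_to_handle_at(AT_FDCWD, path, fh, mount_id, 0) == -1) {
        free(fh);
        return NULL;
    }
    return fh;   /* later: open_by_handle_at(mount_fd, fh, O_RDONLY) */
}
#endif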
7490 
7491 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7492 
7493 /* signalfd siginfo conversion */
7494 
7495 static void
7496 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
7497                                 const struct signalfd_siginfo *info)
7498 {
7499     int sig = host_to_target_signal(info->ssi_signo);
7500 
7501     /* linux/signalfd.h defines an ssi_addr_lsb field that is
7502      * not defined in sys/signalfd.h but is used by some kernels
7503      */
7504 
7505 #ifdef BUS_MCEERR_AO
7506     if (tinfo->ssi_signo == SIGBUS &&
7507         (tinfo->ssi_code == BUS_MCEERR_AR ||
7508          tinfo->ssi_code == BUS_MCEERR_AO)) {
7509         uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
7510         uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
7511         *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
7512     }
7513 #endif
7514 
7515     tinfo->ssi_signo = tswap32(sig);
7516     tinfo->ssi_errno = tswap32(info->ssi_errno);
7517     tinfo->ssi_code = tswap32(info->ssi_code);
7518     tinfo->ssi_pid = tswap32(info->ssi_pid);
7519     tinfo->ssi_uid = tswap32(info->ssi_uid);
7520     tinfo->ssi_fd = tswap32(info->ssi_fd);
7521     tinfo->ssi_tid = tswap32(info->ssi_tid);
7522     tinfo->ssi_band = tswap32(info->ssi_band);
7523     tinfo->ssi_overrun = tswap32(info->ssi_overrun);
7524     tinfo->ssi_trapno = tswap32(info->ssi_trapno);
7525     tinfo->ssi_status = tswap32(info->ssi_status);
7526     tinfo->ssi_int = tswap32(info->ssi_int);
7527     tinfo->ssi_ptr = tswap64(info->ssi_ptr);
7528     tinfo->ssi_utime = tswap64(info->ssi_utime);
7529     tinfo->ssi_stime = tswap64(info->ssi_stime);
7530     tinfo->ssi_addr = tswap64(info->ssi_addr);
7531 }
7532 
7533 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7534 {
7535     int i;
7536 
7537     for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7538         host_to_target_signalfd_siginfo(buf + i, buf + i);
7539     }
7540 
7541     return len;
7542 }
7543 
7544 static TargetFdTrans target_signalfd_trans = {
7545     .host_to_target_data = host_to_target_data_signalfd,
7546 };
7547 
7548 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7549 {
7550     int host_flags;
7551     target_sigset_t *target_mask;
7552     sigset_t host_mask;
7553     abi_long ret;
7554 
7555     if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7556         return -TARGET_EINVAL;
7557     }
7558     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7559         return -TARGET_EFAULT;
7560     }
7561 
7562     target_to_host_sigset(&host_mask, target_mask);
7563 
7564     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7565 
7566     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7567     if (ret >= 0) {
7568         fd_trans_register(ret, &target_signalfd_trans);
7569     }
7570 
7571     unlock_user_struct(target_mask, mask, 0);
7572 
7573     return ret;
7574 }
7575 #endif
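
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * the host-side call sequence that do_signalfd4() above performs for
 * the guest.  The signal has to be blocked first, otherwise normal
 * delivery wins and nothing arrives on the descriptor.
 * "example_signalfd_for_sigint" is hypothetical.
 */
#if 0
#include <sys/signalfd.h>
#include <signal.h>

static int example_signalfd_for_sigint(void)
{
    sigset_t mask;

    sigemptyset(&mask);
    sigaddset(&mask, SIGINT);
    sigprocmask(SIG_BLOCK, &mask, NULL);
    /* Each read() now returns one struct signalfd_siginfo per signal. */
    return signalfd(-1, &mask, SFD_CLOEXEC);
}
#endif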
7576 
7577 /* Map host to target signal numbers for the wait family of syscalls.
7578    Assume all other status bits are the same.  */
7579 int host_to_target_waitstatus(int status)
7580 {
7581     if (WIFSIGNALED(status)) {
7582         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7583     }
7584     if (WIFSTOPPED(status)) {
7585         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7586                | (status & 0xff);
7587     }
7588     return status;
7589 }
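
/*
 * Added illustration (not from the original file, kept under "#if 0"):
 * the wait-status layout that host_to_target_waitstatus() relies on --
 * the termination signal lives in the low 7 bits, while exit codes and
 * stop signals live in bits 8..15.  "example_print_status" is
 * hypothetical.
 */
#if 0
#include <sys/wait.h>
#include <stdio.h>

static void example_print_status(int status)
{
    if (WIFEXITED(status)) {
        printf("exited, code %d\n", WEXITSTATUS(status));    /* bits 8..15 */
    } else if (WIFSIGNALED(status)) {
        printf("killed by signal %d\n", WTERMSIG(status));   /* bits 0..6 */
    } else if (WIFSTOPPED(status)) {
        printf("stopped by signal %d\n", WSTOPSIG(status));  /* bits 8..15 */
    }
}
#endif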
7590 
7591 static int open_self_cmdline(void *cpu_env, int fd)
7592 {
7593     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7594     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7595     int i;
7596 
7597     for (i = 0; i < bprm->argc; i++) {
7598         size_t len = strlen(bprm->argv[i]) + 1;
7599 
7600         if (write(fd, bprm->argv[i], len) != len) {
7601             return -1;
7602         }
7603     }
7604 
7605     return 0;
7606 }
7607 
7608 static int open_self_maps(void *cpu_env, int fd)
7609 {
7610     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7611     TaskState *ts = cpu->opaque;
7612     FILE *fp;
7613     char *line = NULL;
7614     size_t len = 0;
7615     ssize_t read;
7616 
7617     fp = fopen("/proc/self/maps", "r");
7618     if (fp == NULL) {
7619         return -1;
7620     }
7621 
7622     while ((read = getline(&line, &len, fp)) != -1) {
7623         int fields, dev_maj, dev_min, inode;
7624         uint64_t min, max, offset;
7625         char flag_r, flag_w, flag_x, flag_p;
7626         char path[512] = "";
7627         fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7628                         " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7629                         &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7630 
7631         if ((fields < 10) || (fields > 11)) {
7632             continue;
7633         }
7634         if (h2g_valid(min)) {
7635             int flags = page_get_flags(h2g(min));
7636             max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7637             if (page_check_range(h2g(min), max - min, flags) == -1) {
7638                 continue;
7639             }
7640             if (h2g(min) == ts->info->stack_limit) {
7641                 pstrcpy(path, sizeof(path), "      [stack]");
7642             }
7643             dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7644                     " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7645                     h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7646                     flag_x, flag_p, offset, dev_maj, dev_min, inode,
7647                     path[0] ? "         " : "", path);
7648         }
7649     }
7650 
7651     free(line);
7652     fclose(fp);
7653 
7654     return 0;
7655 }
7656 
7657 static int open_self_stat(void *cpu_env, int fd)
7658 {
7659     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7660     TaskState *ts = cpu->opaque;
7661     abi_ulong start_stack = ts->info->start_stack;
7662     int i;
7663 
7664     for (i = 0; i < 44; i++) {
7665       char buf[128];
7666       int len;
7667       uint64_t val = 0;
7668 
7669       if (i == 0) {
7670         /* pid */
7671         val = getpid();
7672         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7673       } else if (i == 1) {
7674         /* app name */
7675         snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7676       } else if (i == 27) {
7677         /* stack bottom */
7678         val = start_stack;
7679         snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7680       } else {
7681         /* for the rest, there is MasterCard: report everything else as 0 */
7682         snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7683       }
7684 
7685       len = strlen(buf);
7686       if (write(fd, buf, len) != len) {
7687           return -1;
7688       }
7689     }
7690 
7691     return 0;
7692 }
7693 
7694 static int open_self_auxv(void *cpu_env, int fd)
7695 {
7696     CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7697     TaskState *ts = cpu->opaque;
7698     abi_ulong auxv = ts->info->saved_auxv;
7699     abi_ulong len = ts->info->auxv_len;
7700     char *ptr;
7701 
7702     /*
7703      * The auxiliary vector is stored on the target process stack.
7704      * Read in the whole auxv vector and copy it to the file.
7705      */
7706     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7707     if (ptr != NULL) {
7708         while (len > 0) {
7709             ssize_t r;
7710             r = write(fd, ptr, len);
7711             if (r <= 0) {
7712                 break;
7713             }
7714             len -= r;
7715             ptr += r;
7716         }
7717         lseek(fd, 0, SEEK_SET);
7718         unlock_user(ptr, auxv, len);
7719     }
7720 
7721     return 0;
7722 }
7723 
7724 static int is_proc_myself(const char *filename, const char *entry)
7725 {
7726     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7727         filename += strlen("/proc/");
7728         if (!strncmp(filename, "self/", strlen("self/"))) {
7729             filename += strlen("self/");
7730         } else if (*filename >= '1' && *filename <= '9') {
7731             char myself[80];
7732             snprintf(myself, sizeof(myself), "%d/", getpid());
7733             if (!strncmp(filename, myself, strlen(myself))) {
7734                 filename += strlen(myself);
7735             } else {
7736                 return 0;
7737             }
7738         } else {
7739             return 0;
7740         }
7741         if (!strcmp(filename, entry)) {
7742             return 1;
7743         }
7744     }
7745     return 0;
7746 }
7747 
7748 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7749 static int is_proc(const char *filename, const char *entry)
7750 {
7751     return strcmp(filename, entry) == 0;
7752 }
7753 
7754 static int open_net_route(void *cpu_env, int fd)
7755 {
7756     FILE *fp;
7757     char *line = NULL;
7758     size_t len = 0;
7759     ssize_t read;
7760 
7761     fp = fopen("/proc/net/route", "r");
7762     if (fp == NULL) {
7763         return -1;
7764     }
7765 
7766     /* read header */
7767 
7768     read = getline(&line, &len, fp);
7769     dprintf(fd, "%s", line);
7770 
7771     /* read routes */
7772 
7773     while ((read = getline(&line, &len, fp)) != -1) {
7774         char iface[16];
7775         uint32_t dest, gw, mask;
7776         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7777         sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7778                      iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7779                      &mask, &mtu, &window, &irtt);
7780         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7781                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7782                 metric, tswap32(mask), mtu, window, irtt);
7783     }
7784 
7785     free(line);
7786     fclose(fp);
7787 
7788     return 0;
7789 }
7790 #endif
7791 
7792 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7793 {
7794     struct fake_open {
7795         const char *filename;
7796         int (*fill)(void *cpu_env, int fd);
7797         int (*cmp)(const char *s1, const char *s2);
7798     };
7799     const struct fake_open *fake_open;
7800     static const struct fake_open fakes[] = {
7801         { "maps", open_self_maps, is_proc_myself },
7802         { "stat", open_self_stat, is_proc_myself },
7803         { "auxv", open_self_auxv, is_proc_myself },
7804         { "cmdline", open_self_cmdline, is_proc_myself },
7805 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7806         { "/proc/net/route", open_net_route, is_proc },
7807 #endif
7808         { NULL, NULL, NULL }
7809     };
7810 
7811     if (is_proc_myself(pathname, "exe")) {
7812         int execfd = qemu_getauxval(AT_EXECFD);
7813         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7814     }
7815 
7816     for (fake_open = fakes; fake_open->filename; fake_open++) {
7817         if (fake_open->cmp(pathname, fake_open->filename)) {
7818             break;
7819         }
7820     }
7821 
7822     if (fake_open->filename) {
7823         const char *tmpdir;
7824         char filename[PATH_MAX];
7825         int fd, r;
7826 
7827         /* create a temporary file to hold the synthesized contents */
7828         tmpdir = getenv("TMPDIR");
7829         if (!tmpdir)
7830             tmpdir = "/tmp";
7831         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7832         fd = mkstemp(filename);
7833         if (fd < 0) {
7834             return fd;
7835         }
7836         unlink(filename);
7837 
7838         if ((r = fake_open->fill(cpu_env, fd))) {
7839             int e = errno;
7840             close(fd);
7841             errno = e;
7842             return r;
7843         }
7844         lseek(fd, 0, SEEK_SET);
7845 
7846         return fd;
7847     }
7848 
7849     return safe_openat(dirfd, path(pathname), flags, mode);
7850 }
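
/*
 * Illustrative flow (assuming a guest that runs "cat /proc/self/maps"):
 * the TARGET_NR_open/TARGET_NR_openat handlers below call do_openat(),
 * is_proc_myself() matches the "maps" entry in fakes[], open_self_maps()
 * fills an unlinked temporary file, and the guest then reads the
 * synthesized contents through the returned host fd as if the real file
 * had been opened.
 */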
7851 
7852 #define TIMER_MAGIC 0x0caf0000
7853 #define TIMER_MAGIC_MASK 0xffff0000
7854 
7855 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7856 static target_timer_t get_timer_id(abi_long arg)
7857 {
7858     target_timer_t timerid = arg;
7859 
7860     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7861         return -TARGET_EINVAL;
7862     }
7863 
7864     timerid &= 0xffff;
7865 
7866     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7867         return -TARGET_EINVAL;
7868     }
7869 
7870     return timerid;
7871 }
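
/*
 * Illustrative sketch (guarded out with #if 0, not part of the build): how a
 * guest-visible timer ID is expected to round-trip through the encoding that
 * get_timer_id() undoes.  "slot" is a hypothetical free index into
 * g_posix_timers and is assumed to be below ARRAY_SIZE(g_posix_timers).
 */
#if 0
static void example_timer_id_roundtrip(uint16_t slot)
{
    /* Encode: tag the 16-bit slot index with TIMER_MAGIC in the high bits. */
    target_timer_t guest_id = TIMER_MAGIC | slot;

    /* Decode: get_timer_id() strips the magic and validates the index. */
    assert(get_timer_id(guest_id) == slot);
}
#endif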
7872 
7873 static abi_long swap_data_eventfd(void *buf, size_t len)
7874 {
7875     uint64_t *counter = buf;
7876     int i;
7877 
7878     if (len < sizeof(uint64_t)) {
7879         return -EINVAL;
7880     }
7881 
7882     for (i = 0; i < len; i += sizeof(uint64_t)) {
7883         *counter = tswap64(*counter);
7884         counter++;
7885     }
7886 
7887     return len;
7888 }
7889 
7890 static TargetFdTrans target_eventfd_trans = {
7891     .host_to_target_data = swap_data_eventfd,
7892     .target_to_host_data = swap_data_eventfd,
7893 };
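
/*
 * A byte swap is its own inverse, so the same helper serves both directions
 * of the eventfd translation; on a same-endianness host/target pair
 * tswap64() is a no-op and the 8-byte counter passes through unchanged.
 */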
7894 
7895 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7896     (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7897      defined(__NR_inotify_init1))
7898 static abi_long host_to_target_data_inotify(void *buf, size_t len)
7899 {
7900     struct inotify_event *ev;
7901     int i;
7902     uint32_t name_len;
7903 
7904     for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
7905         ev = (struct inotify_event *)((char *)buf + i);
7906         name_len = ev->len;
7907 
7908         ev->wd = tswap32(ev->wd);
7909         ev->mask = tswap32(ev->mask);
7910         ev->cookie = tswap32(ev->cookie);
7911         ev->len = tswap32(name_len);
7912     }
7913 
7914     return len;
7915 }
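
/*
 * Only the fixed 32-bit fields of each inotify_event (wd, mask, cookie and
 * len) need byte-swapping; the variable-length name that follows each event
 * is plain character data and is passed through untouched.
 */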
7916 
7917 static TargetFdTrans target_inotify_trans = {
7918     .host_to_target_data = host_to_target_data_inotify,
7919 };
7920 #endif
7921 
7922 static int target_to_host_cpu_mask(unsigned long *host_mask,
7923                                    size_t host_size,
7924                                    abi_ulong target_addr,
7925                                    size_t target_size)
7926 {
7927     unsigned target_bits = sizeof(abi_ulong) * 8;
7928     unsigned host_bits = sizeof(*host_mask) * 8;
7929     abi_ulong *target_mask;
7930     unsigned i, j;
7931 
7932     assert(host_size >= target_size);
7933 
7934     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7935     if (!target_mask) {
7936         return -TARGET_EFAULT;
7937     }
7938     memset(host_mask, 0, host_size);
7939 
7940     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7941         unsigned bit = i * target_bits;
7942         abi_ulong val;
7943 
7944         __get_user(val, &target_mask[i]);
7945         for (j = 0; j < target_bits; j++, bit++) {
7946             if (val & (1UL << j)) {
7947                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7948             }
7949         }
7950     }
7951 
7952     unlock_user(target_mask, target_addr, 0);
7953     return 0;
7954 }
7955 
7956 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7957                                    size_t host_size,
7958                                    abi_ulong target_addr,
7959                                    size_t target_size)
7960 {
7961     unsigned target_bits = sizeof(abi_ulong) * 8;
7962     unsigned host_bits = sizeof(*host_mask) * 8;
7963     abi_ulong *target_mask;
7964     unsigned i, j;
7965 
7966     assert(host_size >= target_size);
7967 
7968     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7969     if (!target_mask) {
7970         return -TARGET_EFAULT;
7971     }
7972 
7973     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7974         unsigned bit = i * target_bits;
7975         abi_ulong val = 0;
7976 
7977         for (j = 0; j < target_bits; j++, bit++) {
7978             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7979                 val |= 1UL << j;
7980             }
7981         }
7982         __put_user(val, &target_mask[i]);
7983     }
7984 
7985     unlock_user(target_mask, target_addr, target_size);
7986     return 0;
7987 }
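
/*
 * Worked example (illustrative, assuming a 32-bit abi_ulong guest on a host
 * with 64-bit unsigned long): a guest affinity mask of two words
 * { 0x00000001, 0x00000002 } names CPUs 0 and 33; target_to_host_cpu_mask()
 * packs those same bit positions into a single host word, 0x0000000200000001,
 * and host_to_target_cpu_mask() performs the inverse unpacking.
 */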
7988 
7989 /* do_syscall() should always have a single exit point at the end so
7990    that actions, such as logging of syscall results, can be performed.
7991    All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7992 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7993                     abi_long arg2, abi_long arg3, abi_long arg4,
7994                     abi_long arg5, abi_long arg6, abi_long arg7,
7995                     abi_long arg8)
7996 {
7997     CPUState *cpu = ENV_GET_CPU(cpu_env);
7998     abi_long ret;
7999     struct stat st;
8000     struct statfs stfs;
8001     void *p;
8002 
8003 #if defined(DEBUG_ERESTARTSYS)
8004     /* Debug-only code for exercising the syscall-restart code paths
8005      * in the per-architecture cpu main loops: restart every syscall
8006      * the guest makes once before letting it through.
8007      */
8008     {
8009         static int flag;
8010 
8011         flag = !flag;
8012         if (flag) {
8013             return -TARGET_ERESTARTSYS;
8014         }
8015     }
8016 #endif
8017 
8018 #ifdef DEBUG
8019     gemu_log("syscall %d", num);
8020 #endif
8021     trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
8022     if (do_strace)
8023         print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
8024 
8025     switch (num) {
8026     case TARGET_NR_exit:
8027         /* In old applications this may be used to implement _exit(2).
8028            However in threaded applications it is used for thread termination,
8029            and _exit_group is used for application termination.
8030            Do thread termination if we have more than one thread.  */
8031 
8032         if (block_signals()) {
8033             ret = -TARGET_ERESTARTSYS;
8034             break;
8035         }
8036 
8037         cpu_list_lock();
8038 
8039         if (CPU_NEXT(first_cpu)) {
8040             TaskState *ts;
8041 
8042             /* Remove the CPU from the list.  */
8043             QTAILQ_REMOVE(&cpus, cpu, node);
8044 
8045             cpu_list_unlock();
8046 
8047             ts = cpu->opaque;
8048             if (ts->child_tidptr) {
8049                 put_user_u32(0, ts->child_tidptr);
8050                 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
8051                           NULL, NULL, 0);
8052             }
8053             thread_cpu = NULL;
8054             object_unref(OBJECT(cpu));
8055             g_free(ts);
8056             rcu_unregister_thread();
8057             pthread_exit(NULL);
8058         }
8059 
8060         cpu_list_unlock();
8061         preexit_cleanup(cpu_env, arg1);
8062         _exit(arg1);
8063         ret = 0; /* avoid warning */
8064         break;
8065     case TARGET_NR_read:
8066         if (arg3 == 0)
8067             ret = 0;
8068         else {
8069             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8070                 goto efault;
8071             ret = get_errno(safe_read(arg1, p, arg3));
8072             if (ret >= 0 &&
8073                 fd_trans_host_to_target_data(arg1)) {
8074                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8075             }
8076             unlock_user(p, arg2, ret);
8077         }
8078         break;
8079     case TARGET_NR_write:
8080         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8081             goto efault;
8082         if (fd_trans_target_to_host_data(arg1)) {
8083             void *copy = g_malloc(arg3);
8084             memcpy(copy, p, arg3);
8085             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8086             if (ret >= 0) {
8087                 ret = get_errno(safe_write(arg1, copy, ret));
8088             }
8089             g_free(copy);
8090         } else {
8091             ret = get_errno(safe_write(arg1, p, arg3));
8092         }
8093         unlock_user(p, arg2, 0);
8094         break;
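
    /*
     * Note on the TARGET_NR_write path above: the data is copied before
     * translation because target_to_host_data hooks (such as
     * swap_data_eventfd) rewrite the buffer in place, and the guest's own
     * buffer must not be modified as a side effect of write(2).
     */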
8095 #ifdef TARGET_NR_open
8096     case TARGET_NR_open:
8097         if (!(p = lock_user_string(arg1)))
8098             goto efault;
8099         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8100                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8101                                   arg3));
8102         fd_trans_unregister(ret);
8103         unlock_user(p, arg1, 0);
8104         break;
8105 #endif
8106     case TARGET_NR_openat:
8107         if (!(p = lock_user_string(arg2)))
8108             goto efault;
8109         ret = get_errno(do_openat(cpu_env, arg1, p,
8110                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8111                                   arg4));
8112         fd_trans_unregister(ret);
8113         unlock_user(p, arg2, 0);
8114         break;
8115 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8116     case TARGET_NR_name_to_handle_at:
8117         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8118         break;
8119 #endif
8120 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8121     case TARGET_NR_open_by_handle_at:
8122         ret = do_open_by_handle_at(arg1, arg2, arg3);
8123         fd_trans_unregister(ret);
8124         break;
8125 #endif
8126     case TARGET_NR_close:
8127         fd_trans_unregister(arg1);
8128         ret = get_errno(close(arg1));
8129         break;
8130     case TARGET_NR_brk:
8131         ret = do_brk(arg1);
8132         break;
8133 #ifdef TARGET_NR_fork
8134     case TARGET_NR_fork:
8135         ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8136         break;
8137 #endif
8138 #ifdef TARGET_NR_waitpid
8139     case TARGET_NR_waitpid:
8140         {
8141             int status;
8142             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8143             if (!is_error(ret) && arg2 && ret
8144                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8145                 goto efault;
8146         }
8147         break;
8148 #endif
8149 #ifdef TARGET_NR_waitid
8150     case TARGET_NR_waitid:
8151         {
8152             siginfo_t info;
8153             info.si_pid = 0;
8154             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8155             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8156                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8157                     goto efault;
8158                 host_to_target_siginfo(p, &info);
8159                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8160             }
8161         }
8162         break;
8163 #endif
8164 #ifdef TARGET_NR_creat /* not on alpha */
8165     case TARGET_NR_creat:
8166         if (!(p = lock_user_string(arg1)))
8167             goto efault;
8168         ret = get_errno(creat(p, arg2));
8169         fd_trans_unregister(ret);
8170         unlock_user(p, arg1, 0);
8171         break;
8172 #endif
8173 #ifdef TARGET_NR_link
8174     case TARGET_NR_link:
8175         {
8176             void * p2;
8177             p = lock_user_string(arg1);
8178             p2 = lock_user_string(arg2);
8179             if (!p || !p2)
8180                 ret = -TARGET_EFAULT;
8181             else
8182                 ret = get_errno(link(p, p2));
8183             unlock_user(p2, arg2, 0);
8184             unlock_user(p, arg1, 0);
8185         }
8186         break;
8187 #endif
8188 #if defined(TARGET_NR_linkat)
8189     case TARGET_NR_linkat:
8190         {
8191             void * p2 = NULL;
8192             if (!arg2 || !arg4)
8193                 goto efault;
8194             p  = lock_user_string(arg2);
8195             p2 = lock_user_string(arg4);
8196             if (!p || !p2)
8197                 ret = -TARGET_EFAULT;
8198             else
8199                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8200             unlock_user(p, arg2, 0);
8201             unlock_user(p2, arg4, 0);
8202         }
8203         break;
8204 #endif
8205 #ifdef TARGET_NR_unlink
8206     case TARGET_NR_unlink:
8207         if (!(p = lock_user_string(arg1)))
8208             goto efault;
8209         ret = get_errno(unlink(p));
8210         unlock_user(p, arg1, 0);
8211         break;
8212 #endif
8213 #if defined(TARGET_NR_unlinkat)
8214     case TARGET_NR_unlinkat:
8215         if (!(p = lock_user_string(arg2)))
8216             goto efault;
8217         ret = get_errno(unlinkat(arg1, p, arg3));
8218         unlock_user(p, arg2, 0);
8219         break;
8220 #endif
8221     case TARGET_NR_execve:
8222         {
8223             char **argp, **envp;
8224             int argc, envc;
8225             abi_ulong gp;
8226             abi_ulong guest_argp;
8227             abi_ulong guest_envp;
8228             abi_ulong addr;
8229             char **q;
8230             int total_size = 0;
8231 
8232             argc = 0;
8233             guest_argp = arg2;
8234             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8235                 if (get_user_ual(addr, gp))
8236                     goto efault;
8237                 if (!addr)
8238                     break;
8239                 argc++;
8240             }
8241             envc = 0;
8242             guest_envp = arg3;
8243             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8244                 if (get_user_ual(addr, gp))
8245                     goto efault;
8246                 if (!addr)
8247                     break;
8248                 envc++;
8249             }
8250 
8251             argp = g_new0(char *, argc + 1);
8252             envp = g_new0(char *, envc + 1);
8253 
8254             for (gp = guest_argp, q = argp; gp;
8255                   gp += sizeof(abi_ulong), q++) {
8256                 if (get_user_ual(addr, gp))
8257                     goto execve_efault;
8258                 if (!addr)
8259                     break;
8260                 if (!(*q = lock_user_string(addr)))
8261                     goto execve_efault;
8262                 total_size += strlen(*q) + 1;
8263             }
8264             *q = NULL;
8265 
8266             for (gp = guest_envp, q = envp; gp;
8267                   gp += sizeof(abi_ulong), q++) {
8268                 if (get_user_ual(addr, gp))
8269                     goto execve_efault;
8270                 if (!addr)
8271                     break;
8272                 if (!(*q = lock_user_string(addr)))
8273                     goto execve_efault;
8274                 total_size += strlen(*q) + 1;
8275             }
8276             *q = NULL;
8277 
8278             if (!(p = lock_user_string(arg1)))
8279                 goto execve_efault;
8280             /* Although execve() is not an interruptible syscall it is
8281              * a special case where we must use the safe_syscall wrapper:
8282              * if we allow a signal to happen before we make the host
8283              * syscall then we will 'lose' it, because at the point of
8284              * execve the process leaves QEMU's control. So we use the
8285              * safe syscall wrapper to ensure that we either take the
8286              * signal as a guest signal, or else it does not happen
8287              * before the execve completes and makes it the other
8288              * program's problem.
8289              */
8290             ret = get_errno(safe_execve(p, argp, envp));
8291             unlock_user(p, arg1, 0);
8292 
8293             goto execve_end;
8294 
8295         execve_efault:
8296             ret = -TARGET_EFAULT;
8297 
8298         execve_end:
8299             for (gp = guest_argp, q = argp; *q;
8300                   gp += sizeof(abi_ulong), q++) {
8301                 if (get_user_ual(addr, gp)
8302                     || !addr)
8303                     break;
8304                 unlock_user(*q, addr, 0);
8305             }
8306             for (gp = guest_envp, q = envp; *q;
8307                   gp += sizeof(abi_ulong), q++) {
8308                 if (get_user_ual(addr, gp)
8309                     || !addr)
8310                     break;
8311                 unlock_user(*q, addr, 0);
8312             }
8313 
8314             g_free(argp);
8315             g_free(envp);
8316         }
8317         break;
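
    /*
     * Note on the execve cleanup above: unlock_user() needs the original
     * guest address of each string, so the execve_end loops walk the guest
     * argv and envp arrays again rather than keeping the addresses that
     * were locked earlier.
     */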
8318     case TARGET_NR_chdir:
8319         if (!(p = lock_user_string(arg1)))
8320             goto efault;
8321         ret = get_errno(chdir(p));
8322         unlock_user(p, arg1, 0);
8323         break;
8324 #ifdef TARGET_NR_time
8325     case TARGET_NR_time:
8326         {
8327             time_t host_time;
8328             ret = get_errno(time(&host_time));
8329             if (!is_error(ret)
8330                 && arg1
8331                 && put_user_sal(host_time, arg1))
8332                 goto efault;
8333         }
8334         break;
8335 #endif
8336 #ifdef TARGET_NR_mknod
8337     case TARGET_NR_mknod:
8338         if (!(p = lock_user_string(arg1)))
8339             goto efault;
8340         ret = get_errno(mknod(p, arg2, arg3));
8341         unlock_user(p, arg1, 0);
8342         break;
8343 #endif
8344 #if defined(TARGET_NR_mknodat)
8345     case TARGET_NR_mknodat:
8346         if (!(p = lock_user_string(arg2)))
8347             goto efault;
8348         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8349         unlock_user(p, arg2, 0);
8350         break;
8351 #endif
8352 #ifdef TARGET_NR_chmod
8353     case TARGET_NR_chmod:
8354         if (!(p = lock_user_string(arg1)))
8355             goto efault;
8356         ret = get_errno(chmod(p, arg2));
8357         unlock_user(p, arg1, 0);
8358         break;
8359 #endif
8360 #ifdef TARGET_NR_break
8361     case TARGET_NR_break:
8362         goto unimplemented;
8363 #endif
8364 #ifdef TARGET_NR_oldstat
8365     case TARGET_NR_oldstat:
8366         goto unimplemented;
8367 #endif
8368     case TARGET_NR_lseek:
8369         ret = get_errno(lseek(arg1, arg2, arg3));
8370         break;
8371 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8372     /* Alpha specific */
8373     case TARGET_NR_getxpid:
8374         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8375         ret = get_errno(getpid());
8376         break;
8377 #endif
8378 #ifdef TARGET_NR_getpid
8379     case TARGET_NR_getpid:
8380         ret = get_errno(getpid());
8381         break;
8382 #endif
8383     case TARGET_NR_mount:
8384         {
8385             /* need to look at the data field */
8386             void *p2, *p3;
8387 
8388             if (arg1) {
8389                 p = lock_user_string(arg1);
8390                 if (!p) {
8391                     goto efault;
8392                 }
8393             } else {
8394                 p = NULL;
8395             }
8396 
8397             p2 = lock_user_string(arg2);
8398             if (!p2) {
8399                 if (arg1) {
8400                     unlock_user(p, arg1, 0);
8401                 }
8402                 goto efault;
8403             }
8404 
8405             if (arg3) {
8406                 p3 = lock_user_string(arg3);
8407                 if (!p3) {
8408                     if (arg1) {
8409                         unlock_user(p, arg1, 0);
8410                     }
8411                     unlock_user(p2, arg2, 0);
8412                     goto efault;
8413                 }
8414             } else {
8415                 p3 = NULL;
8416             }
8417 
8418             /* FIXME - arg5 should be locked, but it isn't clear how to
8419              * do that since it's not guaranteed to be a NULL-terminated
8420              * string.
8421              */
8422             if (!arg5) {
8423                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8424             } else {
8425                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8426             }
8427             ret = get_errno(ret);
8428 
8429             if (arg1) {
8430                 unlock_user(p, arg1, 0);
8431             }
8432             unlock_user(p2, arg2, 0);
8433             if (arg3) {
8434                 unlock_user(p3, arg3, 0);
8435             }
8436         }
8437         break;
8438 #ifdef TARGET_NR_umount
8439     case TARGET_NR_umount:
8440         if (!(p = lock_user_string(arg1)))
8441             goto efault;
8442         ret = get_errno(umount(p));
8443         unlock_user(p, arg1, 0);
8444         break;
8445 #endif
8446 #ifdef TARGET_NR_stime /* not on alpha */
8447     case TARGET_NR_stime:
8448         {
8449             time_t host_time;
8450             if (get_user_sal(host_time, arg1))
8451                 goto efault;
8452             ret = get_errno(stime(&host_time));
8453         }
8454         break;
8455 #endif
8456     case TARGET_NR_ptrace:
8457         goto unimplemented;
8458 #ifdef TARGET_NR_alarm /* not on alpha */
8459     case TARGET_NR_alarm:
8460         ret = alarm(arg1);
8461         break;
8462 #endif
8463 #ifdef TARGET_NR_oldfstat
8464     case TARGET_NR_oldfstat:
8465         goto unimplemented;
8466 #endif
8467 #ifdef TARGET_NR_pause /* not on alpha */
8468     case TARGET_NR_pause:
8469         if (!block_signals()) {
8470             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8471         }
8472         ret = -TARGET_EINTR;
8473         break;
8474 #endif
8475 #ifdef TARGET_NR_utime
8476     case TARGET_NR_utime:
8477         {
8478             struct utimbuf tbuf, *host_tbuf;
8479             struct target_utimbuf *target_tbuf;
8480             if (arg2) {
8481                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8482                     goto efault;
8483                 tbuf.actime = tswapal(target_tbuf->actime);
8484                 tbuf.modtime = tswapal(target_tbuf->modtime);
8485                 unlock_user_struct(target_tbuf, arg2, 0);
8486                 host_tbuf = &tbuf;
8487             } else {
8488                 host_tbuf = NULL;
8489             }
8490             if (!(p = lock_user_string(arg1)))
8491                 goto efault;
8492             ret = get_errno(utime(p, host_tbuf));
8493             unlock_user(p, arg1, 0);
8494         }
8495         break;
8496 #endif
8497 #ifdef TARGET_NR_utimes
8498     case TARGET_NR_utimes:
8499         {
8500             struct timeval *tvp, tv[2];
8501             if (arg2) {
8502                 if (copy_from_user_timeval(&tv[0], arg2)
8503                     || copy_from_user_timeval(&tv[1],
8504                                               arg2 + sizeof(struct target_timeval)))
8505                     goto efault;
8506                 tvp = tv;
8507             } else {
8508                 tvp = NULL;
8509             }
8510             if (!(p = lock_user_string(arg1)))
8511                 goto efault;
8512             ret = get_errno(utimes(p, tvp));
8513             unlock_user(p, arg1, 0);
8514         }
8515         break;
8516 #endif
8517 #if defined(TARGET_NR_futimesat)
8518     case TARGET_NR_futimesat:
8519         {
8520             struct timeval *tvp, tv[2];
8521             if (arg3) {
8522                 if (copy_from_user_timeval(&tv[0], arg3)
8523                     || copy_from_user_timeval(&tv[1],
8524                                               arg3 + sizeof(struct target_timeval)))
8525                     goto efault;
8526                 tvp = tv;
8527             } else {
8528                 tvp = NULL;
8529             }
8530             if (!(p = lock_user_string(arg2)))
8531                 goto efault;
8532             ret = get_errno(futimesat(arg1, path(p), tvp));
8533             unlock_user(p, arg2, 0);
8534         }
8535         break;
8536 #endif
8537 #ifdef TARGET_NR_stty
8538     case TARGET_NR_stty:
8539         goto unimplemented;
8540 #endif
8541 #ifdef TARGET_NR_gtty
8542     case TARGET_NR_gtty:
8543         goto unimplemented;
8544 #endif
8545 #ifdef TARGET_NR_access
8546     case TARGET_NR_access:
8547         if (!(p = lock_user_string(arg1)))
8548             goto efault;
8549         ret = get_errno(access(path(p), arg2));
8550         unlock_user(p, arg1, 0);
8551         break;
8552 #endif
8553 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8554     case TARGET_NR_faccessat:
8555         if (!(p = lock_user_string(arg2)))
8556             goto efault;
8557         ret = get_errno(faccessat(arg1, p, arg3, 0));
8558         unlock_user(p, arg2, 0);
8559         break;
8560 #endif
8561 #ifdef TARGET_NR_nice /* not on alpha */
8562     case TARGET_NR_nice:
8563         ret = get_errno(nice(arg1));
8564         break;
8565 #endif
8566 #ifdef TARGET_NR_ftime
8567     case TARGET_NR_ftime:
8568         goto unimplemented;
8569 #endif
8570     case TARGET_NR_sync:
8571         sync();
8572         ret = 0;
8573         break;
8574 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8575     case TARGET_NR_syncfs:
8576         ret = get_errno(syncfs(arg1));
8577         break;
8578 #endif
8579     case TARGET_NR_kill:
8580         ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8581         break;
8582 #ifdef TARGET_NR_rename
8583     case TARGET_NR_rename:
8584         {
8585             void *p2;
8586             p = lock_user_string(arg1);
8587             p2 = lock_user_string(arg2);
8588             if (!p || !p2)
8589                 ret = -TARGET_EFAULT;
8590             else
8591                 ret = get_errno(rename(p, p2));
8592             unlock_user(p2, arg2, 0);
8593             unlock_user(p, arg1, 0);
8594         }
8595         break;
8596 #endif
8597 #if defined(TARGET_NR_renameat)
8598     case TARGET_NR_renameat:
8599         {
8600             void *p2;
8601             p  = lock_user_string(arg2);
8602             p2 = lock_user_string(arg4);
8603             if (!p || !p2)
8604                 ret = -TARGET_EFAULT;
8605             else
8606                 ret = get_errno(renameat(arg1, p, arg3, p2));
8607             unlock_user(p2, arg4, 0);
8608             unlock_user(p, arg2, 0);
8609         }
8610         break;
8611 #endif
8612 #if defined(TARGET_NR_renameat2)
8613     case TARGET_NR_renameat2:
8614         {
8615             void *p2;
8616             p  = lock_user_string(arg2);
8617             p2 = lock_user_string(arg4);
8618             if (!p || !p2) {
8619                 ret = -TARGET_EFAULT;
8620             } else {
8621                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8622             }
8623             unlock_user(p2, arg4, 0);
8624             unlock_user(p, arg2, 0);
8625         }
8626         break;
8627 #endif
8628 #ifdef TARGET_NR_mkdir
8629     case TARGET_NR_mkdir:
8630         if (!(p = lock_user_string(arg1)))
8631             goto efault;
8632         ret = get_errno(mkdir(p, arg2));
8633         unlock_user(p, arg1, 0);
8634         break;
8635 #endif
8636 #if defined(TARGET_NR_mkdirat)
8637     case TARGET_NR_mkdirat:
8638         if (!(p = lock_user_string(arg2)))
8639             goto efault;
8640         ret = get_errno(mkdirat(arg1, p, arg3));
8641         unlock_user(p, arg2, 0);
8642         break;
8643 #endif
8644 #ifdef TARGET_NR_rmdir
8645     case TARGET_NR_rmdir:
8646         if (!(p = lock_user_string(arg1)))
8647             goto efault;
8648         ret = get_errno(rmdir(p));
8649         unlock_user(p, arg1, 0);
8650         break;
8651 #endif
8652     case TARGET_NR_dup:
8653         ret = get_errno(dup(arg1));
8654         if (ret >= 0) {
8655             fd_trans_dup(arg1, ret);
8656         }
8657         break;
8658 #ifdef TARGET_NR_pipe
8659     case TARGET_NR_pipe:
8660         ret = do_pipe(cpu_env, arg1, 0, 0);
8661         break;
8662 #endif
8663 #ifdef TARGET_NR_pipe2
8664     case TARGET_NR_pipe2:
8665         ret = do_pipe(cpu_env, arg1,
8666                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8667         break;
8668 #endif
8669     case TARGET_NR_times:
8670         {
8671             struct target_tms *tmsp;
8672             struct tms tms;
8673             ret = get_errno(times(&tms));
8674             if (arg1) {
8675                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8676                 if (!tmsp)
8677                     goto efault;
8678                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8679                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8680                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8681                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8682             }
8683             if (!is_error(ret))
8684                 ret = host_to_target_clock_t(ret);
8685         }
8686         break;
8687 #ifdef TARGET_NR_prof
8688     case TARGET_NR_prof:
8689         goto unimplemented;
8690 #endif
8691 #ifdef TARGET_NR_signal
8692     case TARGET_NR_signal:
8693         goto unimplemented;
8694 #endif
8695     case TARGET_NR_acct:
8696         if (arg1 == 0) {
8697             ret = get_errno(acct(NULL));
8698         } else {
8699             if (!(p = lock_user_string(arg1)))
8700                 goto efault;
8701             ret = get_errno(acct(path(p)));
8702             unlock_user(p, arg1, 0);
8703         }
8704         break;
8705 #ifdef TARGET_NR_umount2
8706     case TARGET_NR_umount2:
8707         if (!(p = lock_user_string(arg1)))
8708             goto efault;
8709         ret = get_errno(umount2(p, arg2));
8710         unlock_user(p, arg1, 0);
8711         break;
8712 #endif
8713 #ifdef TARGET_NR_lock
8714     case TARGET_NR_lock:
8715         goto unimplemented;
8716 #endif
8717     case TARGET_NR_ioctl:
8718         ret = do_ioctl(arg1, arg2, arg3);
8719         break;
8720 #ifdef TARGET_NR_fcntl
8721     case TARGET_NR_fcntl:
8722         ret = do_fcntl(arg1, arg2, arg3);
8723         break;
8724 #endif
8725 #ifdef TARGET_NR_mpx
8726     case TARGET_NR_mpx:
8727         goto unimplemented;
8728 #endif
8729     case TARGET_NR_setpgid:
8730         ret = get_errno(setpgid(arg1, arg2));
8731         break;
8732 #ifdef TARGET_NR_ulimit
8733     case TARGET_NR_ulimit:
8734         goto unimplemented;
8735 #endif
8736 #ifdef TARGET_NR_oldolduname
8737     case TARGET_NR_oldolduname:
8738         goto unimplemented;
8739 #endif
8740     case TARGET_NR_umask:
8741         ret = get_errno(umask(arg1));
8742         break;
8743     case TARGET_NR_chroot:
8744         if (!(p = lock_user_string(arg1)))
8745             goto efault;
8746         ret = get_errno(chroot(p));
8747         unlock_user(p, arg1, 0);
8748         break;
8749 #ifdef TARGET_NR_ustat
8750     case TARGET_NR_ustat:
8751         goto unimplemented;
8752 #endif
8753 #ifdef TARGET_NR_dup2
8754     case TARGET_NR_dup2:
8755         ret = get_errno(dup2(arg1, arg2));
8756         if (ret >= 0) {
8757             fd_trans_dup(arg1, arg2);
8758         }
8759         break;
8760 #endif
8761 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8762     case TARGET_NR_dup3:
8763     {
8764         int host_flags;
8765 
8766         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8767             return -EINVAL;
8768             return -TARGET_EINVAL;
8769         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8770         ret = get_errno(dup3(arg1, arg2, host_flags));
8771         if (ret >= 0) {
8772             fd_trans_dup(arg1, arg2);
8773         }
8774         break;
8775     }
8776 #endif
8777 #ifdef TARGET_NR_getppid /* not on alpha */
8778     case TARGET_NR_getppid:
8779         ret = get_errno(getppid());
8780         break;
8781 #endif
8782 #ifdef TARGET_NR_getpgrp
8783     case TARGET_NR_getpgrp:
8784         ret = get_errno(getpgrp());
8785         break;
8786 #endif
8787     case TARGET_NR_setsid:
8788         ret = get_errno(setsid());
8789         break;
8790 #ifdef TARGET_NR_sigaction
8791     case TARGET_NR_sigaction:
8792         {
8793 #if defined(TARGET_ALPHA)
8794             struct target_sigaction act, oact, *pact = 0;
8795             struct target_old_sigaction *old_act;
8796             if (arg2) {
8797                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8798                     goto efault;
8799                 act._sa_handler = old_act->_sa_handler;
8800                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8801                 act.sa_flags = old_act->sa_flags;
8802                 act.sa_restorer = 0;
8803                 unlock_user_struct(old_act, arg2, 0);
8804                 pact = &act;
8805             }
8806             ret = get_errno(do_sigaction(arg1, pact, &oact));
8807             if (!is_error(ret) && arg3) {
8808                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8809                     goto efault;
8810                 old_act->_sa_handler = oact._sa_handler;
8811                 old_act->sa_mask = oact.sa_mask.sig[0];
8812                 old_act->sa_flags = oact.sa_flags;
8813                 unlock_user_struct(old_act, arg3, 1);
8814             }
8815 #elif defined(TARGET_MIPS)
8816             struct target_sigaction act, oact, *pact, *old_act;
8817 
8818             if (arg2) {
8819                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8820                     goto efault;
8821                 act._sa_handler = old_act->_sa_handler;
8822                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8823                 act.sa_flags = old_act->sa_flags;
8824                 unlock_user_struct(old_act, arg2, 0);
8825                 pact = &act;
8826             } else {
8827                 pact = NULL;
8828             }
8829 
8830             ret = get_errno(do_sigaction(arg1, pact, &oact));
8831 
8832             if (!is_error(ret) && arg3) {
8833                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8834                     goto efault;
8835                 old_act->_sa_handler = oact._sa_handler;
8836                 old_act->sa_flags = oact.sa_flags;
8837                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8838                 old_act->sa_mask.sig[1] = 0;
8839                 old_act->sa_mask.sig[2] = 0;
8840                 old_act->sa_mask.sig[3] = 0;
8841                 unlock_user_struct(old_act, arg3, 1);
8842             }
8843 #else
8844             struct target_old_sigaction *old_act;
8845             struct target_sigaction act, oact, *pact;
8846             if (arg2) {
8847                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8848                     goto efault;
8849                 act._sa_handler = old_act->_sa_handler;
8850                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8851                 act.sa_flags = old_act->sa_flags;
8852                 act.sa_restorer = old_act->sa_restorer;
8853 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8854                 act.ka_restorer = 0;
8855 #endif
8856                 unlock_user_struct(old_act, arg2, 0);
8857                 pact = &act;
8858             } else {
8859                 pact = NULL;
8860             }
8861             ret = get_errno(do_sigaction(arg1, pact, &oact));
8862             if (!is_error(ret) && arg3) {
8863                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8864                     goto efault;
8865                 old_act->_sa_handler = oact._sa_handler;
8866                 old_act->sa_mask = oact.sa_mask.sig[0];
8867                 old_act->sa_flags = oact.sa_flags;
8868                 old_act->sa_restorer = oact.sa_restorer;
8869                 unlock_user_struct(old_act, arg3, 1);
8870             }
8871 #endif
8872         }
8873         break;
8874 #endif
8875     case TARGET_NR_rt_sigaction:
8876         {
8877 #if defined(TARGET_ALPHA)
8878             /* For Alpha and SPARC this is a 5 argument syscall, with
8879              * a 'restorer' parameter which must be copied into the
8880              * sa_restorer field of the sigaction struct.
8881              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8882              * and arg5 is the sigsetsize.
8883              * Alpha also has a separate rt_sigaction struct that it uses
8884              * here; SPARC uses the usual sigaction struct.
8885              */
8886             struct target_rt_sigaction *rt_act;
8887             struct target_sigaction act, oact, *pact = 0;
8888 
8889             if (arg4 != sizeof(target_sigset_t)) {
8890                 ret = -TARGET_EINVAL;
8891                 break;
8892             }
8893             if (arg2) {
8894                 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8895                     goto efault;
8896                 act._sa_handler = rt_act->_sa_handler;
8897                 act.sa_mask = rt_act->sa_mask;
8898                 act.sa_flags = rt_act->sa_flags;
8899                 act.sa_restorer = arg5;
8900                 unlock_user_struct(rt_act, arg2, 0);
8901                 pact = &act;
8902             }
8903             ret = get_errno(do_sigaction(arg1, pact, &oact));
8904             if (!is_error(ret) && arg3) {
8905                 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8906                     goto efault;
8907                 rt_act->_sa_handler = oact._sa_handler;
8908                 rt_act->sa_mask = oact.sa_mask;
8909                 rt_act->sa_flags = oact.sa_flags;
8910                 unlock_user_struct(rt_act, arg3, 1);
8911             }
8912 #else
8913 #ifdef TARGET_SPARC
8914             target_ulong restorer = arg4;
8915             target_ulong sigsetsize = arg5;
8916 #else
8917             target_ulong sigsetsize = arg4;
8918 #endif
8919             struct target_sigaction *act;
8920             struct target_sigaction *oact;
8921 
8922             if (sigsetsize != sizeof(target_sigset_t)) {
8923                 ret = -TARGET_EINVAL;
8924                 break;
8925             }
8926             if (arg2) {
8927                 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8928                     goto efault;
8929                 }
8930 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8931                 act->ka_restorer = restorer;
8932 #endif
8933             } else {
8934                 act = NULL;
8935             }
8936             if (arg3) {
8937                 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8938                     ret = -TARGET_EFAULT;
8939                     goto rt_sigaction_fail;
8940                 }
8941             } else
8942                 oact = NULL;
8943             ret = get_errno(do_sigaction(arg1, act, oact));
8944 	rt_sigaction_fail:
8945             if (act)
8946                 unlock_user_struct(act, arg2, 0);
8947             if (oact)
8948                 unlock_user_struct(oact, arg3, 1);
8949 #endif
8950         }
8951         break;
8952 #ifdef TARGET_NR_sgetmask /* not on alpha */
8953     case TARGET_NR_sgetmask:
8954         {
8955             sigset_t cur_set;
8956             abi_ulong target_set;
8957             ret = do_sigprocmask(0, NULL, &cur_set);
8958             if (!ret) {
8959                 host_to_target_old_sigset(&target_set, &cur_set);
8960                 ret = target_set;
8961             }
8962         }
8963         break;
8964 #endif
8965 #ifdef TARGET_NR_ssetmask /* not on alpha */
8966     case TARGET_NR_ssetmask:
8967         {
8968             sigset_t set, oset;
8969             abi_ulong target_set = arg1;
8970             target_to_host_old_sigset(&set, &target_set);
8971             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8972             if (!ret) {
8973                 host_to_target_old_sigset(&target_set, &oset);
8974                 ret = target_set;
8975             }
8976         }
8977         break;
8978 #endif
8979 #ifdef TARGET_NR_sigprocmask
8980     case TARGET_NR_sigprocmask:
8981         {
8982 #if defined(TARGET_ALPHA)
8983             sigset_t set, oldset;
8984             abi_ulong mask;
8985             int how;
8986 
8987             switch (arg1) {
8988             case TARGET_SIG_BLOCK:
8989                 how = SIG_BLOCK;
8990                 break;
8991             case TARGET_SIG_UNBLOCK:
8992                 how = SIG_UNBLOCK;
8993                 break;
8994             case TARGET_SIG_SETMASK:
8995                 how = SIG_SETMASK;
8996                 break;
8997             default:
8998                 ret = -TARGET_EINVAL;
8999                 goto fail;
9000             }
9001             mask = arg2;
9002             target_to_host_old_sigset(&set, &mask);
9003 
9004             ret = do_sigprocmask(how, &set, &oldset);
9005             if (!is_error(ret)) {
9006                 host_to_target_old_sigset(&mask, &oldset);
9007                 ret = mask;
9008                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9009             }
9010 #else
9011             sigset_t set, oldset, *set_ptr;
9012             int how;
9013 
9014             if (arg2) {
9015                 switch (arg1) {
9016                 case TARGET_SIG_BLOCK:
9017                     how = SIG_BLOCK;
9018                     break;
9019                 case TARGET_SIG_UNBLOCK:
9020                     how = SIG_UNBLOCK;
9021                     break;
9022                 case TARGET_SIG_SETMASK:
9023                     how = SIG_SETMASK;
9024                     break;
9025                 default:
9026                     ret = -TARGET_EINVAL;
9027                     goto fail;
9028                 }
9029                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9030                     goto efault;
9031                 target_to_host_old_sigset(&set, p);
9032                 unlock_user(p, arg2, 0);
9033                 set_ptr = &set;
9034             } else {
9035                 how = 0;
9036                 set_ptr = NULL;
9037             }
9038             ret = do_sigprocmask(how, set_ptr, &oldset);
9039             if (!is_error(ret) && arg3) {
9040                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9041                     goto efault;
9042                 host_to_target_old_sigset(p, &oldset);
9043                 unlock_user(p, arg3, sizeof(target_sigset_t));
9044             }
9045 #endif
9046         }
9047         break;
9048 #endif
9049     case TARGET_NR_rt_sigprocmask:
9050         {
9051             int how = arg1;
9052             sigset_t set, oldset, *set_ptr;
9053 
9054             if (arg4 != sizeof(target_sigset_t)) {
9055                 ret = -TARGET_EINVAL;
9056                 break;
9057             }
9058 
9059             if (arg2) {
9060                 switch(how) {
9061                 case TARGET_SIG_BLOCK:
9062                     how = SIG_BLOCK;
9063                     break;
9064                 case TARGET_SIG_UNBLOCK:
9065                     how = SIG_UNBLOCK;
9066                     break;
9067                 case TARGET_SIG_SETMASK:
9068                     how = SIG_SETMASK;
9069                     break;
9070                 default:
9071                     ret = -TARGET_EINVAL;
9072                     goto fail;
9073                 }
9074                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9075                     goto efault;
9076                 target_to_host_sigset(&set, p);
9077                 unlock_user(p, arg2, 0);
9078                 set_ptr = &set;
9079             } else {
9080                 how = 0;
9081                 set_ptr = NULL;
9082             }
9083             ret = do_sigprocmask(how, set_ptr, &oldset);
9084             if (!is_error(ret) && arg3) {
9085                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9086                     goto efault;
9087                 host_to_target_sigset(p, &oldset);
9088                 unlock_user(p, arg3, sizeof(target_sigset_t));
9089             }
9090         }
9091         break;
9092 #ifdef TARGET_NR_sigpending
9093     case TARGET_NR_sigpending:
9094         {
9095             sigset_t set;
9096             ret = get_errno(sigpending(&set));
9097             if (!is_error(ret)) {
9098                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9099                     goto efault;
9100                 host_to_target_old_sigset(p, &set);
9101                 unlock_user(p, arg1, sizeof(target_sigset_t));
9102             }
9103         }
9104         break;
9105 #endif
9106     case TARGET_NR_rt_sigpending:
9107         {
9108             sigset_t set;
9109 
9110             /* Yes, this check is >, not != as in most places. We follow the
9111              * kernel's logic: it implements NR_sigpending through the same
9112              * code path, and in that case the old_sigset_t is smaller in
9113              * size.
9114              */
9115             if (arg2 > sizeof(target_sigset_t)) {
9116                 ret = -TARGET_EINVAL;
9117                 break;
9118             }
9119 
9120             ret = get_errno(sigpending(&set));
9121             if (!is_error(ret)) {
9122                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9123                     goto efault;
9124                 host_to_target_sigset(p, &set);
9125                 unlock_user(p, arg1, sizeof(target_sigset_t));
9126             }
9127         }
9128         break;
9129 #ifdef TARGET_NR_sigsuspend
9130     case TARGET_NR_sigsuspend:
9131         {
9132             TaskState *ts = cpu->opaque;
9133 #if defined(TARGET_ALPHA)
9134             abi_ulong mask = arg1;
9135             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9136 #else
9137             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9138                 goto efault;
9139             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9140             unlock_user(p, arg1, 0);
9141 #endif
9142             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9143                                                SIGSET_T_SIZE));
9144             if (ret != -TARGET_ERESTARTSYS) {
9145                 ts->in_sigsuspend = 1;
9146             }
9147         }
9148         break;
9149 #endif
9150     case TARGET_NR_rt_sigsuspend:
9151         {
9152             TaskState *ts = cpu->opaque;
9153 
9154             if (arg2 != sizeof(target_sigset_t)) {
9155                 ret = -TARGET_EINVAL;
9156                 break;
9157             }
9158             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9159                 goto efault;
9160             target_to_host_sigset(&ts->sigsuspend_mask, p);
9161             unlock_user(p, arg1, 0);
9162             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9163                                                SIGSET_T_SIZE));
9164             if (ret != -TARGET_ERESTARTSYS) {
9165                 ts->in_sigsuspend = 1;
9166             }
9167         }
9168         break;
9169     case TARGET_NR_rt_sigtimedwait:
9170         {
9171             sigset_t set;
9172             struct timespec uts, *puts;
9173             siginfo_t uinfo;
9174 
9175             if (arg4 != sizeof(target_sigset_t)) {
9176                 ret = -TARGET_EINVAL;
9177                 break;
9178             }
9179 
9180             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9181                 goto efault;
9182             target_to_host_sigset(&set, p);
9183             unlock_user(p, arg1, 0);
9184             if (arg3) {
9185                 puts = &uts;
9186                 target_to_host_timespec(puts, arg3);
9187             } else {
9188                 puts = NULL;
9189             }
9190             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9191                                                  SIGSET_T_SIZE));
9192             if (!is_error(ret)) {
9193                 if (arg2) {
9194                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9195                                   0);
9196                     if (!p) {
9197                         goto efault;
9198                     }
9199                     host_to_target_siginfo(p, &uinfo);
9200                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9201                 }
9202                 ret = host_to_target_signal(ret);
9203             }
9204         }
9205         break;
9206     case TARGET_NR_rt_sigqueueinfo:
9207         {
9208             siginfo_t uinfo;
9209 
9210             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9211             if (!p) {
9212                 goto efault;
9213             }
9214             target_to_host_siginfo(&uinfo, p);
9215             unlock_user(p, arg3, 0);
9216             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9217         }
9218         break;
9219     case TARGET_NR_rt_tgsigqueueinfo:
9220         {
9221             siginfo_t uinfo;
9222 
9223             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9224             if (!p) {
9225                 goto efault;
9226             }
9227             target_to_host_siginfo(&uinfo, p);
9228             unlock_user(p, arg4, 0);
9229             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9230         }
9231         break;
9232 #ifdef TARGET_NR_sigreturn
9233     case TARGET_NR_sigreturn:
9234         if (block_signals()) {
9235             ret = -TARGET_ERESTARTSYS;
9236         } else {
9237             ret = do_sigreturn(cpu_env);
9238         }
9239         break;
9240 #endif
9241     case TARGET_NR_rt_sigreturn:
9242         if (block_signals()) {
9243             ret = -TARGET_ERESTARTSYS;
9244         } else {
9245             ret = do_rt_sigreturn(cpu_env);
9246         }
9247         break;
9248     case TARGET_NR_sethostname:
9249         if (!(p = lock_user_string(arg1)))
9250             goto efault;
9251         ret = get_errno(sethostname(p, arg2));
9252         unlock_user(p, arg1, 0);
9253         break;
9254     case TARGET_NR_setrlimit:
9255         {
9256             int resource = target_to_host_resource(arg1);
9257             struct target_rlimit *target_rlim;
9258             struct rlimit rlim;
9259             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9260                 goto efault;
9261             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9262             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9263             unlock_user_struct(target_rlim, arg2, 0);
9264             ret = get_errno(setrlimit(resource, &rlim));
9265         }
9266         break;
9267     case TARGET_NR_getrlimit:
9268         {
9269             int resource = target_to_host_resource(arg1);
9270             struct target_rlimit *target_rlim;
9271             struct rlimit rlim;
9272 
9273             ret = get_errno(getrlimit(resource, &rlim));
9274             if (!is_error(ret)) {
9275                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9276                     goto efault;
9277                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9278                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9279                 unlock_user_struct(target_rlim, arg2, 1);
9280             }
9281         }
9282         break;
9283     case TARGET_NR_getrusage:
9284         {
9285             struct rusage rusage;
9286             ret = get_errno(getrusage(arg1, &rusage));
9287             if (!is_error(ret)) {
9288                 ret = host_to_target_rusage(arg2, &rusage);
9289             }
9290         }
9291         break;
9292     case TARGET_NR_gettimeofday:
9293         {
9294             struct timeval tv;
9295             ret = get_errno(gettimeofday(&tv, NULL));
9296             if (!is_error(ret)) {
9297                 if (copy_to_user_timeval(arg1, &tv))
9298                     goto efault;
9299             }
9300         }
9301         break;
9302     case TARGET_NR_settimeofday:
9303         {
9304             struct timeval tv, *ptv = NULL;
9305             struct timezone tz, *ptz = NULL;
9306 
9307             if (arg1) {
9308                 if (copy_from_user_timeval(&tv, arg1)) {
9309                     goto efault;
9310                 }
9311                 ptv = &tv;
9312             }
9313 
9314             if (arg2) {
9315                 if (copy_from_user_timezone(&tz, arg2)) {
9316                     goto efault;
9317                 }
9318                 ptz = &tz;
9319             }
9320 
9321             ret = get_errno(settimeofday(ptv, ptz));
9322         }
9323         break;
9324 #if defined(TARGET_NR_select)
9325     case TARGET_NR_select:
9326 #if defined(TARGET_WANT_NI_OLD_SELECT)
9327         /* some architectures used to have old_select here
9328          * but now return ENOSYS for it.
9329          */
9330         ret = -TARGET_ENOSYS;
9331 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9332         ret = do_old_select(arg1);
9333 #else
9334         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9335 #endif
9336         break;
9337 #endif
9338 #ifdef TARGET_NR_pselect6
9339     case TARGET_NR_pselect6:
9340         {
9341             abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
9342             fd_set rfds, wfds, efds;
9343             fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
9344             struct timespec ts, *ts_ptr;
9345 
9346             /*
9347              * The 6th arg is actually two args smashed together,
9348              * so we cannot use the C library.
9349              */
9350             sigset_t set;
9351             struct {
9352                 sigset_t *set;
9353                 size_t size;
9354             } sig, *sig_ptr;
9355 
9356             abi_ulong arg_sigset, arg_sigsize, *arg7;
9357             target_sigset_t *target_sigset;
9358 
9359             n = arg1;
9360             rfd_addr = arg2;
9361             wfd_addr = arg3;
9362             efd_addr = arg4;
9363             ts_addr = arg5;
9364 
9365             ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
9366             if (ret) {
9367                 goto fail;
9368             }
9369             ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
9370             if (ret) {
9371                 goto fail;
9372             }
9373             ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
9374             if (ret) {
9375                 goto fail;
9376             }
9377 
9378             /*
9379              * This takes a timespec, and not a timeval, so we cannot
9380              * use the do_select() helper ...
9381              */
9382             if (ts_addr) {
9383                 if (target_to_host_timespec(&ts, ts_addr)) {
9384                     goto efault;
9385                 }
9386                 ts_ptr = &ts;
9387             } else {
9388                 ts_ptr = NULL;
9389             }
9390 
9391             /* Extract the two packed args for the sigset */
9392             if (arg6) {
9393                 sig_ptr = &sig;
9394                 sig.size = SIGSET_T_SIZE;
9395 
9396                 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
9397                 if (!arg7) {
9398                     goto efault;
9399                 }
9400                 arg_sigset = tswapal(arg7[0]);
9401                 arg_sigsize = tswapal(arg7[1]);
9402                 unlock_user(arg7, arg6, 0);
9403 
9404                 if (arg_sigset) {
9405                     sig.set = &set;
9406                     if (arg_sigsize != sizeof(*target_sigset)) {
9407                         /* Like the kernel, we enforce correct size sigsets */
9408                         ret = -TARGET_EINVAL;
9409                         goto fail;
9410                     }
9411                     target_sigset = lock_user(VERIFY_READ, arg_sigset,
9412                                               sizeof(*target_sigset), 1);
9413                     if (!target_sigset) {
9414                         goto efault;
9415                     }
9416                     target_to_host_sigset(&set, target_sigset);
9417                     unlock_user(target_sigset, arg_sigset, 0);
9418                 } else {
9419                     sig.set = NULL;
9420                 }
9421             } else {
9422                 sig_ptr = NULL;
9423             }
9424 
9425             ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
9426                                           ts_ptr, sig_ptr));
9427 
9428             if (!is_error(ret)) {
9429                 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
9430                     goto efault;
9431                 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
9432                     goto efault;
9433                 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
9434                     goto efault;
9435 
9436                 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
9437                     goto efault;
9438             }
9439         }
9440         break;
9441 #endif
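    /*
     * Illustrative sketch (not part of the emulation): a guest libc
     * typically packs the final pselect6 argument as a two-word record,
     * roughly
     *
     *     struct { const sigset_t *ss; size_t ss_len; } six = {
     *         &mask, _NSIG / 8
     *     };
     *     syscall(__NR_pselect6, nfds, &rfds, &wfds, &efds, &ts, &six);
     *
     * which is why the case above reads two abi_ulongs from arg6 first and
     * only then translates the sigset that the first word points to.
     */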
9442 #ifdef TARGET_NR_symlink
9443     case TARGET_NR_symlink:
9444         {
9445             void *p2;
9446             p = lock_user_string(arg1);
9447             p2 = lock_user_string(arg2);
9448             if (!p || !p2)
9449                 ret = -TARGET_EFAULT;
9450             else
9451                 ret = get_errno(symlink(p, p2));
9452             unlock_user(p2, arg2, 0);
9453             unlock_user(p, arg1, 0);
9454         }
9455         break;
9456 #endif
9457 #if defined(TARGET_NR_symlinkat)
9458     case TARGET_NR_symlinkat:
9459         {
9460             void *p2;
9461             p  = lock_user_string(arg1);
9462             p2 = lock_user_string(arg3);
9463             if (!p || !p2)
9464                 ret = -TARGET_EFAULT;
9465             else
9466                 ret = get_errno(symlinkat(p, arg2, p2));
9467             unlock_user(p2, arg3, 0);
9468             unlock_user(p, arg1, 0);
9469         }
9470         break;
9471 #endif
9472 #ifdef TARGET_NR_oldlstat
9473     case TARGET_NR_oldlstat:
9474         goto unimplemented;
9475 #endif
9476 #ifdef TARGET_NR_readlink
9477     case TARGET_NR_readlink:
9478         {
9479             void *p2;
9480             p = lock_user_string(arg1);
9481             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9482             if (!p || !p2) {
9483                 ret = -TARGET_EFAULT;
9484             } else if (!arg3) {
9485                 /* Short circuit this for the magic exe check. */
9486                 ret = -TARGET_EINVAL;
9487             } else if (is_proc_myself((const char *)p, "exe")) {
9488                 char real[PATH_MAX], *temp;
9489                 temp = realpath(exec_path, real);
9490                 /* Return value is # of bytes that we wrote to the buffer. */
9491                 if (temp == NULL) {
9492                     ret = get_errno(-1);
9493                 } else {
9494                     /* Don't worry about sign mismatch as earlier mapping
9495                      * logic would have thrown a bad address error. */
9496                     ret = MIN(strlen(real), arg3);
9497                     /* We cannot NUL terminate the string. */
9498                     memcpy(p2, real, ret);
9499                 }
9500             } else {
9501                 ret = get_errno(readlink(path(p), p2, arg3));
9502             }
9503             unlock_user(p2, arg2, ret);
9504             unlock_user(p, arg1, 0);
9505         }
9506         break;
9507 #endif
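    /*
     * Hedged usage sketch: the is_proc_myself() special case above matters
     * for guests that inspect their own binary, e.g.
     *
     *     char buf[PATH_MAX];
     *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
     *
     * readlink() never NUL-terminates, so the handler copies at most arg3
     * bytes of the resolved qemu exec_path and returns the byte count.
     */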
9508 #if defined(TARGET_NR_readlinkat)
9509     case TARGET_NR_readlinkat:
9510         {
9511             void *p2;
9512             p  = lock_user_string(arg2);
9513             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9514             if (!p || !p2) {
9515                 ret = -TARGET_EFAULT;
9516             } else if (is_proc_myself((const char *)p, "exe")) {
9517                 char real[PATH_MAX], *temp;
9518                 temp = realpath(exec_path, real);
9519             ret = temp == NULL ? get_errno(-1) : strlen(real);
9520                 snprintf((char *)p2, arg4, "%s", real);
9521             } else {
9522                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9523             }
9524             unlock_user(p2, arg3, ret);
9525             unlock_user(p, arg2, 0);
9526         }
9527         break;
9528 #endif
9529 #ifdef TARGET_NR_uselib
9530     case TARGET_NR_uselib:
9531         goto unimplemented;
9532 #endif
9533 #ifdef TARGET_NR_swapon
9534     case TARGET_NR_swapon:
9535         if (!(p = lock_user_string(arg1)))
9536             goto efault;
9537         ret = get_errno(swapon(p, arg2));
9538         unlock_user(p, arg1, 0);
9539         break;
9540 #endif
9541     case TARGET_NR_reboot:
9542         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9543            /* arg4 must be ignored in all other cases */
9544            p = lock_user_string(arg4);
9545            if (!p) {
9546               goto efault;
9547            }
9548            ret = get_errno(reboot(arg1, arg2, arg3, p));
9549            unlock_user(p, arg4, 0);
9550         } else {
9551            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9552         }
9553         break;
9554 #ifdef TARGET_NR_readdir
9555     case TARGET_NR_readdir:
9556         goto unimplemented;
9557 #endif
9558 #ifdef TARGET_NR_mmap
9559     case TARGET_NR_mmap:
9560 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9561     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9562     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9563     || defined(TARGET_S390X)
9564         {
9565             abi_ulong *v;
9566             abi_ulong v1, v2, v3, v4, v5, v6;
9567             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9568                 goto efault;
9569             v1 = tswapal(v[0]);
9570             v2 = tswapal(v[1]);
9571             v3 = tswapal(v[2]);
9572             v4 = tswapal(v[3]);
9573             v5 = tswapal(v[4]);
9574             v6 = tswapal(v[5]);
9575             unlock_user(v, arg1, 0);
9576             ret = get_errno(target_mmap(v1, v2, v3,
9577                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9578                                         v5, v6));
9579         }
9580 #else
9581         ret = get_errno(target_mmap(arg1, arg2, arg3,
9582                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9583                                     arg5,
9584                                     arg6));
9585 #endif
9586         break;
9587 #endif
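    /*
     * Illustrative sketch, not taken from any particular guest: on the
     * targets listed above the old mmap ABI passes a single pointer to six
     * words in guest memory instead of six register arguments, roughly
     *
     *     abi_ulong args[6] = { addr, len, prot, flags, fd, offset };
     *     syscall(__NR_mmap, args);
     *
     * which is why that branch locks 6 * sizeof(abi_ulong) at arg1 and
     * byte-swaps each word before calling target_mmap().
     */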
9588 #ifdef TARGET_NR_mmap2
9589     case TARGET_NR_mmap2:
9590 #ifndef MMAP_SHIFT
9591 #define MMAP_SHIFT 12
9592 #endif
9593         ret = get_errno(target_mmap(arg1, arg2, arg3,
9594                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9595                                     arg5,
9596                                     arg6 << MMAP_SHIFT));
9597         break;
9598 #endif
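    /*
     * Rough example of the mmap2 offset convention handled above: the guest
     * passes the file offset in 4096-byte units, so a mapping at byte
     * offset 0x10000 is issued as
     *
     *     mmap2(addr, len, prot, flags, fd, 0x10000 >> 12);
     *
     * and the handler rebuilds the byte offset with arg6 << MMAP_SHIFT.
     */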
9599     case TARGET_NR_munmap:
9600         ret = get_errno(target_munmap(arg1, arg2));
9601         break;
9602     case TARGET_NR_mprotect:
9603         {
9604             TaskState *ts = cpu->opaque;
9605             /* Special hack to detect libc making the stack executable.  */
9606             if ((arg3 & PROT_GROWSDOWN)
9607                 && arg1 >= ts->info->stack_limit
9608                 && arg1 <= ts->info->start_stack) {
9609                 arg3 &= ~PROT_GROWSDOWN;
9610                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9611                 arg1 = ts->info->stack_limit;
9612             }
9613         }
9614         ret = get_errno(target_mprotect(arg1, arg2, arg3));
9615         break;
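    /*
     * Hedged sketch of the libc behaviour the hack above is aimed at: when
     * an executable stack is required, glibc typically issues something like
     *
     *     mprotect(stack_end_page, pagesize,
     *              PROT_READ | PROT_WRITE | PROT_EXEC | PROT_GROWSDOWN);
     *
     * expecting the kernel to extend the change down the whole stack VMA.
     * QEMU has no such VMA, so the case above widens the range down to the
     * guest's stack_limit by hand and drops PROT_GROWSDOWN.
     */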
9616 #ifdef TARGET_NR_mremap
9617     case TARGET_NR_mremap:
9618         ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9619         break;
9620 #endif
9621         /* ??? msync/mlock/munlock are broken for softmmu.  */
9622 #ifdef TARGET_NR_msync
9623     case TARGET_NR_msync:
9624         ret = get_errno(msync(g2h(arg1), arg2, arg3));
9625         break;
9626 #endif
9627 #ifdef TARGET_NR_mlock
9628     case TARGET_NR_mlock:
9629         ret = get_errno(mlock(g2h(arg1), arg2));
9630         break;
9631 #endif
9632 #ifdef TARGET_NR_munlock
9633     case TARGET_NR_munlock:
9634         ret = get_errno(munlock(g2h(arg1), arg2));
9635         break;
9636 #endif
9637 #ifdef TARGET_NR_mlockall
9638     case TARGET_NR_mlockall:
9639         ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9640         break;
9641 #endif
9642 #ifdef TARGET_NR_munlockall
9643     case TARGET_NR_munlockall:
9644         ret = get_errno(munlockall());
9645         break;
9646 #endif
9647     case TARGET_NR_truncate:
9648         if (!(p = lock_user_string(arg1)))
9649             goto efault;
9650         ret = get_errno(truncate(p, arg2));
9651         unlock_user(p, arg1, 0);
9652         break;
9653     case TARGET_NR_ftruncate:
9654         ret = get_errno(ftruncate(arg1, arg2));
9655         break;
9656     case TARGET_NR_fchmod:
9657         ret = get_errno(fchmod(arg1, arg2));
9658         break;
9659 #if defined(TARGET_NR_fchmodat)
9660     case TARGET_NR_fchmodat:
9661         if (!(p = lock_user_string(arg2)))
9662             goto efault;
9663         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9664         unlock_user(p, arg2, 0);
9665         break;
9666 #endif
9667     case TARGET_NR_getpriority:
9668         /* Note that negative values are valid for getpriority, so we must
9669            differentiate based on errno settings.  */
9670         errno = 0;
9671         ret = getpriority(arg1, arg2);
9672         if (ret == -1 && errno != 0) {
9673             ret = -host_to_target_errno(errno);
9674             break;
9675         }
9676 #ifdef TARGET_ALPHA
9677         /* Return value is the unbiased priority.  Signal no error.  */
9678         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9679 #else
9680         /* Return value is a biased priority to avoid negative numbers.  */
9681         ret = 20 - ret;
9682 #endif
9683         break;
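    /*
     * Worked example of the bias above (illustration only): for a process
     * with nice value -5, getpriority() succeeds with ret == -5, and the
     * non-Alpha path returns 20 - (-5) = 25 to the guest; guest libc then
     * undoes the bias with 20 - 25 = -5, so negative nice values can never
     * be mistaken for negative error returns.
     */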
9684     case TARGET_NR_setpriority:
9685         ret = get_errno(setpriority(arg1, arg2, arg3));
9686         break;
9687 #ifdef TARGET_NR_profil
9688     case TARGET_NR_profil:
9689         goto unimplemented;
9690 #endif
9691     case TARGET_NR_statfs:
9692         if (!(p = lock_user_string(arg1)))
9693             goto efault;
9694         ret = get_errno(statfs(path(p), &stfs));
9695         unlock_user(p, arg1, 0);
9696     convert_statfs:
9697         if (!is_error(ret)) {
9698             struct target_statfs *target_stfs;
9699 
9700             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9701                 goto efault;
9702             __put_user(stfs.f_type, &target_stfs->f_type);
9703             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9704             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9705             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9706             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9707             __put_user(stfs.f_files, &target_stfs->f_files);
9708             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9709             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9710             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9711             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9712             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9713 #ifdef _STATFS_F_FLAGS
9714             __put_user(stfs.f_flags, &target_stfs->f_flags);
9715 #else
9716             __put_user(0, &target_stfs->f_flags);
9717 #endif
9718             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9719             unlock_user_struct(target_stfs, arg2, 1);
9720         }
9721         break;
9722     case TARGET_NR_fstatfs:
9723         ret = get_errno(fstatfs(arg1, &stfs));
9724         goto convert_statfs;
9725 #ifdef TARGET_NR_statfs64
9726     case TARGET_NR_statfs64:
9727         if (!(p = lock_user_string(arg1)))
9728             goto efault;
9729         ret = get_errno(statfs(path(p), &stfs));
9730         unlock_user(p, arg1, 0);
9731     convert_statfs64:
9732         if (!is_error(ret)) {
9733             struct target_statfs64 *target_stfs;
9734 
9735             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9736                 goto efault;
9737             __put_user(stfs.f_type, &target_stfs->f_type);
9738             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9739             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9740             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9741             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9742             __put_user(stfs.f_files, &target_stfs->f_files);
9743             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9744             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9745             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9746             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9747             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9748             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9749             unlock_user_struct(target_stfs, arg3, 1);
9750         }
9751         break;
9752     case TARGET_NR_fstatfs64:
9753         ret = get_errno(fstatfs(arg1, &stfs));
9754         goto convert_statfs64;
9755 #endif
9756 #ifdef TARGET_NR_ioperm
9757     case TARGET_NR_ioperm:
9758         goto unimplemented;
9759 #endif
9760 #ifdef TARGET_NR_socketcall
9761     case TARGET_NR_socketcall:
9762         ret = do_socketcall(arg1, arg2);
9763         break;
9764 #endif
9765 #ifdef TARGET_NR_accept
9766     case TARGET_NR_accept:
9767         ret = do_accept4(arg1, arg2, arg3, 0);
9768         break;
9769 #endif
9770 #ifdef TARGET_NR_accept4
9771     case TARGET_NR_accept4:
9772         ret = do_accept4(arg1, arg2, arg3, arg4);
9773         break;
9774 #endif
9775 #ifdef TARGET_NR_bind
9776     case TARGET_NR_bind:
9777         ret = do_bind(arg1, arg2, arg3);
9778         break;
9779 #endif
9780 #ifdef TARGET_NR_connect
9781     case TARGET_NR_connect:
9782         ret = do_connect(arg1, arg2, arg3);
9783         break;
9784 #endif
9785 #ifdef TARGET_NR_getpeername
9786     case TARGET_NR_getpeername:
9787         ret = do_getpeername(arg1, arg2, arg3);
9788         break;
9789 #endif
9790 #ifdef TARGET_NR_getsockname
9791     case TARGET_NR_getsockname:
9792         ret = do_getsockname(arg1, arg2, arg3);
9793         break;
9794 #endif
9795 #ifdef TARGET_NR_getsockopt
9796     case TARGET_NR_getsockopt:
9797         ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9798         break;
9799 #endif
9800 #ifdef TARGET_NR_listen
9801     case TARGET_NR_listen:
9802         ret = get_errno(listen(arg1, arg2));
9803         break;
9804 #endif
9805 #ifdef TARGET_NR_recv
9806     case TARGET_NR_recv:
9807         ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9808         break;
9809 #endif
9810 #ifdef TARGET_NR_recvfrom
9811     case TARGET_NR_recvfrom:
9812         ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9813         break;
9814 #endif
9815 #ifdef TARGET_NR_recvmsg
9816     case TARGET_NR_recvmsg:
9817         ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9818         break;
9819 #endif
9820 #ifdef TARGET_NR_send
9821     case TARGET_NR_send:
9822         ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9823         break;
9824 #endif
9825 #ifdef TARGET_NR_sendmsg
9826     case TARGET_NR_sendmsg:
9827         ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9828         break;
9829 #endif
9830 #ifdef TARGET_NR_sendmmsg
9831     case TARGET_NR_sendmmsg:
9832         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9833         break;
9834     case TARGET_NR_recvmmsg:
9835         ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9836         break;
9837 #endif
9838 #ifdef TARGET_NR_sendto
9839     case TARGET_NR_sendto:
9840         ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9841         break;
9842 #endif
9843 #ifdef TARGET_NR_shutdown
9844     case TARGET_NR_shutdown:
9845         ret = get_errno(shutdown(arg1, arg2));
9846         break;
9847 #endif
9848 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9849     case TARGET_NR_getrandom:
9850         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9851         if (!p) {
9852             goto efault;
9853         }
9854         ret = get_errno(getrandom(p, arg2, arg3));
9855         unlock_user(p, arg1, ret);
9856         break;
9857 #endif
9858 #ifdef TARGET_NR_socket
9859     case TARGET_NR_socket:
9860         ret = do_socket(arg1, arg2, arg3);
9861         break;
9862 #endif
9863 #ifdef TARGET_NR_socketpair
9864     case TARGET_NR_socketpair:
9865         ret = do_socketpair(arg1, arg2, arg3, arg4);
9866         break;
9867 #endif
9868 #ifdef TARGET_NR_setsockopt
9869     case TARGET_NR_setsockopt:
9870         ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9871         break;
9872 #endif
9873 #if defined(TARGET_NR_syslog)
9874     case TARGET_NR_syslog:
9875         {
9876             int len = arg2;
9877 
9878             switch (arg1) {
9879             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9880             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9881             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9882             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9883             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9884             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9885             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9886             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9887                 {
9888                     ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9889                 }
9890                 break;
9891             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9892             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9893             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9894                 {
9895                     ret = -TARGET_EINVAL;
9896                     if (len < 0) {
9897                         goto fail;
9898                     }
9899                     ret = 0;
9900                     if (len == 0) {
9901                         break;
9902                     }
9903                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9904                     if (!p) {
9905                         ret = -TARGET_EFAULT;
9906                         goto fail;
9907                     }
9908                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9909                     unlock_user(p, arg2, arg3);
9910                 }
9911                 break;
9912             default:
9913                 ret = -TARGET_EINVAL;
9914                 break;
9915             }
9916         }
9917         break;
9918 #endif
9919     case TARGET_NR_setitimer:
9920         {
9921             struct itimerval value, ovalue, *pvalue;
9922 
9923             if (arg2) {
9924                 pvalue = &value;
9925                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9926                     || copy_from_user_timeval(&pvalue->it_value,
9927                                               arg2 + sizeof(struct target_timeval)))
9928                     goto efault;
9929             } else {
9930                 pvalue = NULL;
9931             }
9932             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9933             if (!is_error(ret) && arg3) {
9934                 if (copy_to_user_timeval(arg3,
9935                                          &ovalue.it_interval)
9936                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9937                                             &ovalue.it_value))
9938                     goto efault;
9939             }
9940         }
9941         break;
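    /*
     * Layout sketch (an assumption made explicit, matching the offsets used
     * above): the guest struct itimerval is treated as two target_timevals
     * back to back,
     *
     *     struct target_itimerval {
     *         struct target_timeval it_interval;  // offset 0
     *         struct target_timeval it_value;     // offset sizeof(timeval)
     *     };
     *
     * hence the copies at arg2 and arg2 + sizeof(struct target_timeval).
     */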
9942     case TARGET_NR_getitimer:
9943         {
9944             struct itimerval value;
9945 
9946             ret = get_errno(getitimer(arg1, &value));
9947             if (!is_error(ret) && arg2) {
9948                 if (copy_to_user_timeval(arg2,
9949                                          &value.it_interval)
9950                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9951                                             &value.it_value))
9952                     goto efault;
9953             }
9954         }
9955         break;
9956 #ifdef TARGET_NR_stat
9957     case TARGET_NR_stat:
9958         if (!(p = lock_user_string(arg1)))
9959             goto efault;
9960         ret = get_errno(stat(path(p), &st));
9961         unlock_user(p, arg1, 0);
9962         goto do_stat;
9963 #endif
9964 #ifdef TARGET_NR_lstat
9965     case TARGET_NR_lstat:
9966         if (!(p = lock_user_string(arg1)))
9967             goto efault;
9968         ret = get_errno(lstat(path(p), &st));
9969         unlock_user(p, arg1, 0);
9970         goto do_stat;
9971 #endif
9972     case TARGET_NR_fstat:
9973         {
9974             ret = get_errno(fstat(arg1, &st));
9975 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9976         do_stat:
9977 #endif
9978             if (!is_error(ret)) {
9979                 struct target_stat *target_st;
9980 
9981                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9982                     goto efault;
9983                 memset(target_st, 0, sizeof(*target_st));
9984                 __put_user(st.st_dev, &target_st->st_dev);
9985                 __put_user(st.st_ino, &target_st->st_ino);
9986                 __put_user(st.st_mode, &target_st->st_mode);
9987                 __put_user(st.st_uid, &target_st->st_uid);
9988                 __put_user(st.st_gid, &target_st->st_gid);
9989                 __put_user(st.st_nlink, &target_st->st_nlink);
9990                 __put_user(st.st_rdev, &target_st->st_rdev);
9991                 __put_user(st.st_size, &target_st->st_size);
9992                 __put_user(st.st_blksize, &target_st->st_blksize);
9993                 __put_user(st.st_blocks, &target_st->st_blocks);
9994                 __put_user(st.st_atime, &target_st->target_st_atime);
9995                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9996                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9997                 unlock_user_struct(target_st, arg2, 1);
9998             }
9999         }
10000         break;
10001 #ifdef TARGET_NR_olduname
10002     case TARGET_NR_olduname:
10003         goto unimplemented;
10004 #endif
10005 #ifdef TARGET_NR_iopl
10006     case TARGET_NR_iopl:
10007         goto unimplemented;
10008 #endif
10009     case TARGET_NR_vhangup:
10010         ret = get_errno(vhangup());
10011         break;
10012 #ifdef TARGET_NR_idle
10013     case TARGET_NR_idle:
10014         goto unimplemented;
10015 #endif
10016 #ifdef TARGET_NR_syscall
10017     case TARGET_NR_syscall:
10018         ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10019                          arg6, arg7, arg8, 0);
10020         break;
10021 #endif
10022     case TARGET_NR_wait4:
10023         {
10024             int status;
10025             abi_long status_ptr = arg2;
10026             struct rusage rusage, *rusage_ptr;
10027             abi_ulong target_rusage = arg4;
10028             abi_long rusage_err;
10029             if (target_rusage)
10030                 rusage_ptr = &rusage;
10031             else
10032                 rusage_ptr = NULL;
10033             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10034             if (!is_error(ret)) {
10035                 if (status_ptr && ret) {
10036                     status = host_to_target_waitstatus(status);
10037                     if (put_user_s32(status, status_ptr))
10038                         goto efault;
10039                 }
10040                 if (target_rusage) {
10041                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10042                     if (rusage_err) {
10043                         ret = rusage_err;
10044                     }
10045                 }
10046             }
10047         }
10048         break;
10049 #ifdef TARGET_NR_swapoff
10050     case TARGET_NR_swapoff:
10051         if (!(p = lock_user_string(arg1)))
10052             goto efault;
10053         ret = get_errno(swapoff(p));
10054         unlock_user(p, arg1, 0);
10055         break;
10056 #endif
10057     case TARGET_NR_sysinfo:
10058         {
10059             struct target_sysinfo *target_value;
10060             struct sysinfo value;
10061             ret = get_errno(sysinfo(&value));
10062             if (!is_error(ret) && arg1)
10063             {
10064                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10065                     goto efault;
10066                 __put_user(value.uptime, &target_value->uptime);
10067                 __put_user(value.loads[0], &target_value->loads[0]);
10068                 __put_user(value.loads[1], &target_value->loads[1]);
10069                 __put_user(value.loads[2], &target_value->loads[2]);
10070                 __put_user(value.totalram, &target_value->totalram);
10071                 __put_user(value.freeram, &target_value->freeram);
10072                 __put_user(value.sharedram, &target_value->sharedram);
10073                 __put_user(value.bufferram, &target_value->bufferram);
10074                 __put_user(value.totalswap, &target_value->totalswap);
10075                 __put_user(value.freeswap, &target_value->freeswap);
10076                 __put_user(value.procs, &target_value->procs);
10077                 __put_user(value.totalhigh, &target_value->totalhigh);
10078                 __put_user(value.freehigh, &target_value->freehigh);
10079                 __put_user(value.mem_unit, &target_value->mem_unit);
10080                 unlock_user_struct(target_value, arg1, 1);
10081             }
10082         }
10083         break;
10084 #ifdef TARGET_NR_ipc
10085     case TARGET_NR_ipc:
10086         ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10087         break;
10088 #endif
10089 #ifdef TARGET_NR_semget
10090     case TARGET_NR_semget:
10091         ret = get_errno(semget(arg1, arg2, arg3));
10092         break;
10093 #endif
10094 #ifdef TARGET_NR_semop
10095     case TARGET_NR_semop:
10096         ret = do_semop(arg1, arg2, arg3);
10097         break;
10098 #endif
10099 #ifdef TARGET_NR_semctl
10100     case TARGET_NR_semctl:
10101         ret = do_semctl(arg1, arg2, arg3, arg4);
10102         break;
10103 #endif
10104 #ifdef TARGET_NR_msgctl
10105     case TARGET_NR_msgctl:
10106         ret = do_msgctl(arg1, arg2, arg3);
10107         break;
10108 #endif
10109 #ifdef TARGET_NR_msgget
10110     case TARGET_NR_msgget:
10111         ret = get_errno(msgget(arg1, arg2));
10112         break;
10113 #endif
10114 #ifdef TARGET_NR_msgrcv
10115     case TARGET_NR_msgrcv:
10116         ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10117         break;
10118 #endif
10119 #ifdef TARGET_NR_msgsnd
10120     case TARGET_NR_msgsnd:
10121         ret = do_msgsnd(arg1, arg2, arg3, arg4);
10122         break;
10123 #endif
10124 #ifdef TARGET_NR_shmget
10125     case TARGET_NR_shmget:
10126         ret = get_errno(shmget(arg1, arg2, arg3));
10127         break;
10128 #endif
10129 #ifdef TARGET_NR_shmctl
10130     case TARGET_NR_shmctl:
10131         ret = do_shmctl(arg1, arg2, arg3);
10132         break;
10133 #endif
10134 #ifdef TARGET_NR_shmat
10135     case TARGET_NR_shmat:
10136         ret = do_shmat(cpu_env, arg1, arg2, arg3);
10137         break;
10138 #endif
10139 #ifdef TARGET_NR_shmdt
10140     case TARGET_NR_shmdt:
10141         ret = do_shmdt(arg1);
10142         break;
10143 #endif
10144     case TARGET_NR_fsync:
10145         ret = get_errno(fsync(arg1));
10146         break;
10147     case TARGET_NR_clone:
10148         /* Linux manages to have three different orderings for its
10149          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10150          * match the kernel's CONFIG_CLONE_* settings.
10151          * Microblaze is further special in that it uses a sixth
10152          * implicit argument to clone for the TLS pointer.
10153          */
10154 #if defined(TARGET_MICROBLAZE)
10155         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10156 #elif defined(TARGET_CLONE_BACKWARDS)
10157         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10158 #elif defined(TARGET_CLONE_BACKWARDS2)
10159         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10160 #else
10161         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10162 #endif
10163         break;
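    /*
     * For reference, a hedged summary of the register orderings the #ifdefs
     * above select between (derived from the do_fork() calls, assuming its
     * usual (flags, newsp, parent_tidptr, newtls, child_tidptr) signature):
     *
     *     default:          flags, newsp, parent_tidptr, child_tidptr, tls
     *     CLONE_BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
     *     CLONE_BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
     *
     * Microblaze additionally passes the TLS pointer as an implicit sixth
     * argument, as the comment above notes.
     */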
10164 #ifdef __NR_exit_group
10165         /* new thread calls */
10166     case TARGET_NR_exit_group:
10167         preexit_cleanup(cpu_env, arg1);
10168         ret = get_errno(exit_group(arg1));
10169         break;
10170 #endif
10171     case TARGET_NR_setdomainname:
10172         if (!(p = lock_user_string(arg1)))
10173             goto efault;
10174         ret = get_errno(setdomainname(p, arg2));
10175         unlock_user(p, arg1, 0);
10176         break;
10177     case TARGET_NR_uname:
10178         /* no need to transcode because we use the linux syscall */
10179         {
10180             struct new_utsname * buf;
10181 
10182             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10183                 goto efault;
10184             ret = get_errno(sys_uname(buf));
10185             if (!is_error(ret)) {
10186                 /* Overwrite the native machine name with whatever is being
10187                    emulated. */
10188                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10189                           sizeof(buf->machine));
10190                 /* Allow the user to override the reported release.  */
10191                 if (qemu_uname_release && *qemu_uname_release) {
10192                     g_strlcpy(buf->release, qemu_uname_release,
10193                               sizeof(buf->release));
10194                 }
10195             }
10196             unlock_user_struct(buf, arg1, 1);
10197         }
10198         break;
10199 #ifdef TARGET_I386
10200     case TARGET_NR_modify_ldt:
10201         ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
10202         break;
10203 #if !defined(TARGET_X86_64)
10204     case TARGET_NR_vm86old:
10205         goto unimplemented;
10206     case TARGET_NR_vm86:
10207         ret = do_vm86(cpu_env, arg1, arg2);
10208         break;
10209 #endif
10210 #endif
10211     case TARGET_NR_adjtimex:
10212         {
10213             struct timex host_buf;
10214 
10215             if (target_to_host_timex(&host_buf, arg1) != 0) {
10216                 goto efault;
10217             }
10218             ret = get_errno(adjtimex(&host_buf));
10219             if (!is_error(ret)) {
10220                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10221                     goto efault;
10222                 }
10223             }
10224         }
10225         break;
10226 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10227     case TARGET_NR_clock_adjtime:
10228         {
10229             struct timex htx, *phtx = &htx;
10230 
10231             if (target_to_host_timex(phtx, arg2) != 0) {
10232                 goto efault;
10233             }
10234             ret = get_errno(clock_adjtime(arg1, phtx));
10235             if (!is_error(ret) && phtx) {
10236                 if (host_to_target_timex(arg2, phtx) != 0) {
10237                     goto efault;
10238                 }
10239             }
10240         }
10241         break;
10242 #endif
10243 #ifdef TARGET_NR_create_module
10244     case TARGET_NR_create_module:
10245 #endif
10246     case TARGET_NR_init_module:
10247     case TARGET_NR_delete_module:
10248 #ifdef TARGET_NR_get_kernel_syms
10249     case TARGET_NR_get_kernel_syms:
10250 #endif
10251         goto unimplemented;
10252     case TARGET_NR_quotactl:
10253         goto unimplemented;
10254     case TARGET_NR_getpgid:
10255         ret = get_errno(getpgid(arg1));
10256         break;
10257     case TARGET_NR_fchdir:
10258         ret = get_errno(fchdir(arg1));
10259         break;
10260 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10261     case TARGET_NR_bdflush:
10262         goto unimplemented;
10263 #endif
10264 #ifdef TARGET_NR_sysfs
10265     case TARGET_NR_sysfs:
10266         goto unimplemented;
10267 #endif
10268     case TARGET_NR_personality:
10269         ret = get_errno(personality(arg1));
10270         break;
10271 #ifdef TARGET_NR_afs_syscall
10272     case TARGET_NR_afs_syscall:
10273         goto unimplemented;
10274 #endif
10275 #ifdef TARGET_NR__llseek /* Not on alpha */
10276     case TARGET_NR__llseek:
10277         {
10278             int64_t res;
10279 #if !defined(__NR_llseek)
10280             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10281             if (res == -1) {
10282                 ret = get_errno(res);
10283             } else {
10284                 ret = 0;
10285             }
10286 #else
10287             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10288 #endif
10289             if ((ret == 0) && put_user_s64(res, arg4)) {
10290                 goto efault;
10291             }
10292         }
10293         break;
10294 #endif
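    /*
     * Guest-side sketch of the ABI handled above (illustrative only): a
     * 32-bit guest seeking to byte offset 0x123456789 issues
     *
     *     loff_t result;
     *     _llseek(fd, 0x1, 0x23456789, &result, SEEK_SET);
     *
     * i.e. the 64-bit offset split into high (arg2) and low (arg3) words,
     * with the full 64-bit result written back through arg4.
     */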
10295 #ifdef TARGET_NR_getdents
10296     case TARGET_NR_getdents:
10297 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10298 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10299         {
10300             struct target_dirent *target_dirp;
10301             struct linux_dirent *dirp;
10302             abi_long count = arg3;
10303 
10304             dirp = g_try_malloc(count);
10305             if (!dirp) {
10306                 ret = -TARGET_ENOMEM;
10307                 goto fail;
10308             }
10309 
10310             ret = get_errno(sys_getdents(arg1, dirp, count));
10311             if (!is_error(ret)) {
10312                 struct linux_dirent *de;
10313                 struct target_dirent *tde;
10314                 int len = ret;
10315                 int reclen, treclen;
10316                 int count1, tnamelen;
10317 
10318                 count1 = 0;
10319                 de = dirp;
10320                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10321                     goto efault;
10322                 tde = target_dirp;
10323                 while (len > 0) {
10324                     reclen = de->d_reclen;
10325                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10326                     assert(tnamelen >= 0);
10327                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10328                     assert(count1 + treclen <= count);
10329                     tde->d_reclen = tswap16(treclen);
10330                     tde->d_ino = tswapal(de->d_ino);
10331                     tde->d_off = tswapal(de->d_off);
10332                     memcpy(tde->d_name, de->d_name, tnamelen);
10333                     de = (struct linux_dirent *)((char *)de + reclen);
10334                     len -= reclen;
10335                     tde = (struct target_dirent *)((char *)tde + treclen);
10336                     count1 += treclen;
10337                 }
10338                 ret = count1;
10339                 unlock_user(target_dirp, arg2, ret);
10340             }
10341             g_free(dirp);
10342         }
10343 #else
10344         {
10345             struct linux_dirent *dirp;
10346             abi_long count = arg3;
10347 
10348             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10349                 goto efault;
10350             ret = get_errno(sys_getdents(arg1, dirp, count));
10351             if (!is_error(ret)) {
10352                 struct linux_dirent *de;
10353                 int len = ret;
10354                 int reclen;
10355                 de = dirp;
10356                 while (len > 0) {
10357                     reclen = de->d_reclen;
10358                     if (reclen > len)
10359                         break;
10360                     de->d_reclen = tswap16(reclen);
10361                     tswapls(&de->d_ino);
10362                     tswapls(&de->d_off);
10363                     de = (struct linux_dirent *)((char *)de + reclen);
10364                     len -= reclen;
10365                 }
10366             }
10367             unlock_user(dirp, arg2, ret);
10368         }
10369 #endif
10370 #else
10371         /* Implement getdents in terms of getdents64 */
10372         {
10373             struct linux_dirent64 *dirp;
10374             abi_long count = arg3;
10375 
10376             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10377             if (!dirp) {
10378                 goto efault;
10379             }
10380             ret = get_errno(sys_getdents64(arg1, dirp, count));
10381             if (!is_error(ret)) {
10382                 /* Convert the dirent64 structs to target dirent.  We do this
10383                  * in-place, since we can guarantee that a target_dirent is no
10384                  * larger than a dirent64; however this means we have to be
10385                  * careful to read everything before writing in the new format.
10386                  */
10387                 struct linux_dirent64 *de;
10388                 struct target_dirent *tde;
10389                 int len = ret;
10390                 int tlen = 0;
10391 
10392                 de = dirp;
10393                 tde = (struct target_dirent *)dirp;
10394                 while (len > 0) {
10395                     int namelen, treclen;
10396                     int reclen = de->d_reclen;
10397                     uint64_t ino = de->d_ino;
10398                     int64_t off = de->d_off;
10399                     uint8_t type = de->d_type;
10400 
10401                     namelen = strlen(de->d_name);
10402                     treclen = offsetof(struct target_dirent, d_name)
10403                         + namelen + 2;
10404                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10405 
10406                     memmove(tde->d_name, de->d_name, namelen + 1);
10407                     tde->d_ino = tswapal(ino);
10408                     tde->d_off = tswapal(off);
10409                     tde->d_reclen = tswap16(treclen);
10410                     /* The target_dirent type is in what was formerly a padding
10411                      * byte at the end of the structure:
10412                      */
10413                     *(((char *)tde) + treclen - 1) = type;
10414 
10415                     de = (struct linux_dirent64 *)((char *)de + reclen);
10416                     tde = (struct target_dirent *)((char *)tde + treclen);
10417                     len -= reclen;
10418                     tlen += treclen;
10419                 }
10420                 ret = tlen;
10421             }
10422             unlock_user(dirp, arg2, ret);
10423         }
10424 #endif
10425         break;
10426 #endif /* TARGET_NR_getdents */
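    /*
     * Record-layout sketch behind the in-place getdents64 -> dirent
     * conversion above (offsets are illustrative; the authoritative layouts
     * are the struct definitions used elsewhere in linux-user):
     *
     *     linux_dirent64: u64 d_ino, s64 d_off, u16 d_reclen, u8 d_type,
     *                     char d_name[]
     *     target_dirent:  abi_long d_ino, abi_long d_off, u16 d_reclen,
     *                     char d_name[], with d_type stored in the final
     *                     byte of the abi_long-aligned record
     *
     * Because the old-format record is never larger than the 64-bit one,
     * each entry can safely be rewritten over itself front to back.
     */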
10427 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10428     case TARGET_NR_getdents64:
10429         {
10430             struct linux_dirent64 *dirp;
10431             abi_long count = arg3;
10432             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10433                 goto efault;
10434             ret = get_errno(sys_getdents64(arg1, dirp, count));
10435             if (!is_error(ret)) {
10436                 struct linux_dirent64 *de;
10437                 int len = ret;
10438                 int reclen;
10439                 de = dirp;
10440                 while (len > 0) {
10441                     reclen = de->d_reclen;
10442                     if (reclen > len)
10443                         break;
10444                     de->d_reclen = tswap16(reclen);
10445                     tswap64s((uint64_t *)&de->d_ino);
10446                     tswap64s((uint64_t *)&de->d_off);
10447                     de = (struct linux_dirent64 *)((char *)de + reclen);
10448                     len -= reclen;
10449                 }
10450             }
10451             unlock_user(dirp, arg2, ret);
10452         }
10453         break;
10454 #endif /* TARGET_NR_getdents64 */
10455 #if defined(TARGET_NR__newselect)
10456     case TARGET_NR__newselect:
10457         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10458         break;
10459 #endif
10460 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10461 # ifdef TARGET_NR_poll
10462     case TARGET_NR_poll:
10463 # endif
10464 # ifdef TARGET_NR_ppoll
10465     case TARGET_NR_ppoll:
10466 # endif
10467         {
10468             struct target_pollfd *target_pfd;
10469             unsigned int nfds = arg2;
10470             struct pollfd *pfd;
10471             unsigned int i;
10472 
10473             pfd = NULL;
10474             target_pfd = NULL;
10475             if (nfds) {
10476                 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10477                     ret = -TARGET_EINVAL;
10478                     break;
10479                 }
10480 
10481                 target_pfd = lock_user(VERIFY_WRITE, arg1,
10482                                        sizeof(struct target_pollfd) * nfds, 1);
10483                 if (!target_pfd) {
10484                     goto efault;
10485                 }
10486 
10487                 pfd = alloca(sizeof(struct pollfd) * nfds);
10488                 for (i = 0; i < nfds; i++) {
10489                     pfd[i].fd = tswap32(target_pfd[i].fd);
10490                     pfd[i].events = tswap16(target_pfd[i].events);
10491                 }
10492             }
10493 
10494             switch (num) {
10495 # ifdef TARGET_NR_ppoll
10496             case TARGET_NR_ppoll:
10497             {
10498                 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10499                 target_sigset_t *target_set;
10500                 sigset_t _set, *set = &_set;
10501 
10502                 if (arg3) {
10503                     if (target_to_host_timespec(timeout_ts, arg3)) {
10504                         unlock_user(target_pfd, arg1, 0);
10505                         goto efault;
10506                     }
10507                 } else {
10508                     timeout_ts = NULL;
10509                 }
10510 
10511                 if (arg4) {
10512                     if (arg5 != sizeof(target_sigset_t)) {
10513                         unlock_user(target_pfd, arg1, 0);
10514                         ret = -TARGET_EINVAL;
10515                         break;
10516                     }
10517 
10518                     target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10519                     if (!target_set) {
10520                         unlock_user(target_pfd, arg1, 0);
10521                         goto efault;
10522                     }
10523                     target_to_host_sigset(set, target_set);
10524                 } else {
10525                     set = NULL;
10526                 }
10527 
10528                 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10529                                            set, SIGSET_T_SIZE));
10530 
10531                 if (!is_error(ret) && arg3) {
10532                     host_to_target_timespec(arg3, timeout_ts);
10533                 }
10534                 if (arg4) {
10535                     unlock_user(target_set, arg4, 0);
10536                 }
10537                 break;
10538             }
10539 # endif
10540 # ifdef TARGET_NR_poll
10541             case TARGET_NR_poll:
10542             {
10543                 struct timespec ts, *pts;
10544 
10545                 if (arg3 >= 0) {
10546                     /* Convert ms to secs, ns */
10547                     ts.tv_sec = arg3 / 1000;
10548                     ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10549                     pts = &ts;
10550                 } else {
10551                     /* -ve poll() timeout means "infinite" */
10552                     pts = NULL;
10553                 }
10554                 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10555                 break;
10556             }
10557 # endif
10558             default:
10559                 g_assert_not_reached();
10560             }
10561 
10562             if (!is_error(ret)) {
10563                 for(i = 0; i < nfds; i++) {
10564                     target_pfd[i].revents = tswap16(pfd[i].revents);
10565                 }
10566             }
10567             unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10568         }
10569         break;
10570 #endif
10571     case TARGET_NR_flock:
10572         /* NOTE: the flock constant seems to be the same for every
10573            Linux platform */
10574         ret = get_errno(safe_flock(arg1, arg2));
10575         break;
10576     case TARGET_NR_readv:
10577         {
10578             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10579             if (vec != NULL) {
10580                 ret = get_errno(safe_readv(arg1, vec, arg3));
10581                 unlock_iovec(vec, arg2, arg3, 1);
10582             } else {
10583                 ret = -host_to_target_errno(errno);
10584             }
10585         }
10586         break;
10587     case TARGET_NR_writev:
10588         {
10589             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10590             if (vec != NULL) {
10591                 ret = get_errno(safe_writev(arg1, vec, arg3));
10592                 unlock_iovec(vec, arg2, arg3, 0);
10593             } else {
10594                 ret = -host_to_target_errno(errno);
10595             }
10596         }
10597         break;
10598 #if defined(TARGET_NR_preadv)
10599     case TARGET_NR_preadv:
10600         {
10601             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10602             if (vec != NULL) {
10603                 unsigned long low, high;
10604 
10605                 target_to_host_low_high(arg4, arg5, &low, &high);
10606                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10607                 unlock_iovec(vec, arg2, arg3, 1);
10608             } else {
10609                 ret = -host_to_target_errno(errno);
10610            }
10611         }
10612         break;
10613 #endif
10614 #if defined(TARGET_NR_pwritev)
10615     case TARGET_NR_pwritev:
10616         {
10617             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10618             if (vec != NULL) {
10619                 unsigned long low, high;
10620 
10621                 target_to_host_low_high(arg4, arg5, &low, &high);
10622                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10623                 unlock_iovec(vec, arg2, arg3, 0);
10624             } else {
10625                 ret = -host_to_target_errno(errno);
10626            }
10627         }
10628         break;
10629 #endif
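    /*
     * Hedged note on the preadv/pwritev cases above: the guest passes the
     * 64-bit file position split across arg4 and arg5, and
     * target_to_host_low_high() is assumed to recombine them into the
     * low/high pair that safe_preadv()/safe_pwritev() expect; which guest
     * argument carries the low word is target-ABI dependent, which is why
     * the helper exists. As an illustration, a 32-bit guest reading at
     * byte offset 0x100000000 supplies low word 0 and high word 1.
     */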
10630     case TARGET_NR_getsid:
10631         ret = get_errno(getsid(arg1));
10632         break;
10633 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10634     case TARGET_NR_fdatasync:
10635         ret = get_errno(fdatasync(arg1));
10636         break;
10637 #endif
10638 #ifdef TARGET_NR__sysctl
10639     case TARGET_NR__sysctl:
10640         /* We don't implement this, but ENOTDIR is always a safe
10641            return value. */
10642         ret = -TARGET_ENOTDIR;
10643         break;
10644 #endif
10645     case TARGET_NR_sched_getaffinity:
10646         {
10647             unsigned int mask_size;
10648             unsigned long *mask;
10649 
10650             /*
10651              * sched_getaffinity needs multiples of ulong, so need to take
10652              * care of mismatches between target ulong and host ulong sizes.
10653              */
10654             if (arg2 & (sizeof(abi_ulong) - 1)) {
10655                 ret = -TARGET_EINVAL;
10656                 break;
10657             }
10658             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10659 
10660             mask = alloca(mask_size);
10661             memset(mask, 0, mask_size);
10662             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10663 
10664             if (!is_error(ret)) {
10665                 if (ret > arg2) {
10666                     /* More data returned than the caller's buffer will fit.
10667                      * This only happens if sizeof(abi_long) < sizeof(long)
10668                      * and the caller passed us a buffer holding an odd number
10669                      * of abi_longs. If the host kernel is actually using the
10670                      * extra 4 bytes then fail EINVAL; otherwise we can just
10671                      * ignore them and only copy the interesting part.
10672                      */
10673                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10674                     if (numcpus > arg2 * 8) {
10675                         ret = -TARGET_EINVAL;
10676                         break;
10677                     }
10678                     ret = arg2;
10679                 }
10680 
10681                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10682                     goto efault;
10683                 }
10684             }
10685         }
10686         break;
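    /*
     * Small worked example of the size handling above (illustration only):
     * a 32-bit guest calling sched_getaffinity(pid, 4, buf) on a 64-bit
     * host passes the abi_ulong-multiple check (4 % 4 == 0), and mask_size
     * is rounded up to a whole host long:
     *
     *     mask_size = (4 + 7) & ~7 = 8
     *
     * so the host syscall always operates on complete unsigned longs even
     * though the guest buffer is smaller.
     */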
10687     case TARGET_NR_sched_setaffinity:
10688         {
10689             unsigned int mask_size;
10690             unsigned long *mask;
10691 
10692             /*
10693              * sched_setaffinity needs multiples of ulong, so need to take
10694              * care of mismatches between target ulong and host ulong sizes.
10695              */
10696             if (arg2 & (sizeof(abi_ulong) - 1)) {
10697                 ret = -TARGET_EINVAL;
10698                 break;
10699             }
10700             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10701             mask = alloca(mask_size);
10702 
10703             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10704             if (ret) {
10705                 break;
10706             }
10707 
10708             ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10709         }
10710         break;
10711     case TARGET_NR_getcpu:
10712         {
10713             unsigned cpu, node;
10714             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10715                                        arg2 ? &node : NULL,
10716                                        NULL));
10717             if (is_error(ret)) {
10718                 goto fail;
10719             }
10720             if (arg1 && put_user_u32(cpu, arg1)) {
10721                 goto efault;
10722             }
10723             if (arg2 && put_user_u32(node, arg2)) {
10724                 goto efault;
10725             }
10726         }
10727         break;
10728     case TARGET_NR_sched_setparam:
10729         {
10730             struct sched_param *target_schp;
10731             struct sched_param schp;
10732 
10733             if (arg2 == 0) {
10734                 return -TARGET_EINVAL;
10735             }
10736             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10737                 goto efault;
10738             schp.sched_priority = tswap32(target_schp->sched_priority);
10739             unlock_user_struct(target_schp, arg2, 0);
10740             ret = get_errno(sched_setparam(arg1, &schp));
10741         }
10742         break;
10743     case TARGET_NR_sched_getparam:
10744         {
10745             struct sched_param *target_schp;
10746             struct sched_param schp;
10747 
10748             if (arg2 == 0) {
10749                 return -TARGET_EINVAL;
10750             }
10751             ret = get_errno(sched_getparam(arg1, &schp));
10752             if (!is_error(ret)) {
10753                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10754                     goto efault;
10755                 target_schp->sched_priority = tswap32(schp.sched_priority);
10756                 unlock_user_struct(target_schp, arg2, 1);
10757             }
10758         }
10759         break;
10760     case TARGET_NR_sched_setscheduler:
10761         {
10762             struct sched_param *target_schp;
10763             struct sched_param schp;
10764             if (arg3 == 0) {
10765                 return -TARGET_EINVAL;
10766             }
10767             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10768                 goto efault;
10769             schp.sched_priority = tswap32(target_schp->sched_priority);
10770             unlock_user_struct(target_schp, arg3, 0);
10771             ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10772         }
10773         break;
10774     case TARGET_NR_sched_getscheduler:
10775         ret = get_errno(sched_getscheduler(arg1));
10776         break;
10777     case TARGET_NR_sched_yield:
10778         ret = get_errno(sched_yield());
10779         break;
10780     case TARGET_NR_sched_get_priority_max:
10781         ret = get_errno(sched_get_priority_max(arg1));
10782         break;
10783     case TARGET_NR_sched_get_priority_min:
10784         ret = get_errno(sched_get_priority_min(arg1));
10785         break;
10786     case TARGET_NR_sched_rr_get_interval:
10787         {
10788             struct timespec ts;
10789             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10790             if (!is_error(ret)) {
10791                 ret = host_to_target_timespec(arg2, &ts);
10792             }
10793         }
10794         break;
10795     case TARGET_NR_nanosleep:
10796         {
10797             struct timespec req, rem;
10798             target_to_host_timespec(&req, arg1);
10799             ret = get_errno(safe_nanosleep(&req, &rem));
10800             if (is_error(ret) && arg2) {
10801                 host_to_target_timespec(arg2, &rem);
10802             }
10803         }
10804         break;
10805 #ifdef TARGET_NR_query_module
10806     case TARGET_NR_query_module:
10807         goto unimplemented;
10808 #endif
10809 #ifdef TARGET_NR_nfsservctl
10810     case TARGET_NR_nfsservctl:
10811         goto unimplemented;
10812 #endif
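          /* prctl: options whose second argument is a pointer into guest
           * memory (PR_GET_PDEATHSIG, PR_GET_NAME, PR_SET_NAME) are converted
           * explicitly, a few others need special treatment, and everything
           * else is assumed to take plain integers and is passed straight
           * through to the host prctl().
           */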
10813     case TARGET_NR_prctl:
10814         switch (arg1) {
10815         case PR_GET_PDEATHSIG:
10816         {
10817             int deathsig;
10818             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10819             if (!is_error(ret) && arg2
10820                 && put_user_ual(deathsig, arg2)) {
10821                 goto efault;
10822             }
10823             break;
10824         }
10825 #ifdef PR_GET_NAME
10826         case PR_GET_NAME:
10827         {
10828             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10829             if (!name) {
10830                 goto efault;
10831             }
10832             ret = get_errno(prctl(arg1, (unsigned long)name,
10833                                   arg3, arg4, arg5));
10834             unlock_user(name, arg2, 16);
10835             break;
10836         }
10837         case PR_SET_NAME:
10838         {
10839             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10840             if (!name) {
10841                 goto efault;
10842             }
10843             ret = get_errno(prctl(arg1, (unsigned long)name,
10844                                   arg3, arg4, arg5));
10845             unlock_user(name, arg2, 0);
10846             break;
10847         }
10848 #endif
10849 #ifdef TARGET_AARCH64
10850         case TARGET_PR_SVE_SET_VL:
10851             /* We cannot support either PR_SVE_SET_VL_ONEXEC
10852                or PR_SVE_VL_INHERIT.  Therefore, anything above
10853                ARM_MAX_VQ results in EINVAL.  */
10854             ret = -TARGET_EINVAL;
10855             if (arm_feature(cpu_env, ARM_FEATURE_SVE)
10856                 && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
10857                 CPUARMState *env = cpu_env;
10858                 int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10859                 int vq = MAX(arg2 / 16, 1);
10860 
10861                 if (vq < old_vq) {
10862                     aarch64_sve_narrow_vq(env, vq);
10863                 }
10864                 env->vfp.zcr_el[1] = vq - 1;
10865                 ret = vq * 16;
10866             }
10867             break;
10868         case TARGET_PR_SVE_GET_VL:
10869             ret = -TARGET_EINVAL;
10870             if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
10871                 CPUARMState *env = cpu_env;
10872                 ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
10873             }
10874             break;
10875 #endif /* AARCH64 */
10876         case PR_GET_SECCOMP:
10877         case PR_SET_SECCOMP:
10878             /* Disable seccomp to prevent the target disabling syscalls we
10879              * need. */
10880             ret = -TARGET_EINVAL;
10881             break;
10882         default:
10883             /* Most prctl options have no pointer arguments */
10884             ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10885             break;
10886         }
10887         break;
10888 #ifdef TARGET_NR_arch_prctl
10889     case TARGET_NR_arch_prctl:
10890 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10891         ret = do_arch_prctl(cpu_env, arg1, arg2);
10892         break;
10893 #else
10894         goto unimplemented;
10895 #endif
10896 #endif
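          /* pread64/pwrite64: on 32-bit ABIs the 64-bit file offset arrives as
           * a register pair; regpairs_aligned() reports whether this ABI
           * requires such pairs to be register-aligned, in which case the
           * offset actually lives in arg5/arg6 rather than arg4/arg5.
           */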
10897 #ifdef TARGET_NR_pread64
10898     case TARGET_NR_pread64:
10899         if (regpairs_aligned(cpu_env, num)) {
10900             arg4 = arg5;
10901             arg5 = arg6;
10902         }
10903         if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10904             goto efault;
10905         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10906         unlock_user(p, arg2, ret);
10907         break;
10908     case TARGET_NR_pwrite64:
10909         if (regpairs_aligned(cpu_env, num)) {
10910             arg4 = arg5;
10911             arg5 = arg6;
10912         }
10913         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10914             goto efault;
10915         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10916         unlock_user(p, arg2, 0);
10917         break;
10918 #endif
10919     case TARGET_NR_getcwd:
10920         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10921             goto efault;
10922         ret = get_errno(sys_getcwd1(p, arg2));
10923         unlock_user(p, arg1, ret);
10924         break;
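          /* capget/capset share one implementation.  The header is converted
           * both ways because the kernel writes back the capability version it
           * supports, and versions 2 and up carry two __user_cap_data_struct
           * entries instead of one.
           */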
10925     case TARGET_NR_capget:
10926     case TARGET_NR_capset:
10927     {
10928         struct target_user_cap_header *target_header;
10929         struct target_user_cap_data *target_data = NULL;
10930         struct __user_cap_header_struct header;
10931         struct __user_cap_data_struct data[2];
10932         struct __user_cap_data_struct *dataptr = NULL;
10933         int i, target_datalen;
10934         int data_items = 1;
10935 
10936         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10937             goto efault;
10938         }
10939         header.version = tswap32(target_header->version);
10940         header.pid = tswap32(target_header->pid);
10941 
10942         if (header.version != _LINUX_CAPABILITY_VERSION) {
10943             /* Versions 2 and up take a pointer to two user_data structs */
10944             data_items = 2;
10945         }
10946 
10947         target_datalen = sizeof(*target_data) * data_items;
10948 
10949         if (arg2) {
10950             if (num == TARGET_NR_capget) {
10951                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10952             } else {
10953                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10954             }
10955             if (!target_data) {
10956                 unlock_user_struct(target_header, arg1, 0);
10957                 goto efault;
10958             }
10959 
10960             if (num == TARGET_NR_capset) {
10961                 for (i = 0; i < data_items; i++) {
10962                     data[i].effective = tswap32(target_data[i].effective);
10963                     data[i].permitted = tswap32(target_data[i].permitted);
10964                     data[i].inheritable = tswap32(target_data[i].inheritable);
10965                 }
10966             }
10967 
10968             dataptr = data;
10969         }
10970 
10971         if (num == TARGET_NR_capget) {
10972             ret = get_errno(capget(&header, dataptr));
10973         } else {
10974             ret = get_errno(capset(&header, dataptr));
10975         }
10976 
10977         /* The kernel always updates version for both capget and capset */
10978         target_header->version = tswap32(header.version);
10979         unlock_user_struct(target_header, arg1, 1);
10980 
10981         if (arg2) {
10982             if (num == TARGET_NR_capget) {
10983                 for (i = 0; i < data_items; i++) {
10984                     target_data[i].effective = tswap32(data[i].effective);
10985                     target_data[i].permitted = tswap32(data[i].permitted);
10986                     target_data[i].inheritable = tswap32(data[i].inheritable);
10987                 }
10988                 unlock_user(target_data, arg2, target_datalen);
10989             } else {
10990                 unlock_user(target_data, arg2, 0);
10991             }
10992         }
10993         break;
10994     }
10995     case TARGET_NR_sigaltstack:
10996         ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10997         break;
10998 
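          /* sendfile/sendfile64: the optional offset is copied in from guest
           * memory, handed to the host by reference and copied back afterwards;
           * sendfile uses an abi_long-sized offset while sendfile64 always
           * uses a 64-bit one.
           */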
10999 #ifdef CONFIG_SENDFILE
11000     case TARGET_NR_sendfile:
11001     {
11002         off_t *offp = NULL;
11003         off_t off;
11004         if (arg3) {
11005             ret = get_user_sal(off, arg3);
11006             if (is_error(ret)) {
11007                 break;
11008             }
11009             offp = &off;
11010         }
11011         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11012         if (!is_error(ret) && arg3) {
11013             abi_long ret2 = put_user_sal(off, arg3);
11014             if (is_error(ret2)) {
11015                 ret = ret2;
11016             }
11017         }
11018         break;
11019     }
11020 #ifdef TARGET_NR_sendfile64
11021     case TARGET_NR_sendfile64:
11022     {
11023         off_t *offp = NULL;
11024         off_t off;
11025         if (arg3) {
11026             ret = get_user_s64(off, arg3);
11027             if (is_error(ret)) {
11028                 break;
11029             }
11030             offp = &off;
11031         }
11032         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11033         if (!is_error(ret) && arg3) {
11034             abi_long ret2 = put_user_s64(off, arg3);
11035             if (is_error(ret2)) {
11036                 ret = ret2;
11037             }
11038         }
11039         break;
11040     }
11041 #endif
11042 #else
11043     case TARGET_NR_sendfile:
11044 #ifdef TARGET_NR_sendfile64
11045     case TARGET_NR_sendfile64:
11046 #endif
11047         goto unimplemented;
11048 #endif
11049 
11050 #ifdef TARGET_NR_getpmsg
11051     case TARGET_NR_getpmsg:
11052         goto unimplemented;
11053 #endif
11054 #ifdef TARGET_NR_putpmsg
11055     case TARGET_NR_putpmsg:
11056         goto unimplemented;
11057 #endif
11058 #ifdef TARGET_NR_vfork
11059     case TARGET_NR_vfork:
11060         ret = get_errno(do_fork(cpu_env,
11061                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11062                         0, 0, 0, 0));
11063         break;
11064 #endif
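          /* ugetrlimit: the resource number is translated first, and
           * host_to_target_rlim() maps the limit values (including the
           * RLIM_INFINITY encoding, which can differ) into the target's
           * representation.
           */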
11065 #ifdef TARGET_NR_ugetrlimit
11066     case TARGET_NR_ugetrlimit:
11067     {
11068         struct rlimit rlim;
11069         int resource = target_to_host_resource(arg1);
11070         ret = get_errno(getrlimit(resource, &rlim));
11071         if (!is_error(ret)) {
11072             struct target_rlimit *target_rlim;
11073             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11074                 goto efault;
11075             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11076             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11077             unlock_user_struct(target_rlim, arg2, 1);
11078         }
11079         break;
11080     }
11081 #endif
11082 #ifdef TARGET_NR_truncate64
11083     case TARGET_NR_truncate64:
11084         if (!(p = lock_user_string(arg1)))
11085             goto efault;
11086         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11087         unlock_user(p, arg1, 0);
11088         break;
11089 #endif
11090 #ifdef TARGET_NR_ftruncate64
11091     case TARGET_NR_ftruncate64:
11092         ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11093         break;
11094 #endif
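          /* stat64/lstat64/fstat64/fstatat64: the host struct stat is
           * converted into the target's 64-bit stat layout by
           * host_to_target_stat64(), which is expected to pick the right
           * variant for the target ABI.
           */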
11095 #ifdef TARGET_NR_stat64
11096     case TARGET_NR_stat64:
11097         if (!(p = lock_user_string(arg1)))
11098             goto efault;
11099         ret = get_errno(stat(path(p), &st));
11100         unlock_user(p, arg1, 0);
11101         if (!is_error(ret))
11102             ret = host_to_target_stat64(cpu_env, arg2, &st);
11103         break;
11104 #endif
11105 #ifdef TARGET_NR_lstat64
11106     case TARGET_NR_lstat64:
11107         if (!(p = lock_user_string(arg1)))
11108             goto efault;
11109         ret = get_errno(lstat(path(p), &st));
11110         unlock_user(p, arg1, 0);
11111         if (!is_error(ret))
11112             ret = host_to_target_stat64(cpu_env, arg2, &st);
11113         break;
11114 #endif
11115 #ifdef TARGET_NR_fstat64
11116     case TARGET_NR_fstat64:
11117         ret = get_errno(fstat(arg1, &st));
11118         if (!is_error(ret))
11119             ret = host_to_target_stat64(cpu_env, arg2, &st);
11120         break;
11121 #endif
11122 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11123 #ifdef TARGET_NR_fstatat64
11124     case TARGET_NR_fstatat64:
11125 #endif
11126 #ifdef TARGET_NR_newfstatat
11127     case TARGET_NR_newfstatat:
11128 #endif
11129         if (!(p = lock_user_string(arg2)))
11130             goto efault;
11131         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11132         if (!is_error(ret))
11133             ret = host_to_target_stat64(cpu_env, arg3, &st);
11134         break;
11135 #endif
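          /* The syscalls from here down to setfsgid are the legacy variants
           * that use 16-bit uids/gids on targets that have them;
           * low2highuid(), high2lowuid() and their gid counterparts map
           * between those 16-bit target values and the host's full-width ids.
           * The *32 variants further down pass the values through unchanged.
           */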
11136 #ifdef TARGET_NR_lchown
11137     case TARGET_NR_lchown:
11138         if (!(p = lock_user_string(arg1)))
11139             goto efault;
11140         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11141         unlock_user(p, arg1, 0);
11142         break;
11143 #endif
11144 #ifdef TARGET_NR_getuid
11145     case TARGET_NR_getuid:
11146         ret = get_errno(high2lowuid(getuid()));
11147         break;
11148 #endif
11149 #ifdef TARGET_NR_getgid
11150     case TARGET_NR_getgid:
11151         ret = get_errno(high2lowgid(getgid()));
11152         break;
11153 #endif
11154 #ifdef TARGET_NR_geteuid
11155     case TARGET_NR_geteuid:
11156         ret = get_errno(high2lowuid(geteuid()));
11157         break;
11158 #endif
11159 #ifdef TARGET_NR_getegid
11160     case TARGET_NR_getegid:
11161         ret = get_errno(high2lowgid(getegid()));
11162         break;
11163 #endif
11164     case TARGET_NR_setreuid:
11165         ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11166         break;
11167     case TARGET_NR_setregid:
11168         ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11169         break;
11170     case TARGET_NR_getgroups:
11171         {
11172             int gidsetsize = arg1;
11173             target_id *target_grouplist;
11174             gid_t *grouplist;
11175             int i;
11176 
11177             grouplist = alloca(gidsetsize * sizeof(gid_t));
11178             ret = get_errno(getgroups(gidsetsize, grouplist));
11179             if (gidsetsize == 0)
11180                 break;
11181             if (!is_error(ret)) {
11182                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11183                 if (!target_grouplist)
11184                     goto efault;
11185                 for (i = 0; i < ret; i++)
11186                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11187                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11188             }
11189         }
11190         break;
11191     case TARGET_NR_setgroups:
11192         {
11193             int gidsetsize = arg1;
11194             target_id *target_grouplist;
11195             gid_t *grouplist = NULL;
11196             int i;
11197             if (gidsetsize) {
11198                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11199                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11200                 if (!target_grouplist) {
11201                     ret = -TARGET_EFAULT;
11202                     goto fail;
11203                 }
11204                 for (i = 0; i < gidsetsize; i++) {
11205                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11206                 }
11207                 unlock_user(target_grouplist, arg2, 0);
11208             }
11209             ret = get_errno(setgroups(gidsetsize, grouplist));
11210         }
11211         break;
11212     case TARGET_NR_fchown:
11213         ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11214         break;
11215 #if defined(TARGET_NR_fchownat)
11216     case TARGET_NR_fchownat:
11217         if (!(p = lock_user_string(arg2)))
11218             goto efault;
11219         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11220                                  low2highgid(arg4), arg5));
11221         unlock_user(p, arg2, 0);
11222         break;
11223 #endif
11224 #ifdef TARGET_NR_setresuid
11225     case TARGET_NR_setresuid:
11226         ret = get_errno(sys_setresuid(low2highuid(arg1),
11227                                       low2highuid(arg2),
11228                                       low2highuid(arg3)));
11229         break;
11230 #endif
11231 #ifdef TARGET_NR_getresuid
11232     case TARGET_NR_getresuid:
11233         {
11234             uid_t ruid, euid, suid;
11235             ret = get_errno(getresuid(&ruid, &euid, &suid));
11236             if (!is_error(ret)) {
11237                 if (put_user_id(high2lowuid(ruid), arg1)
11238                     || put_user_id(high2lowuid(euid), arg2)
11239                     || put_user_id(high2lowuid(suid), arg3))
11240                     goto efault;
11241             }
11242         }
11243         break;
11244 #endif
11245 #ifdef TARGET_NR_getresgid
11246     case TARGET_NR_setresgid:
11247         ret = get_errno(sys_setresgid(low2highgid(arg1),
11248                                       low2highgid(arg2),
11249                                       low2highgid(arg3)));
11250         break;
11251 #endif
11252 #ifdef TARGET_NR_getresgid
11253     case TARGET_NR_getresgid:
11254         {
11255             gid_t rgid, egid, sgid;
11256             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11257             if (!is_error(ret)) {
11258                 if (put_user_id(high2lowgid(rgid), arg1)
11259                     || put_user_id(high2lowgid(egid), arg2)
11260                     || put_user_id(high2lowgid(sgid), arg3))
11261                     goto efault;
11262             }
11263         }
11264         break;
11265 #endif
11266 #ifdef TARGET_NR_chown
11267     case TARGET_NR_chown:
11268         if (!(p = lock_user_string(arg1)))
11269             goto efault;
11270         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11271         unlock_user(p, arg1, 0);
11272         break;
11273 #endif
11274     case TARGET_NR_setuid:
11275         ret = get_errno(sys_setuid(low2highuid(arg1)));
11276         break;
11277     case TARGET_NR_setgid:
11278         ret = get_errno(sys_setgid(low2highgid(arg1)));
11279         break;
11280     case TARGET_NR_setfsuid:
11281         ret = get_errno(setfsuid(arg1));
11282         break;
11283     case TARGET_NR_setfsgid:
11284         ret = get_errno(setfsgid(arg1));
11285         break;
11286 
11287 #ifdef TARGET_NR_lchown32
11288     case TARGET_NR_lchown32:
11289         if (!(p = lock_user_string(arg1)))
11290             goto efault;
11291         ret = get_errno(lchown(p, arg2, arg3));
11292         unlock_user(p, arg1, 0);
11293         break;
11294 #endif
11295 #ifdef TARGET_NR_getuid32
11296     case TARGET_NR_getuid32:
11297         ret = get_errno(getuid());
11298         break;
11299 #endif
11300 
11301 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11302    /* Alpha specific */
11303     case TARGET_NR_getxuid:
11304          {
11305             uid_t euid;
11306             euid = geteuid();
11307             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11308          }
11309         ret = get_errno(getuid());
11310         break;
11311 #endif
11312 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11313    /* Alpha specific */
11314     case TARGET_NR_getxgid:
11315          {
11316             gid_t egid;
11317             egid = getegid();
11318             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11319          }
11320         ret = get_errno(getgid());
11321         break;
11322 #endif
11323 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11324     /* Alpha specific */
11325     case TARGET_NR_osf_getsysinfo:
11326         ret = -TARGET_EOPNOTSUPP;
11327         switch (arg1) {
11328           case TARGET_GSI_IEEE_FP_CONTROL:
11329             {
11330                 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11331 
11332                 /* Copied from linux ieee_fpcr_to_swcr.  */
11333                 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11334                 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11335                 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11336                                         | SWCR_TRAP_ENABLE_DZE
11337                                         | SWCR_TRAP_ENABLE_OVF);
11338                 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11339                                         | SWCR_TRAP_ENABLE_INE);
11340                 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11341                 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11342 
11343                 if (put_user_u64 (swcr, arg2))
11344                         goto efault;
11345                 ret = 0;
11346             }
11347             break;
11348 
11349           /* case GSI_IEEE_STATE_AT_SIGNAL:
11350              -- Not implemented in linux kernel.
11351              case GSI_UACPROC:
11352              -- Retrieves current unaligned access state; not much used.
11353              case GSI_PROC_TYPE:
11354              -- Retrieves implver information; surely not used.
11355              case GSI_GET_HWRPB:
11356              -- Grabs a copy of the HWRPB; surely not used.
11357           */
11358         }
11359         break;
11360 #endif
11361 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11362     /* Alpha specific */
11363     case TARGET_NR_osf_setsysinfo:
11364         ret = -TARGET_EOPNOTSUPP;
11365         switch (arg1) {
11366           case TARGET_SSI_IEEE_FP_CONTROL:
11367             {
11368                 uint64_t swcr, fpcr, orig_fpcr;
11369 
11370                 if (get_user_u64 (swcr, arg2)) {
11371                     goto efault;
11372                 }
11373                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11374                 fpcr = orig_fpcr & FPCR_DYN_MASK;
11375 
11376                 /* Copied from linux ieee_swcr_to_fpcr.  */
11377                 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11378                 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11379                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11380                                   | SWCR_TRAP_ENABLE_DZE
11381                                   | SWCR_TRAP_ENABLE_OVF)) << 48;
11382                 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11383                                   | SWCR_TRAP_ENABLE_INE)) << 57;
11384                 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11385                 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11386 
11387                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11388                 ret = 0;
11389             }
11390             break;
11391 
11392           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11393             {
11394                 uint64_t exc, fpcr, orig_fpcr;
11395                 int si_code;
11396 
11397                 if (get_user_u64(exc, arg2)) {
11398                     goto efault;
11399                 }
11400 
11401                 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11402 
11403                 /* We only add to the exception status here.  */
11404                 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11405 
11406                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11407                 ret = 0;
11408 
11409                 /* Old exceptions are not signaled.  */
11410                 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11411 
11412                 /* If any exceptions were set by this call and
11413                    are unmasked, send a signal.  */
11414                 si_code = 0;
11415                 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11416                     si_code = TARGET_FPE_FLTRES;
11417                 }
11418                 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11419                     si_code = TARGET_FPE_FLTUND;
11420                 }
11421                 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11422                     si_code = TARGET_FPE_FLTOVF;
11423                 }
11424                 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11425                     si_code = TARGET_FPE_FLTDIV;
11426                 }
11427                 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11428                     si_code = TARGET_FPE_FLTINV;
11429                 }
11430                 if (si_code != 0) {
11431                     target_siginfo_t info;
11432                     info.si_signo = SIGFPE;
11433                     info.si_errno = 0;
11434                     info.si_code = si_code;
11435                     info._sifields._sigfault._addr
11436                         = ((CPUArchState *)cpu_env)->pc;
11437                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11438                                  QEMU_SI_FAULT, &info);
11439                 }
11440             }
11441             break;
11442 
11443           /* case SSI_NVPAIRS:
11444              -- Used with SSIN_UACPROC to enable unaligned accesses.
11445              case SSI_IEEE_STATE_AT_SIGNAL:
11446              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11447              -- Not implemented in linux kernel
11448           */
11449         }
11450         break;
11451 #endif
11452 #ifdef TARGET_NR_osf_sigprocmask
11453     /* Alpha specific.  */
11454     case TARGET_NR_osf_sigprocmask:
11455         {
11456             abi_ulong mask;
11457             int how;
11458             sigset_t set, oldset;
11459 
11460             switch(arg1) {
11461             case TARGET_SIG_BLOCK:
11462                 how = SIG_BLOCK;
11463                 break;
11464             case TARGET_SIG_UNBLOCK:
11465                 how = SIG_UNBLOCK;
11466                 break;
11467             case TARGET_SIG_SETMASK:
11468                 how = SIG_SETMASK;
11469                 break;
11470             default:
11471                 ret = -TARGET_EINVAL;
11472                 goto fail;
11473             }
11474             mask = arg2;
11475             target_to_host_old_sigset(&set, &mask);
11476             ret = do_sigprocmask(how, &set, &oldset);
11477             if (!ret) {
11478                 host_to_target_old_sigset(&mask, &oldset);
11479                 ret = mask;
11480             }
11481         }
11482         break;
11483 #endif
11484 
11485 #ifdef TARGET_NR_getgid32
11486     case TARGET_NR_getgid32:
11487         ret = get_errno(getgid());
11488         break;
11489 #endif
11490 #ifdef TARGET_NR_geteuid32
11491     case TARGET_NR_geteuid32:
11492         ret = get_errno(geteuid());
11493         break;
11494 #endif
11495 #ifdef TARGET_NR_getegid32
11496     case TARGET_NR_getegid32:
11497         ret = get_errno(getegid());
11498         break;
11499 #endif
11500 #ifdef TARGET_NR_setreuid32
11501     case TARGET_NR_setreuid32:
11502         ret = get_errno(setreuid(arg1, arg2));
11503         break;
11504 #endif
11505 #ifdef TARGET_NR_setregid32
11506     case TARGET_NR_setregid32:
11507         ret = get_errno(setregid(arg1, arg2));
11508         break;
11509 #endif
11510 #ifdef TARGET_NR_getgroups32
11511     case TARGET_NR_getgroups32:
11512         {
11513             int gidsetsize = arg1;
11514             uint32_t *target_grouplist;
11515             gid_t *grouplist;
11516             int i;
11517 
11518             grouplist = alloca(gidsetsize * sizeof(gid_t));
11519             ret = get_errno(getgroups(gidsetsize, grouplist));
11520             if (gidsetsize == 0)
11521                 break;
11522             if (!is_error(ret)) {
11523                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11524                 if (!target_grouplist) {
11525                     ret = -TARGET_EFAULT;
11526                     goto fail;
11527                 }
11528                 for (i = 0; i < ret; i++)
11529                     target_grouplist[i] = tswap32(grouplist[i]);
11530                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11531             }
11532         }
11533         break;
11534 #endif
11535 #ifdef TARGET_NR_setgroups32
11536     case TARGET_NR_setgroups32:
11537         {
11538             int gidsetsize = arg1;
11539             uint32_t *target_grouplist;
11540             gid_t *grouplist;
11541             int i;
11542 
11543             grouplist = alloca(gidsetsize * sizeof(gid_t));
11544             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11545             if (!target_grouplist) {
11546                 ret = -TARGET_EFAULT;
11547                 goto fail;
11548             }
11549             for (i = 0; i < gidsetsize; i++)
11550                 grouplist[i] = tswap32(target_grouplist[i]);
11551             unlock_user(target_grouplist, arg2, 0);
11552             ret = get_errno(setgroups(gidsetsize, grouplist));
11553         }
11554         break;
11555 #endif
11556 #ifdef TARGET_NR_fchown32
11557     case TARGET_NR_fchown32:
11558         ret = get_errno(fchown(arg1, arg2, arg3));
11559         break;
11560 #endif
11561 #ifdef TARGET_NR_setresuid32
11562     case TARGET_NR_setresuid32:
11563         ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11564         break;
11565 #endif
11566 #ifdef TARGET_NR_getresuid32
11567     case TARGET_NR_getresuid32:
11568         {
11569             uid_t ruid, euid, suid;
11570             ret = get_errno(getresuid(&ruid, &euid, &suid));
11571             if (!is_error(ret)) {
11572                 if (put_user_u32(ruid, arg1)
11573                     || put_user_u32(euid, arg2)
11574                     || put_user_u32(suid, arg3))
11575                     goto efault;
11576             }
11577         }
11578         break;
11579 #endif
11580 #ifdef TARGET_NR_setresgid32
11581     case TARGET_NR_setresgid32:
11582         ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11583         break;
11584 #endif
11585 #ifdef TARGET_NR_getresgid32
11586     case TARGET_NR_getresgid32:
11587         {
11588             gid_t rgid, egid, sgid;
11589             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11590             if (!is_error(ret)) {
11591                 if (put_user_u32(rgid, arg1)
11592                     || put_user_u32(egid, arg2)
11593                     || put_user_u32(sgid, arg3))
11594                     goto efault;
11595             }
11596         }
11597         break;
11598 #endif
11599 #ifdef TARGET_NR_chown32
11600     case TARGET_NR_chown32:
11601         if (!(p = lock_user_string(arg1)))
11602             goto efault;
11603         ret = get_errno(chown(p, arg2, arg3));
11604         unlock_user(p, arg1, 0);
11605         break;
11606 #endif
11607 #ifdef TARGET_NR_setuid32
11608     case TARGET_NR_setuid32:
11609         ret = get_errno(sys_setuid(arg1));
11610         break;
11611 #endif
11612 #ifdef TARGET_NR_setgid32
11613     case TARGET_NR_setgid32:
11614         ret = get_errno(sys_setgid(arg1));
11615         break;
11616 #endif
11617 #ifdef TARGET_NR_setfsuid32
11618     case TARGET_NR_setfsuid32:
11619         ret = get_errno(setfsuid(arg1));
11620         break;
11621 #endif
11622 #ifdef TARGET_NR_setfsgid32
11623     case TARGET_NR_setfsgid32:
11624         ret = get_errno(setfsgid(arg1));
11625         break;
11626 #endif
11627 
11628     case TARGET_NR_pivot_root:
11629         goto unimplemented;
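          /* mincore: both the guest address range being queried and the vector
           * that receives the per-page flags must be locked; the mincore_fail
           * label ensures the first lock is released if the second one fails.
           */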
11630 #ifdef TARGET_NR_mincore
11631     case TARGET_NR_mincore:
11632         {
11633             void *a;
11634             ret = -TARGET_ENOMEM;
11635             a = lock_user(VERIFY_READ, arg1, arg2, 0);
11636             if (!a) {
11637                 goto fail;
11638             }
11639             ret = -TARGET_EFAULT;
11640             p = lock_user_string(arg3);
11641             if (!p) {
11642                 goto mincore_fail;
11643             }
11644             ret = get_errno(mincore(a, arg2, p));
11645             unlock_user(p, arg3, ret);
11646             mincore_fail:
11647             unlock_user(a, arg1, 0);
11648         }
11649         break;
11650 #endif
11651 #ifdef TARGET_NR_arm_fadvise64_64
11652     case TARGET_NR_arm_fadvise64_64:
11653         /* arm_fadvise64_64 looks like fadvise64_64 but
11654          * with different argument order: fd, advice, offset, len
11655          * rather than the usual fd, offset, len, advice.
11656          * Note that offset and len are both 64-bit so appear as
11657          * pairs of 32-bit registers.
11658          */
11659         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11660                             target_offset64(arg5, arg6), arg2);
11661         ret = -host_to_target_errno(ret);
11662         break;
11663 #endif
11664 
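          /* The fadvise64/fadvise64_64 flavours differ per ABI in how the
           * 64-bit offset/len are split across registers and where the advice
           * argument sits; the register shuffling below normalises that before
           * the single posix_fadvise() call.  posix_fadvise() returns its
           * error directly rather than via errno, hence the explicit
           * host_to_target_errno().
           */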
11665 #if TARGET_ABI_BITS == 32
11666 
11667 #ifdef TARGET_NR_fadvise64_64
11668     case TARGET_NR_fadvise64_64:
11669 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11670         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11671         ret = arg2;
11672         arg2 = arg3;
11673         arg3 = arg4;
11674         arg4 = arg5;
11675         arg5 = arg6;
11676         arg6 = ret;
11677 #else
11678         /* 6 args: fd, offset (high, low), len (high, low), advice */
11679         if (regpairs_aligned(cpu_env, num)) {
11680             /* offset is in (3,4), len in (5,6) and advice in 7 */
11681             arg2 = arg3;
11682             arg3 = arg4;
11683             arg4 = arg5;
11684             arg5 = arg6;
11685             arg6 = arg7;
11686         }
11687 #endif
11688         ret = -host_to_target_errno(posix_fadvise(arg1,
11689                                                   target_offset64(arg2, arg3),
11690                                                   target_offset64(arg4, arg5),
11691                                                   arg6));
11692         break;
11693 #endif
11694 
11695 #ifdef TARGET_NR_fadvise64
11696     case TARGET_NR_fadvise64:
11697         /* 5 args: fd, offset (high, low), len, advice */
11698         if (regpairs_aligned(cpu_env, num)) {
11699             /* offset is in (3,4), len in 5 and advice in 6 */
11700             arg2 = arg3;
11701             arg3 = arg4;
11702             arg4 = arg5;
11703             arg5 = arg6;
11704         }
11705         ret = -host_to_target_errno(posix_fadvise(arg1,
11706                                                   target_offset64(arg2, arg3),
11707                                                   arg4, arg5));
11708         break;
11709 #endif
11710 
11711 #else /* not a 32-bit ABI */
11712 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11713 #ifdef TARGET_NR_fadvise64_64
11714     case TARGET_NR_fadvise64_64:
11715 #endif
11716 #ifdef TARGET_NR_fadvise64
11717     case TARGET_NR_fadvise64:
11718 #endif
11719 #ifdef TARGET_S390X
11720         switch (arg4) {
11721         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11722         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11723         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11724         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11725         default: break;
11726         }
11727 #endif
11728         ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11729         break;
11730 #endif
11731 #endif /* end of 64-bit ABI fadvise handling */
11732 
11733 #ifdef TARGET_NR_madvise
11734     case TARGET_NR_madvise:
11735         /* A straight passthrough may not be safe because qemu sometimes
11736            turns private file-backed mappings into anonymous mappings.
11737            This will break MADV_DONTNEED.
11738            This is a hint, so ignoring and returning success is ok.  */
11739         ret = get_errno(0);
11740         break;
11741 #endif
11742 #if TARGET_ABI_BITS == 32
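          /* fcntl64 (32-bit ABIs only): the F_*LK64 commands carry a struct
           * flock64 that must be converted between guest and host layouts;
           * ARM OABI lays that structure out differently from EABI, hence the
           * alternative copy helpers chosen for non-EABI ARM below.
           */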
11743     case TARGET_NR_fcntl64:
11744     {
11745         int cmd;
11746         struct flock64 fl;
11747         from_flock64_fn *copyfrom = copy_from_user_flock64;
11748         to_flock64_fn *copyto = copy_to_user_flock64;
11749 
11750 #ifdef TARGET_ARM
11751         if (!((CPUARMState *)cpu_env)->eabi) {
11752             copyfrom = copy_from_user_oabi_flock64;
11753             copyto = copy_to_user_oabi_flock64;
11754         }
11755 #endif
11756 
11757         cmd = target_to_host_fcntl_cmd(arg2);
11758         if (cmd == -TARGET_EINVAL) {
11759             ret = cmd;
11760             break;
11761         }
11762 
11763         switch(arg2) {
11764         case TARGET_F_GETLK64:
11765             ret = copyfrom(&fl, arg3);
11766             if (ret) {
11767                 break;
11768             }
11769             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11770             if (ret == 0) {
11771                 ret = copyto(arg3, &fl);
11772             }
11773             break;
11774 
11775         case TARGET_F_SETLK64:
11776         case TARGET_F_SETLKW64:
11777             ret = copyfrom(&fl, arg3);
11778             if (ret) {
11779                 break;
11780             }
11781             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11782             break;
11783         default:
11784             ret = do_fcntl(arg1, arg2, arg3);
11785             break;
11786         }
11787         break;
11788     }
11789 #endif
11790 #ifdef TARGET_NR_cacheflush
11791     case TARGET_NR_cacheflush:
11792         /* self-modifying code is handled automatically, so nothing needed */
11793         ret = 0;
11794         break;
11795 #endif
11796 #ifdef TARGET_NR_security
11797     case TARGET_NR_security:
11798         goto unimplemented;
11799 #endif
11800 #ifdef TARGET_NR_getpagesize
11801     case TARGET_NR_getpagesize:
11802         ret = TARGET_PAGE_SIZE;
11803         break;
11804 #endif
11805     case TARGET_NR_gettid:
11806         ret = get_errno(gettid());
11807         break;
11808 #ifdef TARGET_NR_readahead
11809     case TARGET_NR_readahead:
11810 #if TARGET_ABI_BITS == 32
11811         if (regpairs_aligned(cpu_env, num)) {
11812             arg2 = arg3;
11813             arg3 = arg4;
11814             arg4 = arg5;
11815         }
11816         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11817 #else
11818         ret = get_errno(readahead(arg1, arg2, arg3));
11819 #endif
11820         break;
11821 #endif
11822 #ifdef CONFIG_ATTR
11823 #ifdef TARGET_NR_setxattr
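          /* Extended attribute syscalls: a zero value/list buffer pointer from
           * the guest is legitimate (it asks the kernel for the size that
           * would be needed), so the buffer is only locked when one was
           * supplied.
           */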
11824     case TARGET_NR_listxattr:
11825     case TARGET_NR_llistxattr:
11826     {
11827         void *p, *b = 0;
11828         if (arg2) {
11829             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11830             if (!b) {
11831                 ret = -TARGET_EFAULT;
11832                 break;
11833             }
11834         }
11835         p = lock_user_string(arg1);
11836         if (p) {
11837             if (num == TARGET_NR_listxattr) {
11838                 ret = get_errno(listxattr(p, b, arg3));
11839             } else {
11840                 ret = get_errno(llistxattr(p, b, arg3));
11841             }
11842         } else {
11843             ret = -TARGET_EFAULT;
11844         }
11845         unlock_user(p, arg1, 0);
11846         unlock_user(b, arg2, arg3);
11847         break;
11848     }
11849     case TARGET_NR_flistxattr:
11850     {
11851         void *b = 0;
11852         if (arg2) {
11853             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11854             if (!b) {
11855                 ret = -TARGET_EFAULT;
11856                 break;
11857             }
11858         }
11859         ret = get_errno(flistxattr(arg1, b, arg3));
11860         unlock_user(b, arg2, arg3);
11861         break;
11862     }
11863     case TARGET_NR_setxattr:
11864     case TARGET_NR_lsetxattr:
11865         {
11866             void *p, *n, *v = 0;
11867             if (arg3) {
11868                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11869                 if (!v) {
11870                     ret = -TARGET_EFAULT;
11871                     break;
11872                 }
11873             }
11874             p = lock_user_string(arg1);
11875             n = lock_user_string(arg2);
11876             if (p && n) {
11877                 if (num == TARGET_NR_setxattr) {
11878             /* Disable seccomp to prevent the target from disabling
11879              * syscalls we need. */
11880                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11881                 }
11882             } else {
11883                 ret = -TARGET_EFAULT;
11884             }
11885             unlock_user(p, arg1, 0);
11886             unlock_user(n, arg2, 0);
11887             unlock_user(v, arg3, 0);
11888         }
11889         break;
11890     case TARGET_NR_fsetxattr:
11891         {
11892             void *n, *v = 0;
11893             if (arg3) {
11894                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11895                 if (!v) {
11896                     ret = -TARGET_EFAULT;
11897                     break;
11898                 }
11899             }
11900             n = lock_user_string(arg2);
11901             if (n) {
11902                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11903             } else {
11904                 ret = -TARGET_EFAULT;
11905             }
11906             unlock_user(n, arg2, 0);
11907             unlock_user(v, arg3, 0);
11908         }
11909         break;
11910     case TARGET_NR_getxattr:
11911     case TARGET_NR_lgetxattr:
11912         {
11913             void *p, *n, *v = 0;
11914             if (arg3) {
11915                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11916                 if (!v) {
11917                     ret = -TARGET_EFAULT;
11918                     break;
11919                 }
11920             }
11921             p = lock_user_string(arg1);
11922             n = lock_user_string(arg2);
11923             if (p && n) {
11924                 if (num == TARGET_NR_getxattr) {
11925                     ret = get_errno(getxattr(p, n, v, arg4));
11926                 } else {
11927                     ret = get_errno(lgetxattr(p, n, v, arg4));
11928                 }
11929             } else {
11930                 ret = -TARGET_EFAULT;
11931             }
11932             unlock_user(p, arg1, 0);
11933             unlock_user(n, arg2, 0);
11934             unlock_user(v, arg3, arg4);
11935         }
11936         break;
11937     case TARGET_NR_fgetxattr:
11938         {
11939             void *n, *v = 0;
11940             if (arg3) {
11941                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11942                 if (!v) {
11943                     ret = -TARGET_EFAULT;
11944                     break;
11945                 }
11946             }
11947             n = lock_user_string(arg2);
11948             if (n) {
11949                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11950             } else {
11951                 ret = -TARGET_EFAULT;
11952             }
11953             unlock_user(n, arg2, 0);
11954             unlock_user(v, arg3, arg4);
11955         }
11956         break;
11957     case TARGET_NR_removexattr:
11958     case TARGET_NR_lremovexattr:
11959         {
11960             void *p, *n;
11961             p = lock_user_string(arg1);
11962             n = lock_user_string(arg2);
11963             if (p && n) {
11964                 if (num == TARGET_NR_removexattr) {
11965                     ret = get_errno(removexattr(p, n));
11966                 } else {
11967                     ret = get_errno(lremovexattr(p, n));
11968                 }
11969             } else {
11970                 ret = -TARGET_EFAULT;
11971             }
11972             unlock_user(p, arg1, 0);
11973             unlock_user(n, arg2, 0);
11974         }
11975         break;
11976     case TARGET_NR_fremovexattr:
11977         {
11978             void *n;
11979             n = lock_user_string(arg2);
11980             if (n) {
11981                 ret = get_errno(fremovexattr(arg1, n));
11982             } else {
11983                 ret = -TARGET_EFAULT;
11984             }
11985             unlock_user(n, arg2, 0);
11986         }
11987         break;
11988 #endif
11989 #endif /* CONFIG_ATTR */
11990 #ifdef TARGET_NR_set_thread_area
11991     case TARGET_NR_set_thread_area:
11992 #if defined(TARGET_MIPS)
11993       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11994       ret = 0;
11995       break;
11996 #elif defined(TARGET_CRIS)
11997       if (arg1 & 0xff)
11998           ret = -TARGET_EINVAL;
11999       else {
12000           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12001           ret = 0;
12002       }
12003       break;
12004 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12005       ret = do_set_thread_area(cpu_env, arg1);
12006       break;
12007 #elif defined(TARGET_M68K)
12008       {
12009           TaskState *ts = cpu->opaque;
12010           ts->tp_value = arg1;
12011           ret = 0;
12012           break;
12013       }
12014 #else
12015       goto unimplemented_nowarn;
12016 #endif
12017 #endif
12018 #ifdef TARGET_NR_get_thread_area
12019     case TARGET_NR_get_thread_area:
12020 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12021         ret = do_get_thread_area(cpu_env, arg1);
12022         break;
12023 #elif defined(TARGET_M68K)
12024         {
12025             TaskState *ts = cpu->opaque;
12026             ret = ts->tp_value;
12027             break;
12028         }
12029 #else
12030         goto unimplemented_nowarn;
12031 #endif
12032 #endif
12033 #ifdef TARGET_NR_getdomainname
12034     case TARGET_NR_getdomainname:
12035         goto unimplemented_nowarn;
12036 #endif
12037 
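          /* The clock_* syscalls below only need the timespec converted
           * between target and host layouts; the clock id argument is passed
           * through unchanged.
           */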
12038 #ifdef TARGET_NR_clock_settime
12039     case TARGET_NR_clock_settime:
12040     {
12041         struct timespec ts;
12042 
12043         ret = target_to_host_timespec(&ts, arg2);
12044         if (!is_error(ret)) {
12045             ret = get_errno(clock_settime(arg1, &ts));
12046         }
12047         break;
12048     }
12049 #endif
12050 #ifdef TARGET_NR_clock_gettime
12051     case TARGET_NR_clock_gettime:
12052     {
12053         struct timespec ts;
12054         ret = get_errno(clock_gettime(arg1, &ts));
12055         if (!is_error(ret)) {
12056             ret = host_to_target_timespec(arg2, &ts);
12057         }
12058         break;
12059     }
12060 #endif
12061 #ifdef TARGET_NR_clock_getres
12062     case TARGET_NR_clock_getres:
12063     {
12064         struct timespec ts;
12065         ret = get_errno(clock_getres(arg1, &ts));
12066         if (!is_error(ret)) {
12067             host_to_target_timespec(arg2, &ts);
12068         }
12069         break;
12070     }
12071 #endif
12072 #ifdef TARGET_NR_clock_nanosleep
12073     case TARGET_NR_clock_nanosleep:
12074     {
12075         struct timespec ts;
12076         target_to_host_timespec(&ts, arg3);
12077         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12078                                              &ts, arg4 ? &ts : NULL));
12079         if (arg4)
12080             host_to_target_timespec(arg4, &ts);
12081 
12082 #if defined(TARGET_PPC)
12083         /* clock_nanosleep is odd in that it returns positive errno values.
12084          * On PPC, CR0 bit 3 should be set in such a situation. */
12085         if (ret && ret != -TARGET_ERESTARTSYS) {
12086             ((CPUPPCState *)cpu_env)->crf[0] |= 1;
12087         }
12088 #endif
12089         break;
12090     }
12091 #endif
12092 
12093 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12094     case TARGET_NR_set_tid_address:
12095         ret = get_errno(set_tid_address((int *)g2h(arg1)));
12096         break;
12097 #endif
12098 
12099     case TARGET_NR_tkill:
12100         ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12101         break;
12102 
12103     case TARGET_NR_tgkill:
12104         ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
12105                         target_to_host_signal(arg3)));
12106         break;
12107 
12108 #ifdef TARGET_NR_set_robust_list
12109     case TARGET_NR_set_robust_list:
12110     case TARGET_NR_get_robust_list:
12111         /* The ABI for supporting robust futexes has userspace pass
12112          * the kernel a pointer to a linked list which is updated by
12113          * userspace after the syscall; the list is walked by the kernel
12114          * when the thread exits. Since the linked list in QEMU guest
12115          * memory isn't a valid linked list for the host and we have
12116          * no way to reliably intercept the thread-death event, we can't
12117          * support these. Silently return ENOSYS so that guest userspace
12118          * falls back to a non-robust futex implementation (which should
12119          * be OK except in the corner case of the guest crashing while
12120          * holding a mutex that is shared with another process via
12121          * shared memory).
12122          */
12123         goto unimplemented_nowarn;
12124 #endif
12125 
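          /* utimensat: a NULL times pointer means "set to the current time",
           * and a NULL pathname is passed through as-is (the raw syscall then
           * applies the times to the dirfd itself), so both are only converted
           * or locked when the guest actually supplied them.
           */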
12126 #if defined(TARGET_NR_utimensat)
12127     case TARGET_NR_utimensat:
12128         {
12129             struct timespec *tsp, ts[2];
12130             if (!arg3) {
12131                 tsp = NULL;
12132             } else {
12133                 target_to_host_timespec(ts, arg3);
12134                 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
12135                 tsp = ts;
12136             }
12137             if (!arg2)
12138                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12139             else {
12140                 if (!(p = lock_user_string(arg2))) {
12141                     ret = -TARGET_EFAULT;
12142                     goto fail;
12143                 }
12144                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12145                 unlock_user(p, arg2, 0);
12146             }
12147         }
12148         break;
12149 #endif
12150     case TARGET_NR_futex:
12151         ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
12152         break;
12153 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12154     case TARGET_NR_inotify_init:
12155         ret = get_errno(sys_inotify_init());
12156         if (ret >= 0) {
12157             fd_trans_register(ret, &target_inotify_trans);
12158         }
12159         break;
12160 #endif
12161 #ifdef CONFIG_INOTIFY1
12162 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12163     case TARGET_NR_inotify_init1:
12164         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12165                                           fcntl_flags_tbl)));
12166         if (ret >= 0) {
12167             fd_trans_register(ret, &target_inotify_trans);
12168         }
12169         break;
12170 #endif
12171 #endif
12172 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12173     case TARGET_NR_inotify_add_watch:
12174         p = lock_user_string(arg2);
12175         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12176         unlock_user(p, arg2, 0);
12177         break;
12178 #endif
12179 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12180     case TARGET_NR_inotify_rm_watch:
12181         ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
12182         break;
12183 #endif
12184 
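          /* POSIX message queue syscalls: struct mq_attr is converted with the
           * copy_{from,to}_user_mq_attr() helpers, and the timed send/receive
           * variants convert the timespec both ways around the safe_mq_*
           * wrappers so that guest signals are handled safely.
           */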
12185 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12186     case TARGET_NR_mq_open:
12187         {
12188             struct mq_attr posix_mq_attr;
12189             struct mq_attr *pposix_mq_attr;
12190             int host_flags;
12191 
12192             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12193             pposix_mq_attr = NULL;
12194             if (arg4) {
12195                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12196                     goto efault;
12197                 }
12198                 pposix_mq_attr = &posix_mq_attr;
12199             }
12200             p = lock_user_string(arg1 - 1);
12201             if (!p) {
12202                 goto efault;
12203             }
12204             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12205             unlock_user (p, arg1, 0);
12206         }
12207         break;
12208 
12209     case TARGET_NR_mq_unlink:
12210         p = lock_user_string(arg1 - 1);
12211         if (!p) {
12212             ret = -TARGET_EFAULT;
12213             break;
12214         }
12215         ret = get_errno(mq_unlink(p));
12216         unlock_user (p, arg1, 0);
12217         break;
12218 
12219     case TARGET_NR_mq_timedsend:
12220         {
12221             struct timespec ts;
12222 
12223             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12224             if (arg5 != 0) {
12225                 target_to_host_timespec(&ts, arg5);
12226                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12227                 host_to_target_timespec(arg5, &ts);
12228             } else {
12229                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12230             }
12231             unlock_user (p, arg2, arg3);
12232         }
12233         break;
12234 
12235     case TARGET_NR_mq_timedreceive:
12236         {
12237             struct timespec ts;
12238             unsigned int prio;
12239 
12240             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12241             if (arg5 != 0) {
12242                 target_to_host_timespec(&ts, arg5);
12243                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12244                                                      &prio, &ts));
12245                 host_to_target_timespec(arg5, &ts);
12246             } else {
12247                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12248                                                      &prio, NULL));
12249             }
12250             unlock_user (p, arg2, arg3);
12251             if (arg4 != 0)
12252                 put_user_u32(prio, arg4);
12253         }
12254         break;
12255 
12256     /* Not implemented for now... */
12257 /*     case TARGET_NR_mq_notify: */
12258 /*         break; */
12259 
12260     case TARGET_NR_mq_getsetattr:
12261         {
12262             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12263             ret = 0;
12264             if (arg2 != 0) {
12265                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12266                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12267                                            &posix_mq_attr_out));
12268             } else if (arg3 != 0) {
12269                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12270             }
12271             if (ret == 0 && arg3 != 0) {
12272                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12273             }
12274         }
12275         break;
12276 #endif
12277 
12278 #ifdef CONFIG_SPLICE
12279 #ifdef TARGET_NR_tee
12280     case TARGET_NR_tee:
12281         {
12282             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12283         }
12284         break;
12285 #endif
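          /* splice: the optional in/out offsets are read from guest memory as
           * 64-bit values, passed to the host by reference, and written back
           * afterwards.
           */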
12286 #ifdef TARGET_NR_splice
12287     case TARGET_NR_splice:
12288         {
12289             loff_t loff_in, loff_out;
12290             loff_t *ploff_in = NULL, *ploff_out = NULL;
12291             if (arg2) {
12292                 if (get_user_u64(loff_in, arg2)) {
12293                     goto efault;
12294                 }
12295                 ploff_in = &loff_in;
12296             }
12297             if (arg4) {
12298                 if (get_user_u64(loff_out, arg4)) {
12299                     goto efault;
12300                 }
12301                 ploff_out = &loff_out;
12302             }
12303             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12304             if (arg2) {
12305                 if (put_user_u64(loff_in, arg2)) {
12306                     goto efault;
12307                 }
12308             }
12309             if (arg4) {
12310                 if (put_user_u64(loff_out, arg4)) {
12311                     goto efault;
12312                 }
12313             }
12314         }
12315         break;
12316 #endif
12317 #ifdef TARGET_NR_vmsplice
12318     case TARGET_NR_vmsplice:
12319         {
12320             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12321             if (vec != NULL) {
12322                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12323                 unlock_iovec(vec, arg2, arg3, 0);
12324             } else {
12325                 ret = -host_to_target_errno(errno);
12326             }
12327         }
12328         break;
12329 #endif
12330 #endif /* CONFIG_SPLICE */
12331 #ifdef CONFIG_EVENTFD
12332 #if defined(TARGET_NR_eventfd)
12333     case TARGET_NR_eventfd:
12334         ret = get_errno(eventfd(arg1, 0));
12335         if (ret >= 0) {
12336             fd_trans_register(ret, &target_eventfd_trans);
12337         }
12338         break;
12339 #endif
12340 #if defined(TARGET_NR_eventfd2)
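          /* eventfd2: only the NONBLOCK and CLOEXEC flags differ between
           * target and host, so they are translated explicitly and the
           * remaining bits are passed through unchanged. */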
12341     case TARGET_NR_eventfd2:
12342     {
12343         int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12344         if (arg2 & TARGET_O_NONBLOCK) {
12345             host_flags |= O_NONBLOCK;
12346         }
12347         if (arg2 & TARGET_O_CLOEXEC) {
12348             host_flags |= O_CLOEXEC;
12349         }
12350         ret = get_errno(eventfd(arg1, host_flags));
12351         if (ret >= 0) {
12352             fd_trans_register(ret, &target_eventfd_trans);
12353         }
12354         break;
12355     }
12356 #endif
12357 #endif /* CONFIG_EVENTFD  */
12358 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
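          /* fallocate: on 32-bit ABIs the 64-bit offset and length each
           * arrive split across two registers and are reassembled with
           * target_offset64(); 64-bit ABIs pass them through directly. */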
12359     case TARGET_NR_fallocate:
12360 #if TARGET_ABI_BITS == 32
12361         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12362                                   target_offset64(arg5, arg6)));
12363 #else
12364         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12365 #endif
12366         break;
12367 #endif
12368 #if defined(CONFIG_SYNC_FILE_RANGE)
12369 #if defined(TARGET_NR_sync_file_range)
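          /* sync_file_range: 32-bit ABIs split the two 64-bit arguments
           * across register pairs.  The MIPS o32 convention additionally
           * aligns 64-bit arguments to even register pairs, which shifts
           * the layout along by one padding slot. */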
12370     case TARGET_NR_sync_file_range:
12371 #if TARGET_ABI_BITS == 32
12372 #if defined(TARGET_MIPS)
12373         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12374                                         target_offset64(arg5, arg6), arg7));
12375 #else
12376         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12377                                         target_offset64(arg4, arg5), arg6));
12378 #endif /* !TARGET_MIPS */
12379 #else
12380         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12381 #endif
12382         break;
12383 #endif
12384 #if defined(TARGET_NR_sync_file_range2)
12385     case TARGET_NR_sync_file_range2:
12386         /* This is like sync_file_range but the arguments are reordered */
12387 #if TARGET_ABI_BITS == 32
12388         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12389                                         target_offset64(arg5, arg6), arg2));
12390 #else
12391         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12392 #endif
12393         break;
12394 #endif
12395 #endif
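          /* signalfd and signalfd4 share do_signalfd4(), which converts the
           * guest signal mask and registers an fd translator so that the
           * signalfd_siginfo records read back from the descriptor are
           * presented in the target's layout. */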
12396 #if defined(TARGET_NR_signalfd4)
12397     case TARGET_NR_signalfd4:
12398         ret = do_signalfd4(arg1, arg2, arg4);
12399         break;
12400 #endif
12401 #if defined(TARGET_NR_signalfd)
12402     case TARGET_NR_signalfd:
12403         ret = do_signalfd4(arg1, arg2, 0);
12404         break;
12405 #endif
12406 #if defined(CONFIG_EPOLL)
12407 #if defined(TARGET_NR_epoll_create)
12408     case TARGET_NR_epoll_create:
12409         ret = get_errno(epoll_create(arg1));
12410         break;
12411 #endif
12412 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12413     case TARGET_NR_epoll_create1:
12414         ret = get_errno(epoll_create1(arg1));
12415         break;
12416 #endif
12417 #if defined(TARGET_NR_epoll_ctl)
12418     case TARGET_NR_epoll_ctl:
12419     {
12420         struct epoll_event ep;
12421         struct epoll_event *epp = NULL;
12422         if (arg4) {
12423             struct target_epoll_event *target_ep;
12424             if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12425                 goto efault;
12426             }
12427             ep.events = tswap32(target_ep->events);
12428             /* The epoll_data_t union is just opaque data to the kernel,
12429              * so we transfer all 64 bits across and need not worry what
12430              * actual data type it is.
12431              */
12432             ep.data.u64 = tswap64(target_ep->data.u64);
12433             unlock_user_struct(target_ep, arg4, 0);
12434             epp = &ep;
12435         }
12436         ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12437         break;
12438     }
12439 #endif
12440 
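          /* epoll_wait and epoll_pwait share one implementation: a bounded
           * array of host epoll_event structures is allocated, the work is
           * done by safe_epoll_pwait() (with a converted sigset for the
           * pwait variant), and any returned events are byte-swapped back
           * into the guest's buffer. */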
12441 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12442 #if defined(TARGET_NR_epoll_wait)
12443     case TARGET_NR_epoll_wait:
12444 #endif
12445 #if defined(TARGET_NR_epoll_pwait)
12446     case TARGET_NR_epoll_pwait:
12447 #endif
12448     {
12449         struct target_epoll_event *target_ep;
12450         struct epoll_event *ep;
12451         int epfd = arg1;
12452         int maxevents = arg3;
12453         int timeout = arg4;
12454 
12455         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12456             ret = -TARGET_EINVAL;
12457             break;
12458         }
12459 
12460         target_ep = lock_user(VERIFY_WRITE, arg2,
12461                               maxevents * sizeof(struct target_epoll_event), 1);
12462         if (!target_ep) {
12463             goto efault;
12464         }
12465 
12466         ep = g_try_new(struct epoll_event, maxevents);
12467         if (!ep) {
12468             unlock_user(target_ep, arg2, 0);
12469             ret = -TARGET_ENOMEM;
12470             break;
12471         }
12472 
12473         switch (num) {
12474 #if defined(TARGET_NR_epoll_pwait)
12475         case TARGET_NR_epoll_pwait:
12476         {
12477             target_sigset_t *target_set;
12478             sigset_t _set, *set = &_set;
12479 
12480             if (arg5) {
12481                 if (arg6 != sizeof(target_sigset_t)) {
12482                     ret = -TARGET_EINVAL;
12483                     break;
12484                 }
12485 
12486                 target_set = lock_user(VERIFY_READ, arg5,
12487                                        sizeof(target_sigset_t), 1);
12488                 if (!target_set) {
12489                     ret = -TARGET_EFAULT;
12490                     break;
12491                 }
12492                 target_to_host_sigset(set, target_set);
12493                 unlock_user(target_set, arg5, 0);
12494             } else {
12495                 set = NULL;
12496             }
12497 
12498             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12499                                              set, SIGSET_T_SIZE));
12500             break;
12501         }
12502 #endif
12503 #if defined(TARGET_NR_epoll_wait)
12504         case TARGET_NR_epoll_wait:
12505             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12506                                              NULL, 0));
12507             break;
12508 #endif
12509         default:
12510             ret = -TARGET_ENOSYS;
12511         }
12512         if (!is_error(ret)) {
12513             int i;
12514             for (i = 0; i < ret; i++) {
12515                 target_ep[i].events = tswap32(ep[i].events);
12516                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12517             }
12518             unlock_user(target_ep, arg2,
12519                         ret * sizeof(struct target_epoll_event));
12520         } else {
12521             unlock_user(target_ep, arg2, 0);
12522         }
12523         g_free(ep);
12524         break;
12525     }
12526 #endif
12527 #endif
12528 #ifdef TARGET_NR_prlimit64
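          /* prlimit64: the rlimit64 fields are fixed-width 64-bit values,
           * so converting them only needs byte-swapping; the resource
           * number still goes through target_to_host_resource(). */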
12529     case TARGET_NR_prlimit64:
12530     {
12531         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12532         struct target_rlimit64 *target_rnew, *target_rold;
12533         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12534         int resource = target_to_host_resource(arg2);
12535         if (arg3) {
12536             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12537                 goto efault;
12538             }
12539             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12540             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12541             unlock_user_struct(target_rnew, arg3, 0);
12542             rnewp = &rnew;
12543         }
12544 
12545         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12546         if (!is_error(ret) && arg4) {
12547             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12548                 goto efault;
12549             }
12550             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12551             target_rold->rlim_max = tswap64(rold.rlim_max);
12552             unlock_user_struct(target_rold, arg4, 1);
12553         }
12554         break;
12555     }
12556 #endif
12557 #ifdef TARGET_NR_gethostname
12558     case TARGET_NR_gethostname:
12559     {
12560         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12561         if (name) {
12562             ret = get_errno(gethostname(name, arg2));
12563             unlock_user(name, arg1, arg2);
12564         } else {
12565             ret = -TARGET_EFAULT;
12566         }
12567         break;
12568     }
12569 #endif
12570 #ifdef TARGET_NR_atomic_cmpxchg_32
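          /* atomic_cmpxchg_32 (m68k): compare the 32-bit word at arg6 with
           * arg2 and, if they match, store arg1 there; the old value is
           * returned.  As the comment below notes, this is not yet done
           * under start_exclusive() and so is not truly atomic. */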
12571     case TARGET_NR_atomic_cmpxchg_32:
12572     {
12573         /* should use start_exclusive from main.c */
12574         abi_ulong mem_value;
12575         if (get_user_u32(mem_value, arg6)) {
12576             target_siginfo_t info;
12577             info.si_signo = SIGSEGV;
12578             info.si_errno = 0;
12579             info.si_code = TARGET_SEGV_MAPERR;
12580             info._sifields._sigfault._addr = arg6;
12581             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12582                          QEMU_SI_FAULT, &info);
12583             ret = 0xdeadbeef;
                  /* don't fall through and use the uninitialized value */
                  break;
12585         }
12586         if (mem_value == arg2) {
12587             put_user_u32(arg1, arg6);
              }
12588         ret = mem_value;
12589         break;
12590     }
12591 #endif
12592 #ifdef TARGET_NR_atomic_barrier
12593     case TARGET_NR_atomic_barrier:
12594     {
12595         /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12596         ret = 0;
12597         break;
12598     }
12599 #endif
12600 
12601 #ifdef TARGET_NR_timer_create
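          /* POSIX timers: host timer_t handles live in the g_posix_timers
           * table.  The id handed back to the guest is the table index
           * tagged with TIMER_MAGIC, which get_timer_id() validates and
           * strips in the other timer_* handlers below. */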
12602     case TARGET_NR_timer_create:
12603     {
12604         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12605 
12606         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12607 
12608         int clkid = arg1;
12609         int timer_index = next_free_host_timer();
12610 
12611         if (timer_index < 0) {
12612             ret = -TARGET_EAGAIN;
12613         } else {
12614             timer_t *phtimer = g_posix_timers + timer_index;
12615 
12616             if (arg2) {
12617                 phost_sevp = &host_sevp;
12618                 ret = target_to_host_sigevent(phost_sevp, arg2);
12619                 if (ret != 0) {
12620                     break;
12621                 }
12622             }
12623 
12624             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12625             if (ret) {
12626                 phtimer = NULL;
12627             } else {
12628                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12629                     goto efault;
12630                 }
12631             }
12632         }
12633         break;
12634     }
12635 #endif
12636 
12637 #ifdef TARGET_NR_timer_settime
12638     case TARGET_NR_timer_settime:
12639     {
12640         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12641          * struct itimerspec * old_value */
12642         target_timer_t timerid = get_timer_id(arg1);
12643 
12644         if (timerid < 0) {
12645             ret = timerid;
12646         } else if (arg3 == 0) {
12647             ret = -TARGET_EINVAL;
12648         } else {
12649             timer_t htimer = g_posix_timers[timerid];
12650             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12651 
12652             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12653                 goto efault;
12654             }
12655             ret = get_errno(
12656                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12657             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12658                 goto efault;
12659             }
12660         }
12661         break;
12662     }
12663 #endif
12664 
12665 #ifdef TARGET_NR_timer_gettime
12666     case TARGET_NR_timer_gettime:
12667     {
12668         /* args: timer_t timerid, struct itimerspec *curr_value */
12669         target_timer_t timerid = get_timer_id(arg1);
12670 
12671         if (timerid < 0) {
12672             ret = timerid;
12673         } else if (!arg2) {
12674             ret = -TARGET_EFAULT;
12675         } else {
12676             timer_t htimer = g_posix_timers[timerid];
12677             struct itimerspec hspec;
12678             ret = get_errno(timer_gettime(htimer, &hspec));
12679 
12680             if (host_to_target_itimerspec(arg2, &hspec)) {
12681                 ret = -TARGET_EFAULT;
12682             }
12683         }
12684         break;
12685     }
12686 #endif
12687 
12688 #ifdef TARGET_NR_timer_getoverrun
12689     case TARGET_NR_timer_getoverrun:
12690     {
12691         /* args: timer_t timerid */
12692         target_timer_t timerid = get_timer_id(arg1);
12693 
12694         if (timerid < 0) {
12695             ret = timerid;
12696         } else {
12697             timer_t htimer = g_posix_timers[timerid];
12698             ret = get_errno(timer_getoverrun(htimer));
12699         }
12701         break;
12702     }
12703 #endif
12704 
12705 #ifdef TARGET_NR_timer_delete
12706     case TARGET_NR_timer_delete:
12707     {
12708         /* args: timer_t timerid */
12709         target_timer_t timerid = get_timer_id(arg1);
12710 
12711         if (timerid < 0) {
12712             ret = timerid;
12713         } else {
12714             timer_t htimer = g_posix_timers[timerid];
12715             ret = get_errno(timer_delete(htimer));
12716             g_posix_timers[timerid] = 0;
12717         }
12718         break;
12719     }
12720 #endif
12721 
12722 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
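          /* timerfd: the creation flags are translated with the generic
           * fcntl flag table, and itimerspec values are converted with the
           * same target/host helpers used by the POSIX timer calls above. */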
12723     case TARGET_NR_timerfd_create:
12724         ret = get_errno(timerfd_create(arg1,
12725                 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12726         break;
12727 #endif
12728 
12729 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12730     case TARGET_NR_timerfd_gettime:
12731         {
12732             struct itimerspec its_curr;
12733 
12734             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12735 
12736             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12737                 goto efault;
12738             }
12739         }
12740         break;
12741 #endif
12742 
12743 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12744     case TARGET_NR_timerfd_settime:
12745         {
12746             struct itimerspec its_new, its_old, *p_new;
12747 
12748             if (arg3) {
12749                 if (target_to_host_itimerspec(&its_new, arg3)) {
12750                     goto efault;
12751                 }
12752                 p_new = &its_new;
12753             } else {
12754                 p_new = NULL;
12755             }
12756 
12757             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12758 
12759             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12760                 goto efault;
12761             }
12762         }
12763         break;
12764 #endif
12765 
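          /* ioprio_get/set, setns, unshare and kcmp take only integer
           * arguments, so they can be passed straight through to the host
           * syscall. */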
12766 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12767     case TARGET_NR_ioprio_get:
12768         ret = get_errno(ioprio_get(arg1, arg2));
12769         break;
12770 #endif
12771 
12772 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12773     case TARGET_NR_ioprio_set:
12774         ret = get_errno(ioprio_set(arg1, arg2, arg3));
12775         break;
12776 #endif
12777 
12778 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12779     case TARGET_NR_setns:
12780         ret = get_errno(setns(arg1, arg2));
12781         break;
12782 #endif
12783 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12784     case TARGET_NR_unshare:
12785         ret = get_errno(unshare(arg1));
12786         break;
12787 #endif
12788 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12789     case TARGET_NR_kcmp:
12790         ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12791         break;
12792 #endif
12793 #ifdef TARGET_NR_swapcontext
12794     case TARGET_NR_swapcontext:
12795         /* PowerPC specific.  */
12796         ret = do_swapcontext(cpu_env, arg1, arg2, arg3);
12797         break;
12798 #endif
12799 
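          /* Anything not handled above is logged through the LOG_UNIMP
           * channel and fails with ENOSYS; a few syscalls that guests probe
           * for routinely jump to unimplemented_nowarn instead to skip the
           * log message. */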
12800     default:
12801     unimplemented:
12802         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12803 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12804     unimplemented_nowarn:
12805 #endif
12806         ret = -TARGET_ENOSYS;
12807         break;
12808     }
12809 fail:
12810 #ifdef DEBUG
12811     gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12812 #endif
12813     if (do_strace) {
12814         print_syscall_ret(num, ret);
          }
12815     trace_guest_user_syscall_ret(cpu, num, ret);
12816     return ret;
12817 efault:
12818     ret = -TARGET_EFAULT;
12819     goto fail;
12820 }
12821