/*
 * QEMU linux-user/syscall.c (excerpt) — imported from mirror_qemu.git,
 * merge of remotes/pmaydell/tags/pull-target-arm-20180410.
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112
113 #include "qemu.h"
114
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any set bit outside this mask makes a fork-style clone unsupportable */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

/* Any set bit outside this mask makes a thread-style clone unsupportable */
#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
/* VFAT readdir ioctl numbers, copied from linux/msdos_fs.h (which
 * cannot be included directly alongside the headers above).
 */
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

/* Discard any libc-provided _syscallN macros; our own follow below. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
/* Generators that declare a static wrapper "name" invoking the raw host
 * syscall __NR_name with 0..6 arguments.  Each wrapper returns the raw
 * syscall() result: -1 with the *host* errno set on failure.
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}


#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
229
230
/* Aliases so the _syscall wrappers below can use "sys_*" names without
 * colliding with libc declarations of the same functions.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* 64-bit hosts with no _llseek: route it to plain lseek. */
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#if defined(TARGET_NR_getdents) && defined(__NR_getdents)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
/* getdents64 is needed both when the guest asks for it and as the
 * fallback when the host lacks getdents.
 */
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
315
/* Translation table for the open()/fcntl() O_* flag bits between target
 * and host values; terminated by the all-zero sentinel entry.
 */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
351
/* QEMU-local copy of the kernel's IFLA_BR_* bridge attribute numbering,
 * kept here so rtnetlink translation does not depend on the version of
 * the host's linux/if_link.h.  Values must match the kernel's exactly.
 */
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU___IFLA_BR_MAX,
};
398
/* QEMU-local copy of the kernel's top-level IFLA_* link attribute
 * numbering (see QEMU_IFLA_BR above for why a local copy is kept).
 */
enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU___IFLA_MAX
};
446
/* QEMU-local copy of the kernel's IFLA_BRPORT_* bridge-port attribute
 * numbering (see QEMU_IFLA_BR above for why a local copy is kept).
 */
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU___IFLA_BRPORT_MAX
};
477
/* QEMU-local copies of the kernel's IFLA_INFO_*, IFLA_INET_* and
 * IFLA_INET6_* nested attribute numberings (see QEMU_IFLA_BR above).
 */
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};

enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};

enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
506
/* Per-fd translation hooks: some fds (registered elsewhere in this file)
 * need their data or socket addresses converted between target and host
 * layouts as they pass through read/write/sendmsg-style syscalls.
 */
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;
    TargetFdDataFunc target_to_host_data;
    TargetFdAddrFunc target_to_host_addr;
} TargetFdTrans;

/* Table of registered translators, indexed by host fd; grown on demand. */
static TargetFdTrans **target_fd_trans;

/* Number of entries currently allocated in target_fd_trans. */
static unsigned int target_fd_max;
518
519 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
520 {
521 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
522 return target_fd_trans[fd]->target_to_host_data;
523 }
524 return NULL;
525 }
526
527 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
528 {
529 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
530 return target_fd_trans[fd]->host_to_target_data;
531 }
532 return NULL;
533 }
534
535 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
536 {
537 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
538 return target_fd_trans[fd]->target_to_host_addr;
539 }
540 return NULL;
541 }
542
/* Register (or replace) the translator for fd, growing the table in
 * slices of 64 entries and zeroing the newly-added tail.
 * NOTE(review): fd is assumed non-negative here; a negative fd would
 * pass the unsigned comparison and index out of bounds — confirm all
 * callers only pass valid fds.
 */
static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}
557
558 static void fd_trans_unregister(int fd)
559 {
560 if (fd >= 0 && fd < target_fd_max) {
561 target_fd_trans[fd] = NULL;
562 }
563 }
564
565 static void fd_trans_dup(int oldfd, int newfd)
566 {
567 fd_trans_unregister(newfd);
568 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
569 fd_trans_register(newfd, target_fd_trans[oldfd]);
570 }
571 }
572
/* getcwd() wrapper matching the kernel syscall convention: on success
 * return the path length *including* the trailing NUL; on failure
 * return -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1;
    }
    return strlen(cwd) + 1;
}
581
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel lacks utimensat: report ENOSYS via the usual -1/errno
 * convention so the caller translates it for the guest.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
596
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host kernel lacks renameat2: a zero-flags call degrades cleanly to
 * renameat(); any non-zero flags cannot be emulated, so fail ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
614
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers around the libc inotify functions, named sys_* so the
 * syscall dispatch code below can refer to them uniformly.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
651
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
/* Note: if the host has no prlimit64, __NR_sys_prlimit64 is -1 and the
 * raw syscall() will fail with ENOSYS at runtime.
 */
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
666
667
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of a free slot in g_posix_timers, or -1 if
 * all slots are busy.  The slot is marked in-use with a placeholder
 * non-zero value.
 * FIXME: Does finding the next free slot require a lock?
 */
static inline int next_free_host_timer(void)
{
    int slot;

    for (slot = 0; slot < ARRAY_SIZE(g_posix_timers); slot++) {
        if (g_posix_timers[slot] != 0) {
            continue;
        }
        g_posix_timers[slot] = (timer_t) 1;
        return slot;
    }
    return -1;
}
#endif
685
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
717
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init().
 * A zero entry in either table means "identity mapping" (see the
 * lookup helpers below).
 */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
724
725 /*
726 * This list is the union of errno values overridden in asm-<arch>/errno.h
727 * minus the errnos that are not actually generic to all archs.
728 */
729 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
730 [EAGAIN] = TARGET_EAGAIN,
731 [EIDRM] = TARGET_EIDRM,
732 [ECHRNG] = TARGET_ECHRNG,
733 [EL2NSYNC] = TARGET_EL2NSYNC,
734 [EL3HLT] = TARGET_EL3HLT,
735 [EL3RST] = TARGET_EL3RST,
736 [ELNRNG] = TARGET_ELNRNG,
737 [EUNATCH] = TARGET_EUNATCH,
738 [ENOCSI] = TARGET_ENOCSI,
739 [EL2HLT] = TARGET_EL2HLT,
740 [EDEADLK] = TARGET_EDEADLK,
741 [ENOLCK] = TARGET_ENOLCK,
742 [EBADE] = TARGET_EBADE,
743 [EBADR] = TARGET_EBADR,
744 [EXFULL] = TARGET_EXFULL,
745 [ENOANO] = TARGET_ENOANO,
746 [EBADRQC] = TARGET_EBADRQC,
747 [EBADSLT] = TARGET_EBADSLT,
748 [EBFONT] = TARGET_EBFONT,
749 [ENOSTR] = TARGET_ENOSTR,
750 [ENODATA] = TARGET_ENODATA,
751 [ETIME] = TARGET_ETIME,
752 [ENOSR] = TARGET_ENOSR,
753 [ENONET] = TARGET_ENONET,
754 [ENOPKG] = TARGET_ENOPKG,
755 [EREMOTE] = TARGET_EREMOTE,
756 [ENOLINK] = TARGET_ENOLINK,
757 [EADV] = TARGET_EADV,
758 [ESRMNT] = TARGET_ESRMNT,
759 [ECOMM] = TARGET_ECOMM,
760 [EPROTO] = TARGET_EPROTO,
761 [EDOTDOT] = TARGET_EDOTDOT,
762 [EMULTIHOP] = TARGET_EMULTIHOP,
763 [EBADMSG] = TARGET_EBADMSG,
764 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
765 [EOVERFLOW] = TARGET_EOVERFLOW,
766 [ENOTUNIQ] = TARGET_ENOTUNIQ,
767 [EBADFD] = TARGET_EBADFD,
768 [EREMCHG] = TARGET_EREMCHG,
769 [ELIBACC] = TARGET_ELIBACC,
770 [ELIBBAD] = TARGET_ELIBBAD,
771 [ELIBSCN] = TARGET_ELIBSCN,
772 [ELIBMAX] = TARGET_ELIBMAX,
773 [ELIBEXEC] = TARGET_ELIBEXEC,
774 [EILSEQ] = TARGET_EILSEQ,
775 [ENOSYS] = TARGET_ENOSYS,
776 [ELOOP] = TARGET_ELOOP,
777 [ERESTART] = TARGET_ERESTART,
778 [ESTRPIPE] = TARGET_ESTRPIPE,
779 [ENOTEMPTY] = TARGET_ENOTEMPTY,
780 [EUSERS] = TARGET_EUSERS,
781 [ENOTSOCK] = TARGET_ENOTSOCK,
782 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
783 [EMSGSIZE] = TARGET_EMSGSIZE,
784 [EPROTOTYPE] = TARGET_EPROTOTYPE,
785 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
786 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
787 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
788 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
789 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
790 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
791 [EADDRINUSE] = TARGET_EADDRINUSE,
792 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
793 [ENETDOWN] = TARGET_ENETDOWN,
794 [ENETUNREACH] = TARGET_ENETUNREACH,
795 [ENETRESET] = TARGET_ENETRESET,
796 [ECONNABORTED] = TARGET_ECONNABORTED,
797 [ECONNRESET] = TARGET_ECONNRESET,
798 [ENOBUFS] = TARGET_ENOBUFS,
799 [EISCONN] = TARGET_EISCONN,
800 [ENOTCONN] = TARGET_ENOTCONN,
801 [EUCLEAN] = TARGET_EUCLEAN,
802 [ENOTNAM] = TARGET_ENOTNAM,
803 [ENAVAIL] = TARGET_ENAVAIL,
804 [EISNAM] = TARGET_EISNAM,
805 [EREMOTEIO] = TARGET_EREMOTEIO,
806 [EDQUOT] = TARGET_EDQUOT,
807 [ESHUTDOWN] = TARGET_ESHUTDOWN,
808 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
809 [ETIMEDOUT] = TARGET_ETIMEDOUT,
810 [ECONNREFUSED] = TARGET_ECONNREFUSED,
811 [EHOSTDOWN] = TARGET_EHOSTDOWN,
812 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
813 [EALREADY] = TARGET_EALREADY,
814 [EINPROGRESS] = TARGET_EINPROGRESS,
815 [ESTALE] = TARGET_ESTALE,
816 [ECANCELED] = TARGET_ECANCELED,
817 [ENOMEDIUM] = TARGET_ENOMEDIUM,
818 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
819 #ifdef ENOKEY
820 [ENOKEY] = TARGET_ENOKEY,
821 #endif
822 #ifdef EKEYEXPIRED
823 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
824 #endif
825 #ifdef EKEYREVOKED
826 [EKEYREVOKED] = TARGET_EKEYREVOKED,
827 #endif
828 #ifdef EKEYREJECTED
829 [EKEYREJECTED] = TARGET_EKEYREJECTED,
830 #endif
831 #ifdef EOWNERDEAD
832 [EOWNERDEAD] = TARGET_EOWNERDEAD,
833 #endif
834 #ifdef ENOTRECOVERABLE
835 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
836 #endif
837 #ifdef ENOMSG
838 [ENOMSG] = TARGET_ENOMSG,
839 #endif
840 #ifdef ERKFILL
841 [ERFKILL] = TARGET_ERFKILL,
842 #endif
843 #ifdef EHWPOISON
844 [EHWPOISON] = TARGET_EHWPOISON,
845 #endif
846 };
847
848 static inline int host_to_target_errno(int err)
849 {
850 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
851 host_to_target_errno_table[err]) {
852 return host_to_target_errno_table[err];
853 }
854 return err;
855 }
856
857 static inline int target_to_host_errno(int err)
858 {
859 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
860 target_to_host_errno_table[err]) {
861 return target_to_host_errno_table[err];
862 }
863 return err;
864 }
865
866 static inline abi_long get_errno(abi_long ret)
867 {
868 if (ret == -1)
869 return -host_to_target_errno(errno);
870 else
871 return ret;
872 }
873
/* True if ret encodes an error in the kernel convention: values in
 * [-4095, -1] wrap to the top of the unsigned range when cast.
 */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
878
879 const char *target_strerror(int err)
880 {
881 if (err == TARGET_ERESTARTSYS) {
882 return "To be restarted";
883 }
884 if (err == TARGET_QEMU_ESIGRETURN) {
885 return "Successful exit from sigreturn";
886 }
887
888 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
889 return NULL;
890 }
891 return strerror(target_to_host_errno(err));
892 }
893
/* Generators that declare a static "safe_name" wrapper invoking the
 * syscall through safe_syscall() (defined elsewhere), which — unlike a
 * raw syscall() — is written so that blocking calls can be safely
 * interrupted and restarted around guest signal delivery.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
940
/* safe_* wrappers for the blocking syscalls used by the dispatcher:
 * file I/O, process waiting, select/poll, futex, signals, sockets and
 * sleeping.
 */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
989 #ifdef __NR_msgsnd
990 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
991 int, flags)
992 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
993 long, msgtype, int, flags)
994 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
995 unsigned, nsops, const struct timespec *, timeout)
996 #else
997 /* This host kernel architecture uses a single ipc syscall; fake up
998 * wrappers for the sub-operations to hide this implementation detail.
999 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1000 * for the call parameter because some structs in there conflict with the
1001 * sys/ipc.h ones. So we just define them here, and rely on them being
1002 * the same for all host architectures.
1003 */
1004 #define Q_SEMTIMEDOP 4
1005 #define Q_MSGSND 11
1006 #define Q_MSGRCV 12
1007 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1008
1009 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
1010 void *, ptr, long, fifth)
1011 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
1012 {
1013 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
1014 }
1015 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
1016 {
1017 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
1018 }
1019 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
1020 const struct timespec *timeout)
1021 {
1022 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
1023 (long)timeout);
1024 }
1025 #endif
1026 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1027 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
1028 size_t, len, unsigned, prio, const struct timespec *, timeout)
1029 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
1030 size_t, len, unsigned *, prio, const struct timespec *, timeout)
1031 #endif
1032 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1033 * "third argument might be integer or pointer or not present" behaviour of
1034 * the libc function.
1035 */
1036 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1037 /* Similarly for fcntl. Note that callers must always:
1038 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1039 * use the flock64 struct rather than unsuffixed flock
1040 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1041 */
1042 #ifdef __NR_fcntl64
1043 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1044 #else
1045 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1046 #endif
1047
1048 static inline int host_to_target_sock_type(int host_type)
1049 {
1050 int target_type;
1051
1052 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1053 case SOCK_DGRAM:
1054 target_type = TARGET_SOCK_DGRAM;
1055 break;
1056 case SOCK_STREAM:
1057 target_type = TARGET_SOCK_STREAM;
1058 break;
1059 default:
1060 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1061 break;
1062 }
1063
1064 #if defined(SOCK_CLOEXEC)
1065 if (host_type & SOCK_CLOEXEC) {
1066 target_type |= TARGET_SOCK_CLOEXEC;
1067 }
1068 #endif
1069
1070 #if defined(SOCK_NONBLOCK)
1071 if (host_type & SOCK_NONBLOCK) {
1072 target_type |= TARGET_SOCK_NONBLOCK;
1073 }
1074 #endif
1075
1076 return target_type;
1077 }
1078
1079 static abi_ulong target_brk;
1080 static abi_ulong target_original_brk;
1081 static abi_ulong brk_page;
1082
1083 void target_set_brk(abi_ulong new_brk)
1084 {
1085 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1086 brk_page = HOST_PAGE_ALIGN(target_brk);
1087 }
1088
/* Debug tracing for do_brk(); the macro is empty by default so the calls
 * compile away.  Swap in the fprintf variant to log each brk decision. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Never shrink below the initial break; report the unchanged value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
1169
1170 static inline abi_long copy_from_user_fdset(fd_set *fds,
1171 abi_ulong target_fds_addr,
1172 int n)
1173 {
1174 int i, nw, j, k;
1175 abi_ulong b, *target_fds;
1176
1177 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1178 if (!(target_fds = lock_user(VERIFY_READ,
1179 target_fds_addr,
1180 sizeof(abi_ulong) * nw,
1181 1)))
1182 return -TARGET_EFAULT;
1183
1184 FD_ZERO(fds);
1185 k = 0;
1186 for (i = 0; i < nw; i++) {
1187 /* grab the abi_ulong */
1188 __get_user(b, &target_fds[i]);
1189 for (j = 0; j < TARGET_ABI_BITS; j++) {
1190 /* check the bit inside the abi_ulong */
1191 if ((b >> j) & 1)
1192 FD_SET(k, fds);
1193 k++;
1194 }
1195 }
1196
1197 unlock_user(target_fds, target_fds_addr, 0);
1198
1199 return 0;
1200 }
1201
1202 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1203 abi_ulong target_fds_addr,
1204 int n)
1205 {
1206 if (target_fds_addr) {
1207 if (copy_from_user_fdset(fds, target_fds_addr, n))
1208 return -TARGET_EFAULT;
1209 *fds_ptr = fds;
1210 } else {
1211 *fds_ptr = NULL;
1212 }
1213 return 0;
1214 }
1215
1216 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1217 const fd_set *fds,
1218 int n)
1219 {
1220 int i, nw, j, k;
1221 abi_long v;
1222 abi_ulong *target_fds;
1223
1224 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1225 if (!(target_fds = lock_user(VERIFY_WRITE,
1226 target_fds_addr,
1227 sizeof(abi_ulong) * nw,
1228 0)))
1229 return -TARGET_EFAULT;
1230
1231 k = 0;
1232 for (i = 0; i < nw; i++) {
1233 v = 0;
1234 for (j = 0; j < TARGET_ABI_BITS; j++) {
1235 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1236 k++;
1237 }
1238 __put_user(v, &target_fds[i]);
1239 }
1240
1241 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1242
1243 return 0;
1244 }
1245
/* Host kernel clock tick rate used for clock_t rescaling: 1024 on Alpha
 * hosts, 100 everywhere else. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ.  The multiply is
 * widened to int64_t so it cannot overflow before the division. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1260
/* Copy a host struct rusage out to guest memory, byte-swapping every
 * field for the target's endianness.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1290
1291 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1292 {
1293 abi_ulong target_rlim_swap;
1294 rlim_t result;
1295
1296 target_rlim_swap = tswapal(target_rlim);
1297 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1298 return RLIM_INFINITY;
1299
1300 result = target_rlim_swap;
1301 if (target_rlim_swap != (rlim_t)result)
1302 return RLIM_INFINITY;
1303
1304 return result;
1305 }
1306
1307 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1308 {
1309 abi_ulong target_rlim_swap;
1310 abi_ulong result;
1311
1312 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1313 target_rlim_swap = TARGET_RLIM_INFINITY;
1314 else
1315 target_rlim_swap = rlim;
1316 result = tswapal(target_rlim_swap);
1317
1318 return result;
1319 }
1320
/* Map a guest RLIMIT_* resource number onto the host's.  Unknown codes
 * are passed through unchanged so the host getrlimit/setrlimit can
 * reject them itself. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1358
1359 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1360 abi_ulong target_tv_addr)
1361 {
1362 struct target_timeval *target_tv;
1363
1364 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1365 return -TARGET_EFAULT;
1366
1367 __get_user(tv->tv_sec, &target_tv->tv_sec);
1368 __get_user(tv->tv_usec, &target_tv->tv_usec);
1369
1370 unlock_user_struct(target_tv, target_tv_addr, 0);
1371
1372 return 0;
1373 }
1374
1375 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1376 const struct timeval *tv)
1377 {
1378 struct target_timeval *target_tv;
1379
1380 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1381 return -TARGET_EFAULT;
1382
1383 __put_user(tv->tv_sec, &target_tv->tv_sec);
1384 __put_user(tv->tv_usec, &target_tv->tv_usec);
1385
1386 unlock_user_struct(target_tv, target_tv_addr, 1);
1387
1388 return 0;
1389 }
1390
1391 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1392 abi_ulong target_tz_addr)
1393 {
1394 struct target_timezone *target_tz;
1395
1396 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1397 return -TARGET_EFAULT;
1398 }
1399
1400 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1401 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1402
1403 unlock_user_struct(target_tz, target_tz_addr, 0);
1404
1405 return 0;
1406 }
1407
1408 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1409 #include <mqueue.h>
1410
1411 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1412 abi_ulong target_mq_attr_addr)
1413 {
1414 struct target_mq_attr *target_mq_attr;
1415
1416 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1417 target_mq_attr_addr, 1))
1418 return -TARGET_EFAULT;
1419
1420 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1421 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1422 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1423 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1424
1425 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1426
1427 return 0;
1428 }
1429
1430 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1431 const struct mq_attr *attr)
1432 {
1433 struct target_mq_attr *target_mq_attr;
1434
1435 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1436 target_mq_attr_addr, 0))
1437 return -TARGET_EFAULT;
1438
1439 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1440 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1441 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1442 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1443
1444 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1445
1446 return 0;
1447 }
1448 #endif
1449
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements the guest's select() on top of the host pselect6: the guest
 * timeval (if any) is converted to a timespec for the host call, and the
 * (possibly updated) timespec is converted back afterwards.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull in each guest fd_set; a zero guest address yields a NULL
     * pointer for the host call. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    /* On success, copy the fd_sets and the remaining timeout back out.
     * Note a write-back fault turns a successful select into -EFAULT. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            /* ts may have been updated by the host call; convert it back
             * to a timeval for the guest (select() semantics). */
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1507
1508 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1509 static abi_long do_old_select(abi_ulong arg1)
1510 {
1511 struct target_sel_arg_struct *sel;
1512 abi_ulong inp, outp, exp, tvp;
1513 long nsel;
1514
1515 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1516 return -TARGET_EFAULT;
1517 }
1518
1519 nsel = tswapal(sel->n);
1520 inp = tswapal(sel->inp);
1521 outp = tswapal(sel->outp);
1522 exp = tswapal(sel->exp);
1523 tvp = tswapal(sel->tvp);
1524
1525 unlock_user_struct(sel, arg1, 0);
1526
1527 return do_select(nsel, inp, outp, exp, tvp);
1528 }
1529 #endif
1530 #endif
1531
/* Host pipe2() wrapper: returns the raw host result, or -ENOSYS when the
 * build host has no pipe2() at all (CONFIG_PIPE2 unset). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1540
/* Common implementation for the guest's pipe and pipe2 syscalls.
 * flags != 0 forces the pipe2 path; is_pipe2 selects the return-value
 * convention (some targets return the second fd in a CPU register for
 * plain pipe rather than writing both fds to memory). */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes back in a CPU register; first fd is the result. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1574
1575 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1576 abi_ulong target_addr,
1577 socklen_t len)
1578 {
1579 struct target_ip_mreqn *target_smreqn;
1580
1581 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1582 if (!target_smreqn)
1583 return -TARGET_EFAULT;
1584 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1585 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1586 if (len == sizeof(struct target_ip_mreqn))
1587 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1588 unlock_user(target_smreqn, target_addr, 0);
1589
1590 return 0;
1591 }
1592
/* Convert a guest sockaddr into the host's representation.  Handles the
 * fd-specific translation hook (e.g. netlink fds), AF_UNIX sun_path length
 * fix-up, and per-family field byte-swapping.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fds register their own address translator; defer entirely. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] reads one byte beyond the len bytes
             * locked above — confirm lock_user guarantees that byte is
             * accessible before relying on it. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* Byte-swap the family-specific host-endian fields in place. */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1649
/* Copy a host sockaddr out to guest memory, byte-swapping the fields that
 * are stored host-endian (family, and the per-family integer fields).
 * A zero len is a no-op success.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the copied length actually covers it
     * (the guest may have supplied a truncated buffer). */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): unlike the other branches, this one has no length
         * guard before touching sll_ifindex/sll_hatype — confirm callers
         * always pass a full sockaddr_ll here. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1687
/* Convert the guest's ancillary (control) data in target_msgh into host
 * format in msgh, walking the two cmsg chains in lock-step.  Supported
 * payloads (SCM_RIGHTS, SCM_CREDENTIALS) are converted field by field;
 * anything else is raw-copied with a warning.  On return msgh's
 * msg_controllen is the amount of host control space actually used. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* No room for even one header: treat as "no control data". */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest cmsg (header excluded). */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptor passing: swap each fd individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: best-effort raw copy, no byte-swapping. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1769
1770 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1771 struct msghdr *msgh)
1772 {
1773 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1774 abi_long msg_controllen;
1775 abi_ulong target_cmsg_addr;
1776 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1777 socklen_t space = 0;
1778
1779 msg_controllen = tswapal(target_msgh->msg_controllen);
1780 if (msg_controllen < sizeof (struct target_cmsghdr))
1781 goto the_end;
1782 target_cmsg_addr = tswapal(target_msgh->msg_control);
1783 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1784 target_cmsg_start = target_cmsg;
1785 if (!target_cmsg)
1786 return -TARGET_EFAULT;
1787
1788 while (cmsg && target_cmsg) {
1789 void *data = CMSG_DATA(cmsg);
1790 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1791
1792 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1793 int tgt_len, tgt_space;
1794
1795 /* We never copy a half-header but may copy half-data;
1796 * this is Linux's behaviour in put_cmsg(). Note that
1797 * truncation here is a guest problem (which we report
1798 * to the guest via the CTRUNC bit), unlike truncation
1799 * in target_to_host_cmsg, which is a QEMU bug.
1800 */
1801 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1802 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1803 break;
1804 }
1805
1806 if (cmsg->cmsg_level == SOL_SOCKET) {
1807 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1808 } else {
1809 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1810 }
1811 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1812
1813 /* Payload types which need a different size of payload on
1814 * the target must adjust tgt_len here.
1815 */
1816 switch (cmsg->cmsg_level) {
1817 case SOL_SOCKET:
1818 switch (cmsg->cmsg_type) {
1819 case SO_TIMESTAMP:
1820 tgt_len = sizeof(struct target_timeval);
1821 break;
1822 default:
1823 break;
1824 }
1825 default:
1826 tgt_len = len;
1827 break;
1828 }
1829
1830 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1831 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1832 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1833 }
1834
1835 /* We must now copy-and-convert len bytes of payload
1836 * into tgt_len bytes of destination space. Bear in mind
1837 * that in both source and destination we may be dealing
1838 * with a truncated value!
1839 */
1840 switch (cmsg->cmsg_level) {
1841 case SOL_SOCKET:
1842 switch (cmsg->cmsg_type) {
1843 case SCM_RIGHTS:
1844 {
1845 int *fd = (int *)data;
1846 int *target_fd = (int *)target_data;
1847 int i, numfds = tgt_len / sizeof(int);
1848
1849 for (i = 0; i < numfds; i++) {
1850 __put_user(fd[i], target_fd + i);
1851 }
1852 break;
1853 }
1854 case SO_TIMESTAMP:
1855 {
1856 struct timeval *tv = (struct timeval *)data;
1857 struct target_timeval *target_tv =
1858 (struct target_timeval *)target_data;
1859
1860 if (len != sizeof(struct timeval) ||
1861 tgt_len != sizeof(struct target_timeval)) {
1862 goto unimplemented;
1863 }
1864
1865 /* copy struct timeval to target */
1866 __put_user(tv->tv_sec, &target_tv->tv_sec);
1867 __put_user(tv->tv_usec, &target_tv->tv_usec);
1868 break;
1869 }
1870 case SCM_CREDENTIALS:
1871 {
1872 struct ucred *cred = (struct ucred *)data;
1873 struct target_ucred *target_cred =
1874 (struct target_ucred *)target_data;
1875
1876 __put_user(cred->pid, &target_cred->pid);
1877 __put_user(cred->uid, &target_cred->uid);
1878 __put_user(cred->gid, &target_cred->gid);
1879 break;
1880 }
1881 default:
1882 goto unimplemented;
1883 }
1884 break;
1885
1886 case SOL_IP:
1887 switch (cmsg->cmsg_type) {
1888 case IP_TTL:
1889 {
1890 uint32_t *v = (uint32_t *)data;
1891 uint32_t *t_int = (uint32_t *)target_data;
1892
1893 if (len != sizeof(uint32_t) ||
1894 tgt_len != sizeof(uint32_t)) {
1895 goto unimplemented;
1896 }
1897 __put_user(*v, t_int);
1898 break;
1899 }
1900 case IP_RECVERR:
1901 {
1902 struct errhdr_t {
1903 struct sock_extended_err ee;
1904 struct sockaddr_in offender;
1905 };
1906 struct errhdr_t *errh = (struct errhdr_t *)data;
1907 struct errhdr_t *target_errh =
1908 (struct errhdr_t *)target_data;
1909
1910 if (len != sizeof(struct errhdr_t) ||
1911 tgt_len != sizeof(struct errhdr_t)) {
1912 goto unimplemented;
1913 }
1914 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1915 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1916 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1917 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1918 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1919 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1920 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1921 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1922 (void *) &errh->offender, sizeof(errh->offender));
1923 break;
1924 }
1925 default:
1926 goto unimplemented;
1927 }
1928 break;
1929
1930 case SOL_IPV6:
1931 switch (cmsg->cmsg_type) {
1932 case IPV6_HOPLIMIT:
1933 {
1934 uint32_t *v = (uint32_t *)data;
1935 uint32_t *t_int = (uint32_t *)target_data;
1936
1937 if (len != sizeof(uint32_t) ||
1938 tgt_len != sizeof(uint32_t)) {
1939 goto unimplemented;
1940 }
1941 __put_user(*v, t_int);
1942 break;
1943 }
1944 case IPV6_RECVERR:
1945 {
1946 struct errhdr6_t {
1947 struct sock_extended_err ee;
1948 struct sockaddr_in6 offender;
1949 };
1950 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1951 struct errhdr6_t *target_errh =
1952 (struct errhdr6_t *)target_data;
1953
1954 if (len != sizeof(struct errhdr6_t) ||
1955 tgt_len != sizeof(struct errhdr6_t)) {
1956 goto unimplemented;
1957 }
1958 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1959 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1960 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1961 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1962 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1963 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1964 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1965 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1966 (void *) &errh->offender, sizeof(errh->offender));
1967 break;
1968 }
1969 default:
1970 goto unimplemented;
1971 }
1972 break;
1973
1974 default:
1975 unimplemented:
1976 gemu_log("Unsupported ancillary data: %d/%d\n",
1977 cmsg->cmsg_level, cmsg->cmsg_type);
1978 memcpy(target_data, data, MIN(len, tgt_len));
1979 if (tgt_len > len) {
1980 memset(target_data + len, 0, tgt_len - len);
1981 }
1982 }
1983
1984 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1985 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1986 if (msg_controllen < tgt_space) {
1987 tgt_space = msg_controllen;
1988 }
1989 msg_controllen -= tgt_space;
1990 space += tgt_space;
1991 cmsg = CMSG_NXTHDR(msgh, cmsg);
1992 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1993 target_cmsg_start);
1994 }
1995 unlock_user(target_cmsg, target_cmsg_addr, space);
1996 the_end:
1997 target_msgh->msg_controllen = tswapal(space);
1998 return 0;
1999 }
2000
/* Byte-swap every field of a netlink message header in place.
 * The swap is an involution, so the same helper serves both the
 * host-to-target and target-to-host directions. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
}
2009
/*
 * Walk a buffer of host-format netlink messages, convert each payload
 * with host_to_target_nlmsg(), then byte-swap the header itself.
 *
 * Ordering matters: the callback runs while the header is still in
 * host byte order, and tswap_nlmsghdr() is applied afterwards, so the
 * host-order nlmsg_len must be cached before the swap.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Cache the host-order length: the header is swapped below. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            /* Malformed length: stop without touching the rest. */
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            /* End of a multipart message: nothing further to convert. */
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The error payload embeds the header of the failed request. */
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                /* Still swap the current header so the target sees a
                 * consistent buffer up to the failure point. */
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
2054
/*
 * Walk a buffer of target-format netlink messages: byte-swap each
 * header to host order, then convert the payload with
 * target_to_host_nlmsg().  The length checks read the still
 * target-order nlmsg_len, hence the tswap32() in the condition.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            /* Malformed length: stop without touching the rest. */
            break;
        }
        /* Header to host order first; the payload callback and the
         * NLMSG_ALIGN() advance below rely on host-order fields. */
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            /* End of a multipart message: nothing further to convert. */
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            /* The error payload embeds the header of the failed request. */
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
2091
2092 #ifdef CONFIG_RTNETLINK
2093 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2094 size_t len, void *context,
2095 abi_long (*host_to_target_nlattr)
2096 (struct nlattr *,
2097 void *context))
2098 {
2099 unsigned short nla_len;
2100 abi_long ret;
2101
2102 while (len > sizeof(struct nlattr)) {
2103 nla_len = nlattr->nla_len;
2104 if (nla_len < sizeof(struct nlattr) ||
2105 nla_len > len) {
2106 break;
2107 }
2108 ret = host_to_target_nlattr(nlattr, context);
2109 nlattr->nla_len = tswap16(nlattr->nla_len);
2110 nlattr->nla_type = tswap16(nlattr->nla_type);
2111 if (ret < 0) {
2112 return ret;
2113 }
2114 len -= NLA_ALIGN(nla_len);
2115 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2116 }
2117 return 0;
2118 }
2119
2120 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2121 size_t len,
2122 abi_long (*host_to_target_rtattr)
2123 (struct rtattr *))
2124 {
2125 unsigned short rta_len;
2126 abi_long ret;
2127
2128 while (len > sizeof(struct rtattr)) {
2129 rta_len = rtattr->rta_len;
2130 if (rta_len < sizeof(struct rtattr) ||
2131 rta_len > len) {
2132 break;
2133 }
2134 ret = host_to_target_rtattr(rtattr);
2135 rtattr->rta_len = tswap16(rtattr->rta_len);
2136 rtattr->rta_type = tswap16(rtattr->rta_type);
2137 if (ret < 0) {
2138 return ret;
2139 }
2140 len -= RTA_ALIGN(rta_len);
2141 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2142 }
2143 return 0;
2144 }
2145
2146 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2147
/*
 * Byte-swap the payload of one bridge IFLA_INFO_DATA netlink attribute
 * in place, according to the width implied by its type.  Types whose
 * payload is empty, a raw byte string or uint8_t need no conversion.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        /* Unknown attribute: left untouched, logged for visibility. */
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
2221
/*
 * Byte-swap the payload of one bridge-port IFLA_INFO_SLAVE_DATA
 * netlink attribute in place, according to the width implied by its
 * type.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        /* Unknown attribute: left untouched, logged for visibility. */
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
2276
/*
 * Scratch state threaded through the IFLA_LINKINFO attribute walk: the
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND payloads are recorded here so
 * the later IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA attributes can be
 * decoded according to the link kind.
 */
struct linkinfo_context {
    int len;            /* payload length of IFLA_INFO_KIND */
    char *name;         /* points into the IFLA_INFO_KIND payload */
    int slave_len;      /* payload length of IFLA_INFO_SLAVE_KIND */
    char *slave_name;   /* points into the IFLA_INFO_SLAVE_KIND payload */
};
2283
2284 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2285 void *context)
2286 {
2287 struct linkinfo_context *li_context = context;
2288
2289 switch (nlattr->nla_type) {
2290 /* string */
2291 case QEMU_IFLA_INFO_KIND:
2292 li_context->name = NLA_DATA(nlattr);
2293 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2294 break;
2295 case QEMU_IFLA_INFO_SLAVE_KIND:
2296 li_context->slave_name = NLA_DATA(nlattr);
2297 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2298 break;
2299 /* stats */
2300 case QEMU_IFLA_INFO_XSTATS:
2301 /* FIXME: only used by CAN */
2302 break;
2303 /* nested */
2304 case QEMU_IFLA_INFO_DATA:
2305 if (strncmp(li_context->name, "bridge",
2306 li_context->len) == 0) {
2307 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2308 nlattr->nla_len,
2309 NULL,
2310 host_to_target_data_bridge_nlattr);
2311 } else {
2312 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2313 }
2314 break;
2315 case QEMU_IFLA_INFO_SLAVE_DATA:
2316 if (strncmp(li_context->slave_name, "bridge",
2317 li_context->slave_len) == 0) {
2318 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2319 nlattr->nla_len,
2320 NULL,
2321 host_to_target_slave_data_bridge_nlattr);
2322 } else {
2323 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2324 li_context->slave_name);
2325 }
2326 break;
2327 default:
2328 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2329 break;
2330 }
2331
2332 return 0;
2333 }
2334
2335 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2336 void *context)
2337 {
2338 uint32_t *u32;
2339 int i;
2340
2341 switch (nlattr->nla_type) {
2342 case QEMU_IFLA_INET_CONF:
2343 u32 = NLA_DATA(nlattr);
2344 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2345 i++) {
2346 u32[i] = tswap32(u32[i]);
2347 }
2348 break;
2349 default:
2350 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2351 }
2352 return 0;
2353 }
2354
/*
 * Convert one AF_INET6 sub-attribute of IFLA_AF_SPEC from host to
 * target byte order, according to the payload layout implied by its
 * type.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        /* Unknown attribute: left untouched, logged for visibility. */
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2405
2406 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2407 void *context)
2408 {
2409 switch (nlattr->nla_type) {
2410 case AF_INET:
2411 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2412 NULL,
2413 host_to_target_data_inet_nlattr);
2414 case AF_INET6:
2415 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2416 NULL,
2417 host_to_target_data_inet6_nlattr);
2418 default:
2419 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2420 break;
2421 }
2422 return 0;
2423 }
2424
/*
 * Convert one IFLA_* routing attribute of an RTM_*LINK message from
 * host to target byte order, according to the payload layout implied
 * by its type.  Binary blobs, strings and single bytes pass through
 * unchanged; scalar and struct payloads are swapped field by field.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
    case QEMU_IFLA_GSO_MAX_SEGS:
    case QEMU_IFLA_GSO_MAX_SIZE:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* Zeroed context: the KIND sub-attributes fill it in as the
         * nested walk proceeds. */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    default:
        /* Unknown attribute: left untouched, logged for visibility. */
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2556
/*
 * Convert one IFA_* routing attribute of an RTM_*ADDR message from
 * host to target byte order.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST is byte-swapped here although
         * IFA_ADDRESS/IFA_LOCAL above, which carry the same kind of
         * family-dependent address payload, are passed through
         * unchanged — confirm this is intentional. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        /* Unknown attribute: left untouched, logged for visibility. */
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2590
2591 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2592 {
2593 uint32_t *u32;
2594 switch (rtattr->rta_type) {
2595 /* binary: depends on family type */
2596 case RTA_GATEWAY:
2597 case RTA_DST:
2598 case RTA_PREFSRC:
2599 break;
2600 /* u32 */
2601 case RTA_PRIORITY:
2602 case RTA_TABLE:
2603 case RTA_OIF:
2604 u32 = RTA_DATA(rtattr);
2605 *u32 = tswap32(*u32);
2606 break;
2607 default:
2608 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2609 break;
2610 }
2611 return 0;
2612 }
2613
/* Convert the IFLA rtattr chain of a link message from host to target. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
2620
/* Convert the IFA rtattr chain of an address message from host to target. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
2627
/* Convert the RTA rtattr chain of a route message from host to target. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
2634
/*
 * Convert the payload of one rtnetlink message (link / address / route
 * family header plus its rtattr chain) from host to target byte order.
 * Called from host_to_target_for_each_nlmsg() before the netlink
 * header itself is swapped, so nlh's fields are still in host order.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* Only convert the body if it is actually large enough. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2682
/* Convert a buffer of rtnetlink messages from host to target format. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
2688
2689 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2690 size_t len,
2691 abi_long (*target_to_host_rtattr)
2692 (struct rtattr *))
2693 {
2694 abi_long ret;
2695
2696 while (len >= sizeof(struct rtattr)) {
2697 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2698 tswap16(rtattr->rta_len) > len) {
2699 break;
2700 }
2701 rtattr->rta_len = tswap16(rtattr->rta_len);
2702 rtattr->rta_type = tswap16(rtattr->rta_type);
2703 ret = target_to_host_rtattr(rtattr);
2704 if (ret < 0) {
2705 return ret;
2706 }
2707 len -= RTA_ALIGN(rtattr->rta_len);
2708 rtattr = (struct rtattr *)(((char *)rtattr) +
2709 RTA_ALIGN(rtattr->rta_len));
2710 }
2711 return 0;
2712 }
2713
2714 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2715 {
2716 switch (rtattr->rta_type) {
2717 default:
2718 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2719 break;
2720 }
2721 return 0;
2722 }
2723
2724 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2725 {
2726 switch (rtattr->rta_type) {
2727 /* binary: depends on family type */
2728 case IFA_LOCAL:
2729 case IFA_ADDRESS:
2730 break;
2731 default:
2732 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2733 break;
2734 }
2735 return 0;
2736 }
2737
2738 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2739 {
2740 uint32_t *u32;
2741 switch (rtattr->rta_type) {
2742 /* binary: depends on family type */
2743 case RTA_DST:
2744 case RTA_SRC:
2745 case RTA_GATEWAY:
2746 break;
2747 /* u32 */
2748 case RTA_PRIORITY:
2749 case RTA_OIF:
2750 u32 = RTA_DATA(rtattr);
2751 *u32 = tswap32(*u32);
2752 break;
2753 default:
2754 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2755 break;
2756 }
2757 return 0;
2758 }
2759
/* Convert the IFLA rtattr chain of a link message from target to host. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
2766
/* Convert the IFA rtattr chain of an address message from target to host. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
2773
/* Convert the RTA rtattr chain of a route message from target to host. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
2780
/*
 * Convert the payload of one target-supplied rtnetlink message to host
 * byte order.  Called from target_to_host_for_each_nlmsg() after the
 * netlink header has already been swapped, so nlh's fields are in host
 * order here.  GETLINK/GETROUTE requests carry no body to convert.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* Only convert the body if it is actually large enough. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2828
/* Convert a buffer of rtnetlink messages from target to host format. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2833 #endif /* CONFIG_RTNETLINK */
2834
2835 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2836 {
2837 switch (nlh->nlmsg_type) {
2838 default:
2839 gemu_log("Unknown host audit message type %d\n",
2840 nlh->nlmsg_type);
2841 return -TARGET_EINVAL;
2842 }
2843 return 0;
2844 }
2845
/* Convert a buffer of audit-netlink messages from host to target format. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2851
2852 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2853 {
2854 switch (nlh->nlmsg_type) {
2855 case AUDIT_USER:
2856 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2857 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2858 break;
2859 default:
2860 gemu_log("Unknown target audit message type %d\n",
2861 nlh->nlmsg_type);
2862 return -TARGET_EINVAL;
2863 }
2864
2865 return 0;
2866 }
2867
/* Convert a buffer of audit-netlink messages from target to host format. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2872
2873 /* do_setsockopt() Must return target values and target errnos. */
2874 static abi_long do_setsockopt(int sockfd, int level, int optname,
2875 abi_ulong optval_addr, socklen_t optlen)
2876 {
2877 abi_long ret;
2878 int val;
2879 struct ip_mreqn *ip_mreq;
2880 struct ip_mreq_source *ip_mreq_source;
2881
2882 switch(level) {
2883 case SOL_TCP:
2884 /* TCP options all take an 'int' value. */
2885 if (optlen < sizeof(uint32_t))
2886 return -TARGET_EINVAL;
2887
2888 if (get_user_u32(val, optval_addr))
2889 return -TARGET_EFAULT;
2890 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2891 break;
2892 case SOL_IP:
2893 switch(optname) {
2894 case IP_TOS:
2895 case IP_TTL:
2896 case IP_HDRINCL:
2897 case IP_ROUTER_ALERT:
2898 case IP_RECVOPTS:
2899 case IP_RETOPTS:
2900 case IP_PKTINFO:
2901 case IP_MTU_DISCOVER:
2902 case IP_RECVERR:
2903 case IP_RECVTTL:
2904 case IP_RECVTOS:
2905 #ifdef IP_FREEBIND
2906 case IP_FREEBIND:
2907 #endif
2908 case IP_MULTICAST_TTL:
2909 case IP_MULTICAST_LOOP:
2910 val = 0;
2911 if (optlen >= sizeof(uint32_t)) {
2912 if (get_user_u32(val, optval_addr))
2913 return -TARGET_EFAULT;
2914 } else if (optlen >= 1) {
2915 if (get_user_u8(val, optval_addr))
2916 return -TARGET_EFAULT;
2917 }
2918 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2919 break;
2920 case IP_ADD_MEMBERSHIP:
2921 case IP_DROP_MEMBERSHIP:
2922 if (optlen < sizeof (struct target_ip_mreq) ||
2923 optlen > sizeof (struct target_ip_mreqn))
2924 return -TARGET_EINVAL;
2925
2926 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2927 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2928 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2929 break;
2930
2931 case IP_BLOCK_SOURCE:
2932 case IP_UNBLOCK_SOURCE:
2933 case IP_ADD_SOURCE_MEMBERSHIP:
2934 case IP_DROP_SOURCE_MEMBERSHIP:
2935 if (optlen != sizeof (struct target_ip_mreq_source))
2936 return -TARGET_EINVAL;
2937
2938 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2939 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2940 unlock_user (ip_mreq_source, optval_addr, 0);
2941 break;
2942
2943 default:
2944 goto unimplemented;
2945 }
2946 break;
2947 case SOL_IPV6:
2948 switch (optname) {
2949 case IPV6_MTU_DISCOVER:
2950 case IPV6_MTU:
2951 case IPV6_V6ONLY:
2952 case IPV6_RECVPKTINFO:
2953 case IPV6_UNICAST_HOPS:
2954 case IPV6_RECVERR:
2955 case IPV6_RECVHOPLIMIT:
2956 case IPV6_2292HOPLIMIT:
2957 case IPV6_CHECKSUM:
2958 val = 0;
2959 if (optlen < sizeof(uint32_t)) {
2960 return -TARGET_EINVAL;
2961 }
2962 if (get_user_u32(val, optval_addr)) {
2963 return -TARGET_EFAULT;
2964 }
2965 ret = get_errno(setsockopt(sockfd, level, optname,
2966 &val, sizeof(val)));
2967 break;
2968 case IPV6_PKTINFO:
2969 {
2970 struct in6_pktinfo pki;
2971
2972 if (optlen < sizeof(pki)) {
2973 return -TARGET_EINVAL;
2974 }
2975
2976 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2977 return -TARGET_EFAULT;
2978 }
2979
2980 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2981
2982 ret = get_errno(setsockopt(sockfd, level, optname,
2983 &pki, sizeof(pki)));
2984 break;
2985 }
2986 default:
2987 goto unimplemented;
2988 }
2989 break;
2990 case SOL_ICMPV6:
2991 switch (optname) {
2992 case ICMPV6_FILTER:
2993 {
2994 struct icmp6_filter icmp6f;
2995
2996 if (optlen > sizeof(icmp6f)) {
2997 optlen = sizeof(icmp6f);
2998 }
2999
3000 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
3001 return -TARGET_EFAULT;
3002 }
3003
3004 for (val = 0; val < 8; val++) {
3005 icmp6f.data[val] = tswap32(icmp6f.data[val]);
3006 }
3007
3008 ret = get_errno(setsockopt(sockfd, level, optname,
3009 &icmp6f, optlen));
3010 break;
3011 }
3012 default:
3013 goto unimplemented;
3014 }
3015 break;
3016 case SOL_RAW:
3017 switch (optname) {
3018 case ICMP_FILTER:
3019 case IPV6_CHECKSUM:
3020 /* those take an u32 value */
3021 if (optlen < sizeof(uint32_t)) {
3022 return -TARGET_EINVAL;
3023 }
3024
3025 if (get_user_u32(val, optval_addr)) {
3026 return -TARGET_EFAULT;
3027 }
3028 ret = get_errno(setsockopt(sockfd, level, optname,
3029 &val, sizeof(val)));
3030 break;
3031
3032 default:
3033 goto unimplemented;
3034 }
3035 break;
3036 case TARGET_SOL_SOCKET:
3037 switch (optname) {
3038 case TARGET_SO_RCVTIMEO:
3039 {
3040 struct timeval tv;
3041
3042 optname = SO_RCVTIMEO;
3043
3044 set_timeout:
3045 if (optlen != sizeof(struct target_timeval)) {
3046 return -TARGET_EINVAL;
3047 }
3048
3049 if (copy_from_user_timeval(&tv, optval_addr)) {
3050 return -TARGET_EFAULT;
3051 }
3052
3053 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3054 &tv, sizeof(tv)));
3055 return ret;
3056 }
3057 case TARGET_SO_SNDTIMEO:
3058 optname = SO_SNDTIMEO;
3059 goto set_timeout;
3060 case TARGET_SO_ATTACH_FILTER:
3061 {
3062 struct target_sock_fprog *tfprog;
3063 struct target_sock_filter *tfilter;
3064 struct sock_fprog fprog;
3065 struct sock_filter *filter;
3066 int i;
3067
3068 if (optlen != sizeof(*tfprog)) {
3069 return -TARGET_EINVAL;
3070 }
3071 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3072 return -TARGET_EFAULT;
3073 }
3074 if (!lock_user_struct(VERIFY_READ, tfilter,
3075 tswapal(tfprog->filter), 0)) {
3076 unlock_user_struct(tfprog, optval_addr, 1);
3077 return -TARGET_EFAULT;
3078 }
3079
3080 fprog.len = tswap16(tfprog->len);
3081 filter = g_try_new(struct sock_filter, fprog.len);
3082 if (filter == NULL) {
3083 unlock_user_struct(tfilter, tfprog->filter, 1);
3084 unlock_user_struct(tfprog, optval_addr, 1);
3085 return -TARGET_ENOMEM;
3086 }
3087 for (i = 0; i < fprog.len; i++) {
3088 filter[i].code = tswap16(tfilter[i].code);
3089 filter[i].jt = tfilter[i].jt;
3090 filter[i].jf = tfilter[i].jf;
3091 filter[i].k = tswap32(tfilter[i].k);
3092 }
3093 fprog.filter = filter;
3094
3095 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3096 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3097 g_free(filter);
3098
3099 unlock_user_struct(tfilter, tfprog->filter, 1);
3100 unlock_user_struct(tfprog, optval_addr, 1);
3101 return ret;
3102 }
3103 case TARGET_SO_BINDTODEVICE:
3104 {
3105 char *dev_ifname, *addr_ifname;
3106
3107 if (optlen > IFNAMSIZ - 1) {
3108 optlen = IFNAMSIZ - 1;
3109 }
3110 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3111 if (!dev_ifname) {
3112 return -TARGET_EFAULT;
3113 }
3114 optname = SO_BINDTODEVICE;
3115 addr_ifname = alloca(IFNAMSIZ);
3116 memcpy(addr_ifname, dev_ifname, optlen);
3117 addr_ifname[optlen] = 0;
3118 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3119 addr_ifname, optlen));
3120 unlock_user (dev_ifname, optval_addr, 0);
3121 return ret;
3122 }
3123 /* Options with 'int' argument. */
3124 case TARGET_SO_DEBUG:
3125 optname = SO_DEBUG;
3126 break;
3127 case TARGET_SO_REUSEADDR:
3128 optname = SO_REUSEADDR;
3129 break;
3130 case TARGET_SO_TYPE:
3131 optname = SO_TYPE;
3132 break;
3133 case TARGET_SO_ERROR:
3134 optname = SO_ERROR;
3135 break;
3136 case TARGET_SO_DONTROUTE:
3137 optname = SO_DONTROUTE;
3138 break;
3139 case TARGET_SO_BROADCAST:
3140 optname = SO_BROADCAST;
3141 break;
3142 case TARGET_SO_SNDBUF:
3143 optname = SO_SNDBUF;
3144 break;
3145 case TARGET_SO_SNDBUFFORCE:
3146 optname = SO_SNDBUFFORCE;
3147 break;
3148 case TARGET_SO_RCVBUF:
3149 optname = SO_RCVBUF;
3150 break;
3151 case TARGET_SO_RCVBUFFORCE:
3152 optname = SO_RCVBUFFORCE;
3153 break;
3154 case TARGET_SO_KEEPALIVE:
3155 optname = SO_KEEPALIVE;
3156 break;
3157 case TARGET_SO_OOBINLINE:
3158 optname = SO_OOBINLINE;
3159 break;
3160 case TARGET_SO_NO_CHECK:
3161 optname = SO_NO_CHECK;
3162 break;
3163 case TARGET_SO_PRIORITY:
3164 optname = SO_PRIORITY;
3165 break;
3166 #ifdef SO_BSDCOMPAT
3167 case TARGET_SO_BSDCOMPAT:
3168 optname = SO_BSDCOMPAT;
3169 break;
3170 #endif
3171 case TARGET_SO_PASSCRED:
3172 optname = SO_PASSCRED;
3173 break;
3174 case TARGET_SO_PASSSEC:
3175 optname = SO_PASSSEC;
3176 break;
3177 case TARGET_SO_TIMESTAMP:
3178 optname = SO_TIMESTAMP;
3179 break;
3180 case TARGET_SO_RCVLOWAT:
3181 optname = SO_RCVLOWAT;
3182 break;
3183 default:
3184 goto unimplemented;
3185 }
3186 if (optlen < sizeof(uint32_t))
3187 return -TARGET_EINVAL;
3188
3189 if (get_user_u32(val, optval_addr))
3190 return -TARGET_EFAULT;
3191 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3192 break;
3193 default:
3194 unimplemented:
3195 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3196 ret = -TARGET_ENOPROTOOPT;
3197 }
3198 return ret;
3199 }
3200
/* do_getsockopt() Must return target values and target errnos.
 *
 * Reads the guest's value/result length from @optlen, performs the host
 * getsockopt(), converts the result to target conventions, and writes
 * both the option value and the updated length back to guest memory.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy out no more than the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* lv is a socklen_t, same width as the int read below. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Guest may request a 1-byte read of an int option; copy out
         * only the low byte in that case. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* NOTE(review): the kernel's ip_sockglue.c uses "val <= 255"
             * for the single-byte copy-out path; "val < 255" here excludes
             * the value 255 (e.g. a TTL of 255) from the byte path —
             * confirm whether this divergence is intentional. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
3388
3389 /* Convert target low/high pair representing file offset into the host
3390 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3391 * as the kernel doesn't handle them either.
3392 */
3393 static void target_to_host_low_high(abi_ulong tlow,
3394 abi_ulong thigh,
3395 unsigned long *hlow,
3396 unsigned long *hhigh)
3397 {
3398 uint64_t off = tlow |
3399 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3400 TARGET_LONG_BITS / 2;
3401
3402 *hlow = off;
3403 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3404 }
3405
/* Lock a guest iovec array into host memory, returning a host iovec
 * array pointing at the locked buffers.  On failure returns NULL and
 * sets errno (host errno values).  The caller must release the result
 * with unlock_iovec() using the same count/copy arguments.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before the failing entry. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3493
/* Release an iovec previously obtained from lock_iovec().  When @copy
 * is set, the locked buffer contents are copied back to guest memory
 * (used after a read-style operation).  Frees the host vector.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            /* lock_iovec() stopped at the first negative length, so
             * there is nothing locked from here on. */
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
3516
3517 static inline int target_to_host_sock_type(int *type)
3518 {
3519 int host_type = 0;
3520 int target_type = *type;
3521
3522 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3523 case TARGET_SOCK_DGRAM:
3524 host_type = SOCK_DGRAM;
3525 break;
3526 case TARGET_SOCK_STREAM:
3527 host_type = SOCK_STREAM;
3528 break;
3529 default:
3530 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3531 break;
3532 }
3533 if (target_type & TARGET_SOCK_CLOEXEC) {
3534 #if defined(SOCK_CLOEXEC)
3535 host_type |= SOCK_CLOEXEC;
3536 #else
3537 return -TARGET_EINVAL;
3538 #endif
3539 }
3540 if (target_type & TARGET_SOCK_NONBLOCK) {
3541 #if defined(SOCK_NONBLOCK)
3542 host_type |= SOCK_NONBLOCK;
3543 #elif !defined(O_NONBLOCK)
3544 return -TARGET_EINVAL;
3545 #endif
3546 }
3547 *type = host_type;
3548 return 0;
3549 }
3550
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl after the
 * fact.  Returns @fd on success; on failure closes @fd and returns
 * -TARGET_EINVAL.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3565
/* Convert a guest sockaddr for an AF_PACKET/SOCK_PACKET socket into the
 * host representation at @host_addr.  Only the family field needs byte
 * swapping; the rest of the structure is copied verbatim.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
3585
/* fd translator for SOCK_PACKET sockets: only addresses need fixup. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3589
3590 #ifdef CONFIG_RTNETLINK
3591 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3592 {
3593 abi_long ret;
3594
3595 ret = target_to_host_nlmsg_route(buf, len);
3596 if (ret < 0) {
3597 return ret;
3598 }
3599
3600 return len;
3601 }
3602
3603 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3604 {
3605 abi_long ret;
3606
3607 ret = host_to_target_nlmsg_route(buf, len);
3608 if (ret < 0) {
3609 return ret;
3610 }
3611
3612 return len;
3613 }
3614
/* fd translator for NETLINK_ROUTE sockets: swap message payloads both ways. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3619 #endif /* CONFIG_RTNETLINK */
3620
3621 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3622 {
3623 abi_long ret;
3624
3625 ret = target_to_host_nlmsg_audit(buf, len);
3626 if (ret < 0) {
3627 return ret;
3628 }
3629
3630 return len;
3631 }
3632
3633 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3634 {
3635 abi_long ret;
3636
3637 ret = host_to_target_nlmsg_audit(buf, len);
3638 if (ret < 0) {
3639 return ret;
3640 }
3641
3642 return len;
3643 }
3644
/* fd translator for NETLINK_AUDIT sockets: swap message payloads both ways. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
3649
/* do_socket() Must return target values and target errnos.
 *
 * Converts the target socket type flags, creates the host socket, then
 * registers an fd translator where the protocol's wire format needs
 * per-message conversion (SOCK_PACKET addresses, netlink payloads).
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only the netlink protocols we can translate are allowed through. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* AF_PACKET protocol numbers are passed in network byte order. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* unreachable: filtered by the check above */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3703
3704 /* do_bind() Must return target values and target errnos. */
3705 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3706 socklen_t addrlen)
3707 {
3708 void *addr;
3709 abi_long ret;
3710
3711 if ((int)addrlen < 0) {
3712 return -TARGET_EINVAL;
3713 }
3714
3715 addr = alloca(addrlen+1);
3716
3717 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3718 if (ret)
3719 return ret;
3720
3721 return get_errno(bind(sockfd, addr, addrlen));
3722 }
3723
3724 /* do_connect() Must return target values and target errnos. */
3725 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3726 socklen_t addrlen)
3727 {
3728 void *addr;
3729 abi_long ret;
3730
3731 if ((int)addrlen < 0) {
3732 return -TARGET_EINVAL;
3733 }
3734
3735 addr = alloca(addrlen+1);
3736
3737 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3738 if (ret)
3739 return ret;
3740
3741 return get_errno(safe_connect(sockfd, addr, addrlen));
3742 }
3743
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Shared implementation for sendmsg/recvmsg on an already-locked target
 * msghdr.  @send selects direction: nonzero for sendmsg, zero for recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): the factor of 2 presumably reserves headroom for host
     * cmsg headers being larger than the target's (e.g. 32-bit guest on a
     * 64-bit host) — confirm.  Also note msg_controllen is guest-supplied
     * and alloca'd unbounded here. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate only the first iovec element's payload; the
             * translated copy is sent in place of the original. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 marks the deliberately-bad name above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3847
3848 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3849 int flags, int send)
3850 {
3851 abi_long ret;
3852 struct target_msghdr *msgp;
3853
3854 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3855 msgp,
3856 target_msg,
3857 send ? 1 : 0)) {
3858 return -TARGET_EFAULT;
3859 }
3860 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3861 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3862 return ret;
3863 }
3864
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* sendmmsg/recvmmsg emulation: loop do_sendrecvmsg_locked() over the
 * guest's mmsghdr vector, recording each per-message byte count.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently clamps oversized vectors too. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3911
/* do_accept4() Must return target values and target errnos.
 *
 * @flags carries target open-flag bits and is translated via
 * fcntl_flags_tbl before being passed to the host accept4().
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL address: the caller doesn't want the peer address back. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        /* NOTE(review): host_to_target_sockaddr()'s result is ignored
         * here — presumably intentional, but confirm. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3948
/* do_getpeername() Must return target values and target errnos.
 *
 * Reads the guest's addrlen, fetches the peer address from the host,
 * converts it to target layout and writes address and updated length
 * back to guest memory.
 */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        /* NOTE(review): host_to_target_sockaddr()'s result is ignored
         * here — presumably intentional, but confirm. */
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3977
3978 /* do_getsockname() Must return target values and target errnos. */
3979 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3980 abi_ulong target_addrlen_addr)
3981 {
3982 socklen_t addrlen;
3983 void *addr;
3984 abi_long ret;
3985
3986 if (get_user_u32(addrlen, target_addrlen_addr))
3987 return -TARGET_EFAULT;
3988
3989 if ((int)addrlen < 0) {
3990 return -TARGET_EINVAL;
3991 }
3992
3993 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3994 return -TARGET_EFAULT;
3995
3996 addr = alloca(addrlen);
3997
3998 ret = get_errno(getsockname(fd, addr, &addrlen));
3999 if (!is_error(ret)) {
4000 host_to_target_sockaddr(target_addr, addr, addrlen);
4001 if (put_user_u32(addrlen, target_addrlen_addr))
4002 ret = -TARGET_EFAULT;
4003 }
4004 return ret;
4005 }
4006
4007 /* do_socketpair() Must return target values and target errnos. */
4008 static abi_long do_socketpair(int domain, int type, int protocol,
4009 abi_ulong target_tab_addr)
4010 {
4011 int tab[2];
4012 abi_long ret;
4013
4014 target_to_host_sock_type(&type);
4015
4016 ret = get_errno(socketpair(domain, type, protocol, tab));
4017 if (!is_error(ret)) {
4018 if (put_user_s32(tab[0], target_tab_addr)
4019 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
4020 ret = -TARGET_EFAULT;
4021 }
4022 return ret;
4023 }
4024
/* do_sendto() Must return target values and target errnos.
 *
 * When the fd has a registered data translator, the payload is copied
 * into a heap buffer, translated, and sent from there; @copy_msg keeps
 * the original locked guest pointer so it can be unlocked on all paths.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* One spare byte for address conversions that need extra room. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Free the translated copy (if any) and restore the locked pointer
     * so unlock_user() releases the right buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
4068
/* do_recvfrom() Must return target values and target errnos.
 *
 * Receives into a locked guest buffer; on success, optionally runs the
 * fd's host-to-target data translator and copies the sender's address
 * back to guest memory.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* NOTE(review): the whole @len bytes are copied back to the
         * guest on success, not just the @ret bytes received — confirm
         * whether copying only the received portion was intended. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
4116
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Multiplexed socket syscall: @num selects the operation and @vptr
 * points at a guest array of its arguments.  The argument count per
 * operation is looked up in nargs[] before the arguments are fetched.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4209
#define N_SHM_REGIONS 32

/* Fixed-size table of guest shared-memory mappings.  Presumably used by
 * the shmat/shmdt emulation elsewhere in this file to recover a
 * mapping's size from its start address — the users are outside this
 * chunk; confirm against them. */
static struct shm_region {
    abi_ulong start;    /* guest address where the region is attached */
    abi_ulong size;     /* region length in bytes */
    bool in_use;        /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
4217
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;            /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;            /* padding: high half of 64-bit time */
#endif
    abi_ulong sem_ctime;            /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;            /* padding: high half of 64-bit time */
#endif
    abi_ulong sem_nsems;            /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
4236
/* Copy the sem_perm member of a guest semid64_ds at @target_addr into a
 * host ipc_perm, byte-swapping each field.  mode/__seq widths differ per
 * target ABI, hence the per-architecture swap sizes below.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
4264
4265 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4266 struct ipc_perm *host_ip)
4267 {
4268 struct target_ipc_perm *target_ip;
4269 struct target_semid64_ds *target_sd;
4270
4271 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4272 return -TARGET_EFAULT;
4273 target_ip = &(target_sd->sem_perm);
4274 target_ip->__key = tswap32(host_ip->__key);
4275 target_ip->uid = tswap32(host_ip->uid);
4276 target_ip->gid = tswap32(host_ip->gid);
4277 target_ip->cuid = tswap32(host_ip->cuid);
4278 target_ip->cgid = tswap32(host_ip->cgid);
4279 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4280 target_ip->mode = tswap32(host_ip->mode);
4281 #else
4282 target_ip->mode = tswap16(host_ip->mode);
4283 #endif
4284 #if defined(TARGET_PPC)
4285 target_ip->__seq = tswap32(host_ip->__seq);
4286 #else
4287 target_ip->__seq = tswap16(host_ip->__seq);
4288 #endif
4289 unlock_user_struct(target_sd, target_addr, 1);
4290 return 0;
4291 }
4292
/* Convert a guest semid64_ds at target_addr into *host_sd.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): this takes a second, nested lock on the same guest
     * range; if it fails, target_sd is left locked — harmless with the
     * linear-mapping lock_user, but worth confirming for debug builds. */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
4308
/* Convert *host_sd back into the guest semid64_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* Writes the embedded ipc_perm via a second lock on the same range;
     * see the matching note in target_to_host_semid_ds(). */
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
4324
/* Guest-layout mirror of the host's struct seminfo (IPC_INFO/SEM_INFO);
 * all fields are plain ints on every target. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4337
4338 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4339 struct seminfo *host_seminfo)
4340 {
4341 struct target_seminfo *target_seminfo;
4342 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4343 return -TARGET_EFAULT;
4344 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4345 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4346 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4347 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4348 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4349 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4350 __put_user(host_seminfo->semume, &target_seminfo->semume);
4351 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4352 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4353 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4354 unlock_user_struct(target_seminfo, target_addr, 1);
4355 return 0;
4356 }
4357
/* glibc does not define union semun; the caller must (semctl(2)). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest view of semun: the pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
4371
4372 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4373 abi_ulong target_addr)
4374 {
4375 int nsems;
4376 unsigned short *array;
4377 union semun semun;
4378 struct semid_ds semid_ds;
4379 int i, ret;
4380
4381 semun.buf = &semid_ds;
4382
4383 ret = semctl(semid, 0, IPC_STAT, semun);
4384 if (ret == -1)
4385 return get_errno(ret);
4386
4387 nsems = semid_ds.sem_nsems;
4388
4389 *host_array = g_try_new(unsigned short, nsems);
4390 if (!*host_array) {
4391 return -TARGET_ENOMEM;
4392 }
4393 array = lock_user(VERIFY_READ, target_addr,
4394 nsems*sizeof(unsigned short), 1);
4395 if (!array) {
4396 g_free(*host_array);
4397 return -TARGET_EFAULT;
4398 }
4399
4400 for(i=0; i<nsems; i++) {
4401 __get_user((*host_array)[i], &array[i]);
4402 }
4403 unlock_user(array, target_addr, 0);
4404
4405 return 0;
4406 }
4407
4408 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4409 unsigned short **host_array)
4410 {
4411 int nsems;
4412 unsigned short *array;
4413 union semun semun;
4414 struct semid_ds semid_ds;
4415 int i, ret;
4416
4417 semun.buf = &semid_ds;
4418
4419 ret = semctl(semid, 0, IPC_STAT, semun);
4420 if (ret == -1)
4421 return get_errno(ret);
4422
4423 nsems = semid_ds.sem_nsems;
4424
4425 array = lock_user(VERIFY_WRITE, target_addr,
4426 nsems*sizeof(unsigned short), 0);
4427 if (!array)
4428 return -TARGET_EFAULT;
4429
4430 for(i=0; i<nsems; i++) {
4431 __put_user((*host_array)[i], &array[i]);
4432 }
4433 g_free(*host_array);
4434 unlock_user(array, target_addr, 1);
4435
4436 return 0;
4437 }
4438
/* Emulate semctl(2).  target_arg is the guest's semun union passed by
 * value in an abi_ulong; which member is live depends on cmd.
 * Returns the host result or a target errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and other flag bits; host semctl gets the bare cmd. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
	 * a swap of the 4 byte val field. In other cases, the data is
	 * already in proper host byte order. */
	if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
	    target_su.buf = tswapal(target_su.buf);
	    arg.val = tswap32(target_su.val);
	} else {
	    arg.val = target_su.val;
	}
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Copy the whole value array host-side, run the call, then copy
         * back and free; host_to_target_semarray() owns the array. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* Written back even when semctl failed, which also frees array. */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip a semid_ds through host representation. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        /* Output-only: kernel fills seminfo, we copy it to the guest. */
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* No argument translation needed for these commands. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
4508
/* Guest-layout sembuf (semop(2) operation descriptor); field sizes match
 * the host struct sembuf, only byte order may differ. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: negative = wait, 0 = wait-for-zero */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO */
};
4514
4515 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4516 abi_ulong target_addr,
4517 unsigned nsops)
4518 {
4519 struct target_sembuf *target_sembuf;
4520 int i;
4521
4522 target_sembuf = lock_user(VERIFY_READ, target_addr,
4523 nsops*sizeof(struct target_sembuf), 1);
4524 if (!target_sembuf)
4525 return -TARGET_EFAULT;
4526
4527 for(i=0; i<nsops; i++) {
4528 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4529 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4530 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4531 }
4532
4533 unlock_user(target_sembuf, target_addr, 0);
4534
4535 return 0;
4536 }
4537
4538 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4539 {
4540 struct sembuf sops[nsops];
4541
4542 if (target_to_host_sembuf(sops, ptr, nsops))
4543 return -TARGET_EFAULT;
4544
4545 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4546 }
4547
/* Guest-layout msqid64_ds (asm-generic); on 32-bit ABIs each time field
 * is followed by a pad word so the layout matches the 64-bit one. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;            /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;            /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;            /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;         /* current bytes on queue */
    abi_ulong msg_qnum;             /* current messages on queue */
    abi_ulong msg_qbytes;           /* max bytes allowed on queue */
    abi_ulong msg_lspid;            /* pid of last msgsnd() */
    abi_ulong msg_lrpid;            /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4571
4572 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4573 abi_ulong target_addr)
4574 {
4575 struct target_msqid_ds *target_md;
4576
4577 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4578 return -TARGET_EFAULT;
4579 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4580 return -TARGET_EFAULT;
4581 host_md->msg_stime = tswapal(target_md->msg_stime);
4582 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4583 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4584 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4585 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4586 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4587 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4588 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4589 unlock_user_struct(target_md, target_addr, 0);
4590 return 0;
4591 }
4592
4593 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4594 struct msqid_ds *host_md)
4595 {
4596 struct target_msqid_ds *target_md;
4597
4598 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4599 return -TARGET_EFAULT;
4600 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4601 return -TARGET_EFAULT;
4602 target_md->msg_stime = tswapal(host_md->msg_stime);
4603 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4604 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4605 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4606 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4607 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4608 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4609 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4610 unlock_user_struct(target_md, target_addr, 1);
4611 return 0;
4612 }
4613
/* Guest-layout mirror of the host struct msginfo (IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4624
4625 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4626 struct msginfo *host_msginfo)
4627 {
4628 struct target_msginfo *target_msginfo;
4629 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4630 return -TARGET_EFAULT;
4631 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4632 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4633 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4634 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4635 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4636 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4637 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4638 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4639 unlock_user_struct(target_msginfo, target_addr, 1);
4640 return 0;
4641 }
4642
/* Emulate msgctl(2); ptr is the guest address of the cmd's argument
 * structure (msqid_ds or msginfo).  Returns host result or target errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other flag bits before calling the host. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip the msqid_ds through host representation. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The host API takes msqid_ds* but actually fills a msginfo. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4674
/* Guest-layout msgbuf header: an abi_long mtype followed by the message
 * text (declared [1]; the real payload length is the msgsz argument). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
4679
/* Emulate msgsnd(2): copy the guest message into a host msgbuf (whose
 * mtype is a host 'long', possibly wider than the guest's abi_long) and
 * send it.  Returns 0 or a target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    /* Lock only the fixed header; mtext is read via the same mapping
     * (assumes a linear guest mapping, like the rest of this file). */
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4706
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtype and
 * the received bytes back to the guest.  Returns the number of bytes
 * received or a target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Take a second WRITE lock covering just the mtext region so the
         * received bytes are marked dirty; the copy itself goes through
         * the already-locked target_mb mapping. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (EFAULT returned early above);
     * the check is belt-and-braces. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4749
4750 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4751 abi_ulong target_addr)
4752 {
4753 struct target_shmid_ds *target_sd;
4754
4755 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4756 return -TARGET_EFAULT;
4757 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4758 return -TARGET_EFAULT;
4759 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4760 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4761 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4762 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4763 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4764 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4765 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4766 unlock_user_struct(target_sd, target_addr, 0);
4767 return 0;
4768 }
4769
4770 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4771 struct shmid_ds *host_sd)
4772 {
4773 struct target_shmid_ds *target_sd;
4774
4775 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4776 return -TARGET_EFAULT;
4777 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4778 return -TARGET_EFAULT;
4779 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4780 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4781 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4782 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4783 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4784 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4785 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4786 unlock_user_struct(target_sd, target_addr, 1);
4787 return 0;
4788 }
4789
/* Guest-layout mirror of the host struct shminfo (IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
4797
4798 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4799 struct shminfo *host_shminfo)
4800 {
4801 struct target_shminfo *target_shminfo;
4802 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4803 return -TARGET_EFAULT;
4804 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4805 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4806 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4807 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4808 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4809 unlock_user_struct(target_shminfo, target_addr, 1);
4810 return 0;
4811 }
4812
/* Guest-layout mirror of the host struct shm_info (SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* number of existing segments */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;    /* unused since Linux 2.4 */
    abi_ulong swap_successes;   /* unused since Linux 2.4 */
};
4821
4822 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4823 struct shm_info *host_shm_info)
4824 {
4825 struct target_shm_info *target_shm_info;
4826 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4827 return -TARGET_EFAULT;
4828 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4829 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4830 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4831 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4832 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4833 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4834 unlock_user_struct(target_shm_info, target_addr, 1);
4835 return 0;
4836 }
4837
/* Emulate shmctl(2); buf is the guest address of the cmd's argument
 * structure.  Returns host result or target errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other flag bits before calling the host. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip the shmid_ds through host representation. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The host API takes shmid_ds* but actually fills a shminfo. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, SHM_INFO fills a shm_info through the same pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument translation needed for these commands. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4876
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for targets that don't override it: one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4896
/* Emulate shmat(2): attach shared memory segment shmid at guest address
 * shmaddr (or pick a free range when 0), update the guest page flags,
 * and record the attachment in shm_regions[] for do_shmdt().
 * Returns the guest attach address or a target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce SHMLBA alignment: round down with SHM_RND, else EINVAL. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address given: find a free guest range ourselves, then
         * force the host mapping there with SHM_REMAP. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attachment so do_shmdt() can clear the flags later.
     * If all N_SHM_REGIONS slots are busy the attach still succeeds but
     * is simply not tracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4965
/* Emulate shmdt(2): detach the segment attached at guest address shmaddr,
 * clearing the page flags recorded by do_shmat().
 * Returns 0 or a target errno. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    mmap_lock();

    /* NOTE(review): the page flags are cleared before shmdt() runs, so if
     * the host call fails the tracked flags are already gone — confirm
     * whether callers ever rely on that case. */
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h(shmaddr)));

    mmap_unlock();

    return rv;
}
4986
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/* Demultiplex the legacy ipc(2) syscall onto the individual SysV IPC
 * helpers.  The high 16 bits of 'call' carry the glibc "version"
 * (affects the msgrcv and shmat argument conventions), the low 16 bits
 * select the operation. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv packs msgp and msgtyp into a kludge
                 * structure that ptr points at. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
5093
5094 /* kernel structure types definitions */
5095
/* First pass over syscall_types.h: generate a STRUCT_<name> enumerator
 * for every structure the thunk layer knows about. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a struct_<name>_def[] argtype description for each
 * non-special structure; SPECIAL ones are hand-converted elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

/* Custom converter hook for ioctls whose arguments the generic thunk
 * machinery can't handle. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;             /* guest ioctl request number */
    unsigned int host_cmd;      /* corresponding host request number */
    const char *name;           /* for diagnostics */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* non-NULL for custom conversion */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed on-stack conversion buffer used by do_ioctl(). */
#define MAX_STRUCT_SIZE 4096
5130
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Custom converter for FS_IOC_FIEMAP: the argument is a struct fiemap
 * immediately followed by fm_extent_count fiemap_extent slots that the
 * kernel fills in, so the generic fixed-size thunk path can't be used. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed fiemap header from guest layout into buf_temp. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the guest-supplied extent count before computing outbufsz. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5219
5220 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5221 int fd, int cmd, abi_long arg)
5222 {
5223 const argtype *arg_type = ie->arg_type;
5224 int target_size;
5225 void *argptr;
5226 int ret;
5227 struct ifconf *host_ifconf;
5228 uint32_t outbufsz;
5229 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5230 int target_ifreq_size;
5231 int nb_ifreq;
5232 int free_buf = 0;
5233 int i;
5234 int target_ifc_len;
5235 abi_long target_ifc_buf;
5236 int host_ifc_len;
5237 char *host_ifc_buf;
5238
5239 assert(arg_type[0] == TYPE_PTR);
5240 assert(ie->access == IOC_RW);
5241
5242 arg_type++;
5243 target_size = thunk_type_size(arg_type, 0);
5244
5245 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5246 if (!argptr)
5247 return -TARGET_EFAULT;
5248 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5249 unlock_user(argptr, arg, 0);
5250
5251 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5252 target_ifc_len = host_ifconf->ifc_len;
5253 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5254
5255 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5256 nb_ifreq = target_ifc_len / target_ifreq_size;
5257 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5258
5259 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5260 if (outbufsz > MAX_STRUCT_SIZE) {
5261 /* We can't fit all the extents into the fixed size buffer.
5262 * Allocate one that is large enough and use it instead.
5263 */
5264 host_ifconf = malloc(outbufsz);
5265 if (!host_ifconf) {
5266 return -TARGET_ENOMEM;
5267 }
5268 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5269 free_buf = 1;
5270 }
5271 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5272
5273 host_ifconf->ifc_len = host_ifc_len;
5274 host_ifconf->ifc_buf = host_ifc_buf;
5275
5276 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5277 if (!is_error(ret)) {
5278 /* convert host ifc_len to target ifc_len */
5279
5280 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5281 target_ifc_len = nb_ifreq * target_ifreq_size;
5282 host_ifconf->ifc_len = target_ifc_len;
5283
5284 /* restore target ifc_buf */
5285
5286 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5287
5288 /* copy struct ifconf to target user */
5289
5290 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5291 if (!argptr)
5292 return -TARGET_EFAULT;
5293 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5294 unlock_user(argptr, arg, target_size);
5295
5296 /* copy ifreq[] to target user */
5297
5298 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5299 for (i = 0; i < nb_ifreq ; i++) {
5300 thunk_convert(argptr + i * target_ifreq_size,
5301 host_ifc_buf + i * sizeof(struct ifreq),
5302 ifreq_arg_type, THUNK_TARGET);
5303 }
5304 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5305 }
5306
5307 if (free_buf) {
5308 free(host_ifconf);
5309 }
5310
5311 return ret;
5312 }
5313
5314 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5315 int cmd, abi_long arg)
5316 {
5317 void *argptr;
5318 struct dm_ioctl *host_dm;
5319 abi_long guest_data;
5320 uint32_t guest_data_size;
5321 int target_size;
5322 const argtype *arg_type = ie->arg_type;
5323 abi_long ret;
5324 void *big_buf = NULL;
5325 char *host_data;
5326
5327 arg_type++;
5328 target_size = thunk_type_size(arg_type, 0);
5329 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5330 if (!argptr) {
5331 ret = -TARGET_EFAULT;
5332 goto out;
5333 }
5334 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5335 unlock_user(argptr, arg, 0);
5336
5337 /* buf_temp is too small, so fetch things into a bigger buffer */
5338 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5339 memcpy(big_buf, buf_temp, target_size);
5340 buf_temp = big_buf;
5341 host_dm = big_buf;
5342
5343 guest_data = arg + host_dm->data_start;
5344 if ((guest_data - arg) < 0) {
5345 ret = -TARGET_EINVAL;
5346 goto out;
5347 }
5348 guest_data_size = host_dm->data_size - host_dm->data_start;
5349 host_data = (char*)host_dm + host_dm->data_start;
5350
5351 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5352 if (!argptr) {
5353 ret = -TARGET_EFAULT;
5354 goto out;
5355 }
5356
5357 switch (ie->host_cmd) {
5358 case DM_REMOVE_ALL:
5359 case DM_LIST_DEVICES:
5360 case DM_DEV_CREATE:
5361 case DM_DEV_REMOVE:
5362 case DM_DEV_SUSPEND:
5363 case DM_DEV_STATUS:
5364 case DM_DEV_WAIT:
5365 case DM_TABLE_STATUS:
5366 case DM_TABLE_CLEAR:
5367 case DM_TABLE_DEPS:
5368 case DM_LIST_VERSIONS:
5369 /* no input data */
5370 break;
5371 case DM_DEV_RENAME:
5372 case DM_DEV_SET_GEOMETRY:
5373 /* data contains only strings */
5374 memcpy(host_data, argptr, guest_data_size);
5375 break;
5376 case DM_TARGET_MSG:
5377 memcpy(host_data, argptr, guest_data_size);
5378 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5379 break;
5380 case DM_TABLE_LOAD:
5381 {
5382 void *gspec = argptr;
5383 void *cur_data = host_data;
5384 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5385 int spec_size = thunk_type_size(arg_type, 0);
5386 int i;
5387
5388 for (i = 0; i < host_dm->target_count; i++) {
5389 struct dm_target_spec *spec = cur_data;
5390 uint32_t next;
5391 int slen;
5392
5393 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5394 slen = strlen((char*)gspec + spec_size) + 1;
5395 next = spec->next;
5396 spec->next = sizeof(*spec) + slen;
5397 strcpy((char*)&spec[1], gspec + spec_size);
5398 gspec += next;
5399 cur_data += spec->next;
5400 }
5401 break;
5402 }
5403 default:
5404 ret = -TARGET_EINVAL;
5405 unlock_user(argptr, guest_data, 0);
5406 goto out;
5407 }
5408 unlock_user(argptr, guest_data, 0);
5409
5410 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5411 if (!is_error(ret)) {
5412 guest_data = arg + host_dm->data_start;
5413 guest_data_size = host_dm->data_size - host_dm->data_start;
5414 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5415 switch (ie->host_cmd) {
5416 case DM_REMOVE_ALL:
5417 case DM_DEV_CREATE:
5418 case DM_DEV_REMOVE:
5419 case DM_DEV_RENAME:
5420 case DM_DEV_SUSPEND:
5421 case DM_DEV_STATUS:
5422 case DM_TABLE_LOAD:
5423 case DM_TABLE_CLEAR:
5424 case DM_TARGET_MSG:
5425 case DM_DEV_SET_GEOMETRY:
5426 /* no return data */
5427 break;
5428 case DM_LIST_DEVICES:
5429 {
5430 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5431 uint32_t remaining_data = guest_data_size;
5432 void *cur_data = argptr;
5433 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5434 int nl_size = 12; /* can't use thunk_size due to alignment */
5435
5436 while (1) {
5437 uint32_t next = nl->next;
5438 if (next) {
5439 nl->next = nl_size + (strlen(nl->name) + 1);
5440 }
5441 if (remaining_data < nl->next) {
5442 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5443 break;
5444 }
5445 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5446 strcpy(cur_data + nl_size, nl->name);
5447 cur_data += nl->next;
5448 remaining_data -= nl->next;
5449 if (!next) {
5450 break;
5451 }
5452 nl = (void*)nl + next;
5453 }
5454 break;
5455 }
5456 case DM_DEV_WAIT:
5457 case DM_TABLE_STATUS:
5458 {
5459 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5460 void *cur_data = argptr;
5461 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5462 int spec_size = thunk_type_size(arg_type, 0);
5463 int i;
5464
5465 for (i = 0; i < host_dm->target_count; i++) {
5466 uint32_t next = spec->next;
5467 int slen = strlen((char*)&spec[1]) + 1;
5468 spec->next = (cur_data - argptr) + spec_size + slen;
5469 if (guest_data_size < spec->next) {
5470 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5471 break;
5472 }
5473 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5474 strcpy(cur_data + spec_size, (char*)&spec[1]);
5475 cur_data = argptr + spec->next;
5476 spec = (void*)host_dm + host_dm->data_start + next;
5477 }
5478 break;
5479 }
5480 case DM_TABLE_DEPS:
5481 {
5482 void *hdata = (void*)host_dm + host_dm->data_start;
5483 int count = *(uint32_t*)hdata;
5484 uint64_t *hdev = hdata + 8;
5485 uint64_t *gdev = argptr + 8;
5486 int i;
5487
5488 *(uint32_t*)argptr = tswap32(count);
5489 for (i = 0; i < count; i++) {
5490 *gdev = tswap64(*hdev);
5491 gdev++;
5492 hdev++;
5493 }
5494 break;
5495 }
5496 case DM_LIST_VERSIONS:
5497 {
5498 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5499 uint32_t remaining_data = guest_data_size;
5500 void *cur_data = argptr;
5501 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5502 int vers_size = thunk_type_size(arg_type, 0);
5503
5504 while (1) {
5505 uint32_t next = vers->next;
5506 if (next) {
5507 vers->next = vers_size + (strlen(vers->name) + 1);
5508 }
5509 if (remaining_data < vers->next) {
5510 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5511 break;
5512 }
5513 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5514 strcpy(cur_data + vers_size, vers->name);
5515 cur_data += vers->next;
5516 remaining_data -= vers->next;
5517 if (!next) {
5518 break;
5519 }
5520 vers = (void*)vers + next;
5521 }
5522 break;
5523 }
5524 default:
5525 unlock_user(argptr, guest_data, 0);
5526 ret = -TARGET_EINVAL;
5527 goto out;
5528 }
5529 unlock_user(argptr, guest_data, guest_data_size);
5530
5531 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5532 if (!argptr) {
5533 ret = -TARGET_EFAULT;
5534 goto out;
5535 }
5536 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5537 unlock_user(argptr, arg, target_size);
5538 }
5539 out:
5540 g_free(big_buf);
5541 return ret;
5542 }
5543
5544 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5545 int cmd, abi_long arg)
5546 {
5547 void *argptr;
5548 int target_size;
5549 const argtype *arg_type = ie->arg_type;
5550 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5551 abi_long ret;
5552
5553 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5554 struct blkpg_partition host_part;
5555
5556 /* Read and convert blkpg */
5557 arg_type++;
5558 target_size = thunk_type_size(arg_type, 0);
5559 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5560 if (!argptr) {
5561 ret = -TARGET_EFAULT;
5562 goto out;
5563 }
5564 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5565 unlock_user(argptr, arg, 0);
5566
5567 switch (host_blkpg->op) {
5568 case BLKPG_ADD_PARTITION:
5569 case BLKPG_DEL_PARTITION:
5570 /* payload is struct blkpg_partition */
5571 break;
5572 default:
5573 /* Unknown opcode */
5574 ret = -TARGET_EINVAL;
5575 goto out;
5576 }
5577
5578 /* Read and convert blkpg->data */
5579 arg = (abi_long)(uintptr_t)host_blkpg->data;
5580 target_size = thunk_type_size(part_arg_type, 0);
5581 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5582 if (!argptr) {
5583 ret = -TARGET_EFAULT;
5584 goto out;
5585 }
5586 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5587 unlock_user(argptr, arg, 0);
5588
5589 /* Swizzle the data pointer to our local copy and call! */
5590 host_blkpg->data = &host_part;
5591 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5592
5593 out:
5594 return ret;
5595 }
5596
5597 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5598 int fd, int cmd, abi_long arg)
5599 {
5600 const argtype *arg_type = ie->arg_type;
5601 const StructEntry *se;
5602 const argtype *field_types;
5603 const int *dst_offsets, *src_offsets;
5604 int target_size;
5605 void *argptr;
5606 abi_ulong *target_rt_dev_ptr;
5607 unsigned long *host_rt_dev_ptr;
5608 abi_long ret;
5609 int i;
5610
5611 assert(ie->access == IOC_W);
5612 assert(*arg_type == TYPE_PTR);
5613 arg_type++;
5614 assert(*arg_type == TYPE_STRUCT);
5615 target_size = thunk_type_size(arg_type, 0);
5616 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5617 if (!argptr) {
5618 return -TARGET_EFAULT;
5619 }
5620 arg_type++;
5621 assert(*arg_type == (int)STRUCT_rtentry);
5622 se = struct_entries + *arg_type++;
5623 assert(se->convert[0] == NULL);
5624 /* convert struct here to be able to catch rt_dev string */
5625 field_types = se->field_types;
5626 dst_offsets = se->field_offsets[THUNK_HOST];
5627 src_offsets = se->field_offsets[THUNK_TARGET];
5628 for (i = 0; i < se->nb_fields; i++) {
5629 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5630 assert(*field_types == TYPE_PTRVOID);
5631 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5632 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5633 if (*target_rt_dev_ptr != 0) {
5634 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5635 tswapal(*target_rt_dev_ptr));
5636 if (!*host_rt_dev_ptr) {
5637 unlock_user(argptr, arg, 0);
5638 return -TARGET_EFAULT;
5639 }
5640 } else {
5641 *host_rt_dev_ptr = 0;
5642 }
5643 field_types++;
5644 continue;
5645 }
5646 field_types = thunk_convert(buf_temp + dst_offsets[i],
5647 argptr + src_offsets[i],
5648 field_types, THUNK_HOST);
5649 }
5650 unlock_user(argptr, arg, 0);
5651
5652 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5653 if (*host_rt_dev_ptr != 0) {
5654 unlock_user((void *)*host_rt_dev_ptr,
5655 *target_rt_dev_ptr, 0);
5656 }
5657 return ret;
5658 }
5659
5660 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5661 int fd, int cmd, abi_long arg)
5662 {
5663 int sig = target_to_host_signal(arg);
5664 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5665 }
5666
#ifdef TIOCGPTPEER
/*
 * TIOCGPTPEER takes open(2)-style flags as its argument; translate the
 * target's flag bits to the host's encoding before issuing the ioctl.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
#endif
5675
/* Table of all supported ioctls, generated from ioctls.h.  Three macro
 * shapes: IOCTL for thunk-converted commands, IOCTL_SPECIAL for commands
 * with a custom do_ioctl_* handler, IOCTL_IGNORE for commands recognised
 * but deliberately not forwarded (host_cmd stays 0).  The all-zero entry
 * terminates the table for the lookup loop in do_ioctl(). */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5686
5687 /* ??? Implement proper locking for ioctls. */
5688 /* do_ioctl() Must return target values and target errnos. */
5689 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5690 {
5691 const IOCTLEntry *ie;
5692 const argtype *arg_type;
5693 abi_long ret;
5694 uint8_t buf_temp[MAX_STRUCT_SIZE];
5695 int target_size;
5696 void *argptr;
5697
5698 ie = ioctl_entries;
5699 for(;;) {
5700 if (ie->target_cmd == 0) {
5701 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5702 return -TARGET_ENOSYS;
5703 }
5704 if (ie->target_cmd == cmd)
5705 break;
5706 ie++;
5707 }
5708 arg_type = ie->arg_type;
5709 #if defined(DEBUG)
5710 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5711 #endif
5712 if (ie->do_ioctl) {
5713 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5714 } else if (!ie->host_cmd) {
5715 /* Some architectures define BSD ioctls in their headers
5716 that are not implemented in Linux. */
5717 return -TARGET_ENOSYS;
5718 }
5719
5720 switch(arg_type[0]) {
5721 case TYPE_NULL:
5722 /* no argument */
5723 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5724 break;
5725 case TYPE_PTRVOID:
5726 case TYPE_INT:
5727 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5728 break;
5729 case TYPE_PTR:
5730 arg_type++;
5731 target_size = thunk_type_size(arg_type, 0);
5732 switch(ie->access) {
5733 case IOC_R:
5734 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5735 if (!is_error(ret)) {
5736 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5737 if (!argptr)
5738 return -TARGET_EFAULT;
5739 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5740 unlock_user(argptr, arg, target_size);
5741 }
5742 break;
5743 case IOC_W:
5744 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5745 if (!argptr)
5746 return -TARGET_EFAULT;
5747 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5748 unlock_user(argptr, arg, 0);
5749 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5750 break;
5751 default:
5752 case IOC_RW:
5753 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5754 if (!argptr)
5755 return -TARGET_EFAULT;
5756 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5757 unlock_user(argptr, arg, 0);
5758 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5759 if (!is_error(ret)) {
5760 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5761 if (!argptr)
5762 return -TARGET_EFAULT;
5763 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5764 unlock_user(argptr, arg, target_size);
5765 }
5766 break;
5767 }
5768 break;
5769 default:
5770 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5771 (long)cmd, arg_type[0]);
5772 ret = -TARGET_ENOSYS;
5773 break;
5774 }
5775 return ret;
5776 }
5777
/* termios c_iflag (input mode) bit translation, target <-> host.
 * Each row: target mask, target bits, host mask, host bits. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
5795
/* termios c_oflag (output mode) bit translation, target <-> host.
 * Multi-bit delay fields (NLDLY, CRDLY, ...) get one row per value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
5823
/* termios c_cflag (control mode) bit translation, target <-> host.
 * CBAUD and CSIZE are multi-bit fields, hence one row per speed/size. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
5858
/* termios c_lflag (local mode) bit translation, target <-> host. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
5877
5878 static void target_to_host_termios (void *dst, const void *src)
5879 {
5880 struct host_termios *host = dst;
5881 const struct target_termios *target = src;
5882
5883 host->c_iflag =
5884 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5885 host->c_oflag =
5886 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5887 host->c_cflag =
5888 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5889 host->c_lflag =
5890 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5891 host->c_line = target->c_line;
5892
5893 memset(host->c_cc, 0, sizeof(host->c_cc));
5894 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5895 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5896 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5897 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5898 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5899 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5900 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5901 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5902 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5903 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5904 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5905 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5906 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5907 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5908 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5909 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5910 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5911 }
5912
5913 static void host_to_target_termios (void *dst, const void *src)
5914 {
5915 struct target_termios *target = dst;
5916 const struct host_termios *host = src;
5917
5918 target->c_iflag =
5919 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5920 target->c_oflag =
5921 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5922 target->c_cflag =
5923 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5924 target->c_lflag =
5925 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5926 target->c_line = host->c_line;
5927
5928 memset(target->c_cc, 0, sizeof(target->c_cc));
5929 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5930 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5931 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5932 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5933 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5934 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5935 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5936 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5937 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5938 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5939 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5940 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5941 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5942 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5943 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5944 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5945 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5946 }
5947
/* Thunk descriptor for struct termios: conversion is done by the two
 * hand-written functions above rather than field-by-field thunking.
 * convert[0] is host->target, convert[1] is target->host. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5953
5954 static bitmask_transtbl mmap_flags_tbl[] = {
5955 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5956 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5957 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5958 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5959 MAP_ANONYMOUS, MAP_ANONYMOUS },
5960 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5961 MAP_GROWSDOWN, MAP_GROWSDOWN },
5962 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5963 MAP_DENYWRITE, MAP_DENYWRITE },
5964 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5965 MAP_EXECUTABLE, MAP_EXECUTABLE },
5966 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5967 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5968 MAP_NORESERVE, MAP_NORESERVE },
5969 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5970 /* MAP_STACK had been ignored by the kernel for quite some time.
5971 Recognize it for the target insofar as we do not want to pass
5972 it through to the host. */
5973 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5974 { 0, 0, 0, 0 }
5975 };
5976
5977 #if defined(TARGET_I386)
5978
5979 /* NOTE: there is really one LDT for all the threads */
5980 static uint8_t *ldt_table;
5981
5982 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5983 {
5984 int size;
5985 void *p;
5986
5987 if (!ldt_table)
5988 return 0;
5989 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5990 if (size > bytecount)
5991 size = bytecount;
5992 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5993 if (!p)
5994 return -TARGET_EFAULT;
5995 /* ??? Should this by byteswapped? */
5996 memcpy(p, ldt_table, size);
5997 unlock_user(p, ptr, size);
5998 return size;
5999 }
6000
/* XXX: add locking support */
/*
 * modify_ldt(func=1 or 0x11): install one LDT descriptor from the guest.
 * Mirrors the Linux kernel's write_ldt(): validates the guest's
 * target_modify_ldt_ldt_s, lazily allocates the LDT backing store on
 * first use, packs the two 32-bit descriptor words and stores them into
 * the emulated LDT.  'oldmode' selects the legacy func==1 semantics
 * (no 'useable' bit, no code-conforming segments).
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* byteswap the descriptor request into host order */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the flags word (same bit layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* conforming code segments (contents == 3) are rejected in old mode
       and must be marked not-present, as in the kernel */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit words of an x86 segment descriptor */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6092
6093 /* specific and weird i386 syscalls */
6094 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6095 unsigned long bytecount)
6096 {
6097 abi_long ret;
6098
6099 switch (func) {
6100 case 0:
6101 ret = read_ldt(ptr, bytecount);
6102 break;
6103 case 1:
6104 ret = write_ldt(env, ptr, bytecount, 1);
6105 break;
6106 case 0x11:
6107 ret = write_ldt(env, ptr, bytecount, 0);
6108 break;
6109 default:
6110 ret = -TARGET_ENOSYS;
6111 break;
6112 }
6113 return ret;
6114 }
6115
6116 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * Mirrors the kernel's set_thread_area(): an entry_number of -1 asks us
 * to pick a free TLS slot and report it back to the guest through the
 * user_desc structure.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* byteswap the descriptor request into host order */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "pick a free TLS slot for me and write it back" */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* only the TLS range of the GDT may be written by the guest */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* unpack the flags word (same bit layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* conforming code segments must be marked not-present */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit words of an x86 segment descriptor */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6201
/*
 * get_thread_area(2): read a TLS descriptor back out of the emulated
 * GDT, unpacking the two 32-bit descriptor words into the user_desc
 * fields (inverse of the packing in do_set_thread_area).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* only the TLS range of the GDT may be queried */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);
    
    /* unpack the descriptor bits (inverse of the packing above) */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) | 
        (entry_2 & 0xff000000) | 
        ((entry_2 & 0xff) << 16);
    /* write the reconstructed descriptor back to the guest */
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6248 #endif /* TARGET_I386 && TARGET_ABI32 */
6249
6250 #ifndef TARGET_ABI32
6251 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6252 {
6253 abi_long ret = 0;
6254 abi_ulong val;
6255 int idx;
6256
6257 switch(code) {
6258 case TARGET_ARCH_SET_GS:
6259 case TARGET_ARCH_SET_FS:
6260 if (code == TARGET_ARCH_SET_GS)
6261 idx = R_GS;
6262 else
6263 idx = R_FS;
6264 cpu_x86_load_seg(env, idx, 0);
6265 env->segs[idx].base = addr;
6266 break;
6267 case TARGET_ARCH_GET_GS:
6268 case TARGET_ARCH_GET_FS:
6269 if (code == TARGET_ARCH_GET_GS)
6270 idx = R_GS;
6271 else
6272 idx = R_FS;
6273 val = env->segs[idx].base;
6274 if (put_user(val, addr, abi_ulong))
6275 ret = -TARGET_EFAULT;
6276 break;
6277 default:
6278 ret = -TARGET_EINVAL;
6279 break;
6280 }
6281 return ret;
6282 }
6283 #endif
6284
6285 #endif /* defined(TARGET_I386) */
6286
6287 #define NEW_STACK_SIZE 0x40000
6288
6289
/* Serializes thread creation so that CPU/TLS setup appears atomic; the
 * child also briefly takes it to wait for the parent to finish (see
 * clone_func/do_fork).
 */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent->child handoff data for do_fork(CLONE_VM); lives on the parent's
 * stack for the duration of the handshake.
 */
typedef struct {
    CPUArchState *env;        /* the child's freshly copied CPU state */
    pthread_mutex_t mutex;    /* protects the ready-handshake below */
    pthread_cond_t cond;      /* child signals this once initialized */
    pthread_t thread;
    uint32_t tid;             /* filled in by the child (gettid()) */
    abi_ulong child_tidptr;   /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;  /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;         /* signal mask to restore in the child */
} new_thread_info;
6301
/* Entry point of a new guest thread created by do_fork(CLONE_VM).
 * Registers the thread with RCU/TCG, publishes its TID, then hands
 * control to cpu_loop(); it never returns normally.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  NOTE(review): *info lives on
     * the parent's stack, so it must not be touched after this handshake
     * completes and the parent resumes. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6334
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   CLONE_VM requests are emulated as a host pthread sharing this process;
   everything else is emulated with a plain fork().  The clone_lock /
   info.mutex handshake keeps thread setup atomic with respect to the
   child starting to run. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Threads must carry the full set of sharing flags and none of
         * the flags we cannot emulate. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the return values of the three pthread_attr_*
         * calls below are assigned but never checked before being
         * overwritten by pthread_create()'s result. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        /* Restore our own signal mask; the child restores its copy itself. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed into cpu_loop(). */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
6474
6475 /* warning : doesn't handle linux specific flags... */
6476 static int target_to_host_fcntl_cmd(int cmd)
6477 {
6478 switch(cmd) {
6479 case TARGET_F_DUPFD:
6480 case TARGET_F_GETFD:
6481 case TARGET_F_SETFD:
6482 case TARGET_F_GETFL:
6483 case TARGET_F_SETFL:
6484 return cmd;
6485 case TARGET_F_GETLK:
6486 return F_GETLK64;
6487 case TARGET_F_SETLK:
6488 return F_SETLK64;
6489 case TARGET_F_SETLKW:
6490 return F_SETLKW64;
6491 case TARGET_F_GETOWN:
6492 return F_GETOWN;
6493 case TARGET_F_SETOWN:
6494 return F_SETOWN;
6495 case TARGET_F_GETSIG:
6496 return F_GETSIG;
6497 case TARGET_F_SETSIG:
6498 return F_SETSIG;
6499 #if TARGET_ABI_BITS == 32
6500 case TARGET_F_GETLK64:
6501 return F_GETLK64;
6502 case TARGET_F_SETLK64:
6503 return F_SETLK64;
6504 case TARGET_F_SETLKW64:
6505 return F_SETLKW64;
6506 #endif
6507 case TARGET_F_SETLEASE:
6508 return F_SETLEASE;
6509 case TARGET_F_GETLEASE:
6510 return F_GETLEASE;
6511 #ifdef F_DUPFD_CLOEXEC
6512 case TARGET_F_DUPFD_CLOEXEC:
6513 return F_DUPFD_CLOEXEC;
6514 #endif
6515 case TARGET_F_NOTIFY:
6516 return F_NOTIFY;
6517 #ifdef F_GETOWN_EX
6518 case TARGET_F_GETOWN_EX:
6519 return F_GETOWN_EX;
6520 #endif
6521 #ifdef F_SETOWN_EX
6522 case TARGET_F_SETOWN_EX:
6523 return F_SETOWN_EX;
6524 #endif
6525 #ifdef F_SETPIPE_SZ
6526 case TARGET_F_SETPIPE_SZ:
6527 return F_SETPIPE_SZ;
6528 case TARGET_F_GETPIPE_SZ:
6529 return F_GETPIPE_SZ;
6530 #endif
6531 default:
6532 return -TARGET_EINVAL;
6533 }
6534 return -TARGET_EINVAL;
6535 }
6536
/* Build a bitmask_transtbl row mapping a TARGET_* lock-type constant to
 * its host counterpart; the -1 masks mean "compare the whole value".
 */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Guest <-> host translation table for flock l_type values. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
6546
/* Read a guest struct flock and convert it into a host struct flock64,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    /* Read-only lock, so nothing is copied back (copy flag 0). */
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
6566
/* Convert a host struct flock64 back into a guest struct flock at
 * target_flock_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    /* Write lock: copy flag 1 flushes the converted struct to the guest. */
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6586
/* Signatures for the flock64 conversion helpers below; presumably used by
 * callers to select the EABI vs generic variant at runtime — see the fcntl64
 * handling elsewhere in this file.
 */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6589
6590 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM-EABI variant: read a guest struct eabi_flock64 (which has different
 * alignment/padding than the generic layout) into a host struct flock64.
 */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    /* Read-only lock: do not copy back. */
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
6610
/* ARM-EABI variant: write a host struct flock64 back out as a guest
 * struct eabi_flock64.
 */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    /* Write lock: copy the converted struct back to guest memory. */
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6630 #endif
6631
/* Read a guest struct flock64 into the host representation, translating
 * l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    /* Read-only lock: do not copy back. */
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
6651
/* Write a host struct flock64 back out as a guest struct flock64.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    /* Write lock: copy the converted struct back to guest memory. */
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6671
/* Emulate fcntl(2): translate the command and its argument (lock structs,
 * flag bitmasks, f_owner_ex) between guest and host, invoke the host
 * fcntl, and convert results back.  Returns the host result or a negative
 * target errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Round-trip the lock description: guest -> host, query, host -> guest. */
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* File status flags differ between guest and host ABIs. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose integer argument needs no translation. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Pass unknown commands straight through with the guest value. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6779
6780 #ifdef USE_UID16
6781
/* Narrow a 32-bit uid to the 16-bit ABI: values that do not fit map to
 * the overflow uid 65534.
 */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
6789
/* Narrow a 32-bit gid to the 16-bit ABI: values that do not fit map to
 * the overflow gid 65534.
 */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
6797
/* Widen a 16-bit uid: 0xffff is the 16-bit "-1" sentinel (meaning "no
 * change" to set*uid calls) and must widen to the 32-bit -1 sentinel,
 * not to uid 65535.
 */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
6805
/* Widen a 16-bit gid, preserving the -1 ("no change") sentinel. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a uid/gid between guest and host order; ids are 16 bits wide
 * on USE_UID16 targets.
 */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6817
6818 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6819
6820 #else /* !USE_UID16 */
/* On targets with native 32-bit uids/gids the high/low conversions are
 * identity maps.
 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* ids are 32 bits wide here, so swap as a 32-bit quantity. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
6841
6842 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6843
6844 #endif /* USE_UID16 */
6845
6846 /* We must do direct syscalls for setting UID/GID, because we want to
6847 * implement the Linux system call semantics of "change only for this thread",
6848 * not the libc/POSIX semantics of "change for all threads in process".
6849 * (See http://ewontfix.com/17/ for more details.)
6850 * We use the 32-bit version of the syscalls if present; if it is not
6851 * then either the host architecture supports 32-bit UIDs natively with
6852 * the standard syscall, or the 16-bit UID is the best we can do.
6853 */
6854 #ifdef __NR_setuid32
6855 #define __NR_sys_setuid __NR_setuid32
6856 #else
6857 #define __NR_sys_setuid __NR_setuid
6858 #endif
6859 #ifdef __NR_setgid32
6860 #define __NR_sys_setgid __NR_setgid32
6861 #else
6862 #define __NR_sys_setgid __NR_setgid
6863 #endif
6864 #ifdef __NR_setresuid32
6865 #define __NR_sys_setresuid __NR_setresuid32
6866 #else
6867 #define __NR_sys_setresuid __NR_setresuid
6868 #endif
6869 #ifdef __NR_setresgid32
6870 #define __NR_sys_setresgid __NR_setresgid32
6871 #else
6872 #define __NR_sys_setresgid __NR_setresgid
6873 #endif
6874
/* Direct syscall wrappers so uid/gid changes affect only the calling
 * thread (see the comment block above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6879
/* One-time initialization of the syscall layer: registers thunk struct
 * descriptions, builds the reverse errno table, and patches the size
 * field of ioctl numbers whose size depends on the target ABI.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* An all-ones size field marks entries whose size must be computed
         * from the thunk type description at runtime. */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6931
6932 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset that a 32-bit guest ABI passed as a pair of
 * registers; which word holds the high half depends on target endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const uint64_t high = word0, low = word1;
#else
    const uint64_t high = word1, low = word0;
#endif
    return (high << 32) | low;
}
6941 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused and
 * only present to keep the call sites identical across ABIs. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
6946 #endif /* TARGET_ABI_BITS != 32 */
6947
6948 #ifdef TARGET_NR_truncate64
/* Emulate truncate64(2) for 32-bit guests: some ABIs require the 64-bit
 * length register pair to be aligned, inserting a padding argument, in
 * which case the real pair is in (arg3, arg4).
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
6960 #endif
6961
6962 #ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64(2); same register-pair alignment handling as
 * target_truncate64() above.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
6974 #endif
6975
/* Read a guest struct timespec into the host representation.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
6988
/* Write a host struct timespec out to guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
7001
7002 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
7003 abi_ulong target_addr)
7004 {
7005 struct target_itimerspec *target_itspec;
7006
7007 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
7008 return -TARGET_EFAULT;
7009 }
7010
7011 host_itspec->it_interval.tv_sec =
7012 tswapal(target_itspec->it_interval.tv_sec);
7013 host_itspec->it_interval.tv_nsec =
7014 tswapal(target_itspec->it_interval.tv_nsec);
7015 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
7016 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
7017
7018 unlock_user_struct(target_itspec, target_addr, 1);
7019 return 0;
7020 }
7021
7022 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7023 struct itimerspec *host_its)
7024 {
7025 struct target_itimerspec *target_itspec;
7026
7027 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
7028 return -TARGET_EFAULT;
7029 }
7030
7031 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
7032 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
7033
7034 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
7035 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
7036
7037 unlock_user_struct(target_itspec, target_addr, 0);
7038 return 0;
7039 }
7040
/* Read a guest struct timex (adjtimex/clock_adjtime argument) into the
 * host representation, field by field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7075
/* Write a host struct timex back out to guest memory, field by field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7110
7111
7112 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7113 abi_ulong target_addr)
7114 {
7115 struct target_sigevent *target_sevp;
7116
7117 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7118 return -TARGET_EFAULT;
7119 }
7120
7121 /* This union is awkward on 64 bit systems because it has a 32 bit
7122 * integer and a pointer in it; we follow the conversion approach
7123 * used for handling sigval types in signal.c so the guest should get
7124 * the correct value back even if we did a 64 bit byteswap and it's
7125 * using the 32 bit integer.
7126 */
7127 host_sevp->sigev_value.sival_ptr =
7128 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7129 host_sevp->sigev_signo =
7130 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7131 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7132 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7133
7134 unlock_user_struct(target_sevp, target_addr, 1);
7135 return 0;
7136 }
7137
7138 #if defined(TARGET_NR_mlockall)
7139 static inline int target_to_host_mlockall_arg(int arg)
7140 {
7141 int result = 0;
7142
7143 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7144 result |= MCL_CURRENT;
7145 }
7146 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7147 result |= MCL_FUTURE;
7148 }
7149 return result;
7150 }
7151 #endif
7152
/* Convert a host struct stat into the guest's stat64 layout at
 * target_addr.  ARM EABI guests use a layout with different padding, so
 * they get a dedicated branch.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unconverted fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7215
7216 /* ??? Using host futex calls even when target atomic operations
7217 are not really atomic probably breaks things. However implementing
7218 futexes locally would make futexes shared between multiple processes
7219 tricky. However they're probably useless because guest atomic
7220 operations won't work either. */
7221 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7222 target_ulong uaddr2, int val3)
7223 {
7224 struct timespec ts, *pts;
7225 int base_op;
7226
7227 /* ??? We assume FUTEX_* constants are the same on both host
7228 and target. */
7229 #ifdef FUTEX_CMD_MASK
7230 base_op = op & FUTEX_CMD_MASK;
7231 #else
7232 base_op = op;
7233 #endif
7234 switch (base_op) {
7235 case FUTEX_WAIT:
7236 case FUTEX_WAIT_BITSET:
7237 if (timeout) {
7238 pts = &ts;
7239 target_to_host_timespec(pts, timeout);
7240 } else {
7241 pts = NULL;
7242 }
7243 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7244 pts, NULL, val3));
7245 case FUTEX_WAKE:
7246 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7247 case FUTEX_FD:
7248 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7249 case FUTEX_REQUEUE:
7250 case FUTEX_CMP_REQUEUE:
7251 case FUTEX_WAKE_OP:
7252 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7253 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7254 But the prototype takes a `struct timespec *'; insert casts
7255 to satisfy the compiler. We do not need to tswap TIMEOUT
7256 since it's not compared to guest memory. */
7257 pts = (struct timespec *)(uintptr_t) timeout;
7258 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7259 g2h(uaddr2),
7260 (base_op == FUTEX_CMP_REQUEUE
7261 ? tswap32(val3)
7262 : val3)));
7263 default:
7264 return -TARGET_ENOSYS;
7265 }
7266 }
7267 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): run the host syscall into a scratch
 * file_handle sized from the guest's handle_bytes, then copy the result
 * (and the mount id) back to guest memory.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest's struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): 'size' is guest-controlled; sizeof + size could wrap
     * for absurd values — TODO confirm whether a sanity bound is needed. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    /* Only the two header fields need byte-swapping; the handle body is
     * opaque data. */
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7319 #endif
7320
7321 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): open the file identified by the
 * guest-provided struct file_handle at HANDLE, relative to MOUNT_FD.
 * FLAGS are target open(2) flags and are translated to host flags.
 * Returns the new host fd or a -TARGET_* error.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest structure and gives
     * the length of the trailing f_handle[] data. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): as in do_name_to_handle_at, size is guest-controlled
     * and the addition below is unchecked — relies on lock_user()
     * rejecting an invalid range. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Work on a host copy: the handle payload is opaque and copied
     * verbatim; only the two header fields need byte-swapping. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7353 #endif
7354
7355 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7356
7357 /* signalfd siginfo conversion */
7358
/* Convert one signalfd_siginfo record from host to guest byte order.
 * NOTE: the only visible caller (host_to_target_data_signalfd) passes
 * the same buffer as both TINFO and INFO, so the two pointers alias;
 * reads through tinfo below therefore still see host-order values
 * until the corresponding store happens.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Memory-failure SIGBUS carries an extra 16-bit "address LSB"
     * field immediately after ssi_addr; swap it as well. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* NOTE(review): reads tinfo->ssi_errno instead of info->ssi_errno;
     * correct only because tinfo and info alias — confirm before
     * calling this with distinct buffers. */
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7396
7397 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7398 {
7399 int i;
7400
7401 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7402 host_to_target_signalfd_siginfo(buf + i, buf + i);
7403 }
7404
7405 return len;
7406 }
7407
/* fd translator attached to signalfds: data read from the host fd is
 * byte-swapped for the guest; writes need no conversion. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
7411
7412 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7413 {
7414 int host_flags;
7415 target_sigset_t *target_mask;
7416 sigset_t host_mask;
7417 abi_long ret;
7418
7419 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7420 return -TARGET_EINVAL;
7421 }
7422 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7423 return -TARGET_EFAULT;
7424 }
7425
7426 target_to_host_sigset(&host_mask, target_mask);
7427
7428 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7429
7430 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7431 if (ret >= 0) {
7432 fd_trans_register(ret, &target_signalfd_trans);
7433 }
7434
7435 unlock_user_struct(target_mask, mask, 0);
7436
7437 return ret;
7438 }
7439 #endif
7440
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; translate it and
         * keep the remaining bits (e.g. the core-dump flag) as-is. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; the low byte is 0x7f. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* Normal exit: no signal number embedded, nothing to translate. */
    return status;
}
7454
7455 static int open_self_cmdline(void *cpu_env, int fd)
7456 {
7457 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7458 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7459 int i;
7460
7461 for (i = 0; i < bprm->argc; i++) {
7462 size_t len = strlen(bprm->argv[i]) + 1;
7463
7464 if (write(fd, bprm->argv[i], len) != len) {
7465 return -1;
7466 }
7467 }
7468
7469 return 0;
7470 }
7471
7472 static int open_self_maps(void *cpu_env, int fd)
7473 {
7474 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7475 TaskState *ts = cpu->opaque;
7476 FILE *fp;
7477 char *line = NULL;
7478 size_t len = 0;
7479 ssize_t read;
7480
7481 fp = fopen("/proc/self/maps", "r");
7482 if (fp == NULL) {
7483 return -1;
7484 }
7485
7486 while ((read = getline(&line, &len, fp)) != -1) {
7487 int fields, dev_maj, dev_min, inode;
7488 uint64_t min, max, offset;
7489 char flag_r, flag_w, flag_x, flag_p;
7490 char path[512] = "";
7491 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7492 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7493 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7494
7495 if ((fields < 10) || (fields > 11)) {
7496 continue;
7497 }
7498 if (h2g_valid(min)) {
7499 int flags = page_get_flags(h2g(min));
7500 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7501 if (page_check_range(h2g(min), max - min, flags) == -1) {
7502 continue;
7503 }
7504 if (h2g(min) == ts->info->stack_limit) {
7505 pstrcpy(path, sizeof(path), " [stack]");
7506 }
7507 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7508 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7509 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7510 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7511 path[0] ? " " : "", path);
7512 }
7513 }
7514
7515 free(line);
7516 fclose(fp);
7517
7518 return 0;
7519 }
7520
/* Emulate /proc/self/stat: emit the 44 space-separated fields of the
 * kernel stat line, but only fill in the three fields guest code most
 * commonly reads (pid, comm, startstack); every other field is "0".
 * Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* field 1: pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* field 2: comm — the executable name, in parentheses */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* field 28: startstack — bottom of the guest stack */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* all remaining fields are faked as 0; the 44th field ends
             * the line with '\n' instead of a space */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
7557
7558 static int open_self_auxv(void *cpu_env, int fd)
7559 {
7560 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7561 TaskState *ts = cpu->opaque;
7562 abi_ulong auxv = ts->info->saved_auxv;
7563 abi_ulong len = ts->info->auxv_len;
7564 char *ptr;
7565
7566 /*
7567 * Auxiliary vector is stored in target process stack.
7568 * read in whole auxv vector and copy it to file
7569 */
7570 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7571 if (ptr != NULL) {
7572 while (len > 0) {
7573 ssize_t r;
7574 r = write(fd, ptr, len);
7575 if (r <= 0) {
7576 break;
7577 }
7578 len -= r;
7579 ptr += r;
7580 }
7581 lseek(fd, 0, SEEK_SET);
7582 unlock_user(ptr, auxv, len);
7583 }
7584
7585 return 0;
7586 }
7587
/* Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory — either "/proc/self/<entry>" or "/proc/<ourpid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    const char *rest;

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    rest = filename + strlen(proc_prefix);

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory only counts when it is our own pid. */
        char pid_dir[80];
        snprintf(pid_dir, sizeof(pid_dir), "%d/", getpid());
        if (strncmp(rest, pid_dir, strlen(pid_dir)) != 0) {
            return 0;
        }
        rest += strlen(pid_dir);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7611
7612 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-/proc table for entries that
 * are keyed on the full pathname rather than a /proc/self suffix. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7617
7618 static int open_net_route(void *cpu_env, int fd)
7619 {
7620 FILE *fp;
7621 char *line = NULL;
7622 size_t len = 0;
7623 ssize_t read;
7624
7625 fp = fopen("/proc/net/route", "r");
7626 if (fp == NULL) {
7627 return -1;
7628 }
7629
7630 /* read header */
7631
7632 read = getline(&line, &len, fp);
7633 dprintf(fd, "%s", line);
7634
7635 /* read routes */
7636
7637 while ((read = getline(&line, &len, fp)) != -1) {
7638 char iface[16];
7639 uint32_t dest, gw, mask;
7640 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7641 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7642 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7643 &mask, &mtu, &window, &irtt);
7644 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7645 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7646 metric, tswap32(mask), mtu, window, irtt);
7647 }
7648
7649 free(line);
7650 fclose(fp);
7651
7652 return 0;
7653 }
7654 #endif
7655
/* openat(2) front-end: intercept paths the guest must not see through
 * to the host — this process's own /proc entries, whose contents must
 * reflect the *guest* (pid, maps, auxv, ...) — and serve them from an
 * unlinked temporary file filled by the matching fake_open handler.
 * Everything else is passed to the host via safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                    /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd);      /* writes fake contents into fd */
        int (*cmp)(const char *s1, const char *s2); /* how to match pathname */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not to QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the fd stays valid, nothing is left on disk */
        unlink(filename);

        /* NOTE(review): on failure this returns fill()'s raw result
         * (-1), not a -TARGET_* errno like the other paths — confirm
         * callers handle that. */
        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7715
7716 #define TIMER_MAGIC 0x0caf0000
7717 #define TIMER_MAGIC_MASK 0xffff0000
7718
/* Convert QEMU provided timer ID back to internal 16bit index format.
 * Guest-visible timer IDs are encoded as TIMER_MAGIC in the upper 16
 * bits plus the g_posix_timers[] index in the lower 16 bits; reject
 * anything that does not carry the magic or indexes past the table.
 * Returns the index, or -TARGET_EINVAL for a malformed ID.
 */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* strip the magic, leaving the raw table index */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7736
7737 static abi_long swap_data_eventfd(void *buf, size_t len)
7738 {
7739 uint64_t *counter = buf;
7740 int i;
7741
7742 if (len < sizeof(uint64_t)) {
7743 return -EINVAL;
7744 }
7745
7746 for (i = 0; i < len; i += sizeof(uint64_t)) {
7747 *counter = tswap64(*counter);
7748 counter++;
7749 }
7750
7751 return len;
7752 }
7753
/* fd translator attached to eventfds: the 8-byte counter is swapped in
 * both directions (the swap is its own inverse). */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7758
7759 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7760 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7761 defined(__NR_inotify_init1))
/* Byte-swap a buffer of inotify_event records in place for the guest.
 * Each record is a fixed header followed by ev->len bytes of filename,
 * which need no swapping.  Returns LEN unchanged.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    /* name_len is set inside the body before the loop's increment
     * expression first uses it, so each step advances past the header
     * plus that record's (host-order) name length. */
    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        /* capture the host-order length before it is swapped below */
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}
7780
/* fd translator attached to inotify fds: events read from the host fd
 * are byte-swapped for the guest; writes need no conversion. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7784 #endif
7785
/* Repack a guest CPU-affinity bitmask (array of abi_ulong words at
 * TARGET_ADDR, TARGET_SIZE bytes) into the host bitmask HOST_MASK
 * (HOST_SIZE bytes), bit by bit, since the word sizes may differ.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest pointer.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Callers must size the host buffer to hold every target bit. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Start from all-clear: bits beyond target_size stay 0. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* absolute bit number of the first bit in this target word */
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
7819
/* Inverse of target_to_host_cpu_mask: repack the host CPU-affinity
 * bitmask HOST_MASK (HOST_SIZE bytes) into the guest's abi_ulong-word
 * bitmask at TARGET_ADDR (TARGET_SIZE bytes), bit by bit.  Host bits
 * beyond the guest buffer are silently dropped.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest pointer.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* absolute bit number of the first bit in this target word */
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7852
7853 /* do_syscall() should always have a single exit point at the end so
7854 that actions, such as logging of syscall results, can be performed.
7855 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7856 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7857 abi_long arg2, abi_long arg3, abi_long arg4,
7858 abi_long arg5, abi_long arg6, abi_long arg7,
7859 abi_long arg8)
7860 {
7861 CPUState *cpu = ENV_GET_CPU(cpu_env);
7862 abi_long ret;
7863 struct stat st;
7864 struct statfs stfs;
7865 void *p;
7866
7867 #if defined(DEBUG_ERESTARTSYS)
7868 /* Debug-only code for exercising the syscall-restart code paths
7869 * in the per-architecture cpu main loops: restart every syscall
7870 * the guest makes once before letting it through.
7871 */
7872 {
7873 static int flag;
7874
7875 flag = !flag;
7876 if (flag) {
7877 return -TARGET_ERESTARTSYS;
7878 }
7879 }
7880 #endif
7881
7882 #ifdef DEBUG
7883 gemu_log("syscall %d", num);
7884 #endif
7885 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7886 if(do_strace)
7887 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7888
7889 switch(num) {
7890 case TARGET_NR_exit:
7891 /* In old applications this may be used to implement _exit(2).
7892 However in threaded applictions it is used for thread termination,
7893 and _exit_group is used for application termination.
7894 Do thread termination if we have more then one thread. */
7895
7896 if (block_signals()) {
7897 ret = -TARGET_ERESTARTSYS;
7898 break;
7899 }
7900
7901 cpu_list_lock();
7902
7903 if (CPU_NEXT(first_cpu)) {
7904 TaskState *ts;
7905
7906 /* Remove the CPU from the list. */
7907 QTAILQ_REMOVE(&cpus, cpu, node);
7908
7909 cpu_list_unlock();
7910
7911 ts = cpu->opaque;
7912 if (ts->child_tidptr) {
7913 put_user_u32(0, ts->child_tidptr);
7914 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7915 NULL, NULL, 0);
7916 }
7917 thread_cpu = NULL;
7918 object_unref(OBJECT(cpu));
7919 g_free(ts);
7920 rcu_unregister_thread();
7921 pthread_exit(NULL);
7922 }
7923
7924 cpu_list_unlock();
7925 #ifdef TARGET_GPROF
7926 _mcleanup();
7927 #endif
7928 gdb_exit(cpu_env, arg1);
7929 _exit(arg1);
7930 ret = 0; /* avoid warning */
7931 break;
7932 case TARGET_NR_read:
7933 if (arg3 == 0)
7934 ret = 0;
7935 else {
7936 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7937 goto efault;
7938 ret = get_errno(safe_read(arg1, p, arg3));
7939 if (ret >= 0 &&
7940 fd_trans_host_to_target_data(arg1)) {
7941 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7942 }
7943 unlock_user(p, arg2, ret);
7944 }
7945 break;
7946 case TARGET_NR_write:
7947 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7948 goto efault;
7949 if (fd_trans_target_to_host_data(arg1)) {
7950 void *copy = g_malloc(arg3);
7951 memcpy(copy, p, arg3);
7952 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7953 if (ret >= 0) {
7954 ret = get_errno(safe_write(arg1, copy, ret));
7955 }
7956 g_free(copy);
7957 } else {
7958 ret = get_errno(safe_write(arg1, p, arg3));
7959 }
7960 unlock_user(p, arg2, 0);
7961 break;
7962 #ifdef TARGET_NR_open
7963 case TARGET_NR_open:
7964 if (!(p = lock_user_string(arg1)))
7965 goto efault;
7966 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7967 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7968 arg3));
7969 fd_trans_unregister(ret);
7970 unlock_user(p, arg1, 0);
7971 break;
7972 #endif
7973 case TARGET_NR_openat:
7974 if (!(p = lock_user_string(arg2)))
7975 goto efault;
7976 ret = get_errno(do_openat(cpu_env, arg1, p,
7977 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7978 arg4));
7979 fd_trans_unregister(ret);
7980 unlock_user(p, arg2, 0);
7981 break;
7982 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7983 case TARGET_NR_name_to_handle_at:
7984 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7985 break;
7986 #endif
7987 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7988 case TARGET_NR_open_by_handle_at:
7989 ret = do_open_by_handle_at(arg1, arg2, arg3);
7990 fd_trans_unregister(ret);
7991 break;
7992 #endif
7993 case TARGET_NR_close:
7994 fd_trans_unregister(arg1);
7995 ret = get_errno(close(arg1));
7996 break;
7997 case TARGET_NR_brk:
7998 ret = do_brk(arg1);
7999 break;
8000 #ifdef TARGET_NR_fork
8001 case TARGET_NR_fork:
8002 ret = get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8003 break;
8004 #endif
8005 #ifdef TARGET_NR_waitpid
8006 case TARGET_NR_waitpid:
8007 {
8008 int status;
8009 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8010 if (!is_error(ret) && arg2 && ret
8011 && put_user_s32(host_to_target_waitstatus(status), arg2))
8012 goto efault;
8013 }
8014 break;
8015 #endif
8016 #ifdef TARGET_NR_waitid
8017 case TARGET_NR_waitid:
8018 {
8019 siginfo_t info;
8020 info.si_pid = 0;
8021 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8022 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8023 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8024 goto efault;
8025 host_to_target_siginfo(p, &info);
8026 unlock_user(p, arg3, sizeof(target_siginfo_t));
8027 }
8028 }
8029 break;
8030 #endif
8031 #ifdef TARGET_NR_creat /* not on alpha */
8032 case TARGET_NR_creat:
8033 if (!(p = lock_user_string(arg1)))
8034 goto efault;
8035 ret = get_errno(creat(p, arg2));
8036 fd_trans_unregister(ret);
8037 unlock_user(p, arg1, 0);
8038 break;
8039 #endif
8040 #ifdef TARGET_NR_link
8041 case TARGET_NR_link:
8042 {
8043 void * p2;
8044 p = lock_user_string(arg1);
8045 p2 = lock_user_string(arg2);
8046 if (!p || !p2)
8047 ret = -TARGET_EFAULT;
8048 else
8049 ret = get_errno(link(p, p2));
8050 unlock_user(p2, arg2, 0);
8051 unlock_user(p, arg1, 0);
8052 }
8053 break;
8054 #endif
8055 #if defined(TARGET_NR_linkat)
8056 case TARGET_NR_linkat:
8057 {
8058 void * p2 = NULL;
8059 if (!arg2 || !arg4)
8060 goto efault;
8061 p = lock_user_string(arg2);
8062 p2 = lock_user_string(arg4);
8063 if (!p || !p2)
8064 ret = -TARGET_EFAULT;
8065 else
8066 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8067 unlock_user(p, arg2, 0);
8068 unlock_user(p2, arg4, 0);
8069 }
8070 break;
8071 #endif
8072 #ifdef TARGET_NR_unlink
8073 case TARGET_NR_unlink:
8074 if (!(p = lock_user_string(arg1)))
8075 goto efault;
8076 ret = get_errno(unlink(p));
8077 unlock_user(p, arg1, 0);
8078 break;
8079 #endif
8080 #if defined(TARGET_NR_unlinkat)
8081 case TARGET_NR_unlinkat:
8082 if (!(p = lock_user_string(arg2)))
8083 goto efault;
8084 ret = get_errno(unlinkat(arg1, p, arg3));
8085 unlock_user(p, arg2, 0);
8086 break;
8087 #endif
8088 case TARGET_NR_execve:
8089 {
8090 char **argp, **envp;
8091 int argc, envc;
8092 abi_ulong gp;
8093 abi_ulong guest_argp;
8094 abi_ulong guest_envp;
8095 abi_ulong addr;
8096 char **q;
8097 int total_size = 0;
8098
8099 argc = 0;
8100 guest_argp = arg2;
8101 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8102 if (get_user_ual(addr, gp))
8103 goto efault;
8104 if (!addr)
8105 break;
8106 argc++;
8107 }
8108 envc = 0;
8109 guest_envp = arg3;
8110 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8111 if (get_user_ual(addr, gp))
8112 goto efault;
8113 if (!addr)
8114 break;
8115 envc++;
8116 }
8117
8118 argp = g_new0(char *, argc + 1);
8119 envp = g_new0(char *, envc + 1);
8120
8121 for (gp = guest_argp, q = argp; gp;
8122 gp += sizeof(abi_ulong), q++) {
8123 if (get_user_ual(addr, gp))
8124 goto execve_efault;
8125 if (!addr)
8126 break;
8127 if (!(*q = lock_user_string(addr)))
8128 goto execve_efault;
8129 total_size += strlen(*q) + 1;
8130 }
8131 *q = NULL;
8132
8133 for (gp = guest_envp, q = envp; gp;
8134 gp += sizeof(abi_ulong), q++) {
8135 if (get_user_ual(addr, gp))
8136 goto execve_efault;
8137 if (!addr)
8138 break;
8139 if (!(*q = lock_user_string(addr)))
8140 goto execve_efault;
8141 total_size += strlen(*q) + 1;
8142 }
8143 *q = NULL;
8144
8145 if (!(p = lock_user_string(arg1)))
8146 goto execve_efault;
8147 /* Although execve() is not an interruptible syscall it is
8148 * a special case where we must use the safe_syscall wrapper:
8149 * if we allow a signal to happen before we make the host
8150 * syscall then we will 'lose' it, because at the point of
8151 * execve the process leaves QEMU's control. So we use the
8152 * safe syscall wrapper to ensure that we either take the
8153 * signal as a guest signal, or else it does not happen
8154 * before the execve completes and makes it the other
8155 * program's problem.
8156 */
8157 ret = get_errno(safe_execve(p, argp, envp));
8158 unlock_user(p, arg1, 0);
8159
8160 goto execve_end;
8161
8162 execve_efault:
8163 ret = -TARGET_EFAULT;
8164
8165 execve_end:
8166 for (gp = guest_argp, q = argp; *q;
8167 gp += sizeof(abi_ulong), q++) {
8168 if (get_user_ual(addr, gp)
8169 || !addr)
8170 break;
8171 unlock_user(*q, addr, 0);
8172 }
8173 for (gp = guest_envp, q = envp; *q;
8174 gp += sizeof(abi_ulong), q++) {
8175 if (get_user_ual(addr, gp)
8176 || !addr)
8177 break;
8178 unlock_user(*q, addr, 0);
8179 }
8180
8181 g_free(argp);
8182 g_free(envp);
8183 }
8184 break;
8185 case TARGET_NR_chdir:
8186 if (!(p = lock_user_string(arg1)))
8187 goto efault;
8188 ret = get_errno(chdir(p));
8189 unlock_user(p, arg1, 0);
8190 break;
8191 #ifdef TARGET_NR_time
8192 case TARGET_NR_time:
8193 {
8194 time_t host_time;
8195 ret = get_errno(time(&host_time));
8196 if (!is_error(ret)
8197 && arg1
8198 && put_user_sal(host_time, arg1))
8199 goto efault;
8200 }
8201 break;
8202 #endif
8203 #ifdef TARGET_NR_mknod
8204 case TARGET_NR_mknod:
8205 if (!(p = lock_user_string(arg1)))
8206 goto efault;
8207 ret = get_errno(mknod(p, arg2, arg3));
8208 unlock_user(p, arg1, 0);
8209 break;
8210 #endif
8211 #if defined(TARGET_NR_mknodat)
8212 case TARGET_NR_mknodat:
8213 if (!(p = lock_user_string(arg2)))
8214 goto efault;
8215 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8216 unlock_user(p, arg2, 0);
8217 break;
8218 #endif
8219 #ifdef TARGET_NR_chmod
8220 case TARGET_NR_chmod:
8221 if (!(p = lock_user_string(arg1)))
8222 goto efault;
8223 ret = get_errno(chmod(p, arg2));
8224 unlock_user(p, arg1, 0);
8225 break;
8226 #endif
8227 #ifdef TARGET_NR_break
8228 case TARGET_NR_break:
8229 goto unimplemented;
8230 #endif
8231 #ifdef TARGET_NR_oldstat
8232 case TARGET_NR_oldstat:
8233 goto unimplemented;
8234 #endif
8235 case TARGET_NR_lseek:
8236 ret = get_errno(lseek(arg1, arg2, arg3));
8237 break;
8238 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8239 /* Alpha specific */
8240 case TARGET_NR_getxpid:
8241 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8242 ret = get_errno(getpid());
8243 break;
8244 #endif
8245 #ifdef TARGET_NR_getpid
8246 case TARGET_NR_getpid:
8247 ret = get_errno(getpid());
8248 break;
8249 #endif
8250 case TARGET_NR_mount:
8251 {
8252 /* need to look at the data field */
8253 void *p2, *p3;
8254
8255 if (arg1) {
8256 p = lock_user_string(arg1);
8257 if (!p) {
8258 goto efault;
8259 }
8260 } else {
8261 p = NULL;
8262 }
8263
8264 p2 = lock_user_string(arg2);
8265 if (!p2) {
8266 if (arg1) {
8267 unlock_user(p, arg1, 0);
8268 }
8269 goto efault;
8270 }
8271
8272 if (arg3) {
8273 p3 = lock_user_string(arg3);
8274 if (!p3) {
8275 if (arg1) {
8276 unlock_user(p, arg1, 0);
8277 }
8278 unlock_user(p2, arg2, 0);
8279 goto efault;
8280 }
8281 } else {
8282 p3 = NULL;
8283 }
8284
8285 /* FIXME - arg5 should be locked, but it isn't clear how to
8286 * do that since it's not guaranteed to be a NULL-terminated
8287 * string.
8288 */
8289 if (!arg5) {
8290 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8291 } else {
8292 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8293 }
8294 ret = get_errno(ret);
8295
8296 if (arg1) {
8297 unlock_user(p, arg1, 0);
8298 }
8299 unlock_user(p2, arg2, 0);
8300 if (arg3) {
8301 unlock_user(p3, arg3, 0);
8302 }
8303 }
8304 break;
8305 #ifdef TARGET_NR_umount
8306 case TARGET_NR_umount:
8307 if (!(p = lock_user_string(arg1)))
8308 goto efault;
8309 ret = get_errno(umount(p));
8310 unlock_user(p, arg1, 0);
8311 break;
8312 #endif
8313 #ifdef TARGET_NR_stime /* not on alpha */
8314 case TARGET_NR_stime:
8315 {
8316 time_t host_time;
8317 if (get_user_sal(host_time, arg1))
8318 goto efault;
8319 ret = get_errno(stime(&host_time));
8320 }
8321 break;
8322 #endif
8323 case TARGET_NR_ptrace:
8324 goto unimplemented;
8325 #ifdef TARGET_NR_alarm /* not on alpha */
8326 case TARGET_NR_alarm:
8327 ret = alarm(arg1);
8328 break;
8329 #endif
8330 #ifdef TARGET_NR_oldfstat
8331 case TARGET_NR_oldfstat:
8332 goto unimplemented;
8333 #endif
8334 #ifdef TARGET_NR_pause /* not on alpha */
8335 case TARGET_NR_pause:
8336 if (!block_signals()) {
8337 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8338 }
8339 ret = -TARGET_EINTR;
8340 break;
8341 #endif
8342 #ifdef TARGET_NR_utime
8343 case TARGET_NR_utime:
8344 {
8345 struct utimbuf tbuf, *host_tbuf;
8346 struct target_utimbuf *target_tbuf;
8347 if (arg2) {
8348 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8349 goto efault;
8350 tbuf.actime = tswapal(target_tbuf->actime);
8351 tbuf.modtime = tswapal(target_tbuf->modtime);
8352 unlock_user_struct(target_tbuf, arg2, 0);
8353 host_tbuf = &tbuf;
8354 } else {
8355 host_tbuf = NULL;
8356 }
8357 if (!(p = lock_user_string(arg1)))
8358 goto efault;
8359 ret = get_errno(utime(p, host_tbuf));
8360 unlock_user(p, arg1, 0);
8361 }
8362 break;
8363 #endif
8364 #ifdef TARGET_NR_utimes
8365 case TARGET_NR_utimes:
8366 {
8367 struct timeval *tvp, tv[2];
8368 if (arg2) {
8369 if (copy_from_user_timeval(&tv[0], arg2)
8370 || copy_from_user_timeval(&tv[1],
8371 arg2 + sizeof(struct target_timeval)))
8372 goto efault;
8373 tvp = tv;
8374 } else {
8375 tvp = NULL;
8376 }
8377 if (!(p = lock_user_string(arg1)))
8378 goto efault;
8379 ret = get_errno(utimes(p, tvp));
8380 unlock_user(p, arg1, 0);
8381 }
8382 break;
8383 #endif
8384 #if defined(TARGET_NR_futimesat)
8385 case TARGET_NR_futimesat:
8386 {
8387 struct timeval *tvp, tv[2];
8388 if (arg3) {
8389 if (copy_from_user_timeval(&tv[0], arg3)
8390 || copy_from_user_timeval(&tv[1],
8391 arg3 + sizeof(struct target_timeval)))
8392 goto efault;
8393 tvp = tv;
8394 } else {
8395 tvp = NULL;
8396 }
8397 if (!(p = lock_user_string(arg2)))
8398 goto efault;
8399 ret = get_errno(futimesat(arg1, path(p), tvp));
8400 unlock_user(p, arg2, 0);
8401 }
8402 break;
8403 #endif
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
        goto unimplemented;
#endif
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
        goto unimplemented;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        /* Lock the guest pathname into host memory, run the host syscall,
         * then release the guest buffer.  path() presumably remaps the
         * filename (interp-prefix lookup, see qemu/path.h) -- TODO confirm. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        /* The kernel faccessat syscall takes no flags argument, so a
         * constant 0 is passed to the host wrapper here. */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
        break;
#endif
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
        goto unimplemented;
#endif
    case TARGET_NR_sync:
        /* sync(2) cannot fail; force a success return. */
        sync();
        ret = 0;
        break;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        ret = get_errno(syncfs(arg1));
        break;
#endif
    case TARGET_NR_kill:
        /* Signal numbers differ between target and host ABIs; translate
         * before calling the host. */
        ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
        break;
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            /* Both names must be locked before deciding; unlock_user on a
             * NULL pointer is tolerated, so the unlocks run unconditionally. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                /* sys_renameat2 is a direct-syscall wrapper (glibc has no
                 * renameat2 wrapper on older hosts); arg5 carries the flags. */
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Propagate any fd translator from the old fd to the new one. */
            fd_trans_dup(arg1, ret);
        }
        break;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0, 0);
        break;
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* Final argument 1 tells do_pipe this is pipe2 semantics. */
        ret = do_pipe(cpu_env, arg1,
                      target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
        break;
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    goto efault;
                /* Each clock_t field is converted to the target's clock
                 * resolution and byte order before being written back. */
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            /* The syscall's own return value is also a clock_t. */
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        break;
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
        goto unimplemented;
#endif
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
        goto unimplemented;
#endif
    case TARGET_NR_acct:
        /* acct(NULL) disables process accounting, so a zero guest pointer
         * is valid and must not be treated as EFAULT. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        break;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
        goto unimplemented;
#endif
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
        break;
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_mpx
    case TARGET_NR_mpx:
        goto unimplemented;
#endif
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
        break;
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
        goto unimplemented;
#endif
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
        goto unimplemented;
#endif
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
        break;
    case TARGET_NR_chroot:
        /* Note: unlike access/acct above, the path is passed through
         * unmodified (no path() remapping). */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_ustat
    case TARGET_NR_ustat:
        goto unimplemented;
#endif
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* The duplicate lands on arg2, so copy the fd translator there. */
            fd_trans_dup(arg1, arg2);
        }
        break;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* dup3(2) accepts only O_CLOEXEC in flags; reject anything else.
         * Use the normal "set ret and break" convention instead of a bare
         * return so the common syscall-exit path still runs, and report
         * the *target* EINVAL value rather than the host's (the two can
         * differ, e.g. on Alpha/MIPS targets). */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            ret = -TARGET_EINVAL;
            break;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* The duplicate lands on arg2, so copy the fd translator there. */
            fd_trans_dup(arg1, arg2);
        }
        break;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        /* Process-identity syscalls map 1:1 onto the host. */
        ret = get_errno(getppid());
        break;
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
        break;
#endif
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
        break;
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /* Old-style sigaction.  The guest-visible struct layout is
             * per-architecture, hence the three marshalling variants below.
             * do_sigaction() performs the actual signal-table update. */
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    goto efault;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                /* Alpha's old sigaction has no restorer field. */
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    goto efault;
                old_act->_sa_handler = oact._sa_handler;
                /* Old ABI carries only the first word of the mask. */
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
            }
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    goto efault;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    goto efault;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                /* Only sig[0] is meaningful in the old ABI; clear the rest. */
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    goto efault;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                /* The old sigaction ABI has no ka_restorer; zero it so
                 * do_sigaction does not read garbage. */
                act.ka_restorer = 0;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    goto efault;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        break;
#endif
    case TARGET_NR_rt_sigaction:
        {
#if defined(TARGET_ALPHA)
            /* For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             * Alpha also has a separate rt_sigaction struct that it uses
             * here; SPARC uses the usual sigaction struct.
             */
            struct target_rt_sigaction *rt_act;
            struct target_sigaction act, oact, *pact = 0;

            /* Like the kernel, reject sigsets of the wrong size. */
            if (arg4 != sizeof(target_sigset_t)) {
                ret = -TARGET_EINVAL;
                break;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                    goto efault;
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
                pact = &act;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                    goto efault;
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            }
#else
#ifdef TARGET_SPARC
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
#endif
            struct target_sigaction *act;
            struct target_sigaction *oact;

            if (sigsetsize != sizeof(target_sigset_t)) {
                ret = -TARGET_EINVAL;
                break;
            }
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                    goto efault;
                }
#ifdef TARGET_ARCH_HAS_KA_RESTORER
                act->ka_restorer = restorer;
#endif
            } else {
                act = NULL;
            }
            if (arg3) {
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    /* 'act' may already be locked; fall through to the
                     * shared unlock path below. */
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
                }
            } else
                oact = NULL;
            ret = get_errno(do_sigaction(arg1, act, oact));
        rt_sigaction_fail:
            if (act)
                unlock_user_struct(act, arg2, 0);
            if (oact)
                unlock_user_struct(oact, arg3, 1);
#endif
        }
        break;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            /* Query-only: pass NULL as the new set. */
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                /* sgetmask returns the mask itself, not 0/-errno. */
                ret = target_set;
            }
        }
        break;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                /* Return the previous mask on success. */
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        break;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's osf_sigprocmask takes the mask by value in arg2 and
             * returns the old mask in the syscall result. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL new-set: 'how' is ignored by the kernel; just query. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        break;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* Like the kernel, enforce the exact sigset size. */
            if (arg4 != sizeof(target_sigset_t)) {
                ret = -TARGET_EINVAL;
                break;
            }

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    ret = -TARGET_EINVAL;
                    goto fail;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    goto efault;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                ret = -TARGET_EINVAL;
                break;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    goto efault;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
            /* The mask is stashed in the TaskState so the signal-delivery
             * code can restore it after the suspension ends. */
#if defined(TARGET_ALPHA)
            /* Alpha passes the (old-style) mask by value in arg1. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        break;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                ret = -TARGET_EINVAL;
                break;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                goto efault;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        break;
9036 case TARGET_NR_rt_sigtimedwait:
9037 {
9038 sigset_t set;
9039 struct timespec uts, *puts;
9040 siginfo_t uinfo;
9041
9042 if (arg4 != sizeof(target_sigset_t)) {
9043 ret = -TARGET_EINVAL;
9044 break;
9045 }
9046
9047 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9048 goto efault;
9049 target_to_host_sigset(&set, p);
9050 unlock_user(p, arg1, 0);
9051 if (arg3) {
9052 puts = &uts;
9053 target_to_host_timespec(puts, arg3);
9054 } else {
9055 puts = NULL;
9056 }
9057 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9058 SIGSET_T_SIZE));
9059 if (!is_error(ret)) {
9060 if (arg2) {
9061 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9062 0);
9063 if (!p) {
9064 goto efault;
9065 }
9066 host_to_target_siginfo(p, &uinfo);
9067 unlock_user(p, arg2, sizeof(target_siginfo_t));
9068 }
9069 ret = host_to_target_signal(ret);
9070 }
9071 }
9072 break;
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            /* Convert the guest siginfo, then queue via the direct-syscall
             * wrapper (sig/pid validation is left to the kernel). */
            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                goto efault;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        break;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                goto efault;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        break;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* block_signals() keeps host signal delivery out of the frame
         * teardown; if it reports pending work, restart the syscall. */
        if (block_signals()) {
            ret = -TARGET_ERESTARTSYS;
        } else {
            ret = do_sigreturn(cpu_env);
        }
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            ret = -TARGET_ERESTARTSYS;
        } else {
            ret = do_rt_sigreturn(cpu_env);
        }
        break;
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                goto efault;
            /* rlim values need conversion for RLIM_INFINITY encoding. */
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
        }
        break;
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    goto efault;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        break;
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* host_to_target_rusage copies out and returns 0/-EFAULT. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        break;
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            /* Timezone argument (arg2) is not supported here. */
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            /* Both arguments are optional (NULL-able), like the kernel. */
            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    goto efault;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    goto efault;
                }
                ptz = &tz;
            }

            ret = get_errno(settimeofday(ptv, ptz));
        }
        break;
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Old-style select: single pointer to an argument block. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        break;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            /* Each helper leaves the fd_set pointer NULL when the guest
             * address is 0, matching kernel semantics. */
            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                goto fail;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                goto fail;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                goto fail;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    goto efault;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    goto efault;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        ret = -TARGET_EINVAL;
                        goto fail;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        goto efault;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            if (!is_error(ret)) {
                /* Copy the (possibly modified) fd_sets and the remaining
                 * timeout back to the guest. */
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    goto efault;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    goto efault;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    goto efault;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
        goto unimplemented;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* /proc/self/exe must resolve to the *guest* binary, not
                 * the qemu executable, so answer from exec_path. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* /proc/self/exe must resolve to the *guest* binary.
                 * Bug fix: mirror the TARGET_NR_readlink implementation.
                 * The old code used snprintf() (which NUL-terminates,
                 * unlike the kernel) and set ret = strlen(real) even when
                 * that exceeds arg4, making unlock_user() copy back more
                 * bytes than the guest buffer holds. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
#ifdef TARGET_NR_uselib
    case TARGET_NR_uselib:
        goto unimplemented;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                goto efault;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        break;
#ifdef TARGET_NR_readdir
    case TARGET_NR_readdir:
        goto unimplemented;
#endif
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* These ABIs pass the six mmap arguments in a guest-memory
             * block pointed to by arg1 (old_mmap style). */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                goto efault;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        break;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2's offset argument is in (typically 4KiB) pages, not bytes. */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6 << MMAP_SHIFT));
        break;
#endif
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
        break;
    case TARGET_NR_mprotect:
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Extend the request to cover the whole mapped stack region
                 * and drop the flag the host may not understand here. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() translates the guest address straight to host memory. */
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* MCL_* flag values differ per target; translate first. */
        ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
        break;
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
        break;
#endif
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* The kernel fchmodat syscall takes no flags; pass 0 to the host. */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            ret = -host_to_target_errno(errno);
            break;
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
        goto unimplemented;
#endif
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* Shared copy-out path; TARGET_NR_fstatfs jumps here after filling
         * 'stfs' (declared further up in do_syscall). */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        break;
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* 64-bit variant of the shared copy-out path above; note the
         * output buffer is arg3 here (arg2 is the struct size). */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                goto efault;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        break;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point used by some 32-bit ABIs:
         * arg1 selects the operation, arg2 points to the argument block. */
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept(2) implemented as accept4 with flags == 0. */
        ret = do_accept4(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        ret = do_accept4(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* listen(2) needs no argument translation; call the host directly. */
        ret = get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv(2) == recvfrom(2) with NULL source address. */
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag selects direction: 0 = receive, 1 = send. */
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send(2) == sendto(2) with NULL destination address. */
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
        break;
    case TARGET_NR_recvmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* getrandom(2): arg1 = guest buffer, arg2 = length, arg3 = flags.
         * Only compiled when the host kernel headers expose the syscall. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            goto efault;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* ret (bytes produced) is the copy-back length for unlock_user. */
        unlock_user(p, arg1, ret);
        break;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
        break;
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        /* syslog(2)/klogctl: arg1 = action, arg2 = buffer, arg3 = length. */
        {
            /* NOTE(review): len is initialized from arg2 (the buffer
             * pointer) while the buffer length used by lock_user and
             * sys_syslog below is arg3 — confirm this is intentional;
             * it looks like it should be arg3. */
            int len = arg2;

            switch (arg1) {
            /* Actions that take no user buffer: pass NULL through. */
            case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
                {
                    ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
                }
                break;
            /* Actions that write kernel log data into the user buffer. */
            case TARGET_SYSLOG_ACTION_READ: /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
                {
                    ret = -TARGET_EINVAL;
                    if (len < 0) {
                        goto fail;
                    }
                    /* Zero length: succeed without touching guest memory. */
                    ret = 0;
                    if (len == 0) {
                        break;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        ret = -TARGET_EFAULT;
                        goto fail;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                break;
            default:
                /* NOTE(review): host EINVAL rather than TARGET_EINVAL —
                 * inconsistent with the rest of the file; confirm. */
                ret = -EINVAL;
                break;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        /* setitimer(2): arg2 = new value (may be NULL), arg3 = old value
         * out-pointer (may be 0).  A target itimerval is laid out as two
         * consecutive target_timevals (it_interval, it_value). */
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
            } else {
                /* NULL new_value: caller only wants the old timer. */
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                /* Copy the previous timer back out in target layout. */
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    goto efault;
            }
        }
        break;
    case TARGET_NR_getitimer:
        /* getitimer(2): read timer arg1, write result to guest arg2. */
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    goto efault;
            }
        }
        break;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        /* stat(2): path in arg1, result buffer in arg2.  All three of
         * stat/lstat/fstat funnel into the shared do_stat conversion. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        /* lstat(2): like stat but does not follow a final symlink. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Convert host struct stat -> guest target_stat field by
             * field; the memset clears padding and any fields the guest
             * layout has that we don't fill. */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    goto efault;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                /* target_st_* names avoid clashing with host st_* macros. */
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        break;
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
        goto unimplemented;
#endif
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
        goto unimplemented;
#endif
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
        break;
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
        goto unimplemented;
#endif
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: arg1 is the real syscall number (masked to
         * 16 bits), remaining args shift down one slot. */
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                         arg6, arg7, arg8, 0);
        break;
#endif
    case TARGET_NR_wait4:
        /* wait4(2): arg1 = pid, arg2 = status out-ptr, arg3 = options,
         * arg4 = rusage out-ptr.  Both out-pointers are optional. */
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 (e.g. WNOHANG with nothing to reap) means no
                 * child changed state, so there is no status to report. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        goto efault;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        break;
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_sysinfo:
        /* sysinfo(2): fill the guest's target_sysinfo from the host's,
         * swapping each field individually. */
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    goto efault;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        break;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point (older 32-bit ABIs). */
        ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
/* Direct SysV IPC syscalls: simple ones go straight to the host,
 * the ones needing argument translation go through do_* helpers. */
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = do_semop(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        /* Needs cpu_env to manage the guest address-space mapping. */
        ret = do_shmat(cpu_env, arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        break;
#ifdef __NR_exit_group
    /* new thread calls */
    case TARGET_NR_exit_group:
        /* exit_group(2): flush gprof data and notify gdb before the
         * whole emulated process goes away. */
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            /* Copy back even on error; buf was already written by the host. */
            unlock_user_struct(buf, arg1, 1);
        }
        break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        /* adjtimex(2): struct timex is read-modify-write, so convert
         * target->host before the call and host->target after. */
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                goto efault;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    goto efault;
                }
            }
        }
        break;
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        /* clock_adjtime(2): like adjtimex but for clock id arg1. */
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                goto efault;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* NOTE(review): phtx is always &htx here, so the NULL check
             * is redundant — kept for byte-identical behavior. */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    goto efault;
                }
            }
        }
        break;
#endif
/* Kernel-module syscalls are not emulatable; all fall through to the
 * common unimplemented handler. */
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        /* _llseek(2): 64-bit seek on 32-bit targets.  arg2:arg3 form the
         * hi:lo offset halves, arg4 is the 64-bit result out-pointer,
         * arg5 is whence. */
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host has no _llseek (64-bit host): emulate with lseek. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* Only write the result back on success. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        /* getdents(2): three strategies depending on host capabilities:
         * 1) 32-bit target on 64-bit host: call host getdents into a
         *    bounce buffer and repack records into the (smaller) target
         *    layout;
         * 2) matching ABI sizes: call host getdents in place and just
         *    byteswap each record;
         * 3) host lacks getdents: synthesize from getdents64 in place. */
#ifdef __NR_getdents
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            /* Host-side bounce buffer; target records are never larger
             * than host ones, so `count` bytes suffice for the repack. */
            dirp = g_try_malloc(count);
            if (!dirp) {
                ret = -TARGET_ENOMEM;
                goto fail;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    goto efault;
                tde = target_dirp;
                /* Walk host records, emitting one target record each. */
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                /* Return the repacked byte count, not the host's. */
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                /* In-place byteswap of each record's fixed fields. */
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                goto efault;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent. We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2: NUL terminator plus the trailing d_type byte. */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        break;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /* getdents64(2): record layout is identical on host and target,
         * so read directly into guest memory and byteswap in place. */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Truncated trailing record: stop rather than swap
                     * past the end of what the kernel wrote. */
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        break;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
    /* poll(2) and ppoll(2) share the pollfd translation; the inner
     * switch on `num` (the original syscall number) picks the variant. */
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                /* Reject absurd nfds before it can overflow the
                 * lock_user length / alloca size computations. */
                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    goto efault;
                }

                /* Translate guest pollfds into a host-side array. */
                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
                /* ppoll: optional timespec (arg3) and sigmask (arg4/arg5). */
                {
                    struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                    target_sigset_t *target_set;
                    sigset_t _set, *set = &_set;

                    if (arg3) {
                        if (target_to_host_timespec(timeout_ts, arg3)) {
                            unlock_user(target_pfd, arg1, 0);
                            goto efault;
                        }
                    } else {
                        timeout_ts = NULL;
                    }

                    if (arg4) {
                        if (arg5 != sizeof(target_sigset_t)) {
                            unlock_user(target_pfd, arg1, 0);
                            ret = -TARGET_EINVAL;
                            break;
                        }

                        target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                        if (!target_set) {
                            unlock_user(target_pfd, arg1, 0);
                            goto efault;
                        }
                        target_to_host_sigset(set, target_set);
                    } else {
                        set = NULL;
                    }

                    ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                               set, SIGSET_T_SIZE));

                    /* Linux updates the timeout with the remaining time;
                     * propagate that back to the guest. */
                    if (!is_error(ret) && arg3) {
                        host_to_target_timespec(arg3, timeout_ts);
                    }
                    if (arg4) {
                        unlock_user(target_set, arg4, 0);
                    }
                    break;
                }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
                /* poll's ms timeout is emulated via ppoll's timespec. */
                {
                    struct timespec ts, *pts;

                    if (arg3 >= 0) {
                        /* Convert ms to secs, ns */
                        ts.tv_sec = arg3 / 1000;
                        ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                        pts = &ts;
                    } else {
                        /* -ve poll() timeout means "infinite" */
                        pts = NULL;
                    }
                    ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                    break;
                }
# endif
            default:
                g_assert_not_reached();
            }

            /* Copy revents back for every fd, regardless of ready count. */
            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(safe_flock(arg1, arg2));
        break;
    case TARGET_NR_readv:
        /* readv(2): lock_iovec translates the guest iovec array and
         * bounces the buffers; the final flag to unlock_iovec selects
         * whether data is copied back (1 for reads, 0 for writes). */
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports failure via host errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        /* preadv(2): arg4/arg5 carry the 64-bit offset split per the
         * target ABI; target_to_host_low_high reassembles it. */
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
        break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
#endif
    case TARGET_NR_sched_getaffinity:
        /* sched_getaffinity(2): arg1 = pid, arg2 = guest mask size in
         * bytes, arg3 = guest mask pointer. */
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            /* Round the guest size up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        ret = -TARGET_EINVAL;
                        break;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    goto efault;
                }
            }
        }
        break;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Translate the guest cpu mask into host layout first. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                break;
            }

            ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
        break;
    case TARGET_NR_getcpu:
        /* getcpu(2): arg1/arg2 are optional out-pointers for the cpu and
         * NUMA node; the third (cache) argument is unused and passed NULL. */
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                goto fail;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                goto efault;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                goto efault;
            }
        }
        break;
    case TARGET_NR_sched_setparam:
        /* sched_setparam(2): only sched_priority needs translating.
         * NOTE(review): the NULL-pointer check uses `return`, bypassing
         * the function's common exit path used elsewhere in this switch. */
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            ret = get_errno(sched_setparam(arg1, &schp));
        }
        break;
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    goto efault;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        break;
    case TARGET_NR_sched_setscheduler:
        /* sched_setscheduler(2): arg3 is the sched_param pointer. */
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* NOTE(review): target_to_host_timespec's return value is
             * ignored here, unlike other call sites — confirm intended. */
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            /* On interruption, report the remaining time to the guest. */
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    case TARGET_NR_prctl:
        /* prctl(2): options with pointer arguments need individual
         * handling; everything else passes the args straight through. */
        switch (arg1) {
        case PR_GET_PDEATHSIG:
            {
                int deathsig;
                ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
                if (!is_error(ret) && arg2
                    && put_user_ual(deathsig, arg2)) {
                    goto efault;
                }
                break;
            }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
            {
                /* Task names are at most 16 bytes including the NUL. */
                void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
                if (!name) {
                    goto efault;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 16);
                break;
            }
        case PR_SET_NAME:
            {
                void *name = lock_user(VERIFY_READ, arg2, 16, 1);
                if (!name) {
                    goto efault;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 0);
                break;
            }
#endif
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /* We cannot support either PR_SVE_SET_VL_ONEXEC
               or PR_SVE_VL_INHERIT.  Therefore, anything above
               ARM_MAX_VQ results in EINVAL.  */
            ret = -TARGET_EINVAL;
            if (arm_feature(cpu_env, ARM_FEATURE_SVE)
                && arg2 >= 0 && arg2 <= ARM_MAX_VQ * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                /* ZCR_EL1 stores (vector quanta - 1) in its low nibble. */
                int old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                int vq = MAX(arg2 / 16, 1);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                ret = vq * 16;
            }
            break;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
                CPUARMState *env = cpu_env;
                ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
            }
            break;
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            ret = -TARGET_EINVAL;
            break;
        default:
            /* Most prctl options have no pointer arguments */
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
            break;
        }
        break;
10757 #ifdef TARGET_NR_arch_prctl
10758 case TARGET_NR_arch_prctl:
10759 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10760 ret = do_arch_prctl(cpu_env, arg1, arg2);
10761 break;
10762 #else
10763 goto unimplemented;
10764 #endif
10765 #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some 32-bit ABIs require 64-bit syscall arguments to live in
         * an even/odd register pair; when that padding applies, the
         * offset halves arrive one argument slot later than usual.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back only the bytes actually read. */
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite64:
        /* Same register-pair alignment shuffle as pread64 above. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Write path: nothing to copy back to the guest buffer. */
        unlock_user(p, arg2, 0);
        break;
#endif
10788 case TARGET_NR_getcwd:
10789 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10790 goto efault;
10791 ret = get_errno(sys_getcwd1(p, arg2));
10792 unlock_user(p, arg1, ret);
10793 break;
10794 case TARGET_NR_capget:
10795 case TARGET_NR_capset:
10796 {
10797 struct target_user_cap_header *target_header;
10798 struct target_user_cap_data *target_data = NULL;
10799 struct __user_cap_header_struct header;
10800 struct __user_cap_data_struct data[2];
10801 struct __user_cap_data_struct *dataptr = NULL;
10802 int i, target_datalen;
10803 int data_items = 1;
10804
10805 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10806 goto efault;
10807 }
10808 header.version = tswap32(target_header->version);
10809 header.pid = tswap32(target_header->pid);
10810
10811 if (header.version != _LINUX_CAPABILITY_VERSION) {
10812 /* Version 2 and up takes pointer to two user_data structs */
10813 data_items = 2;
10814 }
10815
10816 target_datalen = sizeof(*target_data) * data_items;
10817
10818 if (arg2) {
10819 if (num == TARGET_NR_capget) {
10820 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10821 } else {
10822 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10823 }
10824 if (!target_data) {
10825 unlock_user_struct(target_header, arg1, 0);
10826 goto efault;
10827 }
10828
10829 if (num == TARGET_NR_capset) {
10830 for (i = 0; i < data_items; i++) {
10831 data[i].effective = tswap32(target_data[i].effective);
10832 data[i].permitted = tswap32(target_data[i].permitted);
10833 data[i].inheritable = tswap32(target_data[i].inheritable);
10834 }
10835 }
10836
10837 dataptr = data;
10838 }
10839
10840 if (num == TARGET_NR_capget) {
10841 ret = get_errno(capget(&header, dataptr));
10842 } else {
10843 ret = get_errno(capset(&header, dataptr));
10844 }
10845
10846 /* The kernel always updates version for both capget and capset */
10847 target_header->version = tswap32(header.version);
10848 unlock_user_struct(target_header, arg1, 1);
10849
10850 if (arg2) {
10851 if (num == TARGET_NR_capget) {
10852 for (i = 0; i < data_items; i++) {
10853 target_data[i].effective = tswap32(data[i].effective);
10854 target_data[i].permitted = tswap32(data[i].permitted);
10855 target_data[i].inheritable = tswap32(data[i].inheritable);
10856 }
10857 unlock_user(target_data, arg2, target_datalen);
10858 } else {
10859 unlock_user(target_data, arg2, 0);
10860 }
10861 }
10862 break;
10863 }
10864 case TARGET_NR_sigaltstack:
10865 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10866 break;
10867
10868 #ifdef CONFIG_SENDFILE
10869 case TARGET_NR_sendfile:
10870 {
10871 off_t *offp = NULL;
10872 off_t off;
10873 if (arg3) {
10874 ret = get_user_sal(off, arg3);
10875 if (is_error(ret)) {
10876 break;
10877 }
10878 offp = &off;
10879 }
10880 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10881 if (!is_error(ret) && arg3) {
10882 abi_long ret2 = put_user_sal(off, arg3);
10883 if (is_error(ret2)) {
10884 ret = ret2;
10885 }
10886 }
10887 break;
10888 }
10889 #ifdef TARGET_NR_sendfile64
10890 case TARGET_NR_sendfile64:
10891 {
10892 off_t *offp = NULL;
10893 off_t off;
10894 if (arg3) {
10895 ret = get_user_s64(off, arg3);
10896 if (is_error(ret)) {
10897 break;
10898 }
10899 offp = &off;
10900 }
10901 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10902 if (!is_error(ret) && arg3) {
10903 abi_long ret2 = put_user_s64(off, arg3);
10904 if (is_error(ret2)) {
10905 ret = ret2;
10906 }
10907 }
10908 break;
10909 }
10910 #endif
10911 #else
10912 case TARGET_NR_sendfile:
10913 #ifdef TARGET_NR_sendfile64
10914 case TARGET_NR_sendfile64:
10915 #endif
10916 goto unimplemented;
10917 #endif
10918
10919 #ifdef TARGET_NR_getpmsg
10920 case TARGET_NR_getpmsg:
10921 goto unimplemented;
10922 #endif
10923 #ifdef TARGET_NR_putpmsg
10924 case TARGET_NR_putpmsg:
10925 goto unimplemented;
10926 #endif
10927 #ifdef TARGET_NR_vfork
10928 case TARGET_NR_vfork:
10929 ret = get_errno(do_fork(cpu_env,
10930 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10931 0, 0, 0, 0));
10932 break;
10933 #endif
10934 #ifdef TARGET_NR_ugetrlimit
10935 case TARGET_NR_ugetrlimit:
10936 {
10937 struct rlimit rlim;
10938 int resource = target_to_host_resource(arg1);
10939 ret = get_errno(getrlimit(resource, &rlim));
10940 if (!is_error(ret)) {
10941 struct target_rlimit *target_rlim;
10942 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10943 goto efault;
10944 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10945 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10946 unlock_user_struct(target_rlim, arg2, 1);
10947 }
10948 break;
10949 }
10950 #endif
10951 #ifdef TARGET_NR_truncate64
10952 case TARGET_NR_truncate64:
10953 if (!(p = lock_user_string(arg1)))
10954 goto efault;
10955 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10956 unlock_user(p, arg1, 0);
10957 break;
10958 #endif
10959 #ifdef TARGET_NR_ftruncate64
10960 case TARGET_NR_ftruncate64:
10961 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10962 break;
10963 #endif
10964 #ifdef TARGET_NR_stat64
10965 case TARGET_NR_stat64:
10966 if (!(p = lock_user_string(arg1)))
10967 goto efault;
10968 ret = get_errno(stat(path(p), &st));
10969 unlock_user(p, arg1, 0);
10970 if (!is_error(ret))
10971 ret = host_to_target_stat64(cpu_env, arg2, &st);
10972 break;
10973 #endif
10974 #ifdef TARGET_NR_lstat64
10975 case TARGET_NR_lstat64:
10976 if (!(p = lock_user_string(arg1)))
10977 goto efault;
10978 ret = get_errno(lstat(path(p), &st));
10979 unlock_user(p, arg1, 0);
10980 if (!is_error(ret))
10981 ret = host_to_target_stat64(cpu_env, arg2, &st);
10982 break;
10983 #endif
10984 #ifdef TARGET_NR_fstat64
10985 case TARGET_NR_fstat64:
10986 ret = get_errno(fstat(arg1, &st));
10987 if (!is_error(ret))
10988 ret = host_to_target_stat64(cpu_env, arg2, &st);
10989 break;
10990 #endif
10991 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10992 #ifdef TARGET_NR_fstatat64
10993 case TARGET_NR_fstatat64:
10994 #endif
10995 #ifdef TARGET_NR_newfstatat
10996 case TARGET_NR_newfstatat:
10997 #endif
10998 if (!(p = lock_user_string(arg2)))
10999 goto efault;
11000 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11001 if (!is_error(ret))
11002 ret = host_to_target_stat64(cpu_env, arg3, &st);
11003 break;
11004 #endif
11005 #ifdef TARGET_NR_lchown
11006 case TARGET_NR_lchown:
11007 if (!(p = lock_user_string(arg1)))
11008 goto efault;
11009 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11010 unlock_user(p, arg1, 0);
11011 break;
11012 #endif
11013 #ifdef TARGET_NR_getuid
11014 case TARGET_NR_getuid:
11015 ret = get_errno(high2lowuid(getuid()));
11016 break;
11017 #endif
11018 #ifdef TARGET_NR_getgid
11019 case TARGET_NR_getgid:
11020 ret = get_errno(high2lowgid(getgid()));
11021 break;
11022 #endif
11023 #ifdef TARGET_NR_geteuid
11024 case TARGET_NR_geteuid:
11025 ret = get_errno(high2lowuid(geteuid()));
11026 break;
11027 #endif
11028 #ifdef TARGET_NR_getegid
11029 case TARGET_NR_getegid:
11030 ret = get_errno(high2lowgid(getegid()));
11031 break;
11032 #endif
11033 case TARGET_NR_setreuid:
11034 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11035 break;
11036 case TARGET_NR_setregid:
11037 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11038 break;
11039 case TARGET_NR_getgroups:
11040 {
11041 int gidsetsize = arg1;
11042 target_id *target_grouplist;
11043 gid_t *grouplist;
11044 int i;
11045
11046 grouplist = alloca(gidsetsize * sizeof(gid_t));
11047 ret = get_errno(getgroups(gidsetsize, grouplist));
11048 if (gidsetsize == 0)
11049 break;
11050 if (!is_error(ret)) {
11051 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11052 if (!target_grouplist)
11053 goto efault;
11054 for(i = 0;i < ret; i++)
11055 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11056 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11057 }
11058 }
11059 break;
11060 case TARGET_NR_setgroups:
11061 {
11062 int gidsetsize = arg1;
11063 target_id *target_grouplist;
11064 gid_t *grouplist = NULL;
11065 int i;
11066 if (gidsetsize) {
11067 grouplist = alloca(gidsetsize * sizeof(gid_t));
11068 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11069 if (!target_grouplist) {
11070 ret = -TARGET_EFAULT;
11071 goto fail;
11072 }
11073 for (i = 0; i < gidsetsize; i++) {
11074 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11075 }
11076 unlock_user(target_grouplist, arg2, 0);
11077 }
11078 ret = get_errno(setgroups(gidsetsize, grouplist));
11079 }
11080 break;
11081 case TARGET_NR_fchown:
11082 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11083 break;
11084 #if defined(TARGET_NR_fchownat)
11085 case TARGET_NR_fchownat:
11086 if (!(p = lock_user_string(arg2)))
11087 goto efault;
11088 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11089 low2highgid(arg4), arg5));
11090 unlock_user(p, arg2, 0);
11091 break;
11092 #endif
11093 #ifdef TARGET_NR_setresuid
11094 case TARGET_NR_setresuid:
11095 ret = get_errno(sys_setresuid(low2highuid(arg1),
11096 low2highuid(arg2),
11097 low2highuid(arg3)));
11098 break;
11099 #endif
11100 #ifdef TARGET_NR_getresuid
11101 case TARGET_NR_getresuid:
11102 {
11103 uid_t ruid, euid, suid;
11104 ret = get_errno(getresuid(&ruid, &euid, &suid));
11105 if (!is_error(ret)) {
11106 if (put_user_id(high2lowuid(ruid), arg1)
11107 || put_user_id(high2lowuid(euid), arg2)
11108 || put_user_id(high2lowuid(suid), arg3))
11109 goto efault;
11110 }
11111 }
11112 break;
11113 #endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* Fixed guard: this arm was gated on TARGET_NR_getresgid
         * (copy-paste from the arm below), so a target that defines
         * setresgid but not getresgid never compiled it in. */
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
11121 #ifdef TARGET_NR_getresgid
11122 case TARGET_NR_getresgid:
11123 {
11124 gid_t rgid, egid, sgid;
11125 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11126 if (!is_error(ret)) {
11127 if (put_user_id(high2lowgid(rgid), arg1)
11128 || put_user_id(high2lowgid(egid), arg2)
11129 || put_user_id(high2lowgid(sgid), arg3))
11130 goto efault;
11131 }
11132 }
11133 break;
11134 #endif
11135 #ifdef TARGET_NR_chown
11136 case TARGET_NR_chown:
11137 if (!(p = lock_user_string(arg1)))
11138 goto efault;
11139 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11140 unlock_user(p, arg1, 0);
11141 break;
11142 #endif
11143 case TARGET_NR_setuid:
11144 ret = get_errno(sys_setuid(low2highuid(arg1)));
11145 break;
11146 case TARGET_NR_setgid:
11147 ret = get_errno(sys_setgid(low2highgid(arg1)));
11148 break;
11149 case TARGET_NR_setfsuid:
11150 ret = get_errno(setfsuid(arg1));
11151 break;
11152 case TARGET_NR_setfsgid:
11153 ret = get_errno(setfsgid(arg1));
11154 break;
11155
11156 #ifdef TARGET_NR_lchown32
11157 case TARGET_NR_lchown32:
11158 if (!(p = lock_user_string(arg1)))
11159 goto efault;
11160 ret = get_errno(lchown(p, arg2, arg3));
11161 unlock_user(p, arg1, 0);
11162 break;
11163 #endif
11164 #ifdef TARGET_NR_getuid32
11165 case TARGET_NR_getuid32:
11166 ret = get_errno(getuid());
11167 break;
11168 #endif
11169
11170 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11171 /* Alpha specific */
11172 case TARGET_NR_getxuid:
11173 {
11174 uid_t euid;
11175 euid=geteuid();
11176 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11177 }
11178 ret = get_errno(getuid());
11179 break;
11180 #endif
11181 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11182 /* Alpha specific */
11183 case TARGET_NR_getxgid:
11184 {
11185 uid_t egid;
11186 egid=getegid();
11187 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11188 }
11189 ret = get_errno(getgid());
11190 break;
11191 #endif
11192 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11193 /* Alpha specific */
11194 case TARGET_NR_osf_getsysinfo:
11195 ret = -TARGET_EOPNOTSUPP;
11196 switch (arg1) {
11197 case TARGET_GSI_IEEE_FP_CONTROL:
11198 {
11199 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
11200
11201 /* Copied from linux ieee_fpcr_to_swcr. */
11202 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
11203 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
11204 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
11205 | SWCR_TRAP_ENABLE_DZE
11206 | SWCR_TRAP_ENABLE_OVF);
11207 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
11208 | SWCR_TRAP_ENABLE_INE);
11209 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
11210 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
11211
11212 if (put_user_u64 (swcr, arg2))
11213 goto efault;
11214 ret = 0;
11215 }
11216 break;
11217
11218 /* case GSI_IEEE_STATE_AT_SIGNAL:
11219 -- Not implemented in linux kernel.
11220 case GSI_UACPROC:
11221 -- Retrieves current unaligned access state; not much used.
11222 case GSI_PROC_TYPE:
11223 -- Retrieves implver information; surely not used.
11224 case GSI_GET_HWRPB:
11225 -- Grabs a copy of the HWRPB; surely not used.
11226 */
11227 }
11228 break;
11229 #endif
11230 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11231 /* Alpha specific */
11232 case TARGET_NR_osf_setsysinfo:
11233 ret = -TARGET_EOPNOTSUPP;
11234 switch (arg1) {
11235 case TARGET_SSI_IEEE_FP_CONTROL:
11236 {
11237 uint64_t swcr, fpcr, orig_fpcr;
11238
11239 if (get_user_u64 (swcr, arg2)) {
11240 goto efault;
11241 }
11242 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11243 fpcr = orig_fpcr & FPCR_DYN_MASK;
11244
11245 /* Copied from linux ieee_swcr_to_fpcr. */
11246 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
11247 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
11248 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
11249 | SWCR_TRAP_ENABLE_DZE
11250 | SWCR_TRAP_ENABLE_OVF)) << 48;
11251 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
11252 | SWCR_TRAP_ENABLE_INE)) << 57;
11253 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
11254 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
11255
11256 cpu_alpha_store_fpcr(cpu_env, fpcr);
11257 ret = 0;
11258 }
11259 break;
11260
11261 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11262 {
11263 uint64_t exc, fpcr, orig_fpcr;
11264 int si_code;
11265
11266 if (get_user_u64(exc, arg2)) {
11267 goto efault;
11268 }
11269
11270 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
11271
11272 /* We only add to the exception status here. */
11273 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
11274
11275 cpu_alpha_store_fpcr(cpu_env, fpcr);
11276 ret = 0;
11277
11278 /* Old exceptions are not signaled. */
11279 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
11280
11281 /* If any exceptions set by this call,
11282 and are unmasked, send a signal. */
11283 si_code = 0;
11284 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
11285 si_code = TARGET_FPE_FLTRES;
11286 }
11287 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
11288 si_code = TARGET_FPE_FLTUND;
11289 }
11290 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
11291 si_code = TARGET_FPE_FLTOVF;
11292 }
11293 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
11294 si_code = TARGET_FPE_FLTDIV;
11295 }
11296 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
11297 si_code = TARGET_FPE_FLTINV;
11298 }
11299 if (si_code != 0) {
11300 target_siginfo_t info;
11301 info.si_signo = SIGFPE;
11302 info.si_errno = 0;
11303 info.si_code = si_code;
11304 info._sifields._sigfault._addr
11305 = ((CPUArchState *)cpu_env)->pc;
11306 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11307 QEMU_SI_FAULT, &info);
11308 }
11309 }
11310 break;
11311
11312 /* case SSI_NVPAIRS:
11313 -- Used with SSIN_UACPROC to enable unaligned accesses.
11314 case SSI_IEEE_STATE_AT_SIGNAL:
11315 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11316 -- Not implemented in linux kernel
11317 */
11318 }
11319 break;
11320 #endif
11321 #ifdef TARGET_NR_osf_sigprocmask
11322 /* Alpha specific. */
11323 case TARGET_NR_osf_sigprocmask:
11324 {
11325 abi_ulong mask;
11326 int how;
11327 sigset_t set, oldset;
11328
11329 switch(arg1) {
11330 case TARGET_SIG_BLOCK:
11331 how = SIG_BLOCK;
11332 break;
11333 case TARGET_SIG_UNBLOCK:
11334 how = SIG_UNBLOCK;
11335 break;
11336 case TARGET_SIG_SETMASK:
11337 how = SIG_SETMASK;
11338 break;
11339 default:
11340 ret = -TARGET_EINVAL;
11341 goto fail;
11342 }
11343 mask = arg2;
11344 target_to_host_old_sigset(&set, &mask);
11345 ret = do_sigprocmask(how, &set, &oldset);
11346 if (!ret) {
11347 host_to_target_old_sigset(&mask, &oldset);
11348 ret = mask;
11349 }
11350 }
11351 break;
11352 #endif
11353
11354 #ifdef TARGET_NR_getgid32
11355 case TARGET_NR_getgid32:
11356 ret = get_errno(getgid());
11357 break;
11358 #endif
11359 #ifdef TARGET_NR_geteuid32
11360 case TARGET_NR_geteuid32:
11361 ret = get_errno(geteuid());
11362 break;
11363 #endif
11364 #ifdef TARGET_NR_getegid32
11365 case TARGET_NR_getegid32:
11366 ret = get_errno(getegid());
11367 break;
11368 #endif
11369 #ifdef TARGET_NR_setreuid32
11370 case TARGET_NR_setreuid32:
11371 ret = get_errno(setreuid(arg1, arg2));
11372 break;
11373 #endif
11374 #ifdef TARGET_NR_setregid32
11375 case TARGET_NR_setregid32:
11376 ret = get_errno(setregid(arg1, arg2));
11377 break;
11378 #endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            /* Validate before the alloca: a negative or oversized count
             * must fail with EINVAL (as the kernel does) rather than
             * wrap the allocation size on the stack. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                ret = -TARGET_EINVAL;
                break;
            }
            if (gidsetsize > 0) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size query: nothing is written to the guest buffer. */
                break;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                /* Only 'ret' entries were filled in by the kernel. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist = NULL;
            int i;

            /* Validate before the alloca: a negative or oversized count
             * must fail with EINVAL (as the kernel does) rather than
             * wrap the allocation size on the stack. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                ret = -TARGET_EINVAL;
                break;
            }
            if (gidsetsize > 0) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            /* gidsetsize == 0 with a NULL list clears the supplementary
             * groups, matching the kernel and the non-32 arm above
             * (previously this path locked a zero-length buffer). */
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
11425 #ifdef TARGET_NR_fchown32
11426 case TARGET_NR_fchown32:
11427 ret = get_errno(fchown(arg1, arg2, arg3));
11428 break;
11429 #endif
11430 #ifdef TARGET_NR_setresuid32
11431 case TARGET_NR_setresuid32:
11432 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
11433 break;
11434 #endif
11435 #ifdef TARGET_NR_getresuid32
11436 case TARGET_NR_getresuid32:
11437 {
11438 uid_t ruid, euid, suid;
11439 ret = get_errno(getresuid(&ruid, &euid, &suid));
11440 if (!is_error(ret)) {
11441 if (put_user_u32(ruid, arg1)
11442 || put_user_u32(euid, arg2)
11443 || put_user_u32(suid, arg3))
11444 goto efault;
11445 }
11446 }
11447 break;
11448 #endif
11449 #ifdef TARGET_NR_setresgid32
11450 case TARGET_NR_setresgid32:
11451 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11452 break;
11453 #endif
11454 #ifdef TARGET_NR_getresgid32
11455 case TARGET_NR_getresgid32:
11456 {
11457 gid_t rgid, egid, sgid;
11458 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11459 if (!is_error(ret)) {
11460 if (put_user_u32(rgid, arg1)
11461 || put_user_u32(egid, arg2)
11462 || put_user_u32(sgid, arg3))
11463 goto efault;
11464 }
11465 }
11466 break;
11467 #endif
11468 #ifdef TARGET_NR_chown32
11469 case TARGET_NR_chown32:
11470 if (!(p = lock_user_string(arg1)))
11471 goto efault;
11472 ret = get_errno(chown(p, arg2, arg3));
11473 unlock_user(p, arg1, 0);
11474 break;
11475 #endif
11476 #ifdef TARGET_NR_setuid32
11477 case TARGET_NR_setuid32:
11478 ret = get_errno(sys_setuid(arg1));
11479 break;
11480 #endif
11481 #ifdef TARGET_NR_setgid32
11482 case TARGET_NR_setgid32:
11483 ret = get_errno(sys_setgid(arg1));
11484 break;
11485 #endif
11486 #ifdef TARGET_NR_setfsuid32
11487 case TARGET_NR_setfsuid32:
11488 ret = get_errno(setfsuid(arg1));
11489 break;
11490 #endif
11491 #ifdef TARGET_NR_setfsgid32
11492 case TARGET_NR_setfsgid32:
11493 ret = get_errno(setfsgid(arg1));
11494 break;
11495 #endif
11496
11497 case TARGET_NR_pivot_root:
11498 goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_ENOMEM;
            a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                goto fail;
            }
            ret = -TARGET_EFAULT;
            /* arg3 is the *output* vector -- one status byte per page of
             * the queried range -- not a string.  The old code locked it
             * with lock_user_string (wrong direction, wrong length) and
             * unlocked with length 'ret', which is 0 on success, so the
             * result was never copied back to the guest. */
            p = lock_user(VERIFY_WRITE, arg3,
                          DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE), 0);
            if (!p) {
                goto mincore_fail;
            }
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE));
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
11520 #ifdef TARGET_NR_arm_fadvise64_64
11521 case TARGET_NR_arm_fadvise64_64:
11522 /* arm_fadvise64_64 looks like fadvise64_64 but
11523 * with different argument order: fd, advice, offset, len
11524 * rather than the usual fd, offset, len, advice.
11525 * Note that offset and len are both 64-bit so appear as
11526 * pairs of 32-bit registers.
11527 */
11528 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11529 target_offset64(arg5, arg6), arg2);
11530 ret = -host_to_target_errno(ret);
11531 break;
11532 #endif
11533
11534 #if TARGET_ABI_BITS == 32
11535
11536 #ifdef TARGET_NR_fadvise64_64
11537 case TARGET_NR_fadvise64_64:
11538 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11539 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11540 ret = arg2;
11541 arg2 = arg3;
11542 arg3 = arg4;
11543 arg4 = arg5;
11544 arg5 = arg6;
11545 arg6 = ret;
11546 #else
11547 /* 6 args: fd, offset (high, low), len (high, low), advice */
11548 if (regpairs_aligned(cpu_env, num)) {
11549 /* offset is in (3,4), len in (5,6) and advice in 7 */
11550 arg2 = arg3;
11551 arg3 = arg4;
11552 arg4 = arg5;
11553 arg5 = arg6;
11554 arg6 = arg7;
11555 }
11556 #endif
11557 ret = -host_to_target_errno(posix_fadvise(arg1,
11558 target_offset64(arg2, arg3),
11559 target_offset64(arg4, arg5),
11560 arg6));
11561 break;
11562 #endif
11563
11564 #ifdef TARGET_NR_fadvise64
11565 case TARGET_NR_fadvise64:
11566 /* 5 args: fd, offset (high, low), len, advice */
11567 if (regpairs_aligned(cpu_env, num)) {
11568 /* offset is in (3,4), len in 5 and advice in 6 */
11569 arg2 = arg3;
11570 arg3 = arg4;
11571 arg4 = arg5;
11572 arg5 = arg6;
11573 }
11574 ret = -host_to_target_errno(posix_fadvise(arg1,
11575 target_offset64(arg2, arg3),
11576 arg4, arg5));
11577 break;
11578 #endif
11579
11580 #else /* not a 32-bit ABI */
11581 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11582 #ifdef TARGET_NR_fadvise64_64
11583 case TARGET_NR_fadvise64_64:
11584 #endif
11585 #ifdef TARGET_NR_fadvise64
11586 case TARGET_NR_fadvise64:
11587 #endif
11588 #ifdef TARGET_S390X
11589 switch (arg4) {
11590 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11591 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11592 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11593 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11594 default: break;
11595 }
11596 #endif
11597 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11598 break;
11599 #endif
11600 #endif /* end of 64-bit ABI fadvise handling */
11601
11602 #ifdef TARGET_NR_madvise
11603 case TARGET_NR_madvise:
11604 /* A straight passthrough may not be safe because qemu sometimes
11605 turns private file-backed mappings into anonymous mappings.
11606 This will break MADV_DONTNEED.
11607 This is a hint, so ignoring and returning success is ok. */
11608 ret = get_errno(0);
11609 break;
11610 #endif
11611 #if TARGET_ABI_BITS == 32
11612 case TARGET_NR_fcntl64:
11613 {
11614 int cmd;
11615 struct flock64 fl;
11616 from_flock64_fn *copyfrom = copy_from_user_flock64;
11617 to_flock64_fn *copyto = copy_to_user_flock64;
11618
11619 #ifdef TARGET_ARM
11620 if (((CPUARMState *)cpu_env)->eabi) {
11621 copyfrom = copy_from_user_eabi_flock64;
11622 copyto = copy_to_user_eabi_flock64;
11623 }
11624 #endif
11625
11626 cmd = target_to_host_fcntl_cmd(arg2);
11627 if (cmd == -TARGET_EINVAL) {
11628 ret = cmd;
11629 break;
11630 }
11631
11632 switch(arg2) {
11633 case TARGET_F_GETLK64:
11634 ret = copyfrom(&fl, arg3);
11635 if (ret) {
11636 break;
11637 }
11638 ret = get_errno(fcntl(arg1, cmd, &fl));
11639 if (ret == 0) {
11640 ret = copyto(arg3, &fl);
11641 }
11642 break;
11643
11644 case TARGET_F_SETLK64:
11645 case TARGET_F_SETLKW64:
11646 ret = copyfrom(&fl, arg3);
11647 if (ret) {
11648 break;
11649 }
11650 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11651 break;
11652 default:
11653 ret = do_fcntl(arg1, arg2, arg3);
11654 break;
11655 }
11656 break;
11657 }
11658 #endif
11659 #ifdef TARGET_NR_cacheflush
11660 case TARGET_NR_cacheflush:
11661 /* self-modifying code is handled automatically, so nothing needed */
11662 ret = 0;
11663 break;
11664 #endif
11665 #ifdef TARGET_NR_security
11666 case TARGET_NR_security:
11667 goto unimplemented;
11668 #endif
11669 #ifdef TARGET_NR_getpagesize
11670 case TARGET_NR_getpagesize:
11671 ret = TARGET_PAGE_SIZE;
11672 break;
11673 #endif
11674 case TARGET_NR_gettid:
11675 ret = get_errno(gettid());
11676 break;
11677 #ifdef TARGET_NR_readahead
11678 case TARGET_NR_readahead:
11679 #if TARGET_ABI_BITS == 32
11680 if (regpairs_aligned(cpu_env, num)) {
11681 arg2 = arg3;
11682 arg3 = arg4;
11683 arg4 = arg5;
11684 }
11685 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11686 #else
11687 ret = get_errno(readahead(arg1, arg2, arg3));
11688 #endif
11689 break;
11690 #endif
11691 #ifdef CONFIG_ATTR
11692 #ifdef TARGET_NR_setxattr
11693 case TARGET_NR_listxattr:
11694 case TARGET_NR_llistxattr:
11695 {
11696 void *p, *b = 0;
11697 if (arg2) {
11698 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11699 if (!b) {
11700 ret = -TARGET_EFAULT;
11701 break;
11702 }
11703 }
11704 p = lock_user_string(arg1);
11705 if (p) {
11706 if (num == TARGET_NR_listxattr) {
11707 ret = get_errno(listxattr(p, b, arg3));
11708 } else {
11709 ret = get_errno(llistxattr(p, b, arg3));
11710 }
11711 } else {
11712 ret = -TARGET_EFAULT;
11713 }
11714 unlock_user(p, arg1, 0);
11715 unlock_user(b, arg2, arg3);
11716 break;
11717 }
11718 case TARGET_NR_flistxattr:
11719 {
11720 void *b = 0;
11721 if (arg2) {
11722 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11723 if (!b) {
11724 ret = -TARGET_EFAULT;
11725 break;
11726 }
11727 }
11728 ret = get_errno(flistxattr(arg1, b, arg3));
11729 unlock_user(b, arg2, arg3);
11730 break;
11731 }
11732 case TARGET_NR_setxattr:
11733 case TARGET_NR_lsetxattr:
11734 {
11735 void *p, *n, *v = 0;
11736 if (arg3) {
11737 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11738 if (!v) {
11739 ret = -TARGET_EFAULT;
11740 break;
11741 }
11742 }
11743 p = lock_user_string(arg1);
11744 n = lock_user_string(arg2);
11745 if (p && n) {
11746 if (num == TARGET_NR_setxattr) {
11747 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11748 } else {
11749 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11750 }
11751 } else {
11752 ret = -TARGET_EFAULT;
11753 }
11754 unlock_user(p, arg1, 0);
11755 unlock_user(n, arg2, 0);
11756 unlock_user(v, arg3, 0);
11757 }
11758 break;
11759 case TARGET_NR_fsetxattr:
11760 {
11761 void *n, *v = 0;
11762 if (arg3) {
11763 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11764 if (!v) {
11765 ret = -TARGET_EFAULT;
11766 break;
11767 }
11768 }
11769 n = lock_user_string(arg2);
11770 if (n) {
11771 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11772 } else {
11773 ret = -TARGET_EFAULT;
11774 }
11775 unlock_user(n, arg2, 0);
11776 unlock_user(v, arg3, 0);
11777 }
11778 break;
11779 case TARGET_NR_getxattr:
11780 case TARGET_NR_lgetxattr:
11781 {
11782 void *p, *n, *v = 0;
11783 if (arg3) {
11784 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11785 if (!v) {
11786 ret = -TARGET_EFAULT;
11787 break;
11788 }
11789 }
11790 p = lock_user_string(arg1);
11791 n = lock_user_string(arg2);
11792 if (p && n) {
11793 if (num == TARGET_NR_getxattr) {
11794 ret = get_errno(getxattr(p, n, v, arg4));
11795 } else {
11796 ret = get_errno(lgetxattr(p, n, v, arg4));
11797 }
11798 } else {
11799 ret = -TARGET_EFAULT;
11800 }
11801 unlock_user(p, arg1, 0);
11802 unlock_user(n, arg2, 0);
11803 unlock_user(v, arg3, arg4);
11804 }
11805 break;
11806 case TARGET_NR_fgetxattr:
11807 {
11808 void *n, *v = 0;
11809 if (arg3) {
11810 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11811 if (!v) {
11812 ret = -TARGET_EFAULT;
11813 break;
11814 }
11815 }
11816 n = lock_user_string(arg2);
11817 if (n) {
11818 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11819 } else {
11820 ret = -TARGET_EFAULT;
11821 }
11822 unlock_user(n, arg2, 0);
11823 unlock_user(v, arg3, arg4);
11824 }
11825 break;
11826 case TARGET_NR_removexattr:
11827 case TARGET_NR_lremovexattr:
11828 {
11829 void *p, *n;
11830 p = lock_user_string(arg1);
11831 n = lock_user_string(arg2);
11832 if (p && n) {
11833 if (num == TARGET_NR_removexattr) {
11834 ret = get_errno(removexattr(p, n));
11835 } else {
11836 ret = get_errno(lremovexattr(p, n));
11837 }
11838 } else {
11839 ret = -TARGET_EFAULT;
11840 }
11841 unlock_user(p, arg1, 0);
11842 unlock_user(n, arg2, 0);
11843 }
11844 break;
11845 case TARGET_NR_fremovexattr:
11846 {
11847 void *n;
11848 n = lock_user_string(arg2);
11849 if (n) {
11850 ret = get_errno(fremovexattr(arg1, n));
11851 } else {
11852 ret = -TARGET_EFAULT;
11853 }
11854 unlock_user(n, arg2, 0);
11855 }
11856 break;
11857 #endif
11858 #endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    /* set_thread_area: install the guest TLS pointer; where it lives is
     * architecture-specific, hence the per-target #if chain below. */
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        /* CRIS: values with any low-byte bits set are rejected —
         * presumably reserved bits in PR_PID (TODO confirm). */
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        /* i386 has a real descriptor-table based implementation */
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            /* m68k stores the thread pointer in the per-thread TaskState */
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            ret = 0;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    /* get_thread_area: read back what set_thread_area stored. */
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ret = ts->tp_value;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* not implemented: return ENOSYS without logging a warning */
        goto unimplemented_nowarn;
#endif
11906
#ifdef TARGET_NR_clock_settime
    /* clock_settime(2): arg1 = clockid, arg2 = guest struct timespec *. */
    case TARGET_NR_clock_settime:
        {
            struct timespec ts;

            /* conversion returns -TARGET_EFAULT on a bad guest pointer */
            ret = target_to_host_timespec(&ts, arg2);
            if (!is_error(ret)) {
                ret = get_errno(clock_settime(arg1, &ts));
            }
            break;
        }
#endif
#ifdef TARGET_NR_clock_gettime
    /* clock_gettime(2): arg1 = clockid, arg2 = guest timespec out-param. */
    case TARGET_NR_clock_gettime:
        {
            struct timespec ts;
            ret = get_errno(clock_gettime(arg1, &ts));
            if (!is_error(ret)) {
                /* copy-out can itself fail with -TARGET_EFAULT */
                ret = host_to_target_timespec(arg2, &ts);
            }
            break;
        }
#endif
#ifdef TARGET_NR_clock_getres
    /* clock_getres(2): arg1 = clockid, arg2 = guest timespec out-param. */
    case TARGET_NR_clock_getres:
        {
            struct timespec ts;
            ret = get_errno(clock_getres(arg1, &ts));
            if (!is_error(ret)) {
                /* NOTE(review): unlike clock_gettime above, the copy-out
                 * result is ignored — a bad arg2 is silently dropped. */
                host_to_target_timespec(arg2, &ts);
            }
            break;
        }
#endif
#ifdef TARGET_NR_clock_nanosleep
    /* clock_nanosleep(2): arg1 = clockid, arg2 = flags,
     * arg3 = guest request timespec, arg4 = optional remain out-param. */
    case TARGET_NR_clock_nanosleep:
        {
            struct timespec ts;
            /* Fix: the conversion result used to be ignored, so a faulting
             * guest pointer slept on uninitialized stack garbage; fail with
             * EFAULT instead. */
            if (target_to_host_timespec(&ts, arg3)) {
                goto efault;
            }
            ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                                 &ts, arg4 ? &ts : NULL));
            /* on interruption the remaining time is written back to *arg4 */
            if (arg4)
                host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
            /* clock_nanosleep is odd in that it returns positive errno values.
             * On PPC, CR0 bit 3 should be set in such a situation. */
            if (ret && ret != -TARGET_ERESTARTSYS) {
                ((CPUPPCState *)cpu_env)->crf[0] |= 1;
            }
#endif
            break;
        }
#endif
11961
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* hand the kernel the host view of the guest address; the kernel
         * only stores it (for clearing the tid at thread exit), so no
         * translation of the pointed-to data is needed here */
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

    case TARGET_NR_tkill:
        /* signal numbers differ between guest and host ABIs */
        ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
        break;

    case TARGET_NR_tgkill:
        /* arg1 = tgid, arg2 = tid, arg3 = guest signal number */
        ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        goto unimplemented_nowarn;
#endif
11994
#if defined(TARGET_NR_utimensat)
    /* utimensat(2): arg1 = dirfd, arg2 = optional pathname,
     * arg3 = optional array of two guest timespecs, arg4 = flags. */
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                /* NULL times: both timestamps are set to the current time */
                tsp = NULL;
            } else {
                /* Fix: the conversion results used to be ignored, so an
                 * unreadable guest pointer silently used uninitialized
                 * timespecs; fail with EFAULT instead. */
                if (target_to_host_timespec(ts, arg3)) {
                    goto efault;
                }
                if (target_to_host_timespec(ts + 1,
                                arg3 + sizeof(struct target_timespec))) {
                    goto efault;
                }
                tsp = ts;
            }
            if (!arg2)
                /* NULL path: operate directly on the descriptor arg1 */
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
    /* futex(2): all translation is delegated to the do_futex() helper. */
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* register a translator so events read from this fd are
             * converted to the guest's inotify_event layout */
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        /* arg1 carries O_NONBLOCK/O_CLOEXEC-style flags that need the
         * usual guest-to-host fcntl flag translation */
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    /* inotify_add_watch(2): arg1 = fd, arg2 = guest pathname, arg3 = mask. */
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        /* Fix: the result used to be unchecked, so a bad guest pointer
         * passed path(NULL) to the host syscall (crashing QEMU) instead of
         * returning EFAULT to the guest. */
        if (!p) {
            goto efault;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* both arguments are plain integers (fd, watch descriptor) */
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    /* mq_open(3): arg1 = guest queue name, arg2 = open flags,
     * arg3 = mode, arg4 = optional guest struct mq_attr *. */
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    goto efault;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): locking the string at 'arg1 - 1' rather than
             * arg1 looks wrong (mq_unlink below does the same) — verify
             * against the guest ABI before touching it. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                goto efault;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        /* same 'arg1 - 1' oddity as mq_open above */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    /* mq_timedsend(3): arg1 = mqd, arg2 = guest message buffer,
     * arg3 = length, arg4 = priority, arg5 = optional guest timespec. */
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            /* NOTE(review): p is not NULL-checked; a bad guest buffer
             * passes NULL to safe_mq_timedsend — TODO confirm/handle */
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        break;
12103
12104 case TARGET_NR_mq_timedreceive:
12105 {
12106 struct timespec ts;
12107 unsigned int prio;
12108
12109 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12110 if (arg5 != 0) {
12111 target_to_host_timespec(&ts, arg5);
12112 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12113 &prio, &ts));
12114 host_to_target_timespec(arg5, &ts);
12115 } else {
12116 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12117 &prio, NULL));
12118 }
12119 unlock_user (p, arg2, arg3);
12120 if (arg4 != 0)
12121 put_user_u32(prio, arg4);
12122 }
12123 break;
12124
12125 /* Not implemented for now... */
12126 /* case TARGET_NR_mq_notify: */
12127 /* break; */
12128
12129 case TARGET_NR_mq_getsetattr:
12130 {
12131 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12132 ret = 0;
12133 if (arg2 != 0) {
12134 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12135 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12136 &posix_mq_attr_out));
12137 } else if (arg3 != 0) {
12138 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12139 }
12140 if (ret == 0 && arg3 != 0) {
12141 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12142 }
12143 }
12144 break;
12145 #endif
12146
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    /* tee(2): duplicate up to arg3 bytes between two pipes; all args are
     * plain integers/flags, no pointer translation needed. */
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    /* splice(2): arg2/arg4 are optional guest loff_t pointers which must
     * be copied in, passed to the host, and written back afterwards. */
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    goto efault;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* write the (possibly advanced) offsets back to the guest;
             * NOTE(review): this happens even when splice failed */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    goto efault;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    goto efault;
                }
            }
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    /* vmsplice(2): arg2 is a guest iovec array of arg3 entries. */
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec reports its failure reason via errno */
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* old eventfd(2) has no flags argument */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* eventfd counters are 64-bit values needing byte-swap on read */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        {
            /* translate only the two flags eventfd2 accepts; any other
             * bits are passed through for the host to reject */
            int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
            if (arg2 & TARGET_O_NONBLOCK) {
                host_flags |= O_NONBLOCK;
            }
            if (arg2 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }
            ret = get_errno(eventfd(arg1, host_flags));
            if (ret >= 0) {
                fd_trans_register(ret, &target_eventfd_trans);
            }
            break;
        }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    /* fallocate(2): on 32-bit ABIs the two 64-bit offsets each arrive as
     * a register pair and must be reassembled. */
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts an alignment pad, shifting the 64-bit pairs
         * to arg3..arg6 and the flags to arg7 */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    /* signalfd4(2): mask translation handled inside do_signalfd4() */
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    /* legacy signalfd(2): same helper with flags forced to 0 */
    case TARGET_NR_signalfd:
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* the size argument is only a hint on modern kernels */
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* NOTE(review): flags are passed through untranslated — assumes
         * EPOLL_CLOEXEC has the same value on guest and host (TODO confirm) */
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    /* epoll_ctl(2): arg4 is a guest epoll_event, NULL for EPOLL_CTL_DEL. */
    case TARGET_NR_epoll_ctl:
        {
            struct epoll_event ep;
            struct epoll_event *epp = 0;
            if (arg4) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    goto efault;
                }
                ep.events = tswap32(target_ep->events);
                /* The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
                epp = &ep;
            }
            ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
            break;
        }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    /* Shared body for epoll_wait/epoll_pwait: events are received into a
     * host-side array and then byte-swapped into the guest buffer. */
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* bound maxevents so the allocation below can't be guest-driven
         * to an absurd size */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            ret = -TARGET_ENOMEM;
            break;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            /* pwait variant additionally takes a guest sigset (arg5/arg6) */
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* plain wait is implemented as pwait with a NULL sigmask */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* copy the 'ret' received events back in guest byte order */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
        {
            /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
            struct target_rlimit64 *target_rnew, *target_rold;
            struct host_rlimit64 rnew, rold, *rnewp = 0;
            int resource = target_to_host_resource(arg2);
            if (arg3) {
                /* copy in and byte-swap the new limits */
                if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                    goto efault;
                }
                rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
                rnew.rlim_max = tswap64(target_rnew->rlim_max);
                unlock_user_struct(target_rnew, arg3, 0);
                rnewp = &rnew;
            }

            ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
            if (!is_error(ret) && arg4) {
                /* write the previous limits back to the guest */
                if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                    goto efault;
                }
                target_rold->rlim_cur = tswap64(rold.rlim_cur);
                target_rold->rlim_max = tswap64(rold.rlim_max);
                unlock_user_struct(target_rold, arg4, 1);
            }
            break;
        }
#endif
#ifdef TARGET_NR_gethostname
    /* gethostname(2): arg1 = guest buffer, arg2 = buffer length. */
    case TARGET_NR_gethostname:
        {
            char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
            if (name) {
                ret = get_errno(gethostname(name, arg2));
                unlock_user(name, arg1, arg2);
            } else {
                ret = -TARGET_EFAULT;
            }
            break;
        }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    /* arm kernel-helper style cmpxchg: if *(u32 *)arg6 == arg2, store arg1
     * there; always return the old memory value. */
    case TARGET_NR_atomic_cmpxchg_32:
        {
            /* should use start_exclusive from main.c */
            abi_ulong mem_value;
            if (get_user_u32(mem_value, arg6)) {
                target_siginfo_t info;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = arg6;
                queue_signal((CPUArchState *)cpu_env, info.si_signo,
                             QEMU_SI_FAULT, &info);
                ret = 0xdeadbeef;
                /* Fix: don't fall through after a faulted read — mem_value
                 * is uninitialized (undefined behavior to read it) and the
                 * compare/store below would also touch the bad address. */
                break;
            }
            if (mem_value == arg2)
                put_user_u32(arg1, arg6);
            ret = mem_value;
            break;
        }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        {
            /* Like the kernel implementation and the qemu arm barrier, no-op this? */
            ret = 0;
            break;
        }
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
        {
            /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

            struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

            int clkid = arg1;
            /* claim a slot in the global host-timer table */
            int timer_index = next_free_host_timer();

            if (timer_index < 0) {
                /* table full */
                ret = -TARGET_EAGAIN;
            } else {
                timer_t *phtimer = g_posix_timers  + timer_index;

                if (arg2) {
                    phost_sevp = &host_sevp;
                    ret = target_to_host_sigevent(phost_sevp, arg2);
                    if (ret != 0) {
                        break;
                    }
                }

                ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
                if (ret) {
                    phtimer = NULL;
                } else {
                    /* the guest-visible id is the table index tagged with
                     * TIMER_MAGIC so stale ids can be rejected later */
                    if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                        goto efault;
                    }
                }
            }
            break;
        }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
        {
            /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
             * struct itimerspec * old_value */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                /* get_timer_id() returns a negative target errno on a bad id */
                ret = timerid;
            } else if (arg3 == 0) {
                ret = -TARGET_EINVAL;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

                if (target_to_host_itimerspec(&hspec_new, arg3)) {
                    goto efault;
                }
                ret = get_errno(
                    timer_settime(htimer, arg2, &hspec_new, &hspec_old));
                /* write the previous timer setting back if requested */
                if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                    goto efault;
                }
            }
            break;
        }
#endif
12533
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
        {
            /* args: timer_t timerid, struct itimerspec *curr_value */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                /* negative target errno from get_timer_id() on a bad id */
                ret = timerid;
            } else if (!arg2) {
                ret = -TARGET_EFAULT;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                struct itimerspec hspec;
                ret = get_errno(timer_gettime(htimer, &hspec));

                /* NOTE(review): hspec is converted and copied out even when
                 * timer_gettime() failed above (hspec then uninitialized) */
                if (host_to_target_itimerspec(arg2, &hspec)) {
                    ret = -TARGET_EFAULT;
                }
            }
            break;
        }
#endif
12556
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
        {
            /* args: timer_t timerid */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                /* negative target errno from get_timer_id() on a bad id */
                ret = timerid;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                ret = get_errno(timer_getoverrun(htimer));
            }
            /* Fix: removed a bogus fd_trans_unregister(ret) here — ret is
             * an overrun count, not a file descriptor, so the call could
             * drop the fd translator of an unrelated open descriptor. */
            break;
        }
#endif
12573
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
        {
            /* args: timer_t timerid */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                /* negative target errno from get_timer_id() on a bad id */
                ret = timerid;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                ret = get_errno(timer_delete(htimer));
                /* free the slot in the global timer table */
                g_posix_timers[timerid] = 0;
            }
            break;
        }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* flags need the usual O_NONBLOCK/O_CLOEXEC-style translation */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* copy current setting out if the guest supplied a buffer */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* optionally return the previous setting */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    /* ioprio_get(2): integer (which, who) pair, no translation needed. */
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    /* ioprio_set(2): integer (which, who, ioprio), no translation needed. */
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* NOTE(review): nstype flags passed through untranslated — assumes
         * CLONE_NEW* values match between guest and host (TODO confirm) */
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    /* kcmp(2): compares kernel resources of two processes; all-integer args. */
    case TARGET_NR_kcmp:
        ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
12662
    default:
    /* syscalls jump here when the number is recognized but unimplemented */
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    /* same as above, but for syscalls deliberately unimplemented: skip the
     * log message so well-behaved fallbacks don't spam the console */
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
/* common exit: trace/strace the result and return it to the caller */
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
/* shared bad-guest-pointer exit used throughout the switch above */
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}