4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid
)
/* Fallback for hosts whose libc lacks gettid(): this replacement must
 * return a host errno (not a target one). */
static int gettid(void) {
    return -ENOSYS;
}
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
223 loff_t
*, res
, uint
, wh
);
225 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
226 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group
,int,error_code
)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address
,int *,tidptr
)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
235 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
239 unsigned long *, user_mask_ptr
);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
245 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
246 struct __user_cap_data_struct
*, data
);
247 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
248 struct __user_cap_data_struct
*, data
);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get
, int, which
, int, who
)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
294 QEMU_IFLA_BR_FORWARD_DELAY
,
295 QEMU_IFLA_BR_HELLO_TIME
,
296 QEMU_IFLA_BR_MAX_AGE
,
297 QEMU_IFLA_BR_AGEING_TIME
,
298 QEMU_IFLA_BR_STP_STATE
,
299 QEMU_IFLA_BR_PRIORITY
,
300 QEMU_IFLA_BR_VLAN_FILTERING
,
301 QEMU_IFLA_BR_VLAN_PROTOCOL
,
302 QEMU_IFLA_BR_GROUP_FWD_MASK
,
303 QEMU_IFLA_BR_ROOT_ID
,
304 QEMU_IFLA_BR_BRIDGE_ID
,
305 QEMU_IFLA_BR_ROOT_PORT
,
306 QEMU_IFLA_BR_ROOT_PATH_COST
,
307 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
308 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
309 QEMU_IFLA_BR_HELLO_TIMER
,
310 QEMU_IFLA_BR_TCN_TIMER
,
311 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
312 QEMU_IFLA_BR_GC_TIMER
,
313 QEMU_IFLA_BR_GROUP_ADDR
,
314 QEMU_IFLA_BR_FDB_FLUSH
,
315 QEMU_IFLA_BR_MCAST_ROUTER
,
316 QEMU_IFLA_BR_MCAST_SNOOPING
,
317 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
318 QEMU_IFLA_BR_MCAST_QUERIER
,
319 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
320 QEMU_IFLA_BR_MCAST_HASH_MAX
,
321 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
322 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
323 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
324 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
325 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
326 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
327 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
328 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
329 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
330 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
331 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
332 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
334 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
335 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
359 QEMU_IFLA_NET_NS_PID
,
362 QEMU_IFLA_VFINFO_LIST
,
370 QEMU_IFLA_PROMISCUITY
,
371 QEMU_IFLA_NUM_TX_QUEUES
,
372 QEMU_IFLA_NUM_RX_QUEUES
,
374 QEMU_IFLA_PHYS_PORT_ID
,
375 QEMU_IFLA_CARRIER_CHANGES
,
376 QEMU_IFLA_PHYS_SWITCH_ID
,
377 QEMU_IFLA_LINK_NETNSID
,
378 QEMU_IFLA_PHYS_PORT_NAME
,
379 QEMU_IFLA_PROTO_DOWN
,
380 QEMU_IFLA_GSO_MAX_SEGS
,
381 QEMU_IFLA_GSO_MAX_SIZE
,
388 QEMU_IFLA_BRPORT_UNSPEC
,
389 QEMU_IFLA_BRPORT_STATE
,
390 QEMU_IFLA_BRPORT_PRIORITY
,
391 QEMU_IFLA_BRPORT_COST
,
392 QEMU_IFLA_BRPORT_MODE
,
393 QEMU_IFLA_BRPORT_GUARD
,
394 QEMU_IFLA_BRPORT_PROTECT
,
395 QEMU_IFLA_BRPORT_FAST_LEAVE
,
396 QEMU_IFLA_BRPORT_LEARNING
,
397 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
398 QEMU_IFLA_BRPORT_PROXYARP
,
399 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
400 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
401 QEMU_IFLA_BRPORT_ROOT_ID
,
402 QEMU_IFLA_BRPORT_BRIDGE_ID
,
403 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
404 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
407 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
408 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
409 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
410 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
411 QEMU_IFLA_BRPORT_HOLD_TIMER
,
412 QEMU_IFLA_BRPORT_FLUSH
,
413 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
414 QEMU_IFLA_BRPORT_PAD
,
415 QEMU___IFLA_BRPORT_MAX
419 QEMU_IFLA_INFO_UNSPEC
,
422 QEMU_IFLA_INFO_XSTATS
,
423 QEMU_IFLA_INFO_SLAVE_KIND
,
424 QEMU_IFLA_INFO_SLAVE_DATA
,
425 QEMU___IFLA_INFO_MAX
,
429 QEMU_IFLA_INET_UNSPEC
,
431 QEMU___IFLA_INET_MAX
,
435 QEMU_IFLA_INET6_UNSPEC
,
436 QEMU_IFLA_INET6_FLAGS
,
437 QEMU_IFLA_INET6_CONF
,
438 QEMU_IFLA_INET6_STATS
,
439 QEMU_IFLA_INET6_MCAST
,
440 QEMU_IFLA_INET6_CACHEINFO
,
441 QEMU_IFLA_INET6_ICMP6STATS
,
442 QEMU_IFLA_INET6_TOKEN
,
443 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
444 QEMU___IFLA_INET6_MAX
447 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
448 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
449 typedef struct TargetFdTrans
{
450 TargetFdDataFunc host_to_target_data
;
451 TargetFdDataFunc target_to_host_data
;
452 TargetFdAddrFunc target_to_host_addr
;
455 static TargetFdTrans
**target_fd_trans
;
457 static unsigned int target_fd_max
;
459 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
461 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
462 return target_fd_trans
[fd
]->target_to_host_data
;
467 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
469 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
470 return target_fd_trans
[fd
]->host_to_target_data
;
475 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
477 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
478 return target_fd_trans
[fd
]->target_to_host_addr
;
483 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
487 if (fd
>= target_fd_max
) {
488 oldmax
= target_fd_max
;
489 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
490 target_fd_trans
= g_renew(TargetFdTrans
*,
491 target_fd_trans
, target_fd_max
);
492 memset((void *)(target_fd_trans
+ oldmax
), 0,
493 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
495 target_fd_trans
[fd
] = trans
;
498 static void fd_trans_unregister(int fd
)
500 if (fd
>= 0 && fd
< target_fd_max
) {
501 target_fd_trans
[fd
] = NULL
;
505 static void fd_trans_dup(int oldfd
, int newfd
)
507 fd_trans_unregister(newfd
);
508 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
509 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper following the kernel syscall convention: returns the
 * path length including the NUL terminator on success, -1 on failure
 * (errno is left as set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (!getcwd(buf, size)) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
522 #ifdef TARGET_NR_utimensat
523 #if defined(__NR_utimensat)
524 #define __NR_sys_utimensat __NR_utimensat
525 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
526 const struct timespec
*,tsp
,int,flags
)
/* Host kernel has no utimensat(): fail with ENOSYS so the caller sees
 * the same behaviour as a kernel lacking the syscall. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
535 #endif /* TARGET_NR_utimensat */
537 #ifdef CONFIG_INOTIFY
538 #include <sys/inotify.h>
540 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the guest's inotify_init maps onto the host libc call. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
546 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper mapping the guest's inotify_add_watch onto the host
 * libc call. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
552 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper mapping the guest's inotify_rm_watch onto the host
 * libc call. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
558 #ifdef CONFIG_INOTIFY1
559 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper mapping the guest's inotify_init1 onto the host libc call. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
567 /* Userspace can usually survive runtime without inotify */
568 #undef TARGET_NR_inotify_init
569 #undef TARGET_NR_inotify_init1
570 #undef TARGET_NR_inotify_add_watch
571 #undef TARGET_NR_inotify_rm_watch
572 #endif /* CONFIG_INOTIFY */
574 #if defined(TARGET_NR_prlimit64)
575 #ifndef __NR_prlimit64
576 # define __NR_prlimit64 -1
578 #define __NR_sys_prlimit64 __NR_prlimit64
579 /* The glibc rlimit structure may not be that used by the underlying syscall */
580 struct host_rlimit64
{
584 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
585 const struct host_rlimit64
*, new_limit
,
586 struct host_rlimit64
*, old_limit
)
590 #if defined(TARGET_NR_timer_create)
591 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
592 static timer_t g_posix_timers
[32] = { 0, } ;
594 static inline int next_free_host_timer(void)
597 /* FIXME: Does finding the next free slot require a lock? */
598 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
599 if (g_posix_timers
[k
] == 0) {
600 g_posix_timers
[k
] = (timer_t
) 1;
608 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
610 static inline int regpairs_aligned(void *cpu_env
) {
611 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
613 #elif defined(TARGET_MIPS)
614 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
615 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
616 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
617 * of registers which translates to the same as ARM/MIPS, because we start with
619 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
621 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
624 #define ERRNO_TABLE_SIZE 1200
626 /* target_to_host_errno_table[] is initialized from
627 * host_to_target_errno_table[] in syscall_init(). */
628 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
632 * This list is the union of errno values overridden in asm-<arch>/errno.h
633 * minus the errnos that are not actually generic to all archs.
635 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
636 [EAGAIN
] = TARGET_EAGAIN
,
637 [EIDRM
] = TARGET_EIDRM
,
638 [ECHRNG
] = TARGET_ECHRNG
,
639 [EL2NSYNC
] = TARGET_EL2NSYNC
,
640 [EL3HLT
] = TARGET_EL3HLT
,
641 [EL3RST
] = TARGET_EL3RST
,
642 [ELNRNG
] = TARGET_ELNRNG
,
643 [EUNATCH
] = TARGET_EUNATCH
,
644 [ENOCSI
] = TARGET_ENOCSI
,
645 [EL2HLT
] = TARGET_EL2HLT
,
646 [EDEADLK
] = TARGET_EDEADLK
,
647 [ENOLCK
] = TARGET_ENOLCK
,
648 [EBADE
] = TARGET_EBADE
,
649 [EBADR
] = TARGET_EBADR
,
650 [EXFULL
] = TARGET_EXFULL
,
651 [ENOANO
] = TARGET_ENOANO
,
652 [EBADRQC
] = TARGET_EBADRQC
,
653 [EBADSLT
] = TARGET_EBADSLT
,
654 [EBFONT
] = TARGET_EBFONT
,
655 [ENOSTR
] = TARGET_ENOSTR
,
656 [ENODATA
] = TARGET_ENODATA
,
657 [ETIME
] = TARGET_ETIME
,
658 [ENOSR
] = TARGET_ENOSR
,
659 [ENONET
] = TARGET_ENONET
,
660 [ENOPKG
] = TARGET_ENOPKG
,
661 [EREMOTE
] = TARGET_EREMOTE
,
662 [ENOLINK
] = TARGET_ENOLINK
,
663 [EADV
] = TARGET_EADV
,
664 [ESRMNT
] = TARGET_ESRMNT
,
665 [ECOMM
] = TARGET_ECOMM
,
666 [EPROTO
] = TARGET_EPROTO
,
667 [EDOTDOT
] = TARGET_EDOTDOT
,
668 [EMULTIHOP
] = TARGET_EMULTIHOP
,
669 [EBADMSG
] = TARGET_EBADMSG
,
670 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
671 [EOVERFLOW
] = TARGET_EOVERFLOW
,
672 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
673 [EBADFD
] = TARGET_EBADFD
,
674 [EREMCHG
] = TARGET_EREMCHG
,
675 [ELIBACC
] = TARGET_ELIBACC
,
676 [ELIBBAD
] = TARGET_ELIBBAD
,
677 [ELIBSCN
] = TARGET_ELIBSCN
,
678 [ELIBMAX
] = TARGET_ELIBMAX
,
679 [ELIBEXEC
] = TARGET_ELIBEXEC
,
680 [EILSEQ
] = TARGET_EILSEQ
,
681 [ENOSYS
] = TARGET_ENOSYS
,
682 [ELOOP
] = TARGET_ELOOP
,
683 [ERESTART
] = TARGET_ERESTART
,
684 [ESTRPIPE
] = TARGET_ESTRPIPE
,
685 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
686 [EUSERS
] = TARGET_EUSERS
,
687 [ENOTSOCK
] = TARGET_ENOTSOCK
,
688 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
689 [EMSGSIZE
] = TARGET_EMSGSIZE
,
690 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
691 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
692 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
693 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
694 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
695 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
696 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
697 [EADDRINUSE
] = TARGET_EADDRINUSE
,
698 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
699 [ENETDOWN
] = TARGET_ENETDOWN
,
700 [ENETUNREACH
] = TARGET_ENETUNREACH
,
701 [ENETRESET
] = TARGET_ENETRESET
,
702 [ECONNABORTED
] = TARGET_ECONNABORTED
,
703 [ECONNRESET
] = TARGET_ECONNRESET
,
704 [ENOBUFS
] = TARGET_ENOBUFS
,
705 [EISCONN
] = TARGET_EISCONN
,
706 [ENOTCONN
] = TARGET_ENOTCONN
,
707 [EUCLEAN
] = TARGET_EUCLEAN
,
708 [ENOTNAM
] = TARGET_ENOTNAM
,
709 [ENAVAIL
] = TARGET_ENAVAIL
,
710 [EISNAM
] = TARGET_EISNAM
,
711 [EREMOTEIO
] = TARGET_EREMOTEIO
,
712 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
713 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
714 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
715 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
716 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
717 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
718 [EALREADY
] = TARGET_EALREADY
,
719 [EINPROGRESS
] = TARGET_EINPROGRESS
,
720 [ESTALE
] = TARGET_ESTALE
,
721 [ECANCELED
] = TARGET_ECANCELED
,
722 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
723 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
725 [ENOKEY
] = TARGET_ENOKEY
,
728 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
731 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
734 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
737 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
739 #ifdef ENOTRECOVERABLE
740 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
744 static inline int host_to_target_errno(int err
)
746 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
747 host_to_target_errno_table
[err
]) {
748 return host_to_target_errno_table
[err
];
753 static inline int target_to_host_errno(int err
)
755 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
756 target_to_host_errno_table
[err
]) {
757 return target_to_host_errno_table
[err
];
762 static inline abi_long
get_errno(abi_long ret
)
765 return -host_to_target_errno(errno
);
770 static inline int is_error(abi_long ret
)
772 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
775 const char *target_strerror(int err
)
777 if (err
== TARGET_ERESTARTSYS
) {
778 return "To be restarted";
780 if (err
== TARGET_QEMU_ESIGRETURN
) {
781 return "Successful exit from sigreturn";
784 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
787 return strerror(target_to_host_errno(err
));
790 #define safe_syscall0(type, name) \
791 static type safe_##name(void) \
793 return safe_syscall(__NR_##name); \
796 #define safe_syscall1(type, name, type1, arg1) \
797 static type safe_##name(type1 arg1) \
799 return safe_syscall(__NR_##name, arg1); \
802 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
803 static type safe_##name(type1 arg1, type2 arg2) \
805 return safe_syscall(__NR_##name, arg1, arg2); \
808 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
809 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
811 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
814 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
816 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
818 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
821 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
822 type4, arg4, type5, arg5) \
823 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
826 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
829 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
830 type4, arg4, type5, arg5, type6, arg6) \
831 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
832 type5 arg5, type6 arg6) \
834 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
837 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
838 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
839 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
840 int, flags
, mode_t
, mode
)
841 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
842 struct rusage
*, rusage
)
843 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
844 int, options
, struct rusage
*, rusage
)
845 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
846 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
847 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
848 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
849 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
851 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
852 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
854 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
855 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
856 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
857 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
858 safe_syscall2(int, tkill
, int, tid
, int, sig
)
859 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
860 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
861 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
862 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
864 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
865 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
866 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
867 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
868 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
869 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
870 safe_syscall2(int, flock
, int, fd
, int, operation
)
871 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
872 const struct timespec
*, uts
, size_t, sigsetsize
)
873 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
875 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
876 struct timespec
*, rem
)
877 #ifdef TARGET_NR_clock_nanosleep
878 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
879 const struct timespec
*, req
, struct timespec
*, rem
)
882 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
884 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
885 long, msgtype
, int, flags
)
886 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
887 unsigned, nsops
, const struct timespec
*, timeout
)
889 /* This host kernel architecture uses a single ipc syscall; fake up
890 * wrappers for the sub-operations to hide this implementation detail.
891 * Annoyingly we can't include linux/ipc.h to get the constant definitions
892 * for the call parameter because some structs in there conflict with the
893 * sys/ipc.h ones. So we just define them here, and rely on them being
894 * the same for all host architectures.
896 #define Q_SEMTIMEDOP 4
899 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
901 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
902 void *, ptr
, long, fifth
)
903 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
905 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
907 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
909 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
911 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
912 const struct timespec
*timeout
)
914 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
918 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
919 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
920 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
921 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
922 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
924 /* We do ioctl like this rather than via safe_syscall3 to preserve the
925 * "third argument might be integer or pointer or not present" behaviour of
928 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
929 /* Similarly for fcntl. Note that callers must always:
930 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
931 * use the flock64 struct rather than unsuffixed flock
932 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
935 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
937 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
940 static inline int host_to_target_sock_type(int host_type
)
944 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
946 target_type
= TARGET_SOCK_DGRAM
;
949 target_type
= TARGET_SOCK_STREAM
;
952 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
956 #if defined(SOCK_CLOEXEC)
957 if (host_type
& SOCK_CLOEXEC
) {
958 target_type
|= TARGET_SOCK_CLOEXEC
;
962 #if defined(SOCK_NONBLOCK)
963 if (host_type
& SOCK_NONBLOCK
) {
964 target_type
|= TARGET_SOCK_NONBLOCK
;
971 static abi_ulong target_brk
;
972 static abi_ulong target_original_brk
;
973 static abi_ulong brk_page
;
975 void target_set_brk(abi_ulong new_brk
)
977 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
978 brk_page
= HOST_PAGE_ALIGN(target_brk
);
981 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
982 #define DEBUGF_BRK(message, args...)
984 /* do_brk() must return target values and target errnos. */
985 abi_long
do_brk(abi_ulong new_brk
)
987 abi_long mapped_addr
;
988 abi_ulong new_alloc_size
;
990 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
993 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
996 if (new_brk
< target_original_brk
) {
997 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1002 /* If the new brk is less than the highest page reserved to the
1003 * target heap allocation, set it and we're almost done... */
1004 if (new_brk
<= brk_page
) {
1005 /* Heap contents are initialized to zero, as for anonymous
1007 if (new_brk
> target_brk
) {
1008 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1010 target_brk
= new_brk
;
1011 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1015 /* We need to allocate more memory after the brk... Note that
1016 * we don't use MAP_FIXED because that will map over the top of
1017 * any existing mapping (like the one with the host libc or qemu
1018 * itself); instead we treat "mapped but at wrong address" as
1019 * a failure and unmap again.
1021 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1022 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1023 PROT_READ
|PROT_WRITE
,
1024 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1026 if (mapped_addr
== brk_page
) {
1027 /* Heap contents are initialized to zero, as for anonymous
1028 * mapped pages. Technically the new pages are already
1029 * initialized to zero since they *are* anonymous mapped
1030 * pages, however we have to take care with the contents that
1031 * come from the remaining part of the previous page: it may
1032 * contains garbage data due to a previous heap usage (grown
1033 * then shrunken). */
1034 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1036 target_brk
= new_brk
;
1037 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1038 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1041 } else if (mapped_addr
!= -1) {
1042 /* Mapped but at wrong address, meaning there wasn't actually
1043 * enough space for this brk.
1045 target_munmap(mapped_addr
, new_alloc_size
);
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1050 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1053 #if defined(TARGET_ALPHA)
1054 /* We (partially) emulate OSF/1 on Alpha, which requires we
1055 return a proper errno, not an unchanged brk value. */
1056 return -TARGET_ENOMEM
;
1058 /* For everything else, return the previous break. */
1062 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1063 abi_ulong target_fds_addr
,
1067 abi_ulong b
, *target_fds
;
1069 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1070 if (!(target_fds
= lock_user(VERIFY_READ
,
1072 sizeof(abi_ulong
) * nw
,
1074 return -TARGET_EFAULT
;
1078 for (i
= 0; i
< nw
; i
++) {
1079 /* grab the abi_ulong */
1080 __get_user(b
, &target_fds
[i
]);
1081 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1082 /* check the bit inside the abi_ulong */
1089 unlock_user(target_fds
, target_fds_addr
, 0);
1094 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1095 abi_ulong target_fds_addr
,
1098 if (target_fds_addr
) {
1099 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1100 return -TARGET_EFAULT
;
1108 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1114 abi_ulong
*target_fds
;
1116 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1117 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1119 sizeof(abi_ulong
) * nw
,
1121 return -TARGET_EFAULT
;
1124 for (i
= 0; i
< nw
; i
++) {
1126 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1127 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1130 __put_user(v
, &target_fds
[i
]);
1133 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1138 #if defined(__alpha__)
1139 #define HOST_HZ 1024
1144 static inline abi_long
host_to_target_clock_t(long ticks
)
1146 #if HOST_HZ == TARGET_HZ
1149 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1153 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1154 const struct rusage
*rusage
)
1156 struct target_rusage
*target_rusage
;
1158 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1159 return -TARGET_EFAULT
;
1160 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1161 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1162 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1163 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1164 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1165 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1166 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1167 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1168 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1169 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1170 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1171 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1172 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1173 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1174 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1175 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1176 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1177 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1178 unlock_user_struct(target_rusage
, target_addr
, 1);
1183 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1185 abi_ulong target_rlim_swap
;
1188 target_rlim_swap
= tswapal(target_rlim
);
1189 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1190 return RLIM_INFINITY
;
1192 result
= target_rlim_swap
;
1193 if (target_rlim_swap
!= (rlim_t
)result
)
1194 return RLIM_INFINITY
;
1199 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1201 abi_ulong target_rlim_swap
;
1204 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1205 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1207 target_rlim_swap
= rlim
;
1208 result
= tswapal(target_rlim_swap
);
1213 static inline int target_to_host_resource(int code
)
1216 case TARGET_RLIMIT_AS
:
1218 case TARGET_RLIMIT_CORE
:
1220 case TARGET_RLIMIT_CPU
:
1222 case TARGET_RLIMIT_DATA
:
1224 case TARGET_RLIMIT_FSIZE
:
1225 return RLIMIT_FSIZE
;
1226 case TARGET_RLIMIT_LOCKS
:
1227 return RLIMIT_LOCKS
;
1228 case TARGET_RLIMIT_MEMLOCK
:
1229 return RLIMIT_MEMLOCK
;
1230 case TARGET_RLIMIT_MSGQUEUE
:
1231 return RLIMIT_MSGQUEUE
;
1232 case TARGET_RLIMIT_NICE
:
1234 case TARGET_RLIMIT_NOFILE
:
1235 return RLIMIT_NOFILE
;
1236 case TARGET_RLIMIT_NPROC
:
1237 return RLIMIT_NPROC
;
1238 case TARGET_RLIMIT_RSS
:
1240 case TARGET_RLIMIT_RTPRIO
:
1241 return RLIMIT_RTPRIO
;
1242 case TARGET_RLIMIT_SIGPENDING
:
1243 return RLIMIT_SIGPENDING
;
1244 case TARGET_RLIMIT_STACK
:
1245 return RLIMIT_STACK
;
1251 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1252 abi_ulong target_tv_addr
)
1254 struct target_timeval
*target_tv
;
1256 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1257 return -TARGET_EFAULT
;
1259 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1260 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1262 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1267 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1268 const struct timeval
*tv
)
1270 struct target_timeval
*target_tv
;
1272 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1273 return -TARGET_EFAULT
;
1275 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1276 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1278 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1283 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1284 abi_ulong target_tz_addr
)
1286 struct target_timezone
*target_tz
;
1288 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1289 return -TARGET_EFAULT
;
1292 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1293 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1295 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1300 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1303 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1304 abi_ulong target_mq_attr_addr
)
1306 struct target_mq_attr
*target_mq_attr
;
1308 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1309 target_mq_attr_addr
, 1))
1310 return -TARGET_EFAULT
;
1312 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1313 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1314 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1315 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1317 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1322 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1323 const struct mq_attr
*attr
)
1325 struct target_mq_attr
*target_mq_attr
;
1327 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1328 target_mq_attr_addr
, 0))
1329 return -TARGET_EFAULT
;
1331 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1332 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1333 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1334 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1336 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1342 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1343 /* do_select() must return target values and target errnos. */
1344 static abi_long
do_select(int n
,
1345 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1346 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1348 fd_set rfds
, wfds
, efds
;
1349 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1351 struct timespec ts
, *ts_ptr
;
1354 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1358 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1362 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1367 if (target_tv_addr
) {
1368 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1369 return -TARGET_EFAULT
;
1370 ts
.tv_sec
= tv
.tv_sec
;
1371 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1377 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1380 if (!is_error(ret
)) {
1381 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1382 return -TARGET_EFAULT
;
1383 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1384 return -TARGET_EFAULT
;
1385 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1386 return -TARGET_EFAULT
;
1388 if (target_tv_addr
) {
1389 tv
.tv_sec
= ts
.tv_sec
;
1390 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1391 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1392 return -TARGET_EFAULT
;
1401 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1404 return pipe2(host_pipe
, flags
);
1410 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1411 int flags
, int is_pipe2
)
1415 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1418 return get_errno(ret
);
1420 /* Several targets have special calling conventions for the original
1421 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1423 #if defined(TARGET_ALPHA)
1424 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1425 return host_pipe
[0];
1426 #elif defined(TARGET_MIPS)
1427 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1428 return host_pipe
[0];
1429 #elif defined(TARGET_SH4)
1430 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1431 return host_pipe
[0];
1432 #elif defined(TARGET_SPARC)
1433 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1434 return host_pipe
[0];
1438 if (put_user_s32(host_pipe
[0], pipedes
)
1439 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1440 return -TARGET_EFAULT
;
1441 return get_errno(ret
);
1444 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1445 abi_ulong target_addr
,
1448 struct target_ip_mreqn
*target_smreqn
;
1450 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1452 return -TARGET_EFAULT
;
1453 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1454 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1455 if (len
== sizeof(struct target_ip_mreqn
))
1456 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1457 unlock_user(target_smreqn
, target_addr
, 0);
1462 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1463 abi_ulong target_addr
,
1466 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1467 sa_family_t sa_family
;
1468 struct target_sockaddr
*target_saddr
;
1470 if (fd_trans_target_to_host_addr(fd
)) {
1471 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1474 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1476 return -TARGET_EFAULT
;
1478 sa_family
= tswap16(target_saddr
->sa_family
);
1480 /* Oops. The caller might send a incomplete sun_path; sun_path
1481 * must be terminated by \0 (see the manual page), but
1482 * unfortunately it is quite common to specify sockaddr_un
1483 * length as "strlen(x->sun_path)" while it should be
1484 * "strlen(...) + 1". We'll fix that here if needed.
1485 * Linux kernel has a similar feature.
1488 if (sa_family
== AF_UNIX
) {
1489 if (len
< unix_maxlen
&& len
> 0) {
1490 char *cp
= (char*)target_saddr
;
1492 if ( cp
[len
-1] && !cp
[len
] )
1495 if (len
> unix_maxlen
)
1499 memcpy(addr
, target_saddr
, len
);
1500 addr
->sa_family
= sa_family
;
1501 if (sa_family
== AF_NETLINK
) {
1502 struct sockaddr_nl
*nladdr
;
1504 nladdr
= (struct sockaddr_nl
*)addr
;
1505 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1506 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1507 } else if (sa_family
== AF_PACKET
) {
1508 struct target_sockaddr_ll
*lladdr
;
1510 lladdr
= (struct target_sockaddr_ll
*)addr
;
1511 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1512 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1514 unlock_user(target_saddr
, target_addr
, 0);
1519 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1520 struct sockaddr
*addr
,
1523 struct target_sockaddr
*target_saddr
;
1529 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1531 return -TARGET_EFAULT
;
1532 memcpy(target_saddr
, addr
, len
);
1533 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1534 sizeof(target_saddr
->sa_family
)) {
1535 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1537 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1538 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1539 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1540 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1541 } else if (addr
->sa_family
== AF_PACKET
) {
1542 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1543 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1544 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1546 unlock_user(target_saddr
, target_addr
, len
);
1551 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1552 struct target_msghdr
*target_msgh
)
1554 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1555 abi_long msg_controllen
;
1556 abi_ulong target_cmsg_addr
;
1557 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1558 socklen_t space
= 0;
1560 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1561 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1563 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1564 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1565 target_cmsg_start
= target_cmsg
;
1567 return -TARGET_EFAULT
;
1569 while (cmsg
&& target_cmsg
) {
1570 void *data
= CMSG_DATA(cmsg
);
1571 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1573 int len
= tswapal(target_cmsg
->cmsg_len
)
1574 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1576 space
+= CMSG_SPACE(len
);
1577 if (space
> msgh
->msg_controllen
) {
1578 space
-= CMSG_SPACE(len
);
1579 /* This is a QEMU bug, since we allocated the payload
1580 * area ourselves (unlike overflow in host-to-target
1581 * conversion, which is just the guest giving us a buffer
1582 * that's too small). It can't happen for the payload types
1583 * we currently support; if it becomes an issue in future
1584 * we would need to improve our allocation strategy to
1585 * something more intelligent than "twice the size of the
1586 * target buffer we're reading from".
1588 gemu_log("Host cmsg overflow\n");
1592 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1593 cmsg
->cmsg_level
= SOL_SOCKET
;
1595 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1597 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1598 cmsg
->cmsg_len
= CMSG_LEN(len
);
1600 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1601 int *fd
= (int *)data
;
1602 int *target_fd
= (int *)target_data
;
1603 int i
, numfds
= len
/ sizeof(int);
1605 for (i
= 0; i
< numfds
; i
++) {
1606 __get_user(fd
[i
], target_fd
+ i
);
1608 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1609 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1610 struct ucred
*cred
= (struct ucred
*)data
;
1611 struct target_ucred
*target_cred
=
1612 (struct target_ucred
*)target_data
;
1614 __get_user(cred
->pid
, &target_cred
->pid
);
1615 __get_user(cred
->uid
, &target_cred
->uid
);
1616 __get_user(cred
->gid
, &target_cred
->gid
);
1618 gemu_log("Unsupported ancillary data: %d/%d\n",
1619 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1620 memcpy(data
, target_data
, len
);
1623 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1624 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1627 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1629 msgh
->msg_controllen
= space
;
1633 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1634 struct msghdr
*msgh
)
1636 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1637 abi_long msg_controllen
;
1638 abi_ulong target_cmsg_addr
;
1639 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1640 socklen_t space
= 0;
1642 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1643 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1645 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1646 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1647 target_cmsg_start
= target_cmsg
;
1649 return -TARGET_EFAULT
;
1651 while (cmsg
&& target_cmsg
) {
1652 void *data
= CMSG_DATA(cmsg
);
1653 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1655 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1656 int tgt_len
, tgt_space
;
1658 /* We never copy a half-header but may copy half-data;
1659 * this is Linux's behaviour in put_cmsg(). Note that
1660 * truncation here is a guest problem (which we report
1661 * to the guest via the CTRUNC bit), unlike truncation
1662 * in target_to_host_cmsg, which is a QEMU bug.
1664 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1665 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1669 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1670 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1672 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1674 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1676 tgt_len
= TARGET_CMSG_LEN(len
);
1678 /* Payload types which need a different size of payload on
1679 * the target must adjust tgt_len here.
1681 switch (cmsg
->cmsg_level
) {
1683 switch (cmsg
->cmsg_type
) {
1685 tgt_len
= sizeof(struct target_timeval
);
1694 if (msg_controllen
< tgt_len
) {
1695 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1696 tgt_len
= msg_controllen
;
1699 /* We must now copy-and-convert len bytes of payload
1700 * into tgt_len bytes of destination space. Bear in mind
1701 * that in both source and destination we may be dealing
1702 * with a truncated value!
1704 switch (cmsg
->cmsg_level
) {
1706 switch (cmsg
->cmsg_type
) {
1709 int *fd
= (int *)data
;
1710 int *target_fd
= (int *)target_data
;
1711 int i
, numfds
= tgt_len
/ sizeof(int);
1713 for (i
= 0; i
< numfds
; i
++) {
1714 __put_user(fd
[i
], target_fd
+ i
);
1720 struct timeval
*tv
= (struct timeval
*)data
;
1721 struct target_timeval
*target_tv
=
1722 (struct target_timeval
*)target_data
;
1724 if (len
!= sizeof(struct timeval
) ||
1725 tgt_len
!= sizeof(struct target_timeval
)) {
1729 /* copy struct timeval to target */
1730 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1731 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1734 case SCM_CREDENTIALS
:
1736 struct ucred
*cred
= (struct ucred
*)data
;
1737 struct target_ucred
*target_cred
=
1738 (struct target_ucred
*)target_data
;
1740 __put_user(cred
->pid
, &target_cred
->pid
);
1741 __put_user(cred
->uid
, &target_cred
->uid
);
1742 __put_user(cred
->gid
, &target_cred
->gid
);
1752 gemu_log("Unsupported ancillary data: %d/%d\n",
1753 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1754 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1755 if (tgt_len
> len
) {
1756 memset(target_data
+ len
, 0, tgt_len
- len
);
1760 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1761 tgt_space
= TARGET_CMSG_SPACE(len
);
1762 if (msg_controllen
< tgt_space
) {
1763 tgt_space
= msg_controllen
;
1765 msg_controllen
-= tgt_space
;
1767 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1768 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1771 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1773 target_msgh
->msg_controllen
= tswapal(space
);
1777 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1779 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1780 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1781 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1782 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1783 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1786 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1788 abi_long (*host_to_target_nlmsg
)
1789 (struct nlmsghdr
*))
1794 while (len
> sizeof(struct nlmsghdr
)) {
1796 nlmsg_len
= nlh
->nlmsg_len
;
1797 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1802 switch (nlh
->nlmsg_type
) {
1804 tswap_nlmsghdr(nlh
);
1810 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1811 e
->error
= tswap32(e
->error
);
1812 tswap_nlmsghdr(&e
->msg
);
1813 tswap_nlmsghdr(nlh
);
1817 ret
= host_to_target_nlmsg(nlh
);
1819 tswap_nlmsghdr(nlh
);
1824 tswap_nlmsghdr(nlh
);
1825 len
-= NLMSG_ALIGN(nlmsg_len
);
1826 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1831 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1833 abi_long (*target_to_host_nlmsg
)
1834 (struct nlmsghdr
*))
1838 while (len
> sizeof(struct nlmsghdr
)) {
1839 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1840 tswap32(nlh
->nlmsg_len
) > len
) {
1843 tswap_nlmsghdr(nlh
);
1844 switch (nlh
->nlmsg_type
) {
1851 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1852 e
->error
= tswap32(e
->error
);
1853 tswap_nlmsghdr(&e
->msg
);
1857 ret
= target_to_host_nlmsg(nlh
);
1862 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1863 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1868 #ifdef CONFIG_RTNETLINK
1869 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1870 size_t len
, void *context
,
1871 abi_long (*host_to_target_nlattr
)
1875 unsigned short nla_len
;
1878 while (len
> sizeof(struct nlattr
)) {
1879 nla_len
= nlattr
->nla_len
;
1880 if (nla_len
< sizeof(struct nlattr
) ||
1884 ret
= host_to_target_nlattr(nlattr
, context
);
1885 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1886 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1890 len
-= NLA_ALIGN(nla_len
);
1891 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1896 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1898 abi_long (*host_to_target_rtattr
)
1901 unsigned short rta_len
;
1904 while (len
> sizeof(struct rtattr
)) {
1905 rta_len
= rtattr
->rta_len
;
1906 if (rta_len
< sizeof(struct rtattr
) ||
1910 ret
= host_to_target_rtattr(rtattr
);
1911 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1912 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1916 len
-= RTA_ALIGN(rta_len
);
1917 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1922 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1924 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
1931 switch (nlattr
->nla_type
) {
1933 case QEMU_IFLA_BR_FDB_FLUSH
:
1936 case QEMU_IFLA_BR_GROUP_ADDR
:
1939 case QEMU_IFLA_BR_VLAN_FILTERING
:
1940 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
1941 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
1942 case QEMU_IFLA_BR_MCAST_ROUTER
:
1943 case QEMU_IFLA_BR_MCAST_SNOOPING
:
1944 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
1945 case QEMU_IFLA_BR_MCAST_QUERIER
:
1946 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
1947 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
1948 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
1951 case QEMU_IFLA_BR_PRIORITY
:
1952 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
1953 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
1954 case QEMU_IFLA_BR_ROOT_PORT
:
1955 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
1956 u16
= NLA_DATA(nlattr
);
1957 *u16
= tswap16(*u16
);
1960 case QEMU_IFLA_BR_FORWARD_DELAY
:
1961 case QEMU_IFLA_BR_HELLO_TIME
:
1962 case QEMU_IFLA_BR_MAX_AGE
:
1963 case QEMU_IFLA_BR_AGEING_TIME
:
1964 case QEMU_IFLA_BR_STP_STATE
:
1965 case QEMU_IFLA_BR_ROOT_PATH_COST
:
1966 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
1967 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
1968 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
1969 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
1970 u32
= NLA_DATA(nlattr
);
1971 *u32
= tswap32(*u32
);
1974 case QEMU_IFLA_BR_HELLO_TIMER
:
1975 case QEMU_IFLA_BR_TCN_TIMER
:
1976 case QEMU_IFLA_BR_GC_TIMER
:
1977 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
1978 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
1979 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
1980 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
1981 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
1982 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
1983 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
1984 u64
= NLA_DATA(nlattr
);
1985 *u64
= tswap64(*u64
);
1987 /* ifla_bridge_id: uin8_t[] */
1988 case QEMU_IFLA_BR_ROOT_ID
:
1989 case QEMU_IFLA_BR_BRIDGE_ID
:
1992 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
1998 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2005 switch (nlattr
->nla_type
) {
2007 case QEMU_IFLA_BRPORT_STATE
:
2008 case QEMU_IFLA_BRPORT_MODE
:
2009 case QEMU_IFLA_BRPORT_GUARD
:
2010 case QEMU_IFLA_BRPORT_PROTECT
:
2011 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2012 case QEMU_IFLA_BRPORT_LEARNING
:
2013 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2014 case QEMU_IFLA_BRPORT_PROXYARP
:
2015 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2016 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2017 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2018 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2019 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2022 case QEMU_IFLA_BRPORT_PRIORITY
:
2023 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2024 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2025 case QEMU_IFLA_BRPORT_ID
:
2026 case QEMU_IFLA_BRPORT_NO
:
2027 u16
= NLA_DATA(nlattr
);
2028 *u16
= tswap16(*u16
);
2031 case QEMU_IFLA_BRPORT_COST
:
2032 u32
= NLA_DATA(nlattr
);
2033 *u32
= tswap32(*u32
);
2036 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2037 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2038 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2039 u64
= NLA_DATA(nlattr
);
2040 *u64
= tswap64(*u64
);
2042 /* ifla_bridge_id: uint8_t[] */
2043 case QEMU_IFLA_BRPORT_ROOT_ID
:
2044 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2047 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2053 struct linkinfo_context
{
2060 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2063 struct linkinfo_context
*li_context
= context
;
2065 switch (nlattr
->nla_type
) {
2067 case QEMU_IFLA_INFO_KIND
:
2068 li_context
->name
= NLA_DATA(nlattr
);
2069 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2071 case QEMU_IFLA_INFO_SLAVE_KIND
:
2072 li_context
->slave_name
= NLA_DATA(nlattr
);
2073 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2076 case QEMU_IFLA_INFO_XSTATS
:
2077 /* FIXME: only used by CAN */
2080 case QEMU_IFLA_INFO_DATA
:
2081 if (strncmp(li_context
->name
, "bridge",
2082 li_context
->len
) == 0) {
2083 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2086 host_to_target_data_bridge_nlattr
);
2088 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2091 case QEMU_IFLA_INFO_SLAVE_DATA
:
2092 if (strncmp(li_context
->slave_name
, "bridge",
2093 li_context
->slave_len
) == 0) {
2094 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2097 host_to_target_slave_data_bridge_nlattr
);
2099 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2100 li_context
->slave_name
);
2104 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2111 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2117 switch (nlattr
->nla_type
) {
2118 case QEMU_IFLA_INET_CONF
:
2119 u32
= NLA_DATA(nlattr
);
2120 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2122 u32
[i
] = tswap32(u32
[i
]);
2126 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2131 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2136 struct ifla_cacheinfo
*ci
;
2139 switch (nlattr
->nla_type
) {
2141 case QEMU_IFLA_INET6_TOKEN
:
2144 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2147 case QEMU_IFLA_INET6_FLAGS
:
2148 u32
= NLA_DATA(nlattr
);
2149 *u32
= tswap32(*u32
);
2152 case QEMU_IFLA_INET6_CONF
:
2153 u32
= NLA_DATA(nlattr
);
2154 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2156 u32
[i
] = tswap32(u32
[i
]);
2159 /* ifla_cacheinfo */
2160 case QEMU_IFLA_INET6_CACHEINFO
:
2161 ci
= NLA_DATA(nlattr
);
2162 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2163 ci
->tstamp
= tswap32(ci
->tstamp
);
2164 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2165 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2168 case QEMU_IFLA_INET6_STATS
:
2169 case QEMU_IFLA_INET6_ICMP6STATS
:
2170 u64
= NLA_DATA(nlattr
);
2171 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2173 u64
[i
] = tswap64(u64
[i
]);
2177 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2182 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2185 switch (nlattr
->nla_type
) {
2187 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2189 host_to_target_data_inet_nlattr
);
2191 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2193 host_to_target_data_inet6_nlattr
);
2195 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2201 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2204 struct rtnl_link_stats
*st
;
2205 struct rtnl_link_stats64
*st64
;
2206 struct rtnl_link_ifmap
*map
;
2207 struct linkinfo_context li_context
;
2209 switch (rtattr
->rta_type
) {
2211 case QEMU_IFLA_ADDRESS
:
2212 case QEMU_IFLA_BROADCAST
:
2214 case QEMU_IFLA_IFNAME
:
2215 case QEMU_IFLA_QDISC
:
2218 case QEMU_IFLA_OPERSTATE
:
2219 case QEMU_IFLA_LINKMODE
:
2220 case QEMU_IFLA_CARRIER
:
2221 case QEMU_IFLA_PROTO_DOWN
:
2225 case QEMU_IFLA_LINK
:
2226 case QEMU_IFLA_WEIGHT
:
2227 case QEMU_IFLA_TXQLEN
:
2228 case QEMU_IFLA_CARRIER_CHANGES
:
2229 case QEMU_IFLA_NUM_RX_QUEUES
:
2230 case QEMU_IFLA_NUM_TX_QUEUES
:
2231 case QEMU_IFLA_PROMISCUITY
:
2232 case QEMU_IFLA_EXT_MASK
:
2233 case QEMU_IFLA_LINK_NETNSID
:
2234 case QEMU_IFLA_GROUP
:
2235 case QEMU_IFLA_MASTER
:
2236 case QEMU_IFLA_NUM_VF
:
2237 u32
= RTA_DATA(rtattr
);
2238 *u32
= tswap32(*u32
);
2240 /* struct rtnl_link_stats */
2241 case QEMU_IFLA_STATS
:
2242 st
= RTA_DATA(rtattr
);
2243 st
->rx_packets
= tswap32(st
->rx_packets
);
2244 st
->tx_packets
= tswap32(st
->tx_packets
);
2245 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2246 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2247 st
->rx_errors
= tswap32(st
->rx_errors
);
2248 st
->tx_errors
= tswap32(st
->tx_errors
);
2249 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2250 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2251 st
->multicast
= tswap32(st
->multicast
);
2252 st
->collisions
= tswap32(st
->collisions
);
2254 /* detailed rx_errors: */
2255 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2256 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2257 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2258 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2259 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2260 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2262 /* detailed tx_errors */
2263 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2264 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2265 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2266 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2267 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2270 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2271 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2273 /* struct rtnl_link_stats64 */
2274 case QEMU_IFLA_STATS64
:
2275 st64
= RTA_DATA(rtattr
);
2276 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2277 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2278 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2279 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2280 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2281 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2282 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2283 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2284 st64
->multicast
= tswap64(st64
->multicast
);
2285 st64
->collisions
= tswap64(st64
->collisions
);
2287 /* detailed rx_errors: */
2288 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2289 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2290 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2291 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2292 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2293 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2295 /* detailed tx_errors */
2296 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2297 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2298 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2299 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2300 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2303 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2304 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2306 /* struct rtnl_link_ifmap */
2308 map
= RTA_DATA(rtattr
);
2309 map
->mem_start
= tswap64(map
->mem_start
);
2310 map
->mem_end
= tswap64(map
->mem_end
);
2311 map
->base_addr
= tswap64(map
->base_addr
);
2312 map
->irq
= tswap16(map
->irq
);
2315 case QEMU_IFLA_LINKINFO
:
2316 memset(&li_context
, 0, sizeof(li_context
));
2317 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2319 host_to_target_data_linkinfo_nlattr
);
2320 case QEMU_IFLA_AF_SPEC
:
2321 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2323 host_to_target_data_spec_nlattr
);
2325 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2331 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2334 struct ifa_cacheinfo
*ci
;
2336 switch (rtattr
->rta_type
) {
2337 /* binary: depends on family type */
2347 u32
= RTA_DATA(rtattr
);
2348 *u32
= tswap32(*u32
);
2350 /* struct ifa_cacheinfo */
2352 ci
= RTA_DATA(rtattr
);
2353 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2354 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2355 ci
->cstamp
= tswap32(ci
->cstamp
);
2356 ci
->tstamp
= tswap32(ci
->tstamp
);
2359 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2365 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2368 switch (rtattr
->rta_type
) {
2369 /* binary: depends on family type */
2378 u32
= RTA_DATA(rtattr
);
2379 *u32
= tswap32(*u32
);
2382 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2388 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2389 uint32_t rtattr_len
)
2391 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2392 host_to_target_data_link_rtattr
);
2395 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2396 uint32_t rtattr_len
)
2398 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2399 host_to_target_data_addr_rtattr
);
2402 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2403 uint32_t rtattr_len
)
2405 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2406 host_to_target_data_route_rtattr
);
2409 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2412 struct ifinfomsg
*ifi
;
2413 struct ifaddrmsg
*ifa
;
2416 nlmsg_len
= nlh
->nlmsg_len
;
2417 switch (nlh
->nlmsg_type
) {
2421 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2422 ifi
= NLMSG_DATA(nlh
);
2423 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2424 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2425 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2426 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2427 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2428 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2434 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2435 ifa
= NLMSG_DATA(nlh
);
2436 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2437 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2438 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2444 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2445 rtm
= NLMSG_DATA(nlh
);
2446 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2447 host_to_target_route_rtattr(RTM_RTA(rtm
),
2448 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2452 return -TARGET_EINVAL
;
2457 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2460 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2463 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2465 abi_long (*target_to_host_rtattr
)
2470 while (len
>= sizeof(struct rtattr
)) {
2471 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2472 tswap16(rtattr
->rta_len
) > len
) {
2475 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2476 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2477 ret
= target_to_host_rtattr(rtattr
);
2481 len
-= RTA_ALIGN(rtattr
->rta_len
);
2482 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2483 RTA_ALIGN(rtattr
->rta_len
));
2488 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2490 switch (rtattr
->rta_type
) {
2492 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2498 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2500 switch (rtattr
->rta_type
) {
2501 /* binary: depends on family type */
2506 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2512 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2515 switch (rtattr
->rta_type
) {
2516 /* binary: depends on family type */
2523 u32
= RTA_DATA(rtattr
);
2524 *u32
= tswap32(*u32
);
2527 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2533 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2534 uint32_t rtattr_len
)
2536 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2537 target_to_host_data_link_rtattr
);
2540 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2541 uint32_t rtattr_len
)
2543 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2544 target_to_host_data_addr_rtattr
);
2547 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2548 uint32_t rtattr_len
)
2550 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2551 target_to_host_data_route_rtattr
);
2554 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2556 struct ifinfomsg
*ifi
;
2557 struct ifaddrmsg
*ifa
;
2560 switch (nlh
->nlmsg_type
) {
2565 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2566 ifi
= NLMSG_DATA(nlh
);
2567 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2568 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2569 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2570 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2571 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2572 NLMSG_LENGTH(sizeof(*ifi
)));
2578 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2579 ifa
= NLMSG_DATA(nlh
);
2580 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2581 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2582 NLMSG_LENGTH(sizeof(*ifa
)));
2589 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2590 rtm
= NLMSG_DATA(nlh
);
2591 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2592 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2593 NLMSG_LENGTH(sizeof(*rtm
)));
2597 return -TARGET_EOPNOTSUPP
;
2602 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2604 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2606 #endif /* CONFIG_RTNETLINK */
2608 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2610 switch (nlh
->nlmsg_type
) {
2612 gemu_log("Unknown host audit message type %d\n",
2614 return -TARGET_EINVAL
;
2619 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2622 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2625 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2627 switch (nlh
->nlmsg_type
) {
2629 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2630 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2633 gemu_log("Unknown target audit message type %d\n",
2635 return -TARGET_EINVAL
;
2641 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2643 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2646 /* do_setsockopt() Must return target values and target errnos. */
2647 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2648 abi_ulong optval_addr
, socklen_t optlen
)
2652 struct ip_mreqn
*ip_mreq
;
2653 struct ip_mreq_source
*ip_mreq_source
;
2657 /* TCP options all take an 'int' value. */
2658 if (optlen
< sizeof(uint32_t))
2659 return -TARGET_EINVAL
;
2661 if (get_user_u32(val
, optval_addr
))
2662 return -TARGET_EFAULT
;
2663 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2670 case IP_ROUTER_ALERT
:
2674 case IP_MTU_DISCOVER
:
2680 case IP_MULTICAST_TTL
:
2681 case IP_MULTICAST_LOOP
:
2683 if (optlen
>= sizeof(uint32_t)) {
2684 if (get_user_u32(val
, optval_addr
))
2685 return -TARGET_EFAULT
;
2686 } else if (optlen
>= 1) {
2687 if (get_user_u8(val
, optval_addr
))
2688 return -TARGET_EFAULT
;
2690 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2692 case IP_ADD_MEMBERSHIP
:
2693 case IP_DROP_MEMBERSHIP
:
2694 if (optlen
< sizeof (struct target_ip_mreq
) ||
2695 optlen
> sizeof (struct target_ip_mreqn
))
2696 return -TARGET_EINVAL
;
2698 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2699 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2700 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2703 case IP_BLOCK_SOURCE
:
2704 case IP_UNBLOCK_SOURCE
:
2705 case IP_ADD_SOURCE_MEMBERSHIP
:
2706 case IP_DROP_SOURCE_MEMBERSHIP
:
2707 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2708 return -TARGET_EINVAL
;
2710 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2711 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2712 unlock_user (ip_mreq_source
, optval_addr
, 0);
2721 case IPV6_MTU_DISCOVER
:
2724 case IPV6_RECVPKTINFO
:
2726 if (optlen
< sizeof(uint32_t)) {
2727 return -TARGET_EINVAL
;
2729 if (get_user_u32(val
, optval_addr
)) {
2730 return -TARGET_EFAULT
;
2732 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2733 &val
, sizeof(val
)));
2742 /* struct icmp_filter takes an u32 value */
2743 if (optlen
< sizeof(uint32_t)) {
2744 return -TARGET_EINVAL
;
2747 if (get_user_u32(val
, optval_addr
)) {
2748 return -TARGET_EFAULT
;
2750 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2751 &val
, sizeof(val
)));
2758 case TARGET_SOL_SOCKET
:
2760 case TARGET_SO_RCVTIMEO
:
2764 optname
= SO_RCVTIMEO
;
2767 if (optlen
!= sizeof(struct target_timeval
)) {
2768 return -TARGET_EINVAL
;
2771 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2772 return -TARGET_EFAULT
;
2775 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2779 case TARGET_SO_SNDTIMEO
:
2780 optname
= SO_SNDTIMEO
;
2782 case TARGET_SO_ATTACH_FILTER
:
2784 struct target_sock_fprog
*tfprog
;
2785 struct target_sock_filter
*tfilter
;
2786 struct sock_fprog fprog
;
2787 struct sock_filter
*filter
;
2790 if (optlen
!= sizeof(*tfprog
)) {
2791 return -TARGET_EINVAL
;
2793 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2794 return -TARGET_EFAULT
;
2796 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2797 tswapal(tfprog
->filter
), 0)) {
2798 unlock_user_struct(tfprog
, optval_addr
, 1);
2799 return -TARGET_EFAULT
;
2802 fprog
.len
= tswap16(tfprog
->len
);
2803 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2804 if (filter
== NULL
) {
2805 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2806 unlock_user_struct(tfprog
, optval_addr
, 1);
2807 return -TARGET_ENOMEM
;
2809 for (i
= 0; i
< fprog
.len
; i
++) {
2810 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2811 filter
[i
].jt
= tfilter
[i
].jt
;
2812 filter
[i
].jf
= tfilter
[i
].jf
;
2813 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2815 fprog
.filter
= filter
;
2817 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2818 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2821 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2822 unlock_user_struct(tfprog
, optval_addr
, 1);
2825 case TARGET_SO_BINDTODEVICE
:
2827 char *dev_ifname
, *addr_ifname
;
2829 if (optlen
> IFNAMSIZ
- 1) {
2830 optlen
= IFNAMSIZ
- 1;
2832 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2834 return -TARGET_EFAULT
;
2836 optname
= SO_BINDTODEVICE
;
2837 addr_ifname
= alloca(IFNAMSIZ
);
2838 memcpy(addr_ifname
, dev_ifname
, optlen
);
2839 addr_ifname
[optlen
] = 0;
2840 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2841 addr_ifname
, optlen
));
2842 unlock_user (dev_ifname
, optval_addr
, 0);
2845 /* Options with 'int' argument. */
2846 case TARGET_SO_DEBUG
:
2849 case TARGET_SO_REUSEADDR
:
2850 optname
= SO_REUSEADDR
;
2852 case TARGET_SO_TYPE
:
2855 case TARGET_SO_ERROR
:
2858 case TARGET_SO_DONTROUTE
:
2859 optname
= SO_DONTROUTE
;
2861 case TARGET_SO_BROADCAST
:
2862 optname
= SO_BROADCAST
;
2864 case TARGET_SO_SNDBUF
:
2865 optname
= SO_SNDBUF
;
2867 case TARGET_SO_SNDBUFFORCE
:
2868 optname
= SO_SNDBUFFORCE
;
2870 case TARGET_SO_RCVBUF
:
2871 optname
= SO_RCVBUF
;
2873 case TARGET_SO_RCVBUFFORCE
:
2874 optname
= SO_RCVBUFFORCE
;
2876 case TARGET_SO_KEEPALIVE
:
2877 optname
= SO_KEEPALIVE
;
2879 case TARGET_SO_OOBINLINE
:
2880 optname
= SO_OOBINLINE
;
2882 case TARGET_SO_NO_CHECK
:
2883 optname
= SO_NO_CHECK
;
2885 case TARGET_SO_PRIORITY
:
2886 optname
= SO_PRIORITY
;
2889 case TARGET_SO_BSDCOMPAT
:
2890 optname
= SO_BSDCOMPAT
;
2893 case TARGET_SO_PASSCRED
:
2894 optname
= SO_PASSCRED
;
2896 case TARGET_SO_PASSSEC
:
2897 optname
= SO_PASSSEC
;
2899 case TARGET_SO_TIMESTAMP
:
2900 optname
= SO_TIMESTAMP
;
2902 case TARGET_SO_RCVLOWAT
:
2903 optname
= SO_RCVLOWAT
;
2909 if (optlen
< sizeof(uint32_t))
2910 return -TARGET_EINVAL
;
2912 if (get_user_u32(val
, optval_addr
))
2913 return -TARGET_EFAULT
;
2914 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2918 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2919 ret
= -TARGET_ENOPROTOOPT
;
2924 /* do_getsockopt() Must return target values and target errnos. */
2925 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2926 abi_ulong optval_addr
, abi_ulong optlen
)
2933 case TARGET_SOL_SOCKET
:
2936 /* These don't just return a single integer */
2937 case TARGET_SO_LINGER
:
2938 case TARGET_SO_RCVTIMEO
:
2939 case TARGET_SO_SNDTIMEO
:
2940 case TARGET_SO_PEERNAME
:
2942 case TARGET_SO_PEERCRED
: {
2945 struct target_ucred
*tcr
;
2947 if (get_user_u32(len
, optlen
)) {
2948 return -TARGET_EFAULT
;
2951 return -TARGET_EINVAL
;
2955 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2963 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2964 return -TARGET_EFAULT
;
2966 __put_user(cr
.pid
, &tcr
->pid
);
2967 __put_user(cr
.uid
, &tcr
->uid
);
2968 __put_user(cr
.gid
, &tcr
->gid
);
2969 unlock_user_struct(tcr
, optval_addr
, 1);
2970 if (put_user_u32(len
, optlen
)) {
2971 return -TARGET_EFAULT
;
2975 /* Options with 'int' argument. */
2976 case TARGET_SO_DEBUG
:
2979 case TARGET_SO_REUSEADDR
:
2980 optname
= SO_REUSEADDR
;
2982 case TARGET_SO_TYPE
:
2985 case TARGET_SO_ERROR
:
2988 case TARGET_SO_DONTROUTE
:
2989 optname
= SO_DONTROUTE
;
2991 case TARGET_SO_BROADCAST
:
2992 optname
= SO_BROADCAST
;
2994 case TARGET_SO_SNDBUF
:
2995 optname
= SO_SNDBUF
;
2997 case TARGET_SO_RCVBUF
:
2998 optname
= SO_RCVBUF
;
3000 case TARGET_SO_KEEPALIVE
:
3001 optname
= SO_KEEPALIVE
;
3003 case TARGET_SO_OOBINLINE
:
3004 optname
= SO_OOBINLINE
;
3006 case TARGET_SO_NO_CHECK
:
3007 optname
= SO_NO_CHECK
;
3009 case TARGET_SO_PRIORITY
:
3010 optname
= SO_PRIORITY
;
3013 case TARGET_SO_BSDCOMPAT
:
3014 optname
= SO_BSDCOMPAT
;
3017 case TARGET_SO_PASSCRED
:
3018 optname
= SO_PASSCRED
;
3020 case TARGET_SO_TIMESTAMP
:
3021 optname
= SO_TIMESTAMP
;
3023 case TARGET_SO_RCVLOWAT
:
3024 optname
= SO_RCVLOWAT
;
3026 case TARGET_SO_ACCEPTCONN
:
3027 optname
= SO_ACCEPTCONN
;
3034 /* TCP options all take an 'int' value. */
3036 if (get_user_u32(len
, optlen
))
3037 return -TARGET_EFAULT
;
3039 return -TARGET_EINVAL
;
3041 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3044 if (optname
== SO_TYPE
) {
3045 val
= host_to_target_sock_type(val
);
3050 if (put_user_u32(val
, optval_addr
))
3051 return -TARGET_EFAULT
;
3053 if (put_user_u8(val
, optval_addr
))
3054 return -TARGET_EFAULT
;
3056 if (put_user_u32(len
, optlen
))
3057 return -TARGET_EFAULT
;
3064 case IP_ROUTER_ALERT
:
3068 case IP_MTU_DISCOVER
:
3074 case IP_MULTICAST_TTL
:
3075 case IP_MULTICAST_LOOP
:
3076 if (get_user_u32(len
, optlen
))
3077 return -TARGET_EFAULT
;
3079 return -TARGET_EINVAL
;
3081 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3084 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3086 if (put_user_u32(len
, optlen
)
3087 || put_user_u8(val
, optval_addr
))
3088 return -TARGET_EFAULT
;
3090 if (len
> sizeof(int))
3092 if (put_user_u32(len
, optlen
)
3093 || put_user_u32(val
, optval_addr
))
3094 return -TARGET_EFAULT
;
3098 ret
= -TARGET_ENOPROTOOPT
;
3104 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3106 ret
= -TARGET_EOPNOTSUPP
;
3112 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3113 abi_ulong count
, int copy
)
3115 struct target_iovec
*target_vec
;
3117 abi_ulong total_len
, max_len
;
3120 bool bad_address
= false;
3126 if (count
> IOV_MAX
) {
3131 vec
= g_try_new0(struct iovec
, count
);
3137 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3138 count
* sizeof(struct target_iovec
), 1);
3139 if (target_vec
== NULL
) {
3144 /* ??? If host page size > target page size, this will result in a
3145 value larger than what we can actually support. */
3146 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3149 for (i
= 0; i
< count
; i
++) {
3150 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3151 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3156 } else if (len
== 0) {
3157 /* Zero length pointer is ignored. */
3158 vec
[i
].iov_base
= 0;
3160 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3161 /* If the first buffer pointer is bad, this is a fault. But
3162 * subsequent bad buffers will result in a partial write; this
3163 * is realized by filling the vector with null pointers and
3165 if (!vec
[i
].iov_base
) {
3176 if (len
> max_len
- total_len
) {
3177 len
= max_len
- total_len
;
3180 vec
[i
].iov_len
= len
;
3184 unlock_user(target_vec
, target_addr
, 0);
3189 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3190 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3193 unlock_user(target_vec
, target_addr
, 0);
3200 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3201 abi_ulong count
, int copy
)
3203 struct target_iovec
*target_vec
;
3206 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3207 count
* sizeof(struct target_iovec
), 1);
3209 for (i
= 0; i
< count
; i
++) {
3210 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3211 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3215 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3217 unlock_user(target_vec
, target_addr
, 0);
3223 static inline int target_to_host_sock_type(int *type
)
3226 int target_type
= *type
;
3228 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3229 case TARGET_SOCK_DGRAM
:
3230 host_type
= SOCK_DGRAM
;
3232 case TARGET_SOCK_STREAM
:
3233 host_type
= SOCK_STREAM
;
3236 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3239 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3240 #if defined(SOCK_CLOEXEC)
3241 host_type
|= SOCK_CLOEXEC
;
3243 return -TARGET_EINVAL
;
3246 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3247 #if defined(SOCK_NONBLOCK)
3248 host_type
|= SOCK_NONBLOCK
;
3249 #elif !defined(O_NONBLOCK)
3250 return -TARGET_EINVAL
;
3257 /* Try to emulate socket type flags after socket creation. */
3258 static int sock_flags_fixup(int fd
, int target_type
)
3260 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3261 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3262 int flags
= fcntl(fd
, F_GETFL
);
3263 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3265 return -TARGET_EINVAL
;
3272 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3273 abi_ulong target_addr
,
3276 struct sockaddr
*addr
= host_addr
;
3277 struct target_sockaddr
*target_saddr
;
3279 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3280 if (!target_saddr
) {
3281 return -TARGET_EFAULT
;
3284 memcpy(addr
, target_saddr
, len
);
3285 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3286 /* spkt_protocol is big-endian */
3288 unlock_user(target_saddr
, target_addr
, 0);
3292 static TargetFdTrans target_packet_trans
= {
3293 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3296 #ifdef CONFIG_RTNETLINK
3297 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3301 ret
= target_to_host_nlmsg_route(buf
, len
);
3309 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3313 ret
= host_to_target_nlmsg_route(buf
, len
);
3321 static TargetFdTrans target_netlink_route_trans
= {
3322 .target_to_host_data
= netlink_route_target_to_host
,
3323 .host_to_target_data
= netlink_route_host_to_target
,
3325 #endif /* CONFIG_RTNETLINK */
3327 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3331 ret
= target_to_host_nlmsg_audit(buf
, len
);
3339 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3343 ret
= host_to_target_nlmsg_audit(buf
, len
);
3351 static TargetFdTrans target_netlink_audit_trans
= {
3352 .target_to_host_data
= netlink_audit_target_to_host
,
3353 .host_to_target_data
= netlink_audit_host_to_target
,
3356 /* do_socket() Must return target values and target errnos. */
3357 static abi_long
do_socket(int domain
, int type
, int protocol
)
3359 int target_type
= type
;
3362 ret
= target_to_host_sock_type(&type
);
3367 if (domain
== PF_NETLINK
&& !(
3368 #ifdef CONFIG_RTNETLINK
3369 protocol
== NETLINK_ROUTE
||
3371 protocol
== NETLINK_KOBJECT_UEVENT
||
3372 protocol
== NETLINK_AUDIT
)) {
3373 return -EPFNOSUPPORT
;
3376 if (domain
== AF_PACKET
||
3377 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3378 protocol
= tswap16(protocol
);
3381 ret
= get_errno(socket(domain
, type
, protocol
));
3383 ret
= sock_flags_fixup(ret
, target_type
);
3384 if (type
== SOCK_PACKET
) {
3385 /* Manage an obsolete case :
3386 * if socket type is SOCK_PACKET, bind by name
3388 fd_trans_register(ret
, &target_packet_trans
);
3389 } else if (domain
== PF_NETLINK
) {
3391 #ifdef CONFIG_RTNETLINK
3393 fd_trans_register(ret
, &target_netlink_route_trans
);
3396 case NETLINK_KOBJECT_UEVENT
:
3397 /* nothing to do: messages are strings */
3400 fd_trans_register(ret
, &target_netlink_audit_trans
);
3403 g_assert_not_reached();
3410 /* do_bind() Must return target values and target errnos. */
3411 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3417 if ((int)addrlen
< 0) {
3418 return -TARGET_EINVAL
;
3421 addr
= alloca(addrlen
+1);
3423 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3427 return get_errno(bind(sockfd
, addr
, addrlen
));
3430 /* do_connect() Must return target values and target errnos. */
3431 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3437 if ((int)addrlen
< 0) {
3438 return -TARGET_EINVAL
;
3441 addr
= alloca(addrlen
+1);
3443 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3447 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3450 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3451 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3452 int flags
, int send
)
3458 abi_ulong target_vec
;
3460 if (msgp
->msg_name
) {
3461 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3462 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3463 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3464 tswapal(msgp
->msg_name
),
3466 if (ret
== -TARGET_EFAULT
) {
3467 /* For connected sockets msg_name and msg_namelen must
3468 * be ignored, so returning EFAULT immediately is wrong.
3469 * Instead, pass a bad msg_name to the host kernel, and
3470 * let it decide whether to return EFAULT or not.
3472 msg
.msg_name
= (void *)-1;
3477 msg
.msg_name
= NULL
;
3478 msg
.msg_namelen
= 0;
3480 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3481 msg
.msg_control
= alloca(msg
.msg_controllen
);
3482 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3484 count
= tswapal(msgp
->msg_iovlen
);
3485 target_vec
= tswapal(msgp
->msg_iov
);
3487 if (count
> IOV_MAX
) {
3488 /* sendrcvmsg returns a different errno for this condition than
3489 * readv/writev, so we must catch it here before lock_iovec() does.
3491 ret
= -TARGET_EMSGSIZE
;
3495 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3496 target_vec
, count
, send
);
3498 ret
= -host_to_target_errno(errno
);
3501 msg
.msg_iovlen
= count
;
3505 if (fd_trans_target_to_host_data(fd
)) {
3508 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3509 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3510 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3511 msg
.msg_iov
->iov_len
);
3513 msg
.msg_iov
->iov_base
= host_msg
;
3514 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3518 ret
= target_to_host_cmsg(&msg
, msgp
);
3520 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3524 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3525 if (!is_error(ret
)) {
3527 if (fd_trans_host_to_target_data(fd
)) {
3528 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3531 ret
= host_to_target_cmsg(msgp
, &msg
);
3533 if (!is_error(ret
)) {
3534 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3535 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3536 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3537 msg
.msg_name
, msg
.msg_namelen
);
3549 unlock_iovec(vec
, target_vec
, count
, !send
);
3554 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3555 int flags
, int send
)
3558 struct target_msghdr
*msgp
;
3560 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3564 return -TARGET_EFAULT
;
3566 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3567 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3571 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3572 * so it might not have this *mmsg-specific flag either.
3574 #ifndef MSG_WAITFORONE
3575 #define MSG_WAITFORONE 0x10000
3578 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3579 unsigned int vlen
, unsigned int flags
,
3582 struct target_mmsghdr
*mmsgp
;
3586 if (vlen
> UIO_MAXIOV
) {
3590 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3592 return -TARGET_EFAULT
;
3595 for (i
= 0; i
< vlen
; i
++) {
3596 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3597 if (is_error(ret
)) {
3600 mmsgp
[i
].msg_len
= tswap32(ret
);
3601 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3602 if (flags
& MSG_WAITFORONE
) {
3603 flags
|= MSG_DONTWAIT
;
3607 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3609 /* Return number of datagrams sent if we sent any at all;
3610 * otherwise return the error.
3618 /* do_accept4() Must return target values and target errnos. */
3619 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3620 abi_ulong target_addrlen_addr
, int flags
)
3627 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3629 if (target_addr
== 0) {
3630 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3633 /* linux returns EINVAL if addrlen pointer is invalid */
3634 if (get_user_u32(addrlen
, target_addrlen_addr
))
3635 return -TARGET_EINVAL
;
3637 if ((int)addrlen
< 0) {
3638 return -TARGET_EINVAL
;
3641 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3642 return -TARGET_EINVAL
;
3644 addr
= alloca(addrlen
);
3646 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3647 if (!is_error(ret
)) {
3648 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3649 if (put_user_u32(addrlen
, target_addrlen_addr
))
3650 ret
= -TARGET_EFAULT
;
3655 /* do_getpeername() Must return target values and target errnos. */
3656 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3657 abi_ulong target_addrlen_addr
)
3663 if (get_user_u32(addrlen
, target_addrlen_addr
))
3664 return -TARGET_EFAULT
;
3666 if ((int)addrlen
< 0) {
3667 return -TARGET_EINVAL
;
3670 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3671 return -TARGET_EFAULT
;
3673 addr
= alloca(addrlen
);
3675 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3676 if (!is_error(ret
)) {
3677 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3678 if (put_user_u32(addrlen
, target_addrlen_addr
))
3679 ret
= -TARGET_EFAULT
;
3684 /* do_getsockname() Must return target values and target errnos. */
3685 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3686 abi_ulong target_addrlen_addr
)
3692 if (get_user_u32(addrlen
, target_addrlen_addr
))
3693 return -TARGET_EFAULT
;
3695 if ((int)addrlen
< 0) {
3696 return -TARGET_EINVAL
;
3699 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3700 return -TARGET_EFAULT
;
3702 addr
= alloca(addrlen
);
3704 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3705 if (!is_error(ret
)) {
3706 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3707 if (put_user_u32(addrlen
, target_addrlen_addr
))
3708 ret
= -TARGET_EFAULT
;
3713 /* do_socketpair() Must return target values and target errnos. */
3714 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3715 abi_ulong target_tab_addr
)
3720 target_to_host_sock_type(&type
);
3722 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3723 if (!is_error(ret
)) {
3724 if (put_user_s32(tab
[0], target_tab_addr
)
3725 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3726 ret
= -TARGET_EFAULT
;
3731 /* do_sendto() Must return target values and target errnos. */
3732 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3733 abi_ulong target_addr
, socklen_t addrlen
)
3737 void *copy_msg
= NULL
;
3740 if ((int)addrlen
< 0) {
3741 return -TARGET_EINVAL
;
3744 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3746 return -TARGET_EFAULT
;
3747 if (fd_trans_target_to_host_data(fd
)) {
3748 copy_msg
= host_msg
;
3749 host_msg
= g_malloc(len
);
3750 memcpy(host_msg
, copy_msg
, len
);
3751 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3757 addr
= alloca(addrlen
+1);
3758 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3762 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3764 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3769 host_msg
= copy_msg
;
3771 unlock_user(host_msg
, msg
, 0);
3775 /* do_recvfrom() Must return target values and target errnos. */
3776 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3777 abi_ulong target_addr
,
3778 abi_ulong target_addrlen
)
3785 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3787 return -TARGET_EFAULT
;
3789 if (get_user_u32(addrlen
, target_addrlen
)) {
3790 ret
= -TARGET_EFAULT
;
3793 if ((int)addrlen
< 0) {
3794 ret
= -TARGET_EINVAL
;
3797 addr
= alloca(addrlen
);
3798 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3801 addr
= NULL
; /* To keep compiler quiet. */
3802 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3804 if (!is_error(ret
)) {
3805 if (fd_trans_host_to_target_data(fd
)) {
3806 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3809 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3810 if (put_user_u32(addrlen
, target_addrlen
)) {
3811 ret
= -TARGET_EFAULT
;
3815 unlock_user(host_msg
, msg
, len
);
3818 unlock_user(host_msg
, msg
, 0);
3823 #ifdef TARGET_NR_socketcall
3824 /* do_socketcall() Must return target values and target errnos. */
/* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplex the socketcall(2) multiplexer syscall: fetch the per-call
 * argument vector from guest memory at 'vptr' and dispatch to the
 * corresponding do_* helper (which performs its own address translation).
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            /* each argument is one abi_long slot in the guest vector */
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
3911 #define N_SHM_REGIONS 32
/* Bookkeeping for guest shmat() mappings so do_shmdt() can clear page
 * flags for the right range.  NOTE(review): field list reconstructed from
 * the uses in do_shmat()/do_shmdt() (.in_use, .start, .size) — confirm
 * against the full source. */
static struct shm_region {
    abi_ulong start;  /* guest address returned by do_shmat() */
    abi_ulong size;   /* segment size from IPC_STAT (shm_segsz) */
    bool in_use;      /* slot occupied */
} shm_regions[N_SHM_REGIONS];
3919 #ifndef TARGET_SEMID64_DS
3920 /* asm-generic version of this struct */
/* asm-generic layout of semid64_ds as seen by the guest; the __unusedN
 * padding words exist only on 32-bit ABIs to keep the 64-bit time layout. */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;  /* ownership/permissions */
    abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;              /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/*
 * Convert the sem_perm member of a guest semid64_ds at 'target_addr' into
 * a host struct ipc_perm.  mode/__seq widths differ per target ABI, hence
 * the tswap16/tswap32 split.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);   /* 32-bit mode field */
#else
    host_ip->mode = tswap16(target_ip->mode);   /* 16-bit mode field */
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm into
 * the sem_perm member of the guest semid64_ds at 'target_addr'.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);   /* 32-bit mode field */
#else
    target_ip->mode = tswap16(host_ip->mode);   /* 16-bit mode field */
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/*
 * Convert a guest semid64_ds at 'target_addr' into a host struct semid_ds
 * (permissions plus the otime/ctime/nsems fields).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* sem_perm is converted via its own helper (locks the struct again) */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_semid_ds(): copy a host struct semid_ds back
 * into the guest semid64_ds at 'target_addr'.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest layout of struct seminfo (semctl IPC_INFO/SEM_INFO).
 * NOTE(review): members reconstructed from host_to_target_seminfo() —
 * confirm against the full source. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/*
 * Copy a host struct seminfo (semctl IPC_INFO/SEM_INFO result) to the
 * guest buffer at 'target_addr'.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side argument union for semctl(2); glibc does not export one. */
union semun {
    int val;                   /* value for SETVAL */
    struct semid_ds *buf;      /* buffer for IPC_STAT/IPC_SET */
    unsigned short *array;     /* array for GETALL/SETALL */
    struct seminfo *__buf;     /* buffer for IPC_INFO */
};

/* Guest view of the union: pointer members are guest addresses.
 * NOTE(review): member list reconstructed from uses in do_semctl() —
 * confirm against the full source. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/*
 * Read the guest's SETALL value array into a freshly allocated host array.
 * The set size is queried via IPC_STAT.  On success *host_array owns a
 * g_try_new() buffer that host_to_target_semarray() is expected to free.
 * Returns 0, -TARGET_ENOMEM, -TARGET_EFAULT or a target errno.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* ask the kernel how many semaphores are in the set */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
4109 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4110 unsigned short **host_array
)
4113 unsigned short *array
;
4115 struct semid_ds semid_ds
;
4118 semun
.buf
= &semid_ds
;
4120 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4122 return get_errno(ret
);
4124 nsems
= semid_ds
.sem_nsems
;
4126 array
= lock_user(VERIFY_WRITE
, target_addr
,
4127 nsems
*sizeof(unsigned short), 0);
4129 return -TARGET_EFAULT
;
4131 for(i
=0; i
<nsems
; i
++) {
4132 __put_user((*host_array
)[i
], &array
[i
]);
4134 g_free(*host_array
);
4135 unlock_user(array
, target_addr
, 1);
/*
 * Emulate semctl(2): build the host 'union semun' argument from the guest
 * 'target_semun' at target_arg, run the host semctl, and copy any output
 * back to guest memory.  Returns the host result or a target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* array is allocated here and freed by host_to_target_semarray() */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* commands that take no semun argument */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest layout of struct sembuf (one semop operation).
 * NOTE(review): sem_op/sem_flg reconstructed from target_to_host_sembuf()
 * — confirm against the full source. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in set */
    short sem_op;            /* operation */
    short sem_flg;           /* flags (IPC_NOWAIT, SEM_UNDO) */
};
/*
 * Copy 'nsops' guest struct sembuf entries from 'target_addr' into the
 * caller-provided host array.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/*
 * Emulate semop(2) via the host semtimedop with a NULL timeout.
 * NOTE(review): 'nsops' comes from the guest and sizes a VLA on the host
 * stack; the kernel caps it at SEMOPM but a huge value here could
 * overflow the stack — consider validating before the VLA.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest layout of msqid64_ds; the __unusedN padding words exist only on
 * 32-bit ABIs (upper halves of the 64-bit time fields). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership/permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;           /* bytes currently on queue */
    abi_ulong msg_qnum;               /* messages currently on queue */
    abi_ulong msg_qbytes;             /* max bytes allowed on queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd */
    abi_ulong msg_lrpid;              /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/*
 * Convert a guest msqid64_ds at 'target_addr' into a host struct msqid_ds.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_msqid_ds(): copy a host struct msqid_ds back
 * into the guest msqid64_ds at 'target_addr'.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Guest layout of struct msginfo (msgctl IPC_INFO/MSG_INFO).
 * NOTE(review): int members reconstructed from host_to_target_msginfo()
 * — confirm against the full source. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/*
 * Copy a host struct msginfo (msgctl IPC_INFO/MSG_INFO result) to the
 * guest buffer at 'target_addr'.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/*
 * Emulate msgctl(2): convert the guest buffer at 'ptr' as required by
 * 'cmd', call the host msgctl, and copy results back.
 * Returns the host result or a target errno; unhandled commands yield
 * the initial -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* kernel returns a struct msginfo through the msqid_ds pointer */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest layout of struct msgbuf: type word followed by the message text.
 * NOTE(review): members reconstructed from do_msgsnd()/do_msgrcv() uses
 * (mtype, mtext) — confirm against the full source. */
struct target_msgbuf {
    abi_long mtype;   /* message type, must be > 0 */
    char mtext[1];    /* message payload (variable length) */
};
/*
 * Emulate msgsnd(2): copy the guest message (mtype + msgsz payload bytes)
 * into a temporary host msgbuf and send it.
 * Returns 0 on success or a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype word preceding the payload */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/*
 * Emulate msgrcv(2): receive into a temporary host msgbuf, then write the
 * payload and (byteswapped) mtype back to the guest msgbuf at 'msgp'.
 * Returns the number of payload bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype word preceding the payload */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* the payload lives just past the guest mtype word */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/*
 * Convert a guest shmid64_ds at 'target_addr' into a host struct shmid_ds.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_shmid_ds(): copy a host struct shmid_ds back
 * into the guest shmid64_ds at 'target_addr'.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest layout of struct shminfo (shmctl IPC_INFO).
 * NOTE(review): members reconstructed from host_to_target_shminfo() —
 * confirm against the full source. */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
/*
 * Copy a host struct shminfo (shmctl IPC_INFO result) to the guest buffer
 * at 'target_addr'.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest layout of struct shm_info (shmctl SHM_INFO).
 * NOTE(review): first four members reconstructed from
 * host_to_target_shm_info() — confirm against the full source. */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/*
 * Copy a host struct shm_info (shmctl SHM_INFO result) to the guest
 * buffer at 'target_addr'.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/*
 * Emulate shmctl(2): convert the guest buffer at 'buf' as required by
 * 'cmd', call the host shmctl, and copy results back.
 * Returns the host result or a target errno; unhandled commands yield
 * the initial -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel returns a struct shminfo through the shmid_ds pointer */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* kernel returns a struct shm_info through the shmid_ds pointer */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/*
 * Emulate shmat(2): attach the segment either at the guest-requested
 * address (translated with g2h) or at a free slot found via
 * mmap_find_vma(), record the mapping in shm_regions[] and update guest
 * page flags.  Returns the guest attach address or a target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma reserved the range with a mapping */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the mapping so do_shmdt() can undo the page flags */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/*
 * Emulate shmdt(2): release the shm_regions[] slot recorded by do_shmat()
 * (clearing the guest page flags for the range), then detach on the host.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
4647 #ifdef TARGET_NR_ipc
4648 /* ??? This only works with linear mappings. */
4649 /* do_ipc() must return target values and target errnos. */
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the ipc(2) multiplexer syscall.  The high 16 bits of 'call'
 * carry the interface version (affects msgrcv/shmat argument passing),
 * the low 16 bits select the operation.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style msgrcv passes msgp/msgtyp indirectly */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* the attach address is written back through 'third' */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
4753 /* kernel structure types definitions */
4755 #define STRUCT(name, ...) STRUCT_ ## name,
4756 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4758 #include "syscall_types.h"
4762 #undef STRUCT_SPECIAL
4764 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4765 #define STRUCT_SPECIAL(name)
4766 #include "syscall_types.h"
4768 #undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl converter (used when the generic thunk
 * machinery cannot describe the argument layout). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry in the ioctl translation table.
 * NOTE(review): target_cmd/name/access fields reconstructed from uses
 * elsewhere (ie->access, ie->host_cmd) — confirm against the full
 * source. */
struct IOCTLEntry {
    int target_cmd;               /* guest ioctl number */
    unsigned int host_cmd;        /* host ioctl number */
    const char *name;
    int access;                   /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;        /* custom handler, or NULL for generic */
    const argtype arg_type[5];    /* thunk description of the argument */
};
4784 #define IOC_R 0x0001
4785 #define IOC_W 0x0002
4786 #define IOC_RW (IOC_R | IOC_W)
4788 #define MAX_STRUCT_SIZE 4096
4790 #ifdef CONFIG_FIEMAP
4791 /* So fiemap access checks don't overflow on 32 bit systems.
4792 * This is very slightly smaller than the limit imposed by
4793 * the underlying kernel.
4795 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4796 / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm points at a heap buffer, not buf_temp */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;   /* skip TYPE_PTR, leaving the pointee description */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* guest-controlled count: bound it so outbufsz cannot overflow */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4879 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4880 int fd
, int cmd
, abi_long arg
)
4882 const argtype
*arg_type
= ie
->arg_type
;
4886 struct ifconf
*host_ifconf
;
4888 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4889 int target_ifreq_size
;
4894 abi_long target_ifc_buf
;
4898 assert(arg_type
[0] == TYPE_PTR
);
4899 assert(ie
->access
== IOC_RW
);
4902 target_size
= thunk_type_size(arg_type
, 0);
4904 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4906 return -TARGET_EFAULT
;
4907 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4908 unlock_user(argptr
, arg
, 0);
4910 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4911 target_ifc_len
= host_ifconf
->ifc_len
;
4912 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4914 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4915 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4916 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4918 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4919 if (outbufsz
> MAX_STRUCT_SIZE
) {
4920 /* We can't fit all the extents into the fixed size buffer.
4921 * Allocate one that is large enough and use it instead.
4923 host_ifconf
= malloc(outbufsz
);
4925 return -TARGET_ENOMEM
;
4927 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4930 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4932 host_ifconf
->ifc_len
= host_ifc_len
;
4933 host_ifconf
->ifc_buf
= host_ifc_buf
;
4935 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4936 if (!is_error(ret
)) {
4937 /* convert host ifc_len to target ifc_len */
4939 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4940 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4941 host_ifconf
->ifc_len
= target_ifc_len
;
4943 /* restore target ifc_buf */
4945 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4947 /* copy struct ifconf to target user */
4949 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4951 return -TARGET_EFAULT
;
4952 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4953 unlock_user(argptr
, arg
, target_size
);
4955 /* copy ifreq[] to target user */
4957 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4958 for (i
= 0; i
< nb_ifreq
; i
++) {
4959 thunk_convert(argptr
+ i
* target_ifreq_size
,
4960 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4961 ifreq_arg_type
, THUNK_TARGET
);
4963 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4973 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4974 int cmd
, abi_long arg
)
4977 struct dm_ioctl
*host_dm
;
4978 abi_long guest_data
;
4979 uint32_t guest_data_size
;
4981 const argtype
*arg_type
= ie
->arg_type
;
4983 void *big_buf
= NULL
;
4987 target_size
= thunk_type_size(arg_type
, 0);
4988 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4990 ret
= -TARGET_EFAULT
;
4993 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4994 unlock_user(argptr
, arg
, 0);
4996 /* buf_temp is too small, so fetch things into a bigger buffer */
4997 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4998 memcpy(big_buf
, buf_temp
, target_size
);
5002 guest_data
= arg
+ host_dm
->data_start
;
5003 if ((guest_data
- arg
) < 0) {
5007 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5008 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5010 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5011 switch (ie
->host_cmd
) {
5013 case DM_LIST_DEVICES
:
5016 case DM_DEV_SUSPEND
:
5019 case DM_TABLE_STATUS
:
5020 case DM_TABLE_CLEAR
:
5022 case DM_LIST_VERSIONS
:
5026 case DM_DEV_SET_GEOMETRY
:
5027 /* data contains only strings */
5028 memcpy(host_data
, argptr
, guest_data_size
);
5031 memcpy(host_data
, argptr
, guest_data_size
);
5032 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5036 void *gspec
= argptr
;
5037 void *cur_data
= host_data
;
5038 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5039 int spec_size
= thunk_type_size(arg_type
, 0);
5042 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5043 struct dm_target_spec
*spec
= cur_data
;
5047 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5048 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5050 spec
->next
= sizeof(*spec
) + slen
;
5051 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5053 cur_data
+= spec
->next
;
5058 ret
= -TARGET_EINVAL
;
5059 unlock_user(argptr
, guest_data
, 0);
5062 unlock_user(argptr
, guest_data
, 0);
5064 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5065 if (!is_error(ret
)) {
5066 guest_data
= arg
+ host_dm
->data_start
;
5067 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5068 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5069 switch (ie
->host_cmd
) {
5074 case DM_DEV_SUSPEND
:
5077 case DM_TABLE_CLEAR
:
5079 case DM_DEV_SET_GEOMETRY
:
5080 /* no return data */
5082 case DM_LIST_DEVICES
:
5084 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5085 uint32_t remaining_data
= guest_data_size
;
5086 void *cur_data
= argptr
;
5087 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5088 int nl_size
= 12; /* can't use thunk_size due to alignment */
5091 uint32_t next
= nl
->next
;
5093 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5095 if (remaining_data
< nl
->next
) {
5096 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5099 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5100 strcpy(cur_data
+ nl_size
, nl
->name
);
5101 cur_data
+= nl
->next
;
5102 remaining_data
-= nl
->next
;
5106 nl
= (void*)nl
+ next
;
5111 case DM_TABLE_STATUS
:
5113 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5114 void *cur_data
= argptr
;
5115 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5116 int spec_size
= thunk_type_size(arg_type
, 0);
5119 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5120 uint32_t next
= spec
->next
;
5121 int slen
= strlen((char*)&spec
[1]) + 1;
5122 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5123 if (guest_data_size
< spec
->next
) {
5124 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5127 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5128 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5129 cur_data
= argptr
+ spec
->next
;
5130 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5136 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5137 int count
= *(uint32_t*)hdata
;
5138 uint64_t *hdev
= hdata
+ 8;
5139 uint64_t *gdev
= argptr
+ 8;
5142 *(uint32_t*)argptr
= tswap32(count
);
5143 for (i
= 0; i
< count
; i
++) {
5144 *gdev
= tswap64(*hdev
);
5150 case DM_LIST_VERSIONS
:
5152 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5153 uint32_t remaining_data
= guest_data_size
;
5154 void *cur_data
= argptr
;
5155 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5156 int vers_size
= thunk_type_size(arg_type
, 0);
5159 uint32_t next
= vers
->next
;
5161 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5163 if (remaining_data
< vers
->next
) {
5164 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5167 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5168 strcpy(cur_data
+ vers_size
, vers
->name
);
5169 cur_data
+= vers
->next
;
5170 remaining_data
-= vers
->next
;
5174 vers
= (void*)vers
+ next
;
5179 unlock_user(argptr
, guest_data
, 0);
5180 ret
= -TARGET_EINVAL
;
5183 unlock_user(argptr
, guest_data
, guest_data_size
);
5185 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5187 ret
= -TARGET_EFAULT
;
5190 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5191 unlock_user(argptr
, arg
, target_size
);
5198 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5199 int cmd
, abi_long arg
)
5203 const argtype
*arg_type
= ie
->arg_type
;
5204 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5207 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5208 struct blkpg_partition host_part
;
5210 /* Read and convert blkpg */
5212 target_size
= thunk_type_size(arg_type
, 0);
5213 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5215 ret
= -TARGET_EFAULT
;
5218 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5219 unlock_user(argptr
, arg
, 0);
5221 switch (host_blkpg
->op
) {
5222 case BLKPG_ADD_PARTITION
:
5223 case BLKPG_DEL_PARTITION
:
5224 /* payload is struct blkpg_partition */
5227 /* Unknown opcode */
5228 ret
= -TARGET_EINVAL
;
5232 /* Read and convert blkpg->data */
5233 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5234 target_size
= thunk_type_size(part_arg_type
, 0);
5235 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5237 ret
= -TARGET_EFAULT
;
5240 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5241 unlock_user(argptr
, arg
, 0);
5243 /* Swizzle the data pointer to our local copy and call! */
5244 host_blkpg
->data
= &host_part
;
5245 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5251 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5252 int fd
, int cmd
, abi_long arg
)
5254 const argtype
*arg_type
= ie
->arg_type
;
5255 const StructEntry
*se
;
5256 const argtype
*field_types
;
5257 const int *dst_offsets
, *src_offsets
;
5260 abi_ulong
*target_rt_dev_ptr
;
5261 unsigned long *host_rt_dev_ptr
;
5265 assert(ie
->access
== IOC_W
);
5266 assert(*arg_type
== TYPE_PTR
);
5268 assert(*arg_type
== TYPE_STRUCT
);
5269 target_size
= thunk_type_size(arg_type
, 0);
5270 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5272 return -TARGET_EFAULT
;
5275 assert(*arg_type
== (int)STRUCT_rtentry
);
5276 se
= struct_entries
+ *arg_type
++;
5277 assert(se
->convert
[0] == NULL
);
5278 /* convert struct here to be able to catch rt_dev string */
5279 field_types
= se
->field_types
;
5280 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5281 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5282 for (i
= 0; i
< se
->nb_fields
; i
++) {
5283 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5284 assert(*field_types
== TYPE_PTRVOID
);
5285 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5286 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5287 if (*target_rt_dev_ptr
!= 0) {
5288 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5289 tswapal(*target_rt_dev_ptr
));
5290 if (!*host_rt_dev_ptr
) {
5291 unlock_user(argptr
, arg
, 0);
5292 return -TARGET_EFAULT
;
5295 *host_rt_dev_ptr
= 0;
5300 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5301 argptr
+ src_offsets
[i
],
5302 field_types
, THUNK_HOST
);
5304 unlock_user(argptr
, arg
, 0);
5306 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5307 if (*host_rt_dev_ptr
!= 0) {
5308 unlock_user((void *)*host_rt_dev_ptr
,
5309 *target_rt_dev_ptr
, 0);
5314 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5315 int fd
, int cmd
, abi_long arg
)
5317 int sig
= target_to_host_signal(arg
);
5318 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
/* Translation table for all supported ioctls.  The entries are generated
 * by expanding the IOCTL()/IOCTL_SPECIAL() macros below; IOCTL_SPECIAL
 * additionally supplies a custom do_ioctl_*() handler.
 * NOTE(review): the table's entry list and terminator are not visible in
 * this chunk — presumably provided via an #include of "ioctls.h"; confirm. */
5321 static IOCTLEntry ioctl_entries
[] = {
5322 #define IOCTL(cmd, access, ...) \
5323 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5324 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5325 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5330 /* ??? Implement proper locking for ioctls. */
5331 /* do_ioctl() Must return target values and target errnos. */
5332 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5334 const IOCTLEntry
*ie
;
5335 const argtype
*arg_type
;
5337 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5343 if (ie
->target_cmd
== 0) {
5344 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5345 return -TARGET_ENOSYS
;
5347 if (ie
->target_cmd
== cmd
)
5351 arg_type
= ie
->arg_type
;
5353 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5356 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5359 switch(arg_type
[0]) {
5362 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5366 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5370 target_size
= thunk_type_size(arg_type
, 0);
5371 switch(ie
->access
) {
5373 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5374 if (!is_error(ret
)) {
5375 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5377 return -TARGET_EFAULT
;
5378 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5379 unlock_user(argptr
, arg
, target_size
);
5383 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5385 return -TARGET_EFAULT
;
5386 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5387 unlock_user(argptr
, arg
, 0);
5388 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5392 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5394 return -TARGET_EFAULT
;
5395 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5396 unlock_user(argptr
, arg
, 0);
5397 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5398 if (!is_error(ret
)) {
5399 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5401 return -TARGET_EFAULT
;
5402 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5403 unlock_user(argptr
, arg
, target_size
);
5409 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5410 (long)cmd
, arg_type
[0]);
5411 ret
= -TARGET_ENOSYS
;
5417 static const bitmask_transtbl iflag_tbl
[] = {
5418 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5419 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5420 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5421 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5422 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5423 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5424 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5425 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5426 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5427 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5428 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5429 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5430 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5431 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5435 static const bitmask_transtbl oflag_tbl
[] = {
5436 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5437 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5438 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5439 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5440 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5441 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5442 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5443 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5444 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5445 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5446 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5447 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5448 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5449 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5450 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5451 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5452 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5453 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5454 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5455 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5456 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5457 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5458 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5459 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5463 static const bitmask_transtbl cflag_tbl
[] = {
5464 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5465 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5466 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5467 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5468 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5469 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5470 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5471 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5472 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5473 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5474 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5475 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5476 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5477 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5478 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5479 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5480 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5481 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5482 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5483 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5484 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5485 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5486 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5487 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5488 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5489 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5490 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5491 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5492 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5493 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5494 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5498 static const bitmask_transtbl lflag_tbl
[] = {
5499 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5500 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5501 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5502 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5503 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5504 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5505 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5506 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5507 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5508 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5509 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5510 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5511 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5512 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5513 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5517 static void target_to_host_termios (void *dst
, const void *src
)
5519 struct host_termios
*host
= dst
;
5520 const struct target_termios
*target
= src
;
5523 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5525 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5527 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5529 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5530 host
->c_line
= target
->c_line
;
5532 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5533 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5534 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5535 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5536 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5537 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5538 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5539 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5540 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5541 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5542 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5543 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5544 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5545 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5546 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5547 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5548 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5549 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5552 static void host_to_target_termios (void *dst
, const void *src
)
5554 struct target_termios
*target
= dst
;
5555 const struct host_termios
*host
= src
;
5558 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5560 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5562 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5564 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5565 target
->c_line
= host
->c_line
;
5567 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5568 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5569 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5570 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5571 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5572 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5573 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5574 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5575 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5576 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5577 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5578 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5579 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5580 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5581 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5582 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5583 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5584 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5587 static const StructEntry struct_termios_def
= {
5588 .convert
= { host_to_target_termios
, target_to_host_termios
},
5589 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5590 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5593 static bitmask_transtbl mmap_flags_tbl
[] = {
5594 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5595 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5596 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5597 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5598 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5599 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5600 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5601 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5602 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5607 #if defined(TARGET_I386)
5609 /* NOTE: there is really one LDT for all the threads */
5610 static uint8_t *ldt_table
;
5612 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5619 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5620 if (size
> bytecount
)
5622 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5624 return -TARGET_EFAULT
;
5625 /* ??? Should this by byteswapped? */
5626 memcpy(p
, ldt_table
, size
);
5627 unlock_user(p
, ptr
, size
);
5631 /* XXX: add locking support */
5632 static abi_long
write_ldt(CPUX86State
*env
,
5633 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5635 struct target_modify_ldt_ldt_s ldt_info
;
5636 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5637 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5638 int seg_not_present
, useable
, lm
;
5639 uint32_t *lp
, entry_1
, entry_2
;
5641 if (bytecount
!= sizeof(ldt_info
))
5642 return -TARGET_EINVAL
;
5643 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5644 return -TARGET_EFAULT
;
5645 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5646 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5647 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5648 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5649 unlock_user_struct(target_ldt_info
, ptr
, 0);
5651 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5652 return -TARGET_EINVAL
;
5653 seg_32bit
= ldt_info
.flags
& 1;
5654 contents
= (ldt_info
.flags
>> 1) & 3;
5655 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5656 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5657 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5658 useable
= (ldt_info
.flags
>> 6) & 1;
5662 lm
= (ldt_info
.flags
>> 7) & 1;
5664 if (contents
== 3) {
5666 return -TARGET_EINVAL
;
5667 if (seg_not_present
== 0)
5668 return -TARGET_EINVAL
;
5670 /* allocate the LDT */
5672 env
->ldt
.base
= target_mmap(0,
5673 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5674 PROT_READ
|PROT_WRITE
,
5675 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5676 if (env
->ldt
.base
== -1)
5677 return -TARGET_ENOMEM
;
5678 memset(g2h(env
->ldt
.base
), 0,
5679 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5680 env
->ldt
.limit
= 0xffff;
5681 ldt_table
= g2h(env
->ldt
.base
);
5684 /* NOTE: same code as Linux kernel */
5685 /* Allow LDTs to be cleared by the user. */
5686 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5689 read_exec_only
== 1 &&
5691 limit_in_pages
== 0 &&
5692 seg_not_present
== 1 &&
5700 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5701 (ldt_info
.limit
& 0x0ffff);
5702 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5703 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5704 (ldt_info
.limit
& 0xf0000) |
5705 ((read_exec_only
^ 1) << 9) |
5707 ((seg_not_present
^ 1) << 15) |
5709 (limit_in_pages
<< 23) |
5713 entry_2
|= (useable
<< 20);
5715 /* Install the new entry ... */
5717 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5718 lp
[0] = tswap32(entry_1
);
5719 lp
[1] = tswap32(entry_2
);
5723 /* specific and weird i386 syscalls */
5724 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5725 unsigned long bytecount
)
5731 ret
= read_ldt(ptr
, bytecount
);
5734 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5737 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5740 ret
= -TARGET_ENOSYS
;
5746 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5747 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5749 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5750 struct target_modify_ldt_ldt_s ldt_info
;
5751 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5752 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5753 int seg_not_present
, useable
, lm
;
5754 uint32_t *lp
, entry_1
, entry_2
;
5757 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5758 if (!target_ldt_info
)
5759 return -TARGET_EFAULT
;
5760 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5761 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5762 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5763 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5764 if (ldt_info
.entry_number
== -1) {
5765 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5766 if (gdt_table
[i
] == 0) {
5767 ldt_info
.entry_number
= i
;
5768 target_ldt_info
->entry_number
= tswap32(i
);
5773 unlock_user_struct(target_ldt_info
, ptr
, 1);
5775 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5776 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5777 return -TARGET_EINVAL
;
5778 seg_32bit
= ldt_info
.flags
& 1;
5779 contents
= (ldt_info
.flags
>> 1) & 3;
5780 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5781 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5782 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5783 useable
= (ldt_info
.flags
>> 6) & 1;
5787 lm
= (ldt_info
.flags
>> 7) & 1;
5790 if (contents
== 3) {
5791 if (seg_not_present
== 0)
5792 return -TARGET_EINVAL
;
5795 /* NOTE: same code as Linux kernel */
5796 /* Allow LDTs to be cleared by the user. */
5797 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5798 if ((contents
== 0 &&
5799 read_exec_only
== 1 &&
5801 limit_in_pages
== 0 &&
5802 seg_not_present
== 1 &&
5810 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5811 (ldt_info
.limit
& 0x0ffff);
5812 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5813 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5814 (ldt_info
.limit
& 0xf0000) |
5815 ((read_exec_only
^ 1) << 9) |
5817 ((seg_not_present
^ 1) << 15) |
5819 (limit_in_pages
<< 23) |
5824 /* Install the new entry ... */
5826 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5827 lp
[0] = tswap32(entry_1
);
5828 lp
[1] = tswap32(entry_2
);
5832 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5834 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5835 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5836 uint32_t base_addr
, limit
, flags
;
5837 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5838 int seg_not_present
, useable
, lm
;
5839 uint32_t *lp
, entry_1
, entry_2
;
5841 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5842 if (!target_ldt_info
)
5843 return -TARGET_EFAULT
;
5844 idx
= tswap32(target_ldt_info
->entry_number
);
5845 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5846 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5847 unlock_user_struct(target_ldt_info
, ptr
, 1);
5848 return -TARGET_EINVAL
;
5850 lp
= (uint32_t *)(gdt_table
+ idx
);
5851 entry_1
= tswap32(lp
[0]);
5852 entry_2
= tswap32(lp
[1]);
5854 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5855 contents
= (entry_2
>> 10) & 3;
5856 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5857 seg_32bit
= (entry_2
>> 22) & 1;
5858 limit_in_pages
= (entry_2
>> 23) & 1;
5859 useable
= (entry_2
>> 20) & 1;
5863 lm
= (entry_2
>> 21) & 1;
5865 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5866 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5867 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5868 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5869 base_addr
= (entry_1
>> 16) |
5870 (entry_2
& 0xff000000) |
5871 ((entry_2
& 0xff) << 16);
5872 target_ldt_info
->base_addr
= tswapal(base_addr
);
5873 target_ldt_info
->limit
= tswap32(limit
);
5874 target_ldt_info
->flags
= tswap32(flags
);
5875 unlock_user_struct(target_ldt_info
, ptr
, 1);
5878 #endif /* TARGET_I386 && TARGET_ABI32 */
5880 #ifndef TARGET_ABI32
5881 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5888 case TARGET_ARCH_SET_GS
:
5889 case TARGET_ARCH_SET_FS
:
5890 if (code
== TARGET_ARCH_SET_GS
)
5894 cpu_x86_load_seg(env
, idx
, 0);
5895 env
->segs
[idx
].base
= addr
;
5897 case TARGET_ARCH_GET_GS
:
5898 case TARGET_ARCH_GET_FS
:
5899 if (code
== TARGET_ARCH_GET_GS
)
5903 val
= env
->segs
[idx
].base
;
5904 if (put_user(val
, addr
, abi_ulong
))
5905 ret
= -TARGET_EFAULT
;
5908 ret
= -TARGET_EINVAL
;
5915 #endif /* defined(TARGET_I386) */
5917 #define NEW_STACK_SIZE 0x40000
5920 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5923 pthread_mutex_t mutex
;
5924 pthread_cond_t cond
;
5927 abi_ulong child_tidptr
;
5928 abi_ulong parent_tidptr
;
5932 static void *clone_func(void *arg
)
5934 new_thread_info
*info
= arg
;
5939 rcu_register_thread();
5941 cpu
= ENV_GET_CPU(env
);
5943 ts
= (TaskState
*)cpu
->opaque
;
5944 info
->tid
= gettid();
5945 cpu
->host_tid
= info
->tid
;
5947 if (info
->child_tidptr
)
5948 put_user_u32(info
->tid
, info
->child_tidptr
);
5949 if (info
->parent_tidptr
)
5950 put_user_u32(info
->tid
, info
->parent_tidptr
);
5951 /* Enable signals. */
5952 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5953 /* Signal to the parent that we're ready. */
5954 pthread_mutex_lock(&info
->mutex
);
5955 pthread_cond_broadcast(&info
->cond
);
5956 pthread_mutex_unlock(&info
->mutex
);
5957 /* Wait until the parent has finshed initializing the tls state. */
5958 pthread_mutex_lock(&clone_lock
);
5959 pthread_mutex_unlock(&clone_lock
);
5965 /* do_fork() Must return host values and target errnos (unlike most
5966 do_*() functions). */
5967 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5968 abi_ulong parent_tidptr
, target_ulong newtls
,
5969 abi_ulong child_tidptr
)
5971 CPUState
*cpu
= ENV_GET_CPU(env
);
5975 CPUArchState
*new_env
;
5976 unsigned int nptl_flags
;
5979 /* Emulate vfork() with fork() */
5980 if (flags
& CLONE_VFORK
)
5981 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5983 if (flags
& CLONE_VM
) {
5984 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5985 new_thread_info info
;
5986 pthread_attr_t attr
;
5988 ts
= g_new0(TaskState
, 1);
5989 init_task_state(ts
);
5990 /* we create a new CPU instance. */
5991 new_env
= cpu_copy(env
);
5992 /* Init regs that differ from the parent. */
5993 cpu_clone_regs(new_env
, newsp
);
5994 new_cpu
= ENV_GET_CPU(new_env
);
5995 new_cpu
->opaque
= ts
;
5996 ts
->bprm
= parent_ts
->bprm
;
5997 ts
->info
= parent_ts
->info
;
5998 ts
->signal_mask
= parent_ts
->signal_mask
;
6000 flags
&= ~CLONE_NPTL_FLAGS2
;
6002 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
6003 ts
->child_tidptr
= child_tidptr
;
6006 if (nptl_flags
& CLONE_SETTLS
)
6007 cpu_set_tls (new_env
, newtls
);
6009 /* Grab a mutex so that thread setup appears atomic. */
6010 pthread_mutex_lock(&clone_lock
);
6012 memset(&info
, 0, sizeof(info
));
6013 pthread_mutex_init(&info
.mutex
, NULL
);
6014 pthread_mutex_lock(&info
.mutex
);
6015 pthread_cond_init(&info
.cond
, NULL
);
6017 if (nptl_flags
& CLONE_CHILD_SETTID
)
6018 info
.child_tidptr
= child_tidptr
;
6019 if (nptl_flags
& CLONE_PARENT_SETTID
)
6020 info
.parent_tidptr
= parent_tidptr
;
6022 ret
= pthread_attr_init(&attr
);
6023 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6024 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6025 /* It is not safe to deliver signals until the child has finished
6026 initializing, so temporarily block all signals. */
6027 sigfillset(&sigmask
);
6028 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6030 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6031 /* TODO: Free new CPU state if thread creation failed. */
6033 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6034 pthread_attr_destroy(&attr
);
6036 /* Wait for the child to initialize. */
6037 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6039 if (flags
& CLONE_PARENT_SETTID
)
6040 put_user_u32(ret
, parent_tidptr
);
6044 pthread_mutex_unlock(&info
.mutex
);
6045 pthread_cond_destroy(&info
.cond
);
6046 pthread_mutex_destroy(&info
.mutex
);
6047 pthread_mutex_unlock(&clone_lock
);
6049 /* if no CLONE_VM, we consider it is a fork */
6050 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
6051 return -TARGET_EINVAL
;
6054 if (block_signals()) {
6055 return -TARGET_ERESTARTSYS
;
6061 /* Child Process. */
6063 cpu_clone_regs(env
, newsp
);
6065 /* There is a race condition here. The parent process could
6066 theoretically read the TID in the child process before the child
6067 tid is set. This would require using either ptrace
6068 (not implemented) or having *_tidptr to point at a shared memory
6069 mapping. We can't repeat the spinlock hack used above because
6070 the child process gets its own copy of the lock. */
6071 if (flags
& CLONE_CHILD_SETTID
)
6072 put_user_u32(gettid(), child_tidptr
);
6073 if (flags
& CLONE_PARENT_SETTID
)
6074 put_user_u32(gettid(), parent_tidptr
);
6075 ts
= (TaskState
*)cpu
->opaque
;
6076 if (flags
& CLONE_SETTLS
)
6077 cpu_set_tls (env
, newtls
);
6078 if (flags
& CLONE_CHILD_CLEARTID
)
6079 ts
->child_tidptr
= child_tidptr
;
6087 /* warning : doesn't handle linux specific flags... */
6088 static int target_to_host_fcntl_cmd(int cmd
)
6091 case TARGET_F_DUPFD
:
6092 case TARGET_F_GETFD
:
6093 case TARGET_F_SETFD
:
6094 case TARGET_F_GETFL
:
6095 case TARGET_F_SETFL
:
6097 case TARGET_F_GETLK
:
6099 case TARGET_F_SETLK
:
6101 case TARGET_F_SETLKW
:
6103 case TARGET_F_GETOWN
:
6105 case TARGET_F_SETOWN
:
6107 case TARGET_F_GETSIG
:
6109 case TARGET_F_SETSIG
:
6111 #if TARGET_ABI_BITS == 32
6112 case TARGET_F_GETLK64
:
6114 case TARGET_F_SETLK64
:
6116 case TARGET_F_SETLKW64
:
6119 case TARGET_F_SETLEASE
:
6121 case TARGET_F_GETLEASE
:
6123 #ifdef F_DUPFD_CLOEXEC
6124 case TARGET_F_DUPFD_CLOEXEC
:
6125 return F_DUPFD_CLOEXEC
;
6127 case TARGET_F_NOTIFY
:
6130 case TARGET_F_GETOWN_EX
:
6134 case TARGET_F_SETOWN_EX
:
6138 case TARGET_F_SETPIPE_SZ
:
6139 return F_SETPIPE_SZ
;
6140 case TARGET_F_GETPIPE_SZ
:
6141 return F_GETPIPE_SZ
;
6144 return -TARGET_EINVAL
;
6146 return -TARGET_EINVAL
;
6149 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6150 static const bitmask_transtbl flock_tbl
[] = {
6151 TRANSTBL_CONVERT(F_RDLCK
),
6152 TRANSTBL_CONVERT(F_WRLCK
),
6153 TRANSTBL_CONVERT(F_UNLCK
),
6154 TRANSTBL_CONVERT(F_EXLCK
),
6155 TRANSTBL_CONVERT(F_SHLCK
),
6159 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6160 abi_ulong target_flock_addr
)
6162 struct target_flock
*target_fl
;
6165 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6166 return -TARGET_EFAULT
;
6169 __get_user(l_type
, &target_fl
->l_type
);
6170 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6171 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6172 __get_user(fl
->l_start
, &target_fl
->l_start
);
6173 __get_user(fl
->l_len
, &target_fl
->l_len
);
6174 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6175 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6179 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6180 const struct flock64
*fl
)
6182 struct target_flock
*target_fl
;
6185 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6186 return -TARGET_EFAULT
;
6189 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6190 __put_user(l_type
, &target_fl
->l_type
);
6191 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6192 __put_user(fl
->l_start
, &target_fl
->l_start
);
6193 __put_user(fl
->l_len
, &target_fl
->l_len
);
6194 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6195 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6199 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6200 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI uses a differently padded flock64 layout than OABI, so it gets
 * its own pair of copy helpers, selected at runtime via the eabi flag. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Write a host flock64 back to a guest EABI flock64. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6244 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6245 abi_ulong target_flock_addr
)
6247 struct target_flock64
*target_fl
;
6250 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6251 return -TARGET_EFAULT
;
6254 __get_user(l_type
, &target_fl
->l_type
);
6255 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6256 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6257 __get_user(fl
->l_start
, &target_fl
->l_start
);
6258 __get_user(fl
->l_len
, &target_fl
->l_len
);
6259 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6260 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6264 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6265 const struct flock64
*fl
)
6267 struct target_flock64
*target_fl
;
6270 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6271 return -TARGET_EFAULT
;
6274 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6275 __put_user(l_type
, &target_fl
->l_type
);
6276 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6277 __put_user(fl
->l_start
, &target_fl
->l_start
);
6278 __put_user(fl
->l_len
, &target_fl
->l_len
);
6279 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6280 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6284 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6286 struct flock64 fl64
;
6288 struct f_owner_ex fox
;
6289 struct target_f_owner_ex
*target_fox
;
6292 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6294 if (host_cmd
== -TARGET_EINVAL
)
6298 case TARGET_F_GETLK
:
6299 ret
= copy_from_user_flock(&fl64
, arg
);
6303 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6305 ret
= copy_to_user_flock(arg
, &fl64
);
6309 case TARGET_F_SETLK
:
6310 case TARGET_F_SETLKW
:
6311 ret
= copy_from_user_flock(&fl64
, arg
);
6315 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6318 case TARGET_F_GETLK64
:
6319 ret
= copy_from_user_flock64(&fl64
, arg
);
6323 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6325 ret
= copy_to_user_flock64(arg
, &fl64
);
6328 case TARGET_F_SETLK64
:
6329 case TARGET_F_SETLKW64
:
6330 ret
= copy_from_user_flock64(&fl64
, arg
);
6334 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6337 case TARGET_F_GETFL
:
6338 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6340 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6344 case TARGET_F_SETFL
:
6345 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6346 target_to_host_bitmask(arg
,
6351 case TARGET_F_GETOWN_EX
:
6352 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6354 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6355 return -TARGET_EFAULT
;
6356 target_fox
->type
= tswap32(fox
.type
);
6357 target_fox
->pid
= tswap32(fox
.pid
);
6358 unlock_user_struct(target_fox
, arg
, 1);
6364 case TARGET_F_SETOWN_EX
:
6365 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6366 return -TARGET_EFAULT
;
6367 fox
.type
= tswap32(target_fox
->type
);
6368 fox
.pid
= tswap32(target_fox
->pid
);
6369 unlock_user_struct(target_fox
, arg
, 0);
6370 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6374 case TARGET_F_SETOWN
:
6375 case TARGET_F_GETOWN
:
6376 case TARGET_F_SETSIG
:
6377 case TARGET_F_GETSIG
:
6378 case TARGET_F_SETLEASE
:
6379 case TARGET_F_GETLEASE
:
6380 case TARGET_F_SETPIPE_SZ
:
6381 case TARGET_F_GETPIPE_SZ
:
6382 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6386 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* Targets with 16-bit uid_t: clamp 32-bit host IDs into 16-bit range
 * (overflow maps to 65534, the traditional "nobody"), and widen 16-bit
 * guest IDs, preserving the -1 "unchanged" sentinel. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid_t targets: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return id;
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6458 /* We must do direct syscalls for setting UID/GID, because we want to
6459 * implement the Linux system call semantics of "change only for this thread",
6460 * not the libc/POSIX semantics of "change for all threads in process".
6461 * (See http://ewontfix.com/17/ for more details.)
6462 * We use the 32-bit version of the syscalls if present; if it is not
6463 * then either the host architecture supports 32-bit UIDs natively with
6464 * the standard syscall, or the 16-bit UID is the best we can do.
6466 #ifdef __NR_setuid32
6467 #define __NR_sys_setuid __NR_setuid32
6469 #define __NR_sys_setuid __NR_setuid
6471 #ifdef __NR_setgid32
6472 #define __NR_sys_setgid __NR_setgid32
6474 #define __NR_sys_setgid __NR_setgid
6476 #ifdef __NR_setresuid32
6477 #define __NR_sys_setresuid __NR_setresuid32
6479 #define __NR_sys_setresuid __NR_setresuid
6481 #ifdef __NR_setresgid32
6482 #define __NR_sys_setresgid __NR_setresgid32
6484 #define __NR_sys_setresgid __NR_setresgid
6487 _syscall1(int, sys_setuid
, uid_t
, uid
)
6488 _syscall1(int, sys_setgid
, gid_t
, gid
)
6489 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6490 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6492 void syscall_init(void)
6495 const argtype
*arg_type
;
6499 thunk_init(STRUCT_MAX
);
6501 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6502 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6503 #include "syscall_types.h"
6505 #undef STRUCT_SPECIAL
6507 /* Build target_to_host_errno_table[] table from
6508 * host_to_target_errno_table[]. */
6509 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6510 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6513 /* we patch the ioctl size if necessary. We rely on the fact that
6514 no ioctl has all the bits at '1' in the size field */
6516 while (ie
->target_cmd
!= 0) {
6517 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6518 TARGET_IOC_SIZEMASK
) {
6519 arg_type
= ie
->arg_type
;
6520 if (arg_type
[0] != TYPE_PTR
) {
6521 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6526 size
= thunk_type_size(arg_type
, 0);
6527 ie
->target_cmd
= (ie
->target_cmd
&
6528 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6529 (size
<< TARGET_IOC_SIZESHIFT
);
6532 /* automatic consistency check if same arch */
6533 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6534 (defined(__x86_64__) && defined(TARGET_X86_64))
6535 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6536 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6537 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit register halves a
 * 32-bit guest passes; which half is high depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the offset in a single register; second arg unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that require aligned register pairs the 64-bit
 * length starts one argument slot later, so shift the args first. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6588 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6589 abi_ulong target_addr
)
6591 struct target_timespec
*target_ts
;
6593 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6594 return -TARGET_EFAULT
;
6595 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6596 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6597 unlock_user_struct(target_ts
, target_addr
, 0);
6601 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6602 struct timespec
*host_ts
)
6604 struct target_timespec
*target_ts
;
6606 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6607 return -TARGET_EFAULT
;
6608 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6609 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6610 unlock_user_struct(target_ts
, target_addr
, 1);
6614 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6615 abi_ulong target_addr
)
6617 struct target_itimerspec
*target_itspec
;
6619 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6620 return -TARGET_EFAULT
;
6623 host_itspec
->it_interval
.tv_sec
=
6624 tswapal(target_itspec
->it_interval
.tv_sec
);
6625 host_itspec
->it_interval
.tv_nsec
=
6626 tswapal(target_itspec
->it_interval
.tv_nsec
);
6627 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6628 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6630 unlock_user_struct(target_itspec
, target_addr
, 1);
6634 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6635 struct itimerspec
*host_its
)
6637 struct target_itimerspec
*target_itspec
;
6639 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6640 return -TARGET_EFAULT
;
6643 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6644 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6646 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6647 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6649 unlock_user_struct(target_itspec
, target_addr
, 0);
6653 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6654 abi_ulong target_addr
)
6656 struct target_sigevent
*target_sevp
;
6658 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6659 return -TARGET_EFAULT
;
6662 /* This union is awkward on 64 bit systems because it has a 32 bit
6663 * integer and a pointer in it; we follow the conversion approach
6664 * used for handling sigval types in signal.c so the guest should get
6665 * the correct value back even if we did a 64 bit byteswap and it's
6666 * using the 32 bit integer.
6668 host_sevp
->sigev_value
.sival_ptr
=
6669 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6670 host_sevp
->sigev_signo
=
6671 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6672 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6673 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6675 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into host MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
6694 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6695 abi_ulong target_addr
,
6696 struct stat
*host_st
)
6698 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6699 if (((CPUARMState
*)cpu_env
)->eabi
) {
6700 struct target_eabi_stat64
*target_st
;
6702 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6703 return -TARGET_EFAULT
;
6704 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6705 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6706 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6707 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6708 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6710 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6711 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6712 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6713 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6714 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6715 __put_user(host_st
->st_size
, &target_st
->st_size
);
6716 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6717 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6718 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6719 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6720 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6721 unlock_user_struct(target_st
, target_addr
, 1);
6725 #if defined(TARGET_HAS_STRUCT_STAT64)
6726 struct target_stat64
*target_st
;
6728 struct target_stat
*target_st
;
6731 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6732 return -TARGET_EFAULT
;
6733 memset(target_st
, 0, sizeof(*target_st
));
6734 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6735 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6736 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6737 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6739 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6740 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6741 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6742 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6743 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6744 /* XXX: better use of kernel struct */
6745 __put_user(host_st
->st_size
, &target_st
->st_size
);
6746 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6747 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6748 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6749 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6750 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6751 unlock_user_struct(target_st
, target_addr
, 1);
6757 /* ??? Using host futex calls even when target atomic operations
6758 are not really atomic probably breaks things. However implementing
6759 futexes locally would make futexes shared between multiple processes
6760 tricky. However they're probably useless because guest atomic
6761 operations won't work either. */
6762 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6763 target_ulong uaddr2
, int val3
)
6765 struct timespec ts
, *pts
;
6768 /* ??? We assume FUTEX_* constants are the same on both host
6770 #ifdef FUTEX_CMD_MASK
6771 base_op
= op
& FUTEX_CMD_MASK
;
6777 case FUTEX_WAIT_BITSET
:
6780 target_to_host_timespec(pts
, timeout
);
6784 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6787 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6789 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6791 case FUTEX_CMP_REQUEUE
:
6793 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6794 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6795 But the prototype takes a `struct timespec *'; insert casts
6796 to satisfy the compiler. We do not need to tswap TIMEOUT
6797 since it's not compared to guest memory. */
6798 pts
= (struct timespec
*)(uintptr_t) timeout
;
6799 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6801 (base_op
== FUTEX_CMP_REQUEUE
6805 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): build a host file_handle of the size the
 * guest requested, call the host syscall, then copy the (opaque) handle and
 * the mount id back to guest memory with byteswapped header fields. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    struct file_handle *target_fh;
    struct file_handle *fh;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle into a host
 * buffer (byteswapping the header fields) and open it on the host, with
 * open flags translated through fcntl_flags_tbl. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags,
                                                             fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6896 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6898 /* signalfd siginfo conversion */
/* Convert a signalfd_siginfo from host to guest representation.
 * NOTE(review): callers pass tinfo == info (in-place conversion from
 * host_to_target_data_signalfd), which is why some fields are read back
 * from tinfo before being swapped — do not "fix" those reads without
 * checking every caller. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives immediately after ssi_addr in the kernel's
         * layout even though the libc header omits it. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6938 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6942 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6943 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
6949 static TargetFdTrans target_signalfd_trans
= {
6950 .host_to_target_data
= host_to_target_data_signalfd
,
6953 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6956 target_sigset_t
*target_mask
;
6960 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6961 return -TARGET_EINVAL
;
6963 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6964 return -TARGET_EFAULT
;
6967 target_to_host_sigset(&host_mask
, target_mask
);
6969 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6971 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6973 fd_trans_register(ret
, &target_signalfd_trans
);
6976 unlock_user_struct(target_mask
, mask
, 0);
6982 /* Map host to target signal numbers for the wait family of syscalls.
6983 Assume all other status bits are the same. */
/* Translate a host wait status: remap the embedded signal number for
 * terminated/stopped children, leaving the remaining status bits intact. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fake /proc/self/cmdline for the guest: stream the host file to fd,
 * skipping the first NUL-terminated word (the qemu binary path) so the
 * guest sees its own argv[0] first. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7046 static int open_self_maps(void *cpu_env
, int fd
)
7048 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7049 TaskState
*ts
= cpu
->opaque
;
7055 fp
= fopen("/proc/self/maps", "r");
7060 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7061 int fields
, dev_maj
, dev_min
, inode
;
7062 uint64_t min
, max
, offset
;
7063 char flag_r
, flag_w
, flag_x
, flag_p
;
7064 char path
[512] = "";
7065 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7066 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7067 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7069 if ((fields
< 10) || (fields
> 11)) {
7072 if (h2g_valid(min
)) {
7073 int flags
= page_get_flags(h2g(min
));
7074 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7075 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7078 if (h2g(min
) == ts
->info
->stack_limit
) {
7079 pstrcpy(path
, sizeof(path
), " [stack]");
7081 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7082 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7083 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7084 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7085 path
[0] ? " " : "", path
);
7095 static int open_self_stat(void *cpu_env
, int fd
)
7097 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7098 TaskState
*ts
= cpu
->opaque
;
7099 abi_ulong start_stack
= ts
->info
->start_stack
;
7102 for (i
= 0; i
< 44; i
++) {
7110 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7111 } else if (i
== 1) {
7113 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7114 } else if (i
== 27) {
7117 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7119 /* for the rest, there is MasterCard */
7120 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7124 if (write(fd
, buf
, len
) != len
) {
7132 static int open_self_auxv(void *cpu_env
, int fd
)
7134 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7135 TaskState
*ts
= cpu
->opaque
;
7136 abi_ulong auxv
= ts
->info
->saved_auxv
;
7137 abi_ulong len
= ts
->info
->auxv_len
;
7141 * Auxiliary vector is stored in target process stack.
7142 * read in whole auxv vector and copy it to file
7144 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7148 r
= write(fd
, ptr
, len
);
7155 lseek(fd
, 0, SEEK_SET
);
7156 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names this process's own /proc entry `entry`,
 * accepting both "/proc/self/<entry>" and "/proc/<our pid>/<entry>". */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7186 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used for global /proc files (counterpart to the
 * per-process matcher is_proc_myself). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7192 static int open_net_route(void *cpu_env
, int fd
)
7199 fp
= fopen("/proc/net/route", "r");
7206 read
= getline(&line
, &len
, fp
);
7207 dprintf(fd
, "%s", line
);
7211 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7213 uint32_t dest
, gw
, mask
;
7214 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7215 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7216 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7217 &mask
, &mtu
, &window
, &irtt
);
7218 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7219 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7220 metric
, tswap32(mask
), mtu
, window
, irtt
);
7230 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7233 const char *filename
;
7234 int (*fill
)(void *cpu_env
, int fd
);
7235 int (*cmp
)(const char *s1
, const char *s2
);
7237 const struct fake_open
*fake_open
;
7238 static const struct fake_open fakes
[] = {
7239 { "maps", open_self_maps
, is_proc_myself
},
7240 { "stat", open_self_stat
, is_proc_myself
},
7241 { "auxv", open_self_auxv
, is_proc_myself
},
7242 { "cmdline", open_self_cmdline
, is_proc_myself
},
7243 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7244 { "/proc/net/route", open_net_route
, is_proc
},
7246 { NULL
, NULL
, NULL
}
7249 if (is_proc_myself(pathname
, "exe")) {
7250 int execfd
= qemu_getauxval(AT_EXECFD
);
7251 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7254 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7255 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7260 if (fake_open
->filename
) {
7262 char filename
[PATH_MAX
];
7265 /* create temporary file to map stat to */
7266 tmpdir
= getenv("TMPDIR");
7269 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7270 fd
= mkstemp(filename
);
7276 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7282 lseek(fd
, 0, SEEK_SET
);
7287 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7290 #define TIMER_MAGIC 0x0caf0000
7291 #define TIMER_MAGIC_MASK 0xffff0000
7293 /* Convert QEMU provided timer ID back to internal 16bit index format */
7294 static target_timer_t
get_timer_id(abi_long arg
)
7296 target_timer_t timerid
= arg
;
7298 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7299 return -TARGET_EINVAL
;
7304 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7305 return -TARGET_EINVAL
;
7311 /* do_syscall() should always have a single exit point at the end so
7312 that actions, such as logging of syscall results, can be performed.
7313 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7314 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7315 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7316 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7319 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7325 #if defined(DEBUG_ERESTARTSYS)
7326 /* Debug-only code for exercising the syscall-restart code paths
7327 * in the per-architecture cpu main loops: restart every syscall
7328 * the guest makes once before letting it through.
7335 return -TARGET_ERESTARTSYS
;
7341 gemu_log("syscall %d", num
);
7343 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7345 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7348 case TARGET_NR_exit
:
7349 /* In old applications this may be used to implement _exit(2).
7350 However in threaded applictions it is used for thread termination,
7351 and _exit_group is used for application termination.
7352 Do thread termination if we have more then one thread. */
7354 if (block_signals()) {
7355 ret
= -TARGET_ERESTARTSYS
;
7359 if (CPU_NEXT(first_cpu
)) {
7363 /* Remove the CPU from the list. */
7364 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7367 if (ts
->child_tidptr
) {
7368 put_user_u32(0, ts
->child_tidptr
);
7369 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7373 object_unref(OBJECT(cpu
));
7375 rcu_unregister_thread();
7381 gdb_exit(cpu_env
, arg1
);
7383 ret
= 0; /* avoid warning */
7385 case TARGET_NR_read
:
7389 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7391 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7393 fd_trans_host_to_target_data(arg1
)) {
7394 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7396 unlock_user(p
, arg2
, ret
);
7399 case TARGET_NR_write
:
7400 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7402 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7403 unlock_user(p
, arg2
, 0);
7405 #ifdef TARGET_NR_open
7406 case TARGET_NR_open
:
7407 if (!(p
= lock_user_string(arg1
)))
7409 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7410 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7412 fd_trans_unregister(ret
);
7413 unlock_user(p
, arg1
, 0);
7416 case TARGET_NR_openat
:
7417 if (!(p
= lock_user_string(arg2
)))
7419 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7420 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7422 fd_trans_unregister(ret
);
7423 unlock_user(p
, arg2
, 0);
7425 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7426 case TARGET_NR_name_to_handle_at
:
7427 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7430 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7431 case TARGET_NR_open_by_handle_at
:
7432 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7433 fd_trans_unregister(ret
);
7436 case TARGET_NR_close
:
7437 fd_trans_unregister(arg1
);
7438 ret
= get_errno(close(arg1
));
7443 #ifdef TARGET_NR_fork
7444 case TARGET_NR_fork
:
7445 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7448 #ifdef TARGET_NR_waitpid
7449 case TARGET_NR_waitpid
:
7452 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7453 if (!is_error(ret
) && arg2
&& ret
7454 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7459 #ifdef TARGET_NR_waitid
7460 case TARGET_NR_waitid
:
7464 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7465 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7466 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7468 host_to_target_siginfo(p
, &info
);
7469 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7474 #ifdef TARGET_NR_creat /* not on alpha */
7475 case TARGET_NR_creat
:
7476 if (!(p
= lock_user_string(arg1
)))
7478 ret
= get_errno(creat(p
, arg2
));
7479 fd_trans_unregister(ret
);
7480 unlock_user(p
, arg1
, 0);
7483 #ifdef TARGET_NR_link
7484 case TARGET_NR_link
:
7487 p
= lock_user_string(arg1
);
7488 p2
= lock_user_string(arg2
);
7490 ret
= -TARGET_EFAULT
;
7492 ret
= get_errno(link(p
, p2
));
7493 unlock_user(p2
, arg2
, 0);
7494 unlock_user(p
, arg1
, 0);
7498 #if defined(TARGET_NR_linkat)
7499 case TARGET_NR_linkat
:
7504 p
= lock_user_string(arg2
);
7505 p2
= lock_user_string(arg4
);
7507 ret
= -TARGET_EFAULT
;
7509 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7510 unlock_user(p
, arg2
, 0);
7511 unlock_user(p2
, arg4
, 0);
7515 #ifdef TARGET_NR_unlink
7516 case TARGET_NR_unlink
:
7517 if (!(p
= lock_user_string(arg1
)))
7519 ret
= get_errno(unlink(p
));
7520 unlock_user(p
, arg1
, 0);
7523 #if defined(TARGET_NR_unlinkat)
7524 case TARGET_NR_unlinkat
:
7525 if (!(p
= lock_user_string(arg2
)))
7527 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7528 unlock_user(p
, arg2
, 0);
7531 case TARGET_NR_execve
:
7533 char **argp
, **envp
;
7536 abi_ulong guest_argp
;
7537 abi_ulong guest_envp
;
7544 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7545 if (get_user_ual(addr
, gp
))
7553 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7554 if (get_user_ual(addr
, gp
))
7561 argp
= alloca((argc
+ 1) * sizeof(void *));
7562 envp
= alloca((envc
+ 1) * sizeof(void *));
7564 for (gp
= guest_argp
, q
= argp
; gp
;
7565 gp
+= sizeof(abi_ulong
), q
++) {
7566 if (get_user_ual(addr
, gp
))
7570 if (!(*q
= lock_user_string(addr
)))
7572 total_size
+= strlen(*q
) + 1;
7576 for (gp
= guest_envp
, q
= envp
; gp
;
7577 gp
+= sizeof(abi_ulong
), q
++) {
7578 if (get_user_ual(addr
, gp
))
7582 if (!(*q
= lock_user_string(addr
)))
7584 total_size
+= strlen(*q
) + 1;
7588 if (!(p
= lock_user_string(arg1
)))
7590 /* Although execve() is not an interruptible syscall it is
7591 * a special case where we must use the safe_syscall wrapper:
7592 * if we allow a signal to happen before we make the host
7593 * syscall then we will 'lose' it, because at the point of
7594 * execve the process leaves QEMU's control. So we use the
7595 * safe syscall wrapper to ensure that we either take the
7596 * signal as a guest signal, or else it does not happen
7597 * before the execve completes and makes it the other
7598 * program's problem.
7600 ret
= get_errno(safe_execve(p
, argp
, envp
));
7601 unlock_user(p
, arg1
, 0);
7606 ret
= -TARGET_EFAULT
;
7609 for (gp
= guest_argp
, q
= argp
; *q
;
7610 gp
+= sizeof(abi_ulong
), q
++) {
7611 if (get_user_ual(addr
, gp
)
7614 unlock_user(*q
, addr
, 0);
7616 for (gp
= guest_envp
, q
= envp
; *q
;
7617 gp
+= sizeof(abi_ulong
), q
++) {
7618 if (get_user_ual(addr
, gp
)
7621 unlock_user(*q
, addr
, 0);
7625 case TARGET_NR_chdir
:
7626 if (!(p
= lock_user_string(arg1
)))
7628 ret
= get_errno(chdir(p
));
7629 unlock_user(p
, arg1
, 0);
7631 #ifdef TARGET_NR_time
7632 case TARGET_NR_time
:
7635 ret
= get_errno(time(&host_time
));
7638 && put_user_sal(host_time
, arg1
))
7643 #ifdef TARGET_NR_mknod
7644 case TARGET_NR_mknod
:
7645 if (!(p
= lock_user_string(arg1
)))
7647 ret
= get_errno(mknod(p
, arg2
, arg3
));
7648 unlock_user(p
, arg1
, 0);
7651 #if defined(TARGET_NR_mknodat)
7652 case TARGET_NR_mknodat
:
7653 if (!(p
= lock_user_string(arg2
)))
7655 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7656 unlock_user(p
, arg2
, 0);
7659 #ifdef TARGET_NR_chmod
7660 case TARGET_NR_chmod
:
7661 if (!(p
= lock_user_string(arg1
)))
7663 ret
= get_errno(chmod(p
, arg2
));
7664 unlock_user(p
, arg1
, 0);
7667 #ifdef TARGET_NR_break
7668 case TARGET_NR_break
:
7671 #ifdef TARGET_NR_oldstat
7672 case TARGET_NR_oldstat
:
7675 case TARGET_NR_lseek
:
7676 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7678 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7679 /* Alpha specific */
7680 case TARGET_NR_getxpid
:
7681 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7682 ret
= get_errno(getpid());
7685 #ifdef TARGET_NR_getpid
7686 case TARGET_NR_getpid
:
7687 ret
= get_errno(getpid());
7690 case TARGET_NR_mount
:
7692 /* need to look at the data field */
7696 p
= lock_user_string(arg1
);
7704 p2
= lock_user_string(arg2
);
7707 unlock_user(p
, arg1
, 0);
7713 p3
= lock_user_string(arg3
);
7716 unlock_user(p
, arg1
, 0);
7718 unlock_user(p2
, arg2
, 0);
7725 /* FIXME - arg5 should be locked, but it isn't clear how to
7726 * do that since it's not guaranteed to be a NULL-terminated
7730 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7732 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7734 ret
= get_errno(ret
);
7737 unlock_user(p
, arg1
, 0);
7739 unlock_user(p2
, arg2
, 0);
7741 unlock_user(p3
, arg3
, 0);
7745 #ifdef TARGET_NR_umount
7746 case TARGET_NR_umount
:
7747 if (!(p
= lock_user_string(arg1
)))
7749 ret
= get_errno(umount(p
));
7750 unlock_user(p
, arg1
, 0);
7753 #ifdef TARGET_NR_stime /* not on alpha */
7754 case TARGET_NR_stime
:
7757 if (get_user_sal(host_time
, arg1
))
7759 ret
= get_errno(stime(&host_time
));
7763 case TARGET_NR_ptrace
:
7765 #ifdef TARGET_NR_alarm /* not on alpha */
7766 case TARGET_NR_alarm
:
7770 #ifdef TARGET_NR_oldfstat
7771 case TARGET_NR_oldfstat
:
7774 #ifdef TARGET_NR_pause /* not on alpha */
7775 case TARGET_NR_pause
:
7776 if (!block_signals()) {
7777 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7779 ret
= -TARGET_EINTR
;
7782 #ifdef TARGET_NR_utime
7783 case TARGET_NR_utime
:
7785 struct utimbuf tbuf
, *host_tbuf
;
7786 struct target_utimbuf
*target_tbuf
;
7788 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7790 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7791 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7792 unlock_user_struct(target_tbuf
, arg2
, 0);
7797 if (!(p
= lock_user_string(arg1
)))
7799 ret
= get_errno(utime(p
, host_tbuf
));
7800 unlock_user(p
, arg1
, 0);
7804 #ifdef TARGET_NR_utimes
7805 case TARGET_NR_utimes
:
7807 struct timeval
*tvp
, tv
[2];
7809 if (copy_from_user_timeval(&tv
[0], arg2
)
7810 || copy_from_user_timeval(&tv
[1],
7811 arg2
+ sizeof(struct target_timeval
)))
7817 if (!(p
= lock_user_string(arg1
)))
7819 ret
= get_errno(utimes(p
, tvp
));
7820 unlock_user(p
, arg1
, 0);
7824 #if defined(TARGET_NR_futimesat)
7825 case TARGET_NR_futimesat
:
7827 struct timeval
*tvp
, tv
[2];
7829 if (copy_from_user_timeval(&tv
[0], arg3
)
7830 || copy_from_user_timeval(&tv
[1],
7831 arg3
+ sizeof(struct target_timeval
)))
7837 if (!(p
= lock_user_string(arg2
)))
7839 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7840 unlock_user(p
, arg2
, 0);
7844 #ifdef TARGET_NR_stty
7845 case TARGET_NR_stty
:
7848 #ifdef TARGET_NR_gtty
7849 case TARGET_NR_gtty
:
7852 #ifdef TARGET_NR_access
7853 case TARGET_NR_access
:
7854 if (!(p
= lock_user_string(arg1
)))
7856 ret
= get_errno(access(path(p
), arg2
));
7857 unlock_user(p
, arg1
, 0);
7860 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7861 case TARGET_NR_faccessat
:
7862 if (!(p
= lock_user_string(arg2
)))
7864 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7865 unlock_user(p
, arg2
, 0);
7868 #ifdef TARGET_NR_nice /* not on alpha */
7869 case TARGET_NR_nice
:
7870 ret
= get_errno(nice(arg1
));
7873 #ifdef TARGET_NR_ftime
7874 case TARGET_NR_ftime
:
7877 case TARGET_NR_sync
:
7881 case TARGET_NR_kill
:
7882 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7884 #ifdef TARGET_NR_rename
7885 case TARGET_NR_rename
:
7888 p
= lock_user_string(arg1
);
7889 p2
= lock_user_string(arg2
);
7891 ret
= -TARGET_EFAULT
;
7893 ret
= get_errno(rename(p
, p2
));
7894 unlock_user(p2
, arg2
, 0);
7895 unlock_user(p
, arg1
, 0);
7899 #if defined(TARGET_NR_renameat)
7900 case TARGET_NR_renameat
:
7903 p
= lock_user_string(arg2
);
7904 p2
= lock_user_string(arg4
);
7906 ret
= -TARGET_EFAULT
;
7908 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7909 unlock_user(p2
, arg4
, 0);
7910 unlock_user(p
, arg2
, 0);
7914 #ifdef TARGET_NR_mkdir
7915 case TARGET_NR_mkdir
:
7916 if (!(p
= lock_user_string(arg1
)))
7918 ret
= get_errno(mkdir(p
, arg2
));
7919 unlock_user(p
, arg1
, 0);
7922 #if defined(TARGET_NR_mkdirat)
7923 case TARGET_NR_mkdirat
:
7924 if (!(p
= lock_user_string(arg2
)))
7926 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7927 unlock_user(p
, arg2
, 0);
7930 #ifdef TARGET_NR_rmdir
7931 case TARGET_NR_rmdir
:
7932 if (!(p
= lock_user_string(arg1
)))
7934 ret
= get_errno(rmdir(p
));
7935 unlock_user(p
, arg1
, 0);
7939 ret
= get_errno(dup(arg1
));
7941 fd_trans_dup(arg1
, ret
);
7944 #ifdef TARGET_NR_pipe
7945 case TARGET_NR_pipe
:
7946 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7949 #ifdef TARGET_NR_pipe2
7950 case TARGET_NR_pipe2
:
7951 ret
= do_pipe(cpu_env
, arg1
,
7952 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7955 case TARGET_NR_times
:
7957 struct target_tms
*tmsp
;
7959 ret
= get_errno(times(&tms
));
7961 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7964 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7965 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7966 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7967 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7970 ret
= host_to_target_clock_t(ret
);
7973 #ifdef TARGET_NR_prof
7974 case TARGET_NR_prof
:
7977 #ifdef TARGET_NR_signal
7978 case TARGET_NR_signal
:
7981 case TARGET_NR_acct
:
7983 ret
= get_errno(acct(NULL
));
7985 if (!(p
= lock_user_string(arg1
)))
7987 ret
= get_errno(acct(path(p
)));
7988 unlock_user(p
, arg1
, 0);
7991 #ifdef TARGET_NR_umount2
7992 case TARGET_NR_umount2
:
7993 if (!(p
= lock_user_string(arg1
)))
7995 ret
= get_errno(umount2(p
, arg2
));
7996 unlock_user(p
, arg1
, 0);
7999 #ifdef TARGET_NR_lock
8000 case TARGET_NR_lock
:
8003 case TARGET_NR_ioctl
:
8004 ret
= do_ioctl(arg1
, arg2
, arg3
);
8006 case TARGET_NR_fcntl
:
8007 ret
= do_fcntl(arg1
, arg2
, arg3
);
8009 #ifdef TARGET_NR_mpx
8013 case TARGET_NR_setpgid
:
8014 ret
= get_errno(setpgid(arg1
, arg2
));
8016 #ifdef TARGET_NR_ulimit
8017 case TARGET_NR_ulimit
:
8020 #ifdef TARGET_NR_oldolduname
8021 case TARGET_NR_oldolduname
:
8024 case TARGET_NR_umask
:
8025 ret
= get_errno(umask(arg1
));
8027 case TARGET_NR_chroot
:
8028 if (!(p
= lock_user_string(arg1
)))
8030 ret
= get_errno(chroot(p
));
8031 unlock_user(p
, arg1
, 0);
8033 #ifdef TARGET_NR_ustat
8034 case TARGET_NR_ustat
:
8037 #ifdef TARGET_NR_dup2
8038 case TARGET_NR_dup2
:
8039 ret
= get_errno(dup2(arg1
, arg2
));
8041 fd_trans_dup(arg1
, arg2
);
8045 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8046 case TARGET_NR_dup3
:
8047 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8049 fd_trans_dup(arg1
, arg2
);
8053 #ifdef TARGET_NR_getppid /* not on alpha */
8054 case TARGET_NR_getppid
:
8055 ret
= get_errno(getppid());
8058 #ifdef TARGET_NR_getpgrp
8059 case TARGET_NR_getpgrp
:
8060 ret
= get_errno(getpgrp());
8063 case TARGET_NR_setsid
:
8064 ret
= get_errno(setsid());
8066 #ifdef TARGET_NR_sigaction
8067 case TARGET_NR_sigaction
:
8069 #if defined(TARGET_ALPHA)
8070 struct target_sigaction act
, oact
, *pact
= 0;
8071 struct target_old_sigaction
*old_act
;
8073 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8075 act
._sa_handler
= old_act
->_sa_handler
;
8076 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8077 act
.sa_flags
= old_act
->sa_flags
;
8078 act
.sa_restorer
= 0;
8079 unlock_user_struct(old_act
, arg2
, 0);
8082 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8083 if (!is_error(ret
) && arg3
) {
8084 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8086 old_act
->_sa_handler
= oact
._sa_handler
;
8087 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8088 old_act
->sa_flags
= oact
.sa_flags
;
8089 unlock_user_struct(old_act
, arg3
, 1);
8091 #elif defined(TARGET_MIPS)
8092 struct target_sigaction act
, oact
, *pact
, *old_act
;
8095 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8097 act
._sa_handler
= old_act
->_sa_handler
;
8098 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8099 act
.sa_flags
= old_act
->sa_flags
;
8100 unlock_user_struct(old_act
, arg2
, 0);
8106 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8108 if (!is_error(ret
) && arg3
) {
8109 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8111 old_act
->_sa_handler
= oact
._sa_handler
;
8112 old_act
->sa_flags
= oact
.sa_flags
;
8113 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8114 old_act
->sa_mask
.sig
[1] = 0;
8115 old_act
->sa_mask
.sig
[2] = 0;
8116 old_act
->sa_mask
.sig
[3] = 0;
8117 unlock_user_struct(old_act
, arg3
, 1);
8120 struct target_old_sigaction
*old_act
;
8121 struct target_sigaction act
, oact
, *pact
;
8123 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8125 act
._sa_handler
= old_act
->_sa_handler
;
8126 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8127 act
.sa_flags
= old_act
->sa_flags
;
8128 act
.sa_restorer
= old_act
->sa_restorer
;
8129 unlock_user_struct(old_act
, arg2
, 0);
8134 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8135 if (!is_error(ret
) && arg3
) {
8136 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8138 old_act
->_sa_handler
= oact
._sa_handler
;
8139 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8140 old_act
->sa_flags
= oact
.sa_flags
;
8141 old_act
->sa_restorer
= oact
.sa_restorer
;
8142 unlock_user_struct(old_act
, arg3
, 1);
8148 case TARGET_NR_rt_sigaction
:
8150 #if defined(TARGET_ALPHA)
8151 struct target_sigaction act
, oact
, *pact
= 0;
8152 struct target_rt_sigaction
*rt_act
;
8154 if (arg4
!= sizeof(target_sigset_t
)) {
8155 ret
= -TARGET_EINVAL
;
8159 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8161 act
._sa_handler
= rt_act
->_sa_handler
;
8162 act
.sa_mask
= rt_act
->sa_mask
;
8163 act
.sa_flags
= rt_act
->sa_flags
;
8164 act
.sa_restorer
= arg5
;
8165 unlock_user_struct(rt_act
, arg2
, 0);
8168 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8169 if (!is_error(ret
) && arg3
) {
8170 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8172 rt_act
->_sa_handler
= oact
._sa_handler
;
8173 rt_act
->sa_mask
= oact
.sa_mask
;
8174 rt_act
->sa_flags
= oact
.sa_flags
;
8175 unlock_user_struct(rt_act
, arg3
, 1);
8178 struct target_sigaction
*act
;
8179 struct target_sigaction
*oact
;
8181 if (arg4
!= sizeof(target_sigset_t
)) {
8182 ret
= -TARGET_EINVAL
;
8186 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8191 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8192 ret
= -TARGET_EFAULT
;
8193 goto rt_sigaction_fail
;
8197 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8200 unlock_user_struct(act
, arg2
, 0);
8202 unlock_user_struct(oact
, arg3
, 1);
8206 #ifdef TARGET_NR_sgetmask /* not on alpha */
8207 case TARGET_NR_sgetmask
:
8210 abi_ulong target_set
;
8211 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8213 host_to_target_old_sigset(&target_set
, &cur_set
);
8219 #ifdef TARGET_NR_ssetmask /* not on alpha */
8220 case TARGET_NR_ssetmask
:
8222 sigset_t set
, oset
, cur_set
;
8223 abi_ulong target_set
= arg1
;
8224 /* We only have one word of the new mask so we must read
8225 * the rest of it with do_sigprocmask() and OR in this word.
8226 * We are guaranteed that a do_sigprocmask() that only queries
8227 * the signal mask will not fail.
8229 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8231 target_to_host_old_sigset(&set
, &target_set
);
8232 sigorset(&set
, &set
, &cur_set
);
8233 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8235 host_to_target_old_sigset(&target_set
, &oset
);
8241 #ifdef TARGET_NR_sigprocmask
8242 case TARGET_NR_sigprocmask
:
8244 #if defined(TARGET_ALPHA)
8245 sigset_t set
, oldset
;
8250 case TARGET_SIG_BLOCK
:
8253 case TARGET_SIG_UNBLOCK
:
8256 case TARGET_SIG_SETMASK
:
8260 ret
= -TARGET_EINVAL
;
8264 target_to_host_old_sigset(&set
, &mask
);
8266 ret
= do_sigprocmask(how
, &set
, &oldset
);
8267 if (!is_error(ret
)) {
8268 host_to_target_old_sigset(&mask
, &oldset
);
8270 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8273 sigset_t set
, oldset
, *set_ptr
;
8278 case TARGET_SIG_BLOCK
:
8281 case TARGET_SIG_UNBLOCK
:
8284 case TARGET_SIG_SETMASK
:
8288 ret
= -TARGET_EINVAL
;
8291 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8293 target_to_host_old_sigset(&set
, p
);
8294 unlock_user(p
, arg2
, 0);
8300 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8301 if (!is_error(ret
) && arg3
) {
8302 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8304 host_to_target_old_sigset(p
, &oldset
);
8305 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8311 case TARGET_NR_rt_sigprocmask
:
8314 sigset_t set
, oldset
, *set_ptr
;
8316 if (arg4
!= sizeof(target_sigset_t
)) {
8317 ret
= -TARGET_EINVAL
;
8323 case TARGET_SIG_BLOCK
:
8326 case TARGET_SIG_UNBLOCK
:
8329 case TARGET_SIG_SETMASK
:
8333 ret
= -TARGET_EINVAL
;
8336 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8338 target_to_host_sigset(&set
, p
);
8339 unlock_user(p
, arg2
, 0);
8345 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8346 if (!is_error(ret
) && arg3
) {
8347 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8349 host_to_target_sigset(p
, &oldset
);
8350 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8354 #ifdef TARGET_NR_sigpending
8355 case TARGET_NR_sigpending
:
8358 ret
= get_errno(sigpending(&set
));
8359 if (!is_error(ret
)) {
8360 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8362 host_to_target_old_sigset(p
, &set
);
8363 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8368 case TARGET_NR_rt_sigpending
:
8372 /* Yes, this check is >, not != like most. We follow the kernel's
8373 * logic and it does it like this because it implements
8374 * NR_sigpending through the same code path, and in that case
8375 * the old_sigset_t is smaller in size.
8377 if (arg2
> sizeof(target_sigset_t
)) {
8378 ret
= -TARGET_EINVAL
;
8382 ret
= get_errno(sigpending(&set
));
8383 if (!is_error(ret
)) {
8384 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8386 host_to_target_sigset(p
, &set
);
8387 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8391 #ifdef TARGET_NR_sigsuspend
8392 case TARGET_NR_sigsuspend
:
8394 TaskState
*ts
= cpu
->opaque
;
8395 #if defined(TARGET_ALPHA)
8396 abi_ulong mask
= arg1
;
8397 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8399 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8401 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8402 unlock_user(p
, arg1
, 0);
8404 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8406 if (ret
!= -TARGET_ERESTARTSYS
) {
8407 ts
->in_sigsuspend
= 1;
8412 case TARGET_NR_rt_sigsuspend
:
8414 TaskState
*ts
= cpu
->opaque
;
8416 if (arg2
!= sizeof(target_sigset_t
)) {
8417 ret
= -TARGET_EINVAL
;
8420 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8422 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8423 unlock_user(p
, arg1
, 0);
8424 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8426 if (ret
!= -TARGET_ERESTARTSYS
) {
8427 ts
->in_sigsuspend
= 1;
8431 case TARGET_NR_rt_sigtimedwait
:
8434 struct timespec uts
, *puts
;
8437 if (arg4
!= sizeof(target_sigset_t
)) {
8438 ret
= -TARGET_EINVAL
;
8442 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8444 target_to_host_sigset(&set
, p
);
8445 unlock_user(p
, arg1
, 0);
8448 target_to_host_timespec(puts
, arg3
);
8452 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8454 if (!is_error(ret
)) {
8456 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8461 host_to_target_siginfo(p
, &uinfo
);
8462 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8464 ret
= host_to_target_signal(ret
);
8468 case TARGET_NR_rt_sigqueueinfo
:
8472 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8476 target_to_host_siginfo(&uinfo
, p
);
8477 unlock_user(p
, arg1
, 0);
8478 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8481 #ifdef TARGET_NR_sigreturn
8482 case TARGET_NR_sigreturn
:
8483 if (block_signals()) {
8484 ret
= -TARGET_ERESTARTSYS
;
8486 ret
= do_sigreturn(cpu_env
);
8490 case TARGET_NR_rt_sigreturn
:
8491 if (block_signals()) {
8492 ret
= -TARGET_ERESTARTSYS
;
8494 ret
= do_rt_sigreturn(cpu_env
);
8497 case TARGET_NR_sethostname
:
8498 if (!(p
= lock_user_string(arg1
)))
8500 ret
= get_errno(sethostname(p
, arg2
));
8501 unlock_user(p
, arg1
, 0);
8503 case TARGET_NR_setrlimit
:
8505 int resource
= target_to_host_resource(arg1
);
8506 struct target_rlimit
*target_rlim
;
8508 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8510 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8511 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8512 unlock_user_struct(target_rlim
, arg2
, 0);
8513 ret
= get_errno(setrlimit(resource
, &rlim
));
8516 case TARGET_NR_getrlimit
:
8518 int resource
= target_to_host_resource(arg1
);
8519 struct target_rlimit
*target_rlim
;
8522 ret
= get_errno(getrlimit(resource
, &rlim
));
8523 if (!is_error(ret
)) {
8524 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8526 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8527 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8528 unlock_user_struct(target_rlim
, arg2
, 1);
8532 case TARGET_NR_getrusage
:
8534 struct rusage rusage
;
8535 ret
= get_errno(getrusage(arg1
, &rusage
));
8536 if (!is_error(ret
)) {
8537 ret
= host_to_target_rusage(arg2
, &rusage
);
8541 case TARGET_NR_gettimeofday
:
8544 ret
= get_errno(gettimeofday(&tv
, NULL
));
8545 if (!is_error(ret
)) {
8546 if (copy_to_user_timeval(arg1
, &tv
))
8551 case TARGET_NR_settimeofday
:
8553 struct timeval tv
, *ptv
= NULL
;
8554 struct timezone tz
, *ptz
= NULL
;
8557 if (copy_from_user_timeval(&tv
, arg1
)) {
8564 if (copy_from_user_timezone(&tz
, arg2
)) {
8570 ret
= get_errno(settimeofday(ptv
, ptz
));
8573 #if defined(TARGET_NR_select)
8574 case TARGET_NR_select
:
8575 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8576 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8579 struct target_sel_arg_struct
*sel
;
8580 abi_ulong inp
, outp
, exp
, tvp
;
8583 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
8585 nsel
= tswapal(sel
->n
);
8586 inp
= tswapal(sel
->inp
);
8587 outp
= tswapal(sel
->outp
);
8588 exp
= tswapal(sel
->exp
);
8589 tvp
= tswapal(sel
->tvp
);
8590 unlock_user_struct(sel
, arg1
, 0);
8591 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8596 #ifdef TARGET_NR_pselect6
8597 case TARGET_NR_pselect6
:
8599 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8600 fd_set rfds
, wfds
, efds
;
8601 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8602 struct timespec ts
, *ts_ptr
;
8605 * The 6th arg is actually two args smashed together,
8606 * so we cannot use the C library.
8614 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8615 target_sigset_t
*target_sigset
;
8623 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8627 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8631 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8637 * This takes a timespec, and not a timeval, so we cannot
8638 * use the do_select() helper ...
8641 if (target_to_host_timespec(&ts
, ts_addr
)) {
8649 /* Extract the two packed args for the sigset */
8652 sig
.size
= SIGSET_T_SIZE
;
8654 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8658 arg_sigset
= tswapal(arg7
[0]);
8659 arg_sigsize
= tswapal(arg7
[1]);
8660 unlock_user(arg7
, arg6
, 0);
8664 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8665 /* Like the kernel, we enforce correct size sigsets */
8666 ret
= -TARGET_EINVAL
;
8669 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8670 sizeof(*target_sigset
), 1);
8671 if (!target_sigset
) {
8674 target_to_host_sigset(&set
, target_sigset
);
8675 unlock_user(target_sigset
, arg_sigset
, 0);
8683 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8686 if (!is_error(ret
)) {
8687 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8689 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8691 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8694 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8700 #ifdef TARGET_NR_symlink
8701 case TARGET_NR_symlink
:
8704 p
= lock_user_string(arg1
);
8705 p2
= lock_user_string(arg2
);
8707 ret
= -TARGET_EFAULT
;
8709 ret
= get_errno(symlink(p
, p2
));
8710 unlock_user(p2
, arg2
, 0);
8711 unlock_user(p
, arg1
, 0);
8715 #if defined(TARGET_NR_symlinkat)
8716 case TARGET_NR_symlinkat
:
8719 p
= lock_user_string(arg1
);
8720 p2
= lock_user_string(arg3
);
8722 ret
= -TARGET_EFAULT
;
8724 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8725 unlock_user(p2
, arg3
, 0);
8726 unlock_user(p
, arg1
, 0);
8730 #ifdef TARGET_NR_oldlstat
8731 case TARGET_NR_oldlstat
:
8734 #ifdef TARGET_NR_readlink
8735 case TARGET_NR_readlink
:
8738 p
= lock_user_string(arg1
);
8739 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8741 ret
= -TARGET_EFAULT
;
8743 /* Short circuit this for the magic exe check. */
8744 ret
= -TARGET_EINVAL
;
8745 } else if (is_proc_myself((const char *)p
, "exe")) {
8746 char real
[PATH_MAX
], *temp
;
8747 temp
= realpath(exec_path
, real
);
8748 /* Return value is # of bytes that we wrote to the buffer. */
8750 ret
= get_errno(-1);
8752 /* Don't worry about sign mismatch as earlier mapping
8753 * logic would have thrown a bad address error. */
8754 ret
= MIN(strlen(real
), arg3
);
8755 /* We cannot NUL terminate the string. */
8756 memcpy(p2
, real
, ret
);
8759 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8761 unlock_user(p2
, arg2
, ret
);
8762 unlock_user(p
, arg1
, 0);
8766 #if defined(TARGET_NR_readlinkat)
8767 case TARGET_NR_readlinkat
:
8770 p
= lock_user_string(arg2
);
8771 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8773 ret
= -TARGET_EFAULT
;
8774 } else if (is_proc_myself((const char *)p
, "exe")) {
8775 char real
[PATH_MAX
], *temp
;
8776 temp
= realpath(exec_path
, real
);
8777 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8778 snprintf((char *)p2
, arg4
, "%s", real
);
8780 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8782 unlock_user(p2
, arg3
, ret
);
8783 unlock_user(p
, arg2
, 0);
8787 #ifdef TARGET_NR_uselib
8788 case TARGET_NR_uselib
:
8791 #ifdef TARGET_NR_swapon
8792 case TARGET_NR_swapon
:
8793 if (!(p
= lock_user_string(arg1
)))
8795 ret
= get_errno(swapon(p
, arg2
));
8796 unlock_user(p
, arg1
, 0);
8799 case TARGET_NR_reboot
:
8800 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8801 /* arg4 must be ignored in all other cases */
8802 p
= lock_user_string(arg4
);
8806 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8807 unlock_user(p
, arg4
, 0);
8809 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8812 #ifdef TARGET_NR_readdir
8813 case TARGET_NR_readdir
:
8816 #ifdef TARGET_NR_mmap
8817 case TARGET_NR_mmap
:
8818 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8819 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8820 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8821 || defined(TARGET_S390X)
8824 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8825 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8833 unlock_user(v
, arg1
, 0);
8834 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8835 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8839 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8840 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8846 #ifdef TARGET_NR_mmap2
8847 case TARGET_NR_mmap2
:
8849 #define MMAP_SHIFT 12
8851 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8852 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8854 arg6
<< MMAP_SHIFT
));
8857 case TARGET_NR_munmap
:
8858 ret
= get_errno(target_munmap(arg1
, arg2
));
8860 case TARGET_NR_mprotect
:
8862 TaskState
*ts
= cpu
->opaque
;
8863 /* Special hack to detect libc making the stack executable. */
8864 if ((arg3
& PROT_GROWSDOWN
)
8865 && arg1
>= ts
->info
->stack_limit
8866 && arg1
<= ts
->info
->start_stack
) {
8867 arg3
&= ~PROT_GROWSDOWN
;
8868 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8869 arg1
= ts
->info
->stack_limit
;
8872 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8874 #ifdef TARGET_NR_mremap
8875 case TARGET_NR_mremap
:
8876 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8879 /* ??? msync/mlock/munlock are broken for softmmu. */
8880 #ifdef TARGET_NR_msync
8881 case TARGET_NR_msync
:
8882 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8885 #ifdef TARGET_NR_mlock
8886 case TARGET_NR_mlock
:
8887 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8890 #ifdef TARGET_NR_munlock
8891 case TARGET_NR_munlock
:
8892 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8895 #ifdef TARGET_NR_mlockall
8896 case TARGET_NR_mlockall
:
8897 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8900 #ifdef TARGET_NR_munlockall
8901 case TARGET_NR_munlockall
:
8902 ret
= get_errno(munlockall());
8905 case TARGET_NR_truncate
:
8906 if (!(p
= lock_user_string(arg1
)))
8908 ret
= get_errno(truncate(p
, arg2
));
8909 unlock_user(p
, arg1
, 0);
8911 case TARGET_NR_ftruncate
:
8912 ret
= get_errno(ftruncate(arg1
, arg2
));
8914 case TARGET_NR_fchmod
:
8915 ret
= get_errno(fchmod(arg1
, arg2
));
8917 #if defined(TARGET_NR_fchmodat)
8918 case TARGET_NR_fchmodat
:
8919 if (!(p
= lock_user_string(arg2
)))
8921 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8922 unlock_user(p
, arg2
, 0);
8925 case TARGET_NR_getpriority
:
8926 /* Note that negative values are valid for getpriority, so we must
8927 differentiate based on errno settings. */
8929 ret
= getpriority(arg1
, arg2
);
8930 if (ret
== -1 && errno
!= 0) {
8931 ret
= -host_to_target_errno(errno
);
8935 /* Return value is the unbiased priority. Signal no error. */
8936 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8938 /* Return value is a biased priority to avoid negative numbers. */
8942 case TARGET_NR_setpriority
:
8943 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8945 #ifdef TARGET_NR_profil
8946 case TARGET_NR_profil
:
8949 case TARGET_NR_statfs
:
8950 if (!(p
= lock_user_string(arg1
)))
8952 ret
= get_errno(statfs(path(p
), &stfs
));
8953 unlock_user(p
, arg1
, 0);
8955 if (!is_error(ret
)) {
8956 struct target_statfs
*target_stfs
;
8958 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8960 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8961 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8962 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8963 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8964 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8965 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8966 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8967 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8968 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8969 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8970 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8971 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8972 unlock_user_struct(target_stfs
, arg2
, 1);
8975 case TARGET_NR_fstatfs
:
8976 ret
= get_errno(fstatfs(arg1
, &stfs
));
8977 goto convert_statfs
;
8978 #ifdef TARGET_NR_statfs64
8979 case TARGET_NR_statfs64
:
8980 if (!(p
= lock_user_string(arg1
)))
8982 ret
= get_errno(statfs(path(p
), &stfs
));
8983 unlock_user(p
, arg1
, 0);
8985 if (!is_error(ret
)) {
8986 struct target_statfs64
*target_stfs
;
8988 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8990 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8991 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8992 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8993 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8994 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8995 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8996 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8997 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8998 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8999 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9000 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9001 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9002 unlock_user_struct(target_stfs
, arg3
, 1);
9005 case TARGET_NR_fstatfs64
:
9006 ret
= get_errno(fstatfs(arg1
, &stfs
));
9007 goto convert_statfs64
;
9009 #ifdef TARGET_NR_ioperm
9010 case TARGET_NR_ioperm
:
9013 #ifdef TARGET_NR_socketcall
9014 case TARGET_NR_socketcall
:
9015 ret
= do_socketcall(arg1
, arg2
);
9018 #ifdef TARGET_NR_accept
9019 case TARGET_NR_accept
:
9020 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9023 #ifdef TARGET_NR_accept4
9024 case TARGET_NR_accept4
:
9025 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9028 #ifdef TARGET_NR_bind
9029 case TARGET_NR_bind
:
9030 ret
= do_bind(arg1
, arg2
, arg3
);
9033 #ifdef TARGET_NR_connect
9034 case TARGET_NR_connect
:
9035 ret
= do_connect(arg1
, arg2
, arg3
);
9038 #ifdef TARGET_NR_getpeername
9039 case TARGET_NR_getpeername
:
9040 ret
= do_getpeername(arg1
, arg2
, arg3
);
9043 #ifdef TARGET_NR_getsockname
9044 case TARGET_NR_getsockname
:
9045 ret
= do_getsockname(arg1
, arg2
, arg3
);
9048 #ifdef TARGET_NR_getsockopt
9049 case TARGET_NR_getsockopt
:
9050 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9053 #ifdef TARGET_NR_listen
9054 case TARGET_NR_listen
:
9055 ret
= get_errno(listen(arg1
, arg2
));
9058 #ifdef TARGET_NR_recv
9059 case TARGET_NR_recv
:
9060 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9063 #ifdef TARGET_NR_recvfrom
9064 case TARGET_NR_recvfrom
:
9065 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9068 #ifdef TARGET_NR_recvmsg
9069 case TARGET_NR_recvmsg
:
9070 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9073 #ifdef TARGET_NR_send
9074 case TARGET_NR_send
:
9075 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9078 #ifdef TARGET_NR_sendmsg
9079 case TARGET_NR_sendmsg
:
9080 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9083 #ifdef TARGET_NR_sendmmsg
9084 case TARGET_NR_sendmmsg
:
9085 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9087 case TARGET_NR_recvmmsg
:
9088 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9091 #ifdef TARGET_NR_sendto
9092 case TARGET_NR_sendto
:
9093 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9096 #ifdef TARGET_NR_shutdown
9097 case TARGET_NR_shutdown
:
9098 ret
= get_errno(shutdown(arg1
, arg2
));
9101 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9102 case TARGET_NR_getrandom
:
9103 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9107 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9108 unlock_user(p
, arg1
, ret
);
9111 #ifdef TARGET_NR_socket
9112 case TARGET_NR_socket
:
9113 ret
= do_socket(arg1
, arg2
, arg3
);
9114 fd_trans_unregister(ret
);
9117 #ifdef TARGET_NR_socketpair
9118 case TARGET_NR_socketpair
:
9119 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9122 #ifdef TARGET_NR_setsockopt
9123 case TARGET_NR_setsockopt
:
9124 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9128 case TARGET_NR_syslog
:
9129 if (!(p
= lock_user_string(arg2
)))
9131 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9132 unlock_user(p
, arg2
, 0);
9135 case TARGET_NR_setitimer
:
9137 struct itimerval value
, ovalue
, *pvalue
;
9141 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9142 || copy_from_user_timeval(&pvalue
->it_value
,
9143 arg2
+ sizeof(struct target_timeval
)))
9148 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9149 if (!is_error(ret
) && arg3
) {
9150 if (copy_to_user_timeval(arg3
,
9151 &ovalue
.it_interval
)
9152 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9158 case TARGET_NR_getitimer
:
9160 struct itimerval value
;
9162 ret
= get_errno(getitimer(arg1
, &value
));
9163 if (!is_error(ret
) && arg2
) {
9164 if (copy_to_user_timeval(arg2
,
9166 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9172 #ifdef TARGET_NR_stat
9173 case TARGET_NR_stat
:
9174 if (!(p
= lock_user_string(arg1
)))
9176 ret
= get_errno(stat(path(p
), &st
));
9177 unlock_user(p
, arg1
, 0);
9180 #ifdef TARGET_NR_lstat
9181 case TARGET_NR_lstat
:
9182 if (!(p
= lock_user_string(arg1
)))
9184 ret
= get_errno(lstat(path(p
), &st
));
9185 unlock_user(p
, arg1
, 0);
9188 case TARGET_NR_fstat
:
9190 ret
= get_errno(fstat(arg1
, &st
));
9191 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9194 if (!is_error(ret
)) {
9195 struct target_stat
*target_st
;
9197 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9199 memset(target_st
, 0, sizeof(*target_st
));
9200 __put_user(st
.st_dev
, &target_st
->st_dev
);
9201 __put_user(st
.st_ino
, &target_st
->st_ino
);
9202 __put_user(st
.st_mode
, &target_st
->st_mode
);
9203 __put_user(st
.st_uid
, &target_st
->st_uid
);
9204 __put_user(st
.st_gid
, &target_st
->st_gid
);
9205 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9206 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9207 __put_user(st
.st_size
, &target_st
->st_size
);
9208 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9209 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9210 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9211 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9212 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9213 unlock_user_struct(target_st
, arg2
, 1);
9217 #ifdef TARGET_NR_olduname
9218 case TARGET_NR_olduname
:
9221 #ifdef TARGET_NR_iopl
9222 case TARGET_NR_iopl
:
9225 case TARGET_NR_vhangup
:
9226 ret
= get_errno(vhangup());
9228 #ifdef TARGET_NR_idle
9229 case TARGET_NR_idle
:
9232 #ifdef TARGET_NR_syscall
9233 case TARGET_NR_syscall
:
9234 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9235 arg6
, arg7
, arg8
, 0);
9238 case TARGET_NR_wait4
:
9241 abi_long status_ptr
= arg2
;
9242 struct rusage rusage
, *rusage_ptr
;
9243 abi_ulong target_rusage
= arg4
;
9244 abi_long rusage_err
;
9246 rusage_ptr
= &rusage
;
9249 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9250 if (!is_error(ret
)) {
9251 if (status_ptr
&& ret
) {
9252 status
= host_to_target_waitstatus(status
);
9253 if (put_user_s32(status
, status_ptr
))
9256 if (target_rusage
) {
9257 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9265 #ifdef TARGET_NR_swapoff
9266 case TARGET_NR_swapoff
:
9267 if (!(p
= lock_user_string(arg1
)))
9269 ret
= get_errno(swapoff(p
));
9270 unlock_user(p
, arg1
, 0);
9273 case TARGET_NR_sysinfo
:
9275 struct target_sysinfo
*target_value
;
9276 struct sysinfo value
;
9277 ret
= get_errno(sysinfo(&value
));
9278 if (!is_error(ret
) && arg1
)
9280 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9282 __put_user(value
.uptime
, &target_value
->uptime
);
9283 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9284 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9285 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9286 __put_user(value
.totalram
, &target_value
->totalram
);
9287 __put_user(value
.freeram
, &target_value
->freeram
);
9288 __put_user(value
.sharedram
, &target_value
->sharedram
);
9289 __put_user(value
.bufferram
, &target_value
->bufferram
);
9290 __put_user(value
.totalswap
, &target_value
->totalswap
);
9291 __put_user(value
.freeswap
, &target_value
->freeswap
);
9292 __put_user(value
.procs
, &target_value
->procs
);
9293 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9294 __put_user(value
.freehigh
, &target_value
->freehigh
);
9295 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9296 unlock_user_struct(target_value
, arg1
, 1);
9300 #ifdef TARGET_NR_ipc
9302 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9305 #ifdef TARGET_NR_semget
9306 case TARGET_NR_semget
:
9307 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9310 #ifdef TARGET_NR_semop
9311 case TARGET_NR_semop
:
9312 ret
= do_semop(arg1
, arg2
, arg3
);
9315 #ifdef TARGET_NR_semctl
9316 case TARGET_NR_semctl
:
9317 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9320 #ifdef TARGET_NR_msgctl
9321 case TARGET_NR_msgctl
:
9322 ret
= do_msgctl(arg1
, arg2
, arg3
);
9325 #ifdef TARGET_NR_msgget
9326 case TARGET_NR_msgget
:
9327 ret
= get_errno(msgget(arg1
, arg2
));
9330 #ifdef TARGET_NR_msgrcv
9331 case TARGET_NR_msgrcv
:
9332 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9335 #ifdef TARGET_NR_msgsnd
9336 case TARGET_NR_msgsnd
:
9337 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9340 #ifdef TARGET_NR_shmget
9341 case TARGET_NR_shmget
:
9342 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9345 #ifdef TARGET_NR_shmctl
9346 case TARGET_NR_shmctl
:
9347 ret
= do_shmctl(arg1
, arg2
, arg3
);
9350 #ifdef TARGET_NR_shmat
9351 case TARGET_NR_shmat
:
9352 ret
= do_shmat(arg1
, arg2
, arg3
);
9355 #ifdef TARGET_NR_shmdt
9356 case TARGET_NR_shmdt
:
9357 ret
= do_shmdt(arg1
);
9360 case TARGET_NR_fsync
:
9361 ret
= get_errno(fsync(arg1
));
9363 case TARGET_NR_clone
:
9364 /* Linux manages to have three different orderings for its
9365 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9366 * match the kernel's CONFIG_CLONE_* settings.
9367 * Microblaze is further special in that it uses a sixth
9368 * implicit argument to clone for the TLS pointer.
9370 #if defined(TARGET_MICROBLAZE)
9371 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9372 #elif defined(TARGET_CLONE_BACKWARDS)
9373 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9374 #elif defined(TARGET_CLONE_BACKWARDS2)
9375 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9377 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9380 #ifdef __NR_exit_group
9381 /* new thread calls */
9382 case TARGET_NR_exit_group
:
9386 gdb_exit(cpu_env
, arg1
);
9387 ret
= get_errno(exit_group(arg1
));
9390 case TARGET_NR_setdomainname
:
9391 if (!(p
= lock_user_string(arg1
)))
9393 ret
= get_errno(setdomainname(p
, arg2
));
9394 unlock_user(p
, arg1
, 0);
9396 case TARGET_NR_uname
:
9397 /* no need to transcode because we use the linux syscall */
9399 struct new_utsname
* buf
;
9401 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9403 ret
= get_errno(sys_uname(buf
));
9404 if (!is_error(ret
)) {
9405 /* Overwrite the native machine name with whatever is being
9407 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9408 /* Allow the user to override the reported release. */
9409 if (qemu_uname_release
&& *qemu_uname_release
) {
9410 g_strlcpy(buf
->release
, qemu_uname_release
,
9411 sizeof(buf
->release
));
9414 unlock_user_struct(buf
, arg1
, 1);
9418 case TARGET_NR_modify_ldt
:
9419 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9421 #if !defined(TARGET_X86_64)
9422 case TARGET_NR_vm86old
:
9424 case TARGET_NR_vm86
:
9425 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9429 case TARGET_NR_adjtimex
:
9431 #ifdef TARGET_NR_create_module
9432 case TARGET_NR_create_module
:
9434 case TARGET_NR_init_module
:
9435 case TARGET_NR_delete_module
:
9436 #ifdef TARGET_NR_get_kernel_syms
9437 case TARGET_NR_get_kernel_syms
:
9440 case TARGET_NR_quotactl
:
9442 case TARGET_NR_getpgid
:
9443 ret
= get_errno(getpgid(arg1
));
9445 case TARGET_NR_fchdir
:
9446 ret
= get_errno(fchdir(arg1
));
9448 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9449 case TARGET_NR_bdflush
:
9452 #ifdef TARGET_NR_sysfs
9453 case TARGET_NR_sysfs
:
9456 case TARGET_NR_personality
:
9457 ret
= get_errno(personality(arg1
));
9459 #ifdef TARGET_NR_afs_syscall
9460 case TARGET_NR_afs_syscall
:
9463 #ifdef TARGET_NR__llseek /* Not on alpha */
9464 case TARGET_NR__llseek
:
9467 #if !defined(__NR_llseek)
9468 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9470 ret
= get_errno(res
);
9475 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9477 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9483 #ifdef TARGET_NR_getdents
9484 case TARGET_NR_getdents
:
9485 #ifdef __NR_getdents
9486 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9488 struct target_dirent
*target_dirp
;
9489 struct linux_dirent
*dirp
;
9490 abi_long count
= arg3
;
9492 dirp
= g_try_malloc(count
);
9494 ret
= -TARGET_ENOMEM
;
9498 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9499 if (!is_error(ret
)) {
9500 struct linux_dirent
*de
;
9501 struct target_dirent
*tde
;
9503 int reclen
, treclen
;
9504 int count1
, tnamelen
;
9508 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9512 reclen
= de
->d_reclen
;
9513 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9514 assert(tnamelen
>= 0);
9515 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9516 assert(count1
+ treclen
<= count
);
9517 tde
->d_reclen
= tswap16(treclen
);
9518 tde
->d_ino
= tswapal(de
->d_ino
);
9519 tde
->d_off
= tswapal(de
->d_off
);
9520 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9521 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9523 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9527 unlock_user(target_dirp
, arg2
, ret
);
9533 struct linux_dirent
*dirp
;
9534 abi_long count
= arg3
;
9536 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9538 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9539 if (!is_error(ret
)) {
9540 struct linux_dirent
*de
;
9545 reclen
= de
->d_reclen
;
9548 de
->d_reclen
= tswap16(reclen
);
9549 tswapls(&de
->d_ino
);
9550 tswapls(&de
->d_off
);
9551 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9555 unlock_user(dirp
, arg2
, ret
);
9559 /* Implement getdents in terms of getdents64 */
9561 struct linux_dirent64
*dirp
;
9562 abi_long count
= arg3
;
9564 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9568 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9569 if (!is_error(ret
)) {
9570 /* Convert the dirent64 structs to target dirent. We do this
9571 * in-place, since we can guarantee that a target_dirent is no
9572 * larger than a dirent64; however this means we have to be
9573 * careful to read everything before writing in the new format.
9575 struct linux_dirent64
*de
;
9576 struct target_dirent
*tde
;
9581 tde
= (struct target_dirent
*)dirp
;
9583 int namelen
, treclen
;
9584 int reclen
= de
->d_reclen
;
9585 uint64_t ino
= de
->d_ino
;
9586 int64_t off
= de
->d_off
;
9587 uint8_t type
= de
->d_type
;
9589 namelen
= strlen(de
->d_name
);
9590 treclen
= offsetof(struct target_dirent
, d_name
)
9592 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9594 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9595 tde
->d_ino
= tswapal(ino
);
9596 tde
->d_off
= tswapal(off
);
9597 tde
->d_reclen
= tswap16(treclen
);
9598 /* The target_dirent type is in what was formerly a padding
9599 * byte at the end of the structure:
9601 *(((char *)tde
) + treclen
- 1) = type
;
9603 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9604 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9610 unlock_user(dirp
, arg2
, ret
);
9614 #endif /* TARGET_NR_getdents */
9615 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9616 case TARGET_NR_getdents64
:
9618 struct linux_dirent64
*dirp
;
9619 abi_long count
= arg3
;
9620 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9622 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9623 if (!is_error(ret
)) {
9624 struct linux_dirent64
*de
;
9629 reclen
= de
->d_reclen
;
9632 de
->d_reclen
= tswap16(reclen
);
9633 tswap64s((uint64_t *)&de
->d_ino
);
9634 tswap64s((uint64_t *)&de
->d_off
);
9635 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9639 unlock_user(dirp
, arg2
, ret
);
9642 #endif /* TARGET_NR_getdents64 */
9643 #if defined(TARGET_NR__newselect)
9644 case TARGET_NR__newselect
:
9645 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9648 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9649 # ifdef TARGET_NR_poll
9650 case TARGET_NR_poll
:
9652 # ifdef TARGET_NR_ppoll
9653 case TARGET_NR_ppoll
:
9656 struct target_pollfd
*target_pfd
;
9657 unsigned int nfds
= arg2
;
9664 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9665 sizeof(struct target_pollfd
) * nfds
, 1);
9670 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9671 for (i
= 0; i
< nfds
; i
++) {
9672 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9673 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9678 # ifdef TARGET_NR_ppoll
9679 case TARGET_NR_ppoll
:
9681 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9682 target_sigset_t
*target_set
;
9683 sigset_t _set
, *set
= &_set
;
9686 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9687 unlock_user(target_pfd
, arg1
, 0);
9695 if (arg5
!= sizeof(target_sigset_t
)) {
9696 unlock_user(target_pfd
, arg1
, 0);
9697 ret
= -TARGET_EINVAL
;
9701 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9703 unlock_user(target_pfd
, arg1
, 0);
9706 target_to_host_sigset(set
, target_set
);
9711 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9712 set
, SIGSET_T_SIZE
));
9714 if (!is_error(ret
) && arg3
) {
9715 host_to_target_timespec(arg3
, timeout_ts
);
9718 unlock_user(target_set
, arg4
, 0);
9723 # ifdef TARGET_NR_poll
9724 case TARGET_NR_poll
:
9726 struct timespec ts
, *pts
;
9729 /* Convert ms to secs, ns */
9730 ts
.tv_sec
= arg3
/ 1000;
9731 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9734 /* -ve poll() timeout means "infinite" */
9737 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9742 g_assert_not_reached();
9745 if (!is_error(ret
)) {
9746 for(i
= 0; i
< nfds
; i
++) {
9747 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9750 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9754 case TARGET_NR_flock
:
9755 /* NOTE: the flock constant seems to be the same for every
9757 ret
= get_errno(safe_flock(arg1
, arg2
));
9759 case TARGET_NR_readv
:
9761 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9763 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9764 unlock_iovec(vec
, arg2
, arg3
, 1);
9766 ret
= -host_to_target_errno(errno
);
9770 case TARGET_NR_writev
:
9772 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9774 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9775 unlock_iovec(vec
, arg2
, arg3
, 0);
9777 ret
= -host_to_target_errno(errno
);
9781 case TARGET_NR_getsid
:
9782 ret
= get_errno(getsid(arg1
));
9784 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9785 case TARGET_NR_fdatasync
:
9786 ret
= get_errno(fdatasync(arg1
));
9789 #ifdef TARGET_NR__sysctl
9790 case TARGET_NR__sysctl
:
9791 /* We don't implement this, but ENOTDIR is always a safe
9793 ret
= -TARGET_ENOTDIR
;
9796 case TARGET_NR_sched_getaffinity
:
9798 unsigned int mask_size
;
9799 unsigned long *mask
;
9802 * sched_getaffinity needs multiples of ulong, so need to take
9803 * care of mismatches between target ulong and host ulong sizes.
9805 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9806 ret
= -TARGET_EINVAL
;
9809 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9811 mask
= alloca(mask_size
);
9812 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9814 if (!is_error(ret
)) {
9816 /* More data returned than the caller's buffer will fit.
9817 * This only happens if sizeof(abi_long) < sizeof(long)
9818 * and the caller passed us a buffer holding an odd number
9819 * of abi_longs. If the host kernel is actually using the
9820 * extra 4 bytes then fail EINVAL; otherwise we can just
9821 * ignore them and only copy the interesting part.
9823 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9824 if (numcpus
> arg2
* 8) {
9825 ret
= -TARGET_EINVAL
;
9831 if (copy_to_user(arg3
, mask
, ret
)) {
9837 case TARGET_NR_sched_setaffinity
:
9839 unsigned int mask_size
;
9840 unsigned long *mask
;
9843 * sched_setaffinity needs multiples of ulong, so need to take
9844 * care of mismatches between target ulong and host ulong sizes.
9846 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9847 ret
= -TARGET_EINVAL
;
9850 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9852 mask
= alloca(mask_size
);
9853 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9856 memcpy(mask
, p
, arg2
);
9857 unlock_user_struct(p
, arg2
, 0);
9859 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9862 case TARGET_NR_sched_setparam
:
9864 struct sched_param
*target_schp
;
9865 struct sched_param schp
;
9868 return -TARGET_EINVAL
;
9870 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9872 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9873 unlock_user_struct(target_schp
, arg2
, 0);
9874 ret
= get_errno(sched_setparam(arg1
, &schp
));
9877 case TARGET_NR_sched_getparam
:
9879 struct sched_param
*target_schp
;
9880 struct sched_param schp
;
9883 return -TARGET_EINVAL
;
9885 ret
= get_errno(sched_getparam(arg1
, &schp
));
9886 if (!is_error(ret
)) {
9887 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9889 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9890 unlock_user_struct(target_schp
, arg2
, 1);
9894 case TARGET_NR_sched_setscheduler
:
9896 struct sched_param
*target_schp
;
9897 struct sched_param schp
;
9899 return -TARGET_EINVAL
;
9901 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9903 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9904 unlock_user_struct(target_schp
, arg3
, 0);
9905 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9908 case TARGET_NR_sched_getscheduler
:
9909 ret
= get_errno(sched_getscheduler(arg1
));
9911 case TARGET_NR_sched_yield
:
9912 ret
= get_errno(sched_yield());
9914 case TARGET_NR_sched_get_priority_max
:
9915 ret
= get_errno(sched_get_priority_max(arg1
));
9917 case TARGET_NR_sched_get_priority_min
:
9918 ret
= get_errno(sched_get_priority_min(arg1
));
9920 case TARGET_NR_sched_rr_get_interval
:
9923 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9924 if (!is_error(ret
)) {
9925 ret
= host_to_target_timespec(arg2
, &ts
);
9929 case TARGET_NR_nanosleep
:
9931 struct timespec req
, rem
;
9932 target_to_host_timespec(&req
, arg1
);
9933 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9934 if (is_error(ret
) && arg2
) {
9935 host_to_target_timespec(arg2
, &rem
);
9939 #ifdef TARGET_NR_query_module
9940 case TARGET_NR_query_module
:
9943 #ifdef TARGET_NR_nfsservctl
9944 case TARGET_NR_nfsservctl
:
9947 case TARGET_NR_prctl
:
9949 case PR_GET_PDEATHSIG
:
9952 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9953 if (!is_error(ret
) && arg2
9954 && put_user_ual(deathsig
, arg2
)) {
9962 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9966 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9968 unlock_user(name
, arg2
, 16);
9973 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9977 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9979 unlock_user(name
, arg2
, 0);
9984 /* Most prctl options have no pointer arguments */
9985 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9989 #ifdef TARGET_NR_arch_prctl
9990 case TARGET_NR_arch_prctl
:
9991 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9992 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9998 #ifdef TARGET_NR_pread64
9999 case TARGET_NR_pread64
:
10000 if (regpairs_aligned(cpu_env
)) {
10004 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10006 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10007 unlock_user(p
, arg2
, ret
);
10009 case TARGET_NR_pwrite64
:
10010 if (regpairs_aligned(cpu_env
)) {
10014 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10016 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10017 unlock_user(p
, arg2
, 0);
10020 case TARGET_NR_getcwd
:
10021 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10023 ret
= get_errno(sys_getcwd1(p
, arg2
));
10024 unlock_user(p
, arg1
, ret
);
10026 case TARGET_NR_capget
:
10027 case TARGET_NR_capset
:
10029 struct target_user_cap_header
*target_header
;
10030 struct target_user_cap_data
*target_data
= NULL
;
10031 struct __user_cap_header_struct header
;
10032 struct __user_cap_data_struct data
[2];
10033 struct __user_cap_data_struct
*dataptr
= NULL
;
10034 int i
, target_datalen
;
10035 int data_items
= 1;
10037 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10040 header
.version
= tswap32(target_header
->version
);
10041 header
.pid
= tswap32(target_header
->pid
);
10043 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10044 /* Version 2 and up takes pointer to two user_data structs */
10048 target_datalen
= sizeof(*target_data
) * data_items
;
10051 if (num
== TARGET_NR_capget
) {
10052 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10054 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10056 if (!target_data
) {
10057 unlock_user_struct(target_header
, arg1
, 0);
10061 if (num
== TARGET_NR_capset
) {
10062 for (i
= 0; i
< data_items
; i
++) {
10063 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10064 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10065 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10072 if (num
== TARGET_NR_capget
) {
10073 ret
= get_errno(capget(&header
, dataptr
));
10075 ret
= get_errno(capset(&header
, dataptr
));
10078 /* The kernel always updates version for both capget and capset */
10079 target_header
->version
= tswap32(header
.version
);
10080 unlock_user_struct(target_header
, arg1
, 1);
10083 if (num
== TARGET_NR_capget
) {
10084 for (i
= 0; i
< data_items
; i
++) {
10085 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10086 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10087 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10089 unlock_user(target_data
, arg2
, target_datalen
);
10091 unlock_user(target_data
, arg2
, 0);
10096 case TARGET_NR_sigaltstack
:
10097 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10100 #ifdef CONFIG_SENDFILE
10101 case TARGET_NR_sendfile
:
10103 off_t
*offp
= NULL
;
10106 ret
= get_user_sal(off
, arg3
);
10107 if (is_error(ret
)) {
10112 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10113 if (!is_error(ret
) && arg3
) {
10114 abi_long ret2
= put_user_sal(off
, arg3
);
10115 if (is_error(ret2
)) {
10121 #ifdef TARGET_NR_sendfile64
10122 case TARGET_NR_sendfile64
:
10124 off_t
*offp
= NULL
;
10127 ret
= get_user_s64(off
, arg3
);
10128 if (is_error(ret
)) {
10133 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10134 if (!is_error(ret
) && arg3
) {
10135 abi_long ret2
= put_user_s64(off
, arg3
);
10136 if (is_error(ret2
)) {
10144 case TARGET_NR_sendfile
:
10145 #ifdef TARGET_NR_sendfile64
10146 case TARGET_NR_sendfile64
:
10148 goto unimplemented
;
10151 #ifdef TARGET_NR_getpmsg
10152 case TARGET_NR_getpmsg
:
10153 goto unimplemented
;
10155 #ifdef TARGET_NR_putpmsg
10156 case TARGET_NR_putpmsg
:
10157 goto unimplemented
;
10159 #ifdef TARGET_NR_vfork
10160 case TARGET_NR_vfork
:
10161 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10165 #ifdef TARGET_NR_ugetrlimit
10166 case TARGET_NR_ugetrlimit
:
10168 struct rlimit rlim
;
10169 int resource
= target_to_host_resource(arg1
);
10170 ret
= get_errno(getrlimit(resource
, &rlim
));
10171 if (!is_error(ret
)) {
10172 struct target_rlimit
*target_rlim
;
10173 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10175 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10176 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10177 unlock_user_struct(target_rlim
, arg2
, 1);
10182 #ifdef TARGET_NR_truncate64
10183 case TARGET_NR_truncate64
:
10184 if (!(p
= lock_user_string(arg1
)))
10186 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10187 unlock_user(p
, arg1
, 0);
10190 #ifdef TARGET_NR_ftruncate64
10191 case TARGET_NR_ftruncate64
:
10192 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10195 #ifdef TARGET_NR_stat64
10196 case TARGET_NR_stat64
:
10197 if (!(p
= lock_user_string(arg1
)))
10199 ret
= get_errno(stat(path(p
), &st
));
10200 unlock_user(p
, arg1
, 0);
10201 if (!is_error(ret
))
10202 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10205 #ifdef TARGET_NR_lstat64
10206 case TARGET_NR_lstat64
:
10207 if (!(p
= lock_user_string(arg1
)))
10209 ret
= get_errno(lstat(path(p
), &st
));
10210 unlock_user(p
, arg1
, 0);
10211 if (!is_error(ret
))
10212 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10215 #ifdef TARGET_NR_fstat64
10216 case TARGET_NR_fstat64
:
10217 ret
= get_errno(fstat(arg1
, &st
));
10218 if (!is_error(ret
))
10219 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10222 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10223 #ifdef TARGET_NR_fstatat64
10224 case TARGET_NR_fstatat64
:
10226 #ifdef TARGET_NR_newfstatat
10227 case TARGET_NR_newfstatat
:
10229 if (!(p
= lock_user_string(arg2
)))
10231 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10232 if (!is_error(ret
))
10233 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10236 #ifdef TARGET_NR_lchown
10237 case TARGET_NR_lchown
:
10238 if (!(p
= lock_user_string(arg1
)))
10240 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10241 unlock_user(p
, arg1
, 0);
10244 #ifdef TARGET_NR_getuid
10245 case TARGET_NR_getuid
:
10246 ret
= get_errno(high2lowuid(getuid()));
10249 #ifdef TARGET_NR_getgid
10250 case TARGET_NR_getgid
:
10251 ret
= get_errno(high2lowgid(getgid()));
10254 #ifdef TARGET_NR_geteuid
10255 case TARGET_NR_geteuid
:
10256 ret
= get_errno(high2lowuid(geteuid()));
10259 #ifdef TARGET_NR_getegid
10260 case TARGET_NR_getegid
:
10261 ret
= get_errno(high2lowgid(getegid()));
10264 case TARGET_NR_setreuid
:
10265 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10267 case TARGET_NR_setregid
:
10268 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10270 case TARGET_NR_getgroups
:
10272 int gidsetsize
= arg1
;
10273 target_id
*target_grouplist
;
10277 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10278 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10279 if (gidsetsize
== 0)
10281 if (!is_error(ret
)) {
10282 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10283 if (!target_grouplist
)
10285 for(i
= 0;i
< ret
; i
++)
10286 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10287 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10291 case TARGET_NR_setgroups
:
10293 int gidsetsize
= arg1
;
10294 target_id
*target_grouplist
;
10295 gid_t
*grouplist
= NULL
;
10298 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10299 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10300 if (!target_grouplist
) {
10301 ret
= -TARGET_EFAULT
;
10304 for (i
= 0; i
< gidsetsize
; i
++) {
10305 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10307 unlock_user(target_grouplist
, arg2
, 0);
10309 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10312 case TARGET_NR_fchown
:
10313 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10315 #if defined(TARGET_NR_fchownat)
10316 case TARGET_NR_fchownat
:
10317 if (!(p
= lock_user_string(arg2
)))
10319 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10320 low2highgid(arg4
), arg5
));
10321 unlock_user(p
, arg2
, 0);
10324 #ifdef TARGET_NR_setresuid
10325 case TARGET_NR_setresuid
:
10326 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10328 low2highuid(arg3
)));
10331 #ifdef TARGET_NR_getresuid
10332 case TARGET_NR_getresuid
:
10334 uid_t ruid
, euid
, suid
;
10335 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10336 if (!is_error(ret
)) {
10337 if (put_user_id(high2lowuid(ruid
), arg1
)
10338 || put_user_id(high2lowuid(euid
), arg2
)
10339 || put_user_id(high2lowuid(suid
), arg3
))
10345 #ifdef TARGET_NR_getresgid
10346 case TARGET_NR_setresgid
:
10347 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10349 low2highgid(arg3
)));
10352 #ifdef TARGET_NR_getresgid
10353 case TARGET_NR_getresgid
:
10355 gid_t rgid
, egid
, sgid
;
10356 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10357 if (!is_error(ret
)) {
10358 if (put_user_id(high2lowgid(rgid
), arg1
)
10359 || put_user_id(high2lowgid(egid
), arg2
)
10360 || put_user_id(high2lowgid(sgid
), arg3
))
10366 #ifdef TARGET_NR_chown
10367 case TARGET_NR_chown
:
10368 if (!(p
= lock_user_string(arg1
)))
10370 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10371 unlock_user(p
, arg1
, 0);
10374 case TARGET_NR_setuid
:
10375 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10377 case TARGET_NR_setgid
:
10378 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10380 case TARGET_NR_setfsuid
:
10381 ret
= get_errno(setfsuid(arg1
));
10383 case TARGET_NR_setfsgid
:
10384 ret
= get_errno(setfsgid(arg1
));
10387 #ifdef TARGET_NR_lchown32
10388 case TARGET_NR_lchown32
:
10389 if (!(p
= lock_user_string(arg1
)))
10391 ret
= get_errno(lchown(p
, arg2
, arg3
));
10392 unlock_user(p
, arg1
, 0);
10395 #ifdef TARGET_NR_getuid32
10396 case TARGET_NR_getuid32
:
10397 ret
= get_errno(getuid());
10401 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10402 /* Alpha specific */
10403 case TARGET_NR_getxuid
:
10407 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10409 ret
= get_errno(getuid());
10412 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10413 /* Alpha specific */
10414 case TARGET_NR_getxgid
:
10418 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10420 ret
= get_errno(getgid());
10423 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10424 /* Alpha specific */
10425 case TARGET_NR_osf_getsysinfo
:
10426 ret
= -TARGET_EOPNOTSUPP
;
10428 case TARGET_GSI_IEEE_FP_CONTROL
:
10430 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10432 /* Copied from linux ieee_fpcr_to_swcr. */
10433 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10434 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10435 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10436 | SWCR_TRAP_ENABLE_DZE
10437 | SWCR_TRAP_ENABLE_OVF
);
10438 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10439 | SWCR_TRAP_ENABLE_INE
);
10440 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10441 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10443 if (put_user_u64 (swcr
, arg2
))
10449 /* case GSI_IEEE_STATE_AT_SIGNAL:
10450 -- Not implemented in linux kernel.
10452 -- Retrieves current unaligned access state; not much used.
10453 case GSI_PROC_TYPE:
10454 -- Retrieves implver information; surely not used.
10455 case GSI_GET_HWRPB:
10456 -- Grabs a copy of the HWRPB; surely not used.
10461 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10462 /* Alpha specific */
10463 case TARGET_NR_osf_setsysinfo
:
10464 ret
= -TARGET_EOPNOTSUPP
;
10466 case TARGET_SSI_IEEE_FP_CONTROL
:
10468 uint64_t swcr
, fpcr
, orig_fpcr
;
10470 if (get_user_u64 (swcr
, arg2
)) {
10473 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10474 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10476 /* Copied from linux ieee_swcr_to_fpcr. */
10477 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10478 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10479 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10480 | SWCR_TRAP_ENABLE_DZE
10481 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10482 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10483 | SWCR_TRAP_ENABLE_INE
)) << 57;
10484 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10485 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10487 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10492 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10494 uint64_t exc
, fpcr
, orig_fpcr
;
10497 if (get_user_u64(exc
, arg2
)) {
10501 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10503 /* We only add to the exception status here. */
10504 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10506 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10509 /* Old exceptions are not signaled. */
10510 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10512 /* If any exceptions set by this call,
10513 and are unmasked, send a signal. */
10515 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10516 si_code
= TARGET_FPE_FLTRES
;
10518 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10519 si_code
= TARGET_FPE_FLTUND
;
10521 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10522 si_code
= TARGET_FPE_FLTOVF
;
10524 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10525 si_code
= TARGET_FPE_FLTDIV
;
10527 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10528 si_code
= TARGET_FPE_FLTINV
;
10530 if (si_code
!= 0) {
10531 target_siginfo_t info
;
10532 info
.si_signo
= SIGFPE
;
10534 info
.si_code
= si_code
;
10535 info
._sifields
._sigfault
._addr
10536 = ((CPUArchState
*)cpu_env
)->pc
;
10537 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10542 /* case SSI_NVPAIRS:
10543 -- Used with SSIN_UACPROC to enable unaligned accesses.
10544 case SSI_IEEE_STATE_AT_SIGNAL:
10545 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10546 -- Not implemented in linux kernel
10551 #ifdef TARGET_NR_osf_sigprocmask
10552 /* Alpha specific. */
10553 case TARGET_NR_osf_sigprocmask
:
10557 sigset_t set
, oldset
;
10560 case TARGET_SIG_BLOCK
:
10563 case TARGET_SIG_UNBLOCK
:
10566 case TARGET_SIG_SETMASK
:
10570 ret
= -TARGET_EINVAL
;
10574 target_to_host_old_sigset(&set
, &mask
);
10575 ret
= do_sigprocmask(how
, &set
, &oldset
);
10577 host_to_target_old_sigset(&mask
, &oldset
);
10584 #ifdef TARGET_NR_getgid32
10585 case TARGET_NR_getgid32
:
10586 ret
= get_errno(getgid());
10589 #ifdef TARGET_NR_geteuid32
10590 case TARGET_NR_geteuid32
:
10591 ret
= get_errno(geteuid());
10594 #ifdef TARGET_NR_getegid32
10595 case TARGET_NR_getegid32
:
10596 ret
= get_errno(getegid());
10599 #ifdef TARGET_NR_setreuid32
10600 case TARGET_NR_setreuid32
:
10601 ret
= get_errno(setreuid(arg1
, arg2
));
10604 #ifdef TARGET_NR_setregid32
10605 case TARGET_NR_setregid32
:
10606 ret
= get_errno(setregid(arg1
, arg2
));
10609 #ifdef TARGET_NR_getgroups32
10610 case TARGET_NR_getgroups32
:
10612 int gidsetsize
= arg1
;
10613 uint32_t *target_grouplist
;
10617 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10618 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10619 if (gidsetsize
== 0)
10621 if (!is_error(ret
)) {
10622 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10623 if (!target_grouplist
) {
10624 ret
= -TARGET_EFAULT
;
10627 for(i
= 0;i
< ret
; i
++)
10628 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10629 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10634 #ifdef TARGET_NR_setgroups32
10635 case TARGET_NR_setgroups32
:
10637 int gidsetsize
= arg1
;
10638 uint32_t *target_grouplist
;
10642 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10643 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10644 if (!target_grouplist
) {
10645 ret
= -TARGET_EFAULT
;
10648 for(i
= 0;i
< gidsetsize
; i
++)
10649 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10650 unlock_user(target_grouplist
, arg2
, 0);
10651 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10655 #ifdef TARGET_NR_fchown32
10656 case TARGET_NR_fchown32
:
10657 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10660 #ifdef TARGET_NR_setresuid32
10661 case TARGET_NR_setresuid32
:
10662 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10665 #ifdef TARGET_NR_getresuid32
10666 case TARGET_NR_getresuid32
:
10668 uid_t ruid
, euid
, suid
;
10669 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10670 if (!is_error(ret
)) {
10671 if (put_user_u32(ruid
, arg1
)
10672 || put_user_u32(euid
, arg2
)
10673 || put_user_u32(suid
, arg3
))
10679 #ifdef TARGET_NR_setresgid32
10680 case TARGET_NR_setresgid32
:
10681 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10684 #ifdef TARGET_NR_getresgid32
10685 case TARGET_NR_getresgid32
:
10687 gid_t rgid
, egid
, sgid
;
10688 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10689 if (!is_error(ret
)) {
10690 if (put_user_u32(rgid
, arg1
)
10691 || put_user_u32(egid
, arg2
)
10692 || put_user_u32(sgid
, arg3
))
10698 #ifdef TARGET_NR_chown32
10699 case TARGET_NR_chown32
:
10700 if (!(p
= lock_user_string(arg1
)))
10702 ret
= get_errno(chown(p
, arg2
, arg3
));
10703 unlock_user(p
, arg1
, 0);
10706 #ifdef TARGET_NR_setuid32
10707 case TARGET_NR_setuid32
:
10708 ret
= get_errno(sys_setuid(arg1
));
10711 #ifdef TARGET_NR_setgid32
10712 case TARGET_NR_setgid32
:
10713 ret
= get_errno(sys_setgid(arg1
));
10716 #ifdef TARGET_NR_setfsuid32
10717 case TARGET_NR_setfsuid32
:
10718 ret
= get_errno(setfsuid(arg1
));
10721 #ifdef TARGET_NR_setfsgid32
10722 case TARGET_NR_setfsgid32
:
10723 ret
= get_errno(setfsgid(arg1
));
10727 case TARGET_NR_pivot_root
:
10728 goto unimplemented
;
10729 #ifdef TARGET_NR_mincore
10730 case TARGET_NR_mincore
:
10733 ret
= -TARGET_EFAULT
;
10734 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10736 if (!(p
= lock_user_string(arg3
)))
10738 ret
= get_errno(mincore(a
, arg2
, p
));
10739 unlock_user(p
, arg3
, ret
);
10741 unlock_user(a
, arg1
, 0);
10745 #ifdef TARGET_NR_arm_fadvise64_64
10746 case TARGET_NR_arm_fadvise64_64
:
10747 /* arm_fadvise64_64 looks like fadvise64_64 but
10748 * with different argument order: fd, advice, offset, len
10749 * rather than the usual fd, offset, len, advice.
10750 * Note that offset and len are both 64-bit so appear as
10751 * pairs of 32-bit registers.
10753 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10754 target_offset64(arg5
, arg6
), arg2
);
10755 ret
= -host_to_target_errno(ret
);
10759 #if TARGET_ABI_BITS == 32
10761 #ifdef TARGET_NR_fadvise64_64
10762 case TARGET_NR_fadvise64_64
:
10763 /* 6 args: fd, offset (high, low), len (high, low), advice */
10764 if (regpairs_aligned(cpu_env
)) {
10765 /* offset is in (3,4), len in (5,6) and advice in 7 */
10772 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10773 target_offset64(arg2
, arg3
),
10774 target_offset64(arg4
, arg5
),
10779 #ifdef TARGET_NR_fadvise64
10780 case TARGET_NR_fadvise64
:
10781 /* 5 args: fd, offset (high, low), len, advice */
10782 if (regpairs_aligned(cpu_env
)) {
10783 /* offset is in (3,4), len in 5 and advice in 6 */
10789 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10790 target_offset64(arg2
, arg3
),
10795 #else /* not a 32-bit ABI */
10796 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10797 #ifdef TARGET_NR_fadvise64_64
10798 case TARGET_NR_fadvise64_64
:
10800 #ifdef TARGET_NR_fadvise64
10801 case TARGET_NR_fadvise64
:
10803 #ifdef TARGET_S390X
10805 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10806 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10807 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10808 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10812 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10815 #endif /* end of 64-bit ABI fadvise handling */
10817 #ifdef TARGET_NR_madvise
10818 case TARGET_NR_madvise
:
10819 /* A straight passthrough may not be safe because qemu sometimes
10820 turns private file-backed mappings into anonymous mappings.
10821 This will break MADV_DONTNEED.
10822 This is a hint, so ignoring and returning success is ok. */
10823 ret
= get_errno(0);
10826 #if TARGET_ABI_BITS == 32
10827 case TARGET_NR_fcntl64
:
10831 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10832 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10835 if (((CPUARMState
*)cpu_env
)->eabi
) {
10836 copyfrom
= copy_from_user_eabi_flock64
;
10837 copyto
= copy_to_user_eabi_flock64
;
10841 cmd
= target_to_host_fcntl_cmd(arg2
);
10842 if (cmd
== -TARGET_EINVAL
) {
10848 case TARGET_F_GETLK64
:
10849 ret
= copyfrom(&fl
, arg3
);
10853 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10855 ret
= copyto(arg3
, &fl
);
10859 case TARGET_F_SETLK64
:
10860 case TARGET_F_SETLKW64
:
10861 ret
= copyfrom(&fl
, arg3
);
10865 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10868 ret
= do_fcntl(arg1
, arg2
, arg3
);
10874 #ifdef TARGET_NR_cacheflush
10875 case TARGET_NR_cacheflush
:
10876 /* self-modifying code is handled automatically, so nothing needed */
10880 #ifdef TARGET_NR_security
10881 case TARGET_NR_security
:
10882 goto unimplemented
;
10884 #ifdef TARGET_NR_getpagesize
10885 case TARGET_NR_getpagesize
:
10886 ret
= TARGET_PAGE_SIZE
;
10889 case TARGET_NR_gettid
:
10890 ret
= get_errno(gettid());
10892 #ifdef TARGET_NR_readahead
10893 case TARGET_NR_readahead
:
10894 #if TARGET_ABI_BITS == 32
10895 if (regpairs_aligned(cpu_env
)) {
10900 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10902 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10907 #ifdef TARGET_NR_setxattr
10908 case TARGET_NR_listxattr
:
10909 case TARGET_NR_llistxattr
:
10913 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10915 ret
= -TARGET_EFAULT
;
10919 p
= lock_user_string(arg1
);
10921 if (num
== TARGET_NR_listxattr
) {
10922 ret
= get_errno(listxattr(p
, b
, arg3
));
10924 ret
= get_errno(llistxattr(p
, b
, arg3
));
10927 ret
= -TARGET_EFAULT
;
10929 unlock_user(p
, arg1
, 0);
10930 unlock_user(b
, arg2
, arg3
);
10933 case TARGET_NR_flistxattr
:
10937 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10939 ret
= -TARGET_EFAULT
;
10943 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10944 unlock_user(b
, arg2
, arg3
);
10947 case TARGET_NR_setxattr
:
10948 case TARGET_NR_lsetxattr
:
10950 void *p
, *n
, *v
= 0;
10952 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10954 ret
= -TARGET_EFAULT
;
10958 p
= lock_user_string(arg1
);
10959 n
= lock_user_string(arg2
);
10961 if (num
== TARGET_NR_setxattr
) {
10962 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10964 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10967 ret
= -TARGET_EFAULT
;
10969 unlock_user(p
, arg1
, 0);
10970 unlock_user(n
, arg2
, 0);
10971 unlock_user(v
, arg3
, 0);
10974 case TARGET_NR_fsetxattr
:
10978 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10980 ret
= -TARGET_EFAULT
;
10984 n
= lock_user_string(arg2
);
10986 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10988 ret
= -TARGET_EFAULT
;
10990 unlock_user(n
, arg2
, 0);
10991 unlock_user(v
, arg3
, 0);
10994 case TARGET_NR_getxattr
:
10995 case TARGET_NR_lgetxattr
:
10997 void *p
, *n
, *v
= 0;
10999 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11001 ret
= -TARGET_EFAULT
;
11005 p
= lock_user_string(arg1
);
11006 n
= lock_user_string(arg2
);
11008 if (num
== TARGET_NR_getxattr
) {
11009 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11011 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11014 ret
= -TARGET_EFAULT
;
11016 unlock_user(p
, arg1
, 0);
11017 unlock_user(n
, arg2
, 0);
11018 unlock_user(v
, arg3
, arg4
);
11021 case TARGET_NR_fgetxattr
:
11025 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11027 ret
= -TARGET_EFAULT
;
11031 n
= lock_user_string(arg2
);
11033 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11035 ret
= -TARGET_EFAULT
;
11037 unlock_user(n
, arg2
, 0);
11038 unlock_user(v
, arg3
, arg4
);
11041 case TARGET_NR_removexattr
:
11042 case TARGET_NR_lremovexattr
:
11045 p
= lock_user_string(arg1
);
11046 n
= lock_user_string(arg2
);
11048 if (num
== TARGET_NR_removexattr
) {
11049 ret
= get_errno(removexattr(p
, n
));
11051 ret
= get_errno(lremovexattr(p
, n
));
11054 ret
= -TARGET_EFAULT
;
11056 unlock_user(p
, arg1
, 0);
11057 unlock_user(n
, arg2
, 0);
11060 case TARGET_NR_fremovexattr
:
11063 n
= lock_user_string(arg2
);
11065 ret
= get_errno(fremovexattr(arg1
, n
));
11067 ret
= -TARGET_EFAULT
;
11069 unlock_user(n
, arg2
, 0);
11073 #endif /* CONFIG_ATTR */
11074 #ifdef TARGET_NR_set_thread_area
11075 case TARGET_NR_set_thread_area
:
11076 #if defined(TARGET_MIPS)
11077 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11080 #elif defined(TARGET_CRIS)
11082 ret
= -TARGET_EINVAL
;
11084 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11088 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11089 ret
= do_set_thread_area(cpu_env
, arg1
);
11091 #elif defined(TARGET_M68K)
11093 TaskState
*ts
= cpu
->opaque
;
11094 ts
->tp_value
= arg1
;
11099 goto unimplemented_nowarn
;
11102 #ifdef TARGET_NR_get_thread_area
11103 case TARGET_NR_get_thread_area
:
11104 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11105 ret
= do_get_thread_area(cpu_env
, arg1
);
11107 #elif defined(TARGET_M68K)
11109 TaskState
*ts
= cpu
->opaque
;
11110 ret
= ts
->tp_value
;
11114 goto unimplemented_nowarn
;
11117 #ifdef TARGET_NR_getdomainname
11118 case TARGET_NR_getdomainname
:
11119 goto unimplemented_nowarn
;
11122 #ifdef TARGET_NR_clock_gettime
11123 case TARGET_NR_clock_gettime
:
11125 struct timespec ts
;
11126 ret
= get_errno(clock_gettime(arg1
, &ts
));
11127 if (!is_error(ret
)) {
11128 host_to_target_timespec(arg2
, &ts
);
11133 #ifdef TARGET_NR_clock_getres
11134 case TARGET_NR_clock_getres
:
11136 struct timespec ts
;
11137 ret
= get_errno(clock_getres(arg1
, &ts
));
11138 if (!is_error(ret
)) {
11139 host_to_target_timespec(arg2
, &ts
);
11144 #ifdef TARGET_NR_clock_nanosleep
11145 case TARGET_NR_clock_nanosleep
:
11147 struct timespec ts
;
11148 target_to_host_timespec(&ts
, arg3
);
11149 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11150 &ts
, arg4
? &ts
: NULL
));
11152 host_to_target_timespec(arg4
, &ts
);
11154 #if defined(TARGET_PPC)
11155 /* clock_nanosleep is odd in that it returns positive errno values.
11156 * On PPC, CR0 bit 3 should be set in such a situation. */
11157 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11158 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11165 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11166 case TARGET_NR_set_tid_address
:
11167 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11171 case TARGET_NR_tkill
:
11172 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11175 case TARGET_NR_tgkill
:
11176 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11177 target_to_host_signal(arg3
)));
11180 #ifdef TARGET_NR_set_robust_list
11181 case TARGET_NR_set_robust_list
:
11182 case TARGET_NR_get_robust_list
:
11183 /* The ABI for supporting robust futexes has userspace pass
11184 * the kernel a pointer to a linked list which is updated by
11185 * userspace after the syscall; the list is walked by the kernel
11186 * when the thread exits. Since the linked list in QEMU guest
11187 * memory isn't a valid linked list for the host and we have
11188 * no way to reliably intercept the thread-death event, we can't
11189 * support these. Silently return ENOSYS so that guest userspace
11190 * falls back to a non-robust futex implementation (which should
11191 * be OK except in the corner case of the guest crashing while
11192 * holding a mutex that is shared with another process via
11195 goto unimplemented_nowarn
;
11198 #if defined(TARGET_NR_utimensat)
11199 case TARGET_NR_utimensat
:
11201 struct timespec
*tsp
, ts
[2];
11205 target_to_host_timespec(ts
, arg3
);
11206 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11210 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11212 if (!(p
= lock_user_string(arg2
))) {
11213 ret
= -TARGET_EFAULT
;
11216 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11217 unlock_user(p
, arg2
, 0);
11222 case TARGET_NR_futex
:
11223 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11225 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11226 case TARGET_NR_inotify_init
:
11227 ret
= get_errno(sys_inotify_init());
11230 #ifdef CONFIG_INOTIFY1
11231 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11232 case TARGET_NR_inotify_init1
:
11233 ret
= get_errno(sys_inotify_init1(arg1
));
11237 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11238 case TARGET_NR_inotify_add_watch
:
11239 p
= lock_user_string(arg2
);
11240 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11241 unlock_user(p
, arg2
, 0);
11244 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11245 case TARGET_NR_inotify_rm_watch
:
11246 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11250 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11251 case TARGET_NR_mq_open
:
11253 struct mq_attr posix_mq_attr
, *attrp
;
11255 p
= lock_user_string(arg1
- 1);
11257 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11258 attrp
= &posix_mq_attr
;
11262 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11263 unlock_user (p
, arg1
, 0);
11267 case TARGET_NR_mq_unlink
:
11268 p
= lock_user_string(arg1
- 1);
11269 ret
= get_errno(mq_unlink(p
));
11270 unlock_user (p
, arg1
, 0);
11273 case TARGET_NR_mq_timedsend
:
11275 struct timespec ts
;
11277 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11279 target_to_host_timespec(&ts
, arg5
);
11280 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11281 host_to_target_timespec(arg5
, &ts
);
11283 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11285 unlock_user (p
, arg2
, arg3
);
11289 case TARGET_NR_mq_timedreceive
:
11291 struct timespec ts
;
11294 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11296 target_to_host_timespec(&ts
, arg5
);
11297 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11299 host_to_target_timespec(arg5
, &ts
);
11301 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11304 unlock_user (p
, arg2
, arg3
);
11306 put_user_u32(prio
, arg4
);
11310 /* Not implemented for now... */
11311 /* case TARGET_NR_mq_notify: */
11314 case TARGET_NR_mq_getsetattr
:
11316 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11319 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11320 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11323 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11324 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11331 #ifdef CONFIG_SPLICE
11332 #ifdef TARGET_NR_tee
11333 case TARGET_NR_tee
:
11335 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11339 #ifdef TARGET_NR_splice
11340 case TARGET_NR_splice
:
11342 loff_t loff_in
, loff_out
;
11343 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11345 if (get_user_u64(loff_in
, arg2
)) {
11348 ploff_in
= &loff_in
;
11351 if (get_user_u64(loff_out
, arg4
)) {
11354 ploff_out
= &loff_out
;
11356 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11358 if (put_user_u64(loff_in
, arg2
)) {
11363 if (put_user_u64(loff_out
, arg4
)) {
11370 #ifdef TARGET_NR_vmsplice
11371 case TARGET_NR_vmsplice
:
11373 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11375 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11376 unlock_iovec(vec
, arg2
, arg3
, 0);
11378 ret
= -host_to_target_errno(errno
);
11383 #endif /* CONFIG_SPLICE */
11384 #ifdef CONFIG_EVENTFD
11385 #if defined(TARGET_NR_eventfd)
11386 case TARGET_NR_eventfd
:
11387 ret
= get_errno(eventfd(arg1
, 0));
11388 fd_trans_unregister(ret
);
11391 #if defined(TARGET_NR_eventfd2)
11392 case TARGET_NR_eventfd2
:
11394 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11395 if (arg2
& TARGET_O_NONBLOCK
) {
11396 host_flags
|= O_NONBLOCK
;
11398 if (arg2
& TARGET_O_CLOEXEC
) {
11399 host_flags
|= O_CLOEXEC
;
11401 ret
= get_errno(eventfd(arg1
, host_flags
));
11402 fd_trans_unregister(ret
);
11406 #endif /* CONFIG_EVENTFD */
11407 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11408 case TARGET_NR_fallocate
:
11409 #if TARGET_ABI_BITS == 32
11410 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11411 target_offset64(arg5
, arg6
)));
11413 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11417 #if defined(CONFIG_SYNC_FILE_RANGE)
11418 #if defined(TARGET_NR_sync_file_range)
11419 case TARGET_NR_sync_file_range
:
11420 #if TARGET_ABI_BITS == 32
11421 #if defined(TARGET_MIPS)
11422 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11423 target_offset64(arg5
, arg6
), arg7
));
11425 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11426 target_offset64(arg4
, arg5
), arg6
));
11427 #endif /* !TARGET_MIPS */
11429 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11433 #if defined(TARGET_NR_sync_file_range2)
11434 case TARGET_NR_sync_file_range2
:
11435 /* This is like sync_file_range but the arguments are reordered */
11436 #if TARGET_ABI_BITS == 32
11437 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11438 target_offset64(arg5
, arg6
), arg2
));
11440 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11445 #if defined(TARGET_NR_signalfd4)
11446 case TARGET_NR_signalfd4
:
11447 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11450 #if defined(TARGET_NR_signalfd)
11451 case TARGET_NR_signalfd
:
11452 ret
= do_signalfd4(arg1
, arg2
, 0);
11455 #if defined(CONFIG_EPOLL)
11456 #if defined(TARGET_NR_epoll_create)
11457 case TARGET_NR_epoll_create
:
11458 ret
= get_errno(epoll_create(arg1
));
11461 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11462 case TARGET_NR_epoll_create1
:
11463 ret
= get_errno(epoll_create1(arg1
));
11466 #if defined(TARGET_NR_epoll_ctl)
11467 case TARGET_NR_epoll_ctl
:
11469 struct epoll_event ep
;
11470 struct epoll_event
*epp
= 0;
11472 struct target_epoll_event
*target_ep
;
11473 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11476 ep
.events
= tswap32(target_ep
->events
);
11477 /* The epoll_data_t union is just opaque data to the kernel,
11478 * so we transfer all 64 bits across and need not worry what
11479 * actual data type it is.
11481 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11482 unlock_user_struct(target_ep
, arg4
, 0);
11485 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11490 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11491 #if defined(TARGET_NR_epoll_wait)
11492 case TARGET_NR_epoll_wait
:
11494 #if defined(TARGET_NR_epoll_pwait)
11495 case TARGET_NR_epoll_pwait
:
11498 struct target_epoll_event
*target_ep
;
11499 struct epoll_event
*ep
;
11501 int maxevents
= arg3
;
11502 int timeout
= arg4
;
11504 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11505 maxevents
* sizeof(struct target_epoll_event
), 1);
11510 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11513 #if defined(TARGET_NR_epoll_pwait)
11514 case TARGET_NR_epoll_pwait
:
11516 target_sigset_t
*target_set
;
11517 sigset_t _set
, *set
= &_set
;
11520 if (arg6
!= sizeof(target_sigset_t
)) {
11521 ret
= -TARGET_EINVAL
;
11525 target_set
= lock_user(VERIFY_READ
, arg5
,
11526 sizeof(target_sigset_t
), 1);
11528 unlock_user(target_ep
, arg2
, 0);
11531 target_to_host_sigset(set
, target_set
);
11532 unlock_user(target_set
, arg5
, 0);
11537 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11538 set
, SIGSET_T_SIZE
));
11542 #if defined(TARGET_NR_epoll_wait)
11543 case TARGET_NR_epoll_wait
:
11544 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11549 ret
= -TARGET_ENOSYS
;
11551 if (!is_error(ret
)) {
11553 for (i
= 0; i
< ret
; i
++) {
11554 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11555 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11558 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11563 #ifdef TARGET_NR_prlimit64
11564 case TARGET_NR_prlimit64
:
11566 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11567 struct target_rlimit64
*target_rnew
, *target_rold
;
11568 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11569 int resource
= target_to_host_resource(arg2
);
11571 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11574 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11575 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11576 unlock_user_struct(target_rnew
, arg3
, 0);
11580 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11581 if (!is_error(ret
) && arg4
) {
11582 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11585 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11586 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11587 unlock_user_struct(target_rold
, arg4
, 1);
11592 #ifdef TARGET_NR_gethostname
11593 case TARGET_NR_gethostname
:
11595 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11597 ret
= get_errno(gethostname(name
, arg2
));
11598 unlock_user(name
, arg1
, arg2
);
11600 ret
= -TARGET_EFAULT
;
11605 #ifdef TARGET_NR_atomic_cmpxchg_32
11606 case TARGET_NR_atomic_cmpxchg_32
:
11608 /* should use start_exclusive from main.c */
11609 abi_ulong mem_value
;
11610 if (get_user_u32(mem_value
, arg6
)) {
11611 target_siginfo_t info
;
11612 info
.si_signo
= SIGSEGV
;
11614 info
.si_code
= TARGET_SEGV_MAPERR
;
11615 info
._sifields
._sigfault
._addr
= arg6
;
11616 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11620 if (mem_value
== arg2
)
11621 put_user_u32(arg1
, arg6
);
11626 #ifdef TARGET_NR_atomic_barrier
11627 case TARGET_NR_atomic_barrier
:
11629 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11635 #ifdef TARGET_NR_timer_create
11636 case TARGET_NR_timer_create
:
11638 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11640 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11643 int timer_index
= next_free_host_timer();
11645 if (timer_index
< 0) {
11646 ret
= -TARGET_EAGAIN
;
11648 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11651 phost_sevp
= &host_sevp
;
11652 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11658 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11662 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11671 #ifdef TARGET_NR_timer_settime
11672 case TARGET_NR_timer_settime
:
11674 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11675 * struct itimerspec * old_value */
11676 target_timer_t timerid
= get_timer_id(arg1
);
11680 } else if (arg3
== 0) {
11681 ret
= -TARGET_EINVAL
;
11683 timer_t htimer
= g_posix_timers
[timerid
];
11684 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11686 target_to_host_itimerspec(&hspec_new
, arg3
);
11688 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11689 host_to_target_itimerspec(arg2
, &hspec_old
);
11695 #ifdef TARGET_NR_timer_gettime
11696 case TARGET_NR_timer_gettime
:
11698 /* args: timer_t timerid, struct itimerspec *curr_value */
11699 target_timer_t timerid
= get_timer_id(arg1
);
11703 } else if (!arg2
) {
11704 ret
= -TARGET_EFAULT
;
11706 timer_t htimer
= g_posix_timers
[timerid
];
11707 struct itimerspec hspec
;
11708 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11710 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11711 ret
= -TARGET_EFAULT
;
11718 #ifdef TARGET_NR_timer_getoverrun
11719 case TARGET_NR_timer_getoverrun
:
11721 /* args: timer_t timerid */
11722 target_timer_t timerid
= get_timer_id(arg1
);
11727 timer_t htimer
= g_posix_timers
[timerid
];
11728 ret
= get_errno(timer_getoverrun(htimer
));
11730 fd_trans_unregister(ret
);
11735 #ifdef TARGET_NR_timer_delete
11736 case TARGET_NR_timer_delete
:
11738 /* args: timer_t timerid */
11739 target_timer_t timerid
= get_timer_id(arg1
);
11744 timer_t htimer
= g_posix_timers
[timerid
];
11745 ret
= get_errno(timer_delete(htimer
));
11746 g_posix_timers
[timerid
] = 0;
11752 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11753 case TARGET_NR_timerfd_create
:
11754 ret
= get_errno(timerfd_create(arg1
,
11755 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11759 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11760 case TARGET_NR_timerfd_gettime
:
11762 struct itimerspec its_curr
;
11764 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11766 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11773 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11774 case TARGET_NR_timerfd_settime
:
11776 struct itimerspec its_new
, its_old
, *p_new
;
11779 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11787 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11789 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11796 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11797 case TARGET_NR_ioprio_get
:
11798 ret
= get_errno(ioprio_get(arg1
, arg2
));
11802 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11803 case TARGET_NR_ioprio_set
:
11804 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11808 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11809 case TARGET_NR_setns
:
11810 ret
= get_errno(setns(arg1
, arg2
));
11813 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11814 case TARGET_NR_unshare
:
11815 ret
= get_errno(unshare(arg1
));
11821 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11822 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11823 unimplemented_nowarn
:
11825 ret
= -TARGET_ENOSYS
;
11830 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11833 print_syscall_ret(num
, ret
);
11834 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11837 ret
= -TARGET_EFAULT
;