4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid
)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
223 loff_t
*, res
, uint
, wh
);
225 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
226 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group
,int,error_code
)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address
,int *,tidptr
)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
235 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
239 unsigned long *, user_mask_ptr
);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
245 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
246 struct __user_cap_data_struct
*, data
);
247 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
248 struct __user_cap_data_struct
*, data
);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get
, int, which
, int, who
)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
294 QEMU_IFLA_BR_FORWARD_DELAY
,
295 QEMU_IFLA_BR_HELLO_TIME
,
296 QEMU_IFLA_BR_MAX_AGE
,
297 QEMU_IFLA_BR_AGEING_TIME
,
298 QEMU_IFLA_BR_STP_STATE
,
299 QEMU_IFLA_BR_PRIORITY
,
300 QEMU_IFLA_BR_VLAN_FILTERING
,
301 QEMU_IFLA_BR_VLAN_PROTOCOL
,
302 QEMU_IFLA_BR_GROUP_FWD_MASK
,
303 QEMU_IFLA_BR_ROOT_ID
,
304 QEMU_IFLA_BR_BRIDGE_ID
,
305 QEMU_IFLA_BR_ROOT_PORT
,
306 QEMU_IFLA_BR_ROOT_PATH_COST
,
307 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
308 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
309 QEMU_IFLA_BR_HELLO_TIMER
,
310 QEMU_IFLA_BR_TCN_TIMER
,
311 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
312 QEMU_IFLA_BR_GC_TIMER
,
313 QEMU_IFLA_BR_GROUP_ADDR
,
314 QEMU_IFLA_BR_FDB_FLUSH
,
315 QEMU_IFLA_BR_MCAST_ROUTER
,
316 QEMU_IFLA_BR_MCAST_SNOOPING
,
317 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
318 QEMU_IFLA_BR_MCAST_QUERIER
,
319 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
320 QEMU_IFLA_BR_MCAST_HASH_MAX
,
321 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
322 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
323 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
324 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
325 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
326 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
327 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
328 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
329 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
330 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
331 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
332 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
334 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
335 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
359 QEMU_IFLA_NET_NS_PID
,
362 QEMU_IFLA_VFINFO_LIST
,
370 QEMU_IFLA_PROMISCUITY
,
371 QEMU_IFLA_NUM_TX_QUEUES
,
372 QEMU_IFLA_NUM_RX_QUEUES
,
374 QEMU_IFLA_PHYS_PORT_ID
,
375 QEMU_IFLA_CARRIER_CHANGES
,
376 QEMU_IFLA_PHYS_SWITCH_ID
,
377 QEMU_IFLA_LINK_NETNSID
,
378 QEMU_IFLA_PHYS_PORT_NAME
,
379 QEMU_IFLA_PROTO_DOWN
,
380 QEMU_IFLA_GSO_MAX_SEGS
,
381 QEMU_IFLA_GSO_MAX_SIZE
,
388 QEMU_IFLA_BRPORT_UNSPEC
,
389 QEMU_IFLA_BRPORT_STATE
,
390 QEMU_IFLA_BRPORT_PRIORITY
,
391 QEMU_IFLA_BRPORT_COST
,
392 QEMU_IFLA_BRPORT_MODE
,
393 QEMU_IFLA_BRPORT_GUARD
,
394 QEMU_IFLA_BRPORT_PROTECT
,
395 QEMU_IFLA_BRPORT_FAST_LEAVE
,
396 QEMU_IFLA_BRPORT_LEARNING
,
397 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
398 QEMU_IFLA_BRPORT_PROXYARP
,
399 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
400 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
401 QEMU_IFLA_BRPORT_ROOT_ID
,
402 QEMU_IFLA_BRPORT_BRIDGE_ID
,
403 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
404 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
407 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
408 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
409 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
410 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
411 QEMU_IFLA_BRPORT_HOLD_TIMER
,
412 QEMU_IFLA_BRPORT_FLUSH
,
413 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
414 QEMU_IFLA_BRPORT_PAD
,
415 QEMU___IFLA_BRPORT_MAX
419 QEMU_IFLA_INFO_UNSPEC
,
422 QEMU_IFLA_INFO_XSTATS
,
423 QEMU_IFLA_INFO_SLAVE_KIND
,
424 QEMU_IFLA_INFO_SLAVE_DATA
,
425 QEMU___IFLA_INFO_MAX
,
429 QEMU_IFLA_INET_UNSPEC
,
431 QEMU___IFLA_INET_MAX
,
435 QEMU_IFLA_INET6_UNSPEC
,
436 QEMU_IFLA_INET6_FLAGS
,
437 QEMU_IFLA_INET6_CONF
,
438 QEMU_IFLA_INET6_STATS
,
439 QEMU_IFLA_INET6_MCAST
,
440 QEMU_IFLA_INET6_CACHEINFO
,
441 QEMU_IFLA_INET6_ICMP6STATS
,
442 QEMU_IFLA_INET6_TOKEN
,
443 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
444 QEMU___IFLA_INET6_MAX
447 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
448 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
449 typedef struct TargetFdTrans
{
450 TargetFdDataFunc host_to_target_data
;
451 TargetFdDataFunc target_to_host_data
;
452 TargetFdAddrFunc target_to_host_addr
;
455 static TargetFdTrans
**target_fd_trans
;
457 static unsigned int target_fd_max
;
459 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
461 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
462 return target_fd_trans
[fd
]->target_to_host_data
;
467 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
469 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
470 return target_fd_trans
[fd
]->host_to_target_data
;
475 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
477 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
478 return target_fd_trans
[fd
]->target_to_host_addr
;
483 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
487 if (fd
>= target_fd_max
) {
488 oldmax
= target_fd_max
;
489 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
490 target_fd_trans
= g_renew(TargetFdTrans
*,
491 target_fd_trans
, target_fd_max
);
492 memset((void *)(target_fd_trans
+ oldmax
), 0,
493 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
495 target_fd_trans
[fd
] = trans
;
498 static void fd_trans_unregister(int fd
)
500 if (fd
>= 0 && fd
< target_fd_max
) {
501 target_fd_trans
[fd
] = NULL
;
505 static void fd_trans_dup(int oldfd
, int newfd
)
507 fd_trans_unregister(newfd
);
508 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
509 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper following the getcwd syscall convention: on success
 * return the path length including the trailing NUL; on failure return
 * -1 (getcwd() itself has set errno).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
522 #ifdef TARGET_NR_utimensat
523 #if defined(__NR_utimensat)
524 #define __NR_sys_utimensat __NR_utimensat
525 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
526 const struct timespec
*,tsp
,int,flags
)
528 static int sys_utimensat(int dirfd
, const char *pathname
,
529 const struct timespec times
[2], int flags
)
535 #endif /* TARGET_NR_utimensat */
537 #ifdef CONFIG_INOTIFY
538 #include <sys/inotify.h>
540 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the libc inotify_init() so the syscall emulation
 * can refer to a uniform sys_* name.
 */
static int sys_inotify_init(void)
{
    return (inotify_init());
}
546 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over the libc inotify_add_watch(); returns the watch
 * descriptor or -1 with errno set.
 */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
552 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over the libc inotify_rm_watch(); returns 0 on success
 * or -1 with errno set.
 */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
558 #ifdef CONFIG_INOTIFY1
559 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over the libc inotify_init1() (flags variant). */
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
567 /* Userspace can usually survive runtime without inotify */
568 #undef TARGET_NR_inotify_init
569 #undef TARGET_NR_inotify_init1
570 #undef TARGET_NR_inotify_add_watch
571 #undef TARGET_NR_inotify_rm_watch
572 #endif /* CONFIG_INOTIFY */
574 #if defined(TARGET_NR_prlimit64)
575 #ifndef __NR_prlimit64
576 # define __NR_prlimit64 -1
578 #define __NR_sys_prlimit64 __NR_prlimit64
579 /* The glibc rlimit structure may not be that used by the underlying syscall */
580 struct host_rlimit64
{
584 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
585 const struct host_rlimit64
*, new_limit
,
586 struct host_rlimit64
*, old_limit
)
590 #if defined(TARGET_NR_timer_create)
591 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
592 static timer_t g_posix_timers
[32] = { 0, } ;
594 static inline int next_free_host_timer(void)
597 /* FIXME: Does finding the next free slot require a lock? */
598 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
599 if (g_posix_timers
[k
] == 0) {
600 g_posix_timers
[k
] = (timer_t
) 1;
608 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
610 static inline int regpairs_aligned(void *cpu_env
) {
611 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
613 #elif defined(TARGET_MIPS)
614 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
615 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
616 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
617 * of registers which translates to the same as ARM/MIPS, because we start with
619 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
621 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
624 #define ERRNO_TABLE_SIZE 1200
626 /* target_to_host_errno_table[] is initialized from
627 * host_to_target_errno_table[] in syscall_init(). */
628 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
632 * This list is the union of errno values overridden in asm-<arch>/errno.h
633 * minus the errnos that are not actually generic to all archs.
635 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
636 [EAGAIN
] = TARGET_EAGAIN
,
637 [EIDRM
] = TARGET_EIDRM
,
638 [ECHRNG
] = TARGET_ECHRNG
,
639 [EL2NSYNC
] = TARGET_EL2NSYNC
,
640 [EL3HLT
] = TARGET_EL3HLT
,
641 [EL3RST
] = TARGET_EL3RST
,
642 [ELNRNG
] = TARGET_ELNRNG
,
643 [EUNATCH
] = TARGET_EUNATCH
,
644 [ENOCSI
] = TARGET_ENOCSI
,
645 [EL2HLT
] = TARGET_EL2HLT
,
646 [EDEADLK
] = TARGET_EDEADLK
,
647 [ENOLCK
] = TARGET_ENOLCK
,
648 [EBADE
] = TARGET_EBADE
,
649 [EBADR
] = TARGET_EBADR
,
650 [EXFULL
] = TARGET_EXFULL
,
651 [ENOANO
] = TARGET_ENOANO
,
652 [EBADRQC
] = TARGET_EBADRQC
,
653 [EBADSLT
] = TARGET_EBADSLT
,
654 [EBFONT
] = TARGET_EBFONT
,
655 [ENOSTR
] = TARGET_ENOSTR
,
656 [ENODATA
] = TARGET_ENODATA
,
657 [ETIME
] = TARGET_ETIME
,
658 [ENOSR
] = TARGET_ENOSR
,
659 [ENONET
] = TARGET_ENONET
,
660 [ENOPKG
] = TARGET_ENOPKG
,
661 [EREMOTE
] = TARGET_EREMOTE
,
662 [ENOLINK
] = TARGET_ENOLINK
,
663 [EADV
] = TARGET_EADV
,
664 [ESRMNT
] = TARGET_ESRMNT
,
665 [ECOMM
] = TARGET_ECOMM
,
666 [EPROTO
] = TARGET_EPROTO
,
667 [EDOTDOT
] = TARGET_EDOTDOT
,
668 [EMULTIHOP
] = TARGET_EMULTIHOP
,
669 [EBADMSG
] = TARGET_EBADMSG
,
670 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
671 [EOVERFLOW
] = TARGET_EOVERFLOW
,
672 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
673 [EBADFD
] = TARGET_EBADFD
,
674 [EREMCHG
] = TARGET_EREMCHG
,
675 [ELIBACC
] = TARGET_ELIBACC
,
676 [ELIBBAD
] = TARGET_ELIBBAD
,
677 [ELIBSCN
] = TARGET_ELIBSCN
,
678 [ELIBMAX
] = TARGET_ELIBMAX
,
679 [ELIBEXEC
] = TARGET_ELIBEXEC
,
680 [EILSEQ
] = TARGET_EILSEQ
,
681 [ENOSYS
] = TARGET_ENOSYS
,
682 [ELOOP
] = TARGET_ELOOP
,
683 [ERESTART
] = TARGET_ERESTART
,
684 [ESTRPIPE
] = TARGET_ESTRPIPE
,
685 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
686 [EUSERS
] = TARGET_EUSERS
,
687 [ENOTSOCK
] = TARGET_ENOTSOCK
,
688 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
689 [EMSGSIZE
] = TARGET_EMSGSIZE
,
690 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
691 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
692 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
693 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
694 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
695 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
696 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
697 [EADDRINUSE
] = TARGET_EADDRINUSE
,
698 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
699 [ENETDOWN
] = TARGET_ENETDOWN
,
700 [ENETUNREACH
] = TARGET_ENETUNREACH
,
701 [ENETRESET
] = TARGET_ENETRESET
,
702 [ECONNABORTED
] = TARGET_ECONNABORTED
,
703 [ECONNRESET
] = TARGET_ECONNRESET
,
704 [ENOBUFS
] = TARGET_ENOBUFS
,
705 [EISCONN
] = TARGET_EISCONN
,
706 [ENOTCONN
] = TARGET_ENOTCONN
,
707 [EUCLEAN
] = TARGET_EUCLEAN
,
708 [ENOTNAM
] = TARGET_ENOTNAM
,
709 [ENAVAIL
] = TARGET_ENAVAIL
,
710 [EISNAM
] = TARGET_EISNAM
,
711 [EREMOTEIO
] = TARGET_EREMOTEIO
,
712 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
713 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
714 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
715 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
716 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
717 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
718 [EALREADY
] = TARGET_EALREADY
,
719 [EINPROGRESS
] = TARGET_EINPROGRESS
,
720 [ESTALE
] = TARGET_ESTALE
,
721 [ECANCELED
] = TARGET_ECANCELED
,
722 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
723 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
725 [ENOKEY
] = TARGET_ENOKEY
,
728 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
731 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
734 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
737 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
739 #ifdef ENOTRECOVERABLE
740 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
744 static inline int host_to_target_errno(int err
)
746 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
747 host_to_target_errno_table
[err
]) {
748 return host_to_target_errno_table
[err
];
753 static inline int target_to_host_errno(int err
)
755 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
756 target_to_host_errno_table
[err
]) {
757 return target_to_host_errno_table
[err
];
762 static inline abi_long
get_errno(abi_long ret
)
765 return -host_to_target_errno(errno
);
770 static inline int is_error(abi_long ret
)
772 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
775 const char *target_strerror(int err
)
777 if (err
== TARGET_ERESTARTSYS
) {
778 return "To be restarted";
780 if (err
== TARGET_QEMU_ESIGRETURN
) {
781 return "Successful exit from sigreturn";
784 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
787 return strerror(target_to_host_errno(err
));
790 #define safe_syscall0(type, name) \
791 static type safe_##name(void) \
793 return safe_syscall(__NR_##name); \
796 #define safe_syscall1(type, name, type1, arg1) \
797 static type safe_##name(type1 arg1) \
799 return safe_syscall(__NR_##name, arg1); \
802 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
803 static type safe_##name(type1 arg1, type2 arg2) \
805 return safe_syscall(__NR_##name, arg1, arg2); \
808 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
809 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
811 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
814 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
816 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
818 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
821 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
822 type4, arg4, type5, arg5) \
823 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
826 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
829 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
830 type4, arg4, type5, arg5, type6, arg6) \
831 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
832 type5 arg5, type6 arg6) \
834 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
837 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
838 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
839 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
840 int, flags
, mode_t
, mode
)
841 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
842 struct rusage
*, rusage
)
843 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
844 int, options
, struct rusage
*, rusage
)
845 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
846 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
847 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
848 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
849 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
851 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
852 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
854 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
855 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
856 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
857 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
858 safe_syscall2(int, tkill
, int, tid
, int, sig
)
859 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
860 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
861 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
862 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
864 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
865 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
866 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
867 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
868 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
869 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
870 safe_syscall2(int, flock
, int, fd
, int, operation
)
871 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
872 const struct timespec
*, uts
, size_t, sigsetsize
)
873 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
875 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
876 struct timespec
*, rem
)
877 #ifdef TARGET_NR_clock_nanosleep
878 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
879 const struct timespec
*, req
, struct timespec
*, rem
)
882 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
884 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
885 long, msgtype
, int, flags
)
886 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
887 unsigned, nsops
, const struct timespec
*, timeout
)
889 /* This host kernel architecture uses a single ipc syscall; fake up
890 * wrappers for the sub-operations to hide this implementation detail.
891 * Annoyingly we can't include linux/ipc.h to get the constant definitions
892 * for the call parameter because some structs in there conflict with the
893 * sys/ipc.h ones. So we just define them here, and rely on them being
894 * the same for all host architectures.
896 #define Q_SEMTIMEDOP 4
899 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
901 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
902 void *, ptr
, long, fifth
)
903 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
905 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
907 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
909 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
911 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
912 const struct timespec
*timeout
)
914 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
918 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
919 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
920 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
921 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
922 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
924 /* We do ioctl like this rather than via safe_syscall3 to preserve the
925 * "third argument might be integer or pointer or not present" behaviour of
928 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
929 /* Similarly for fcntl. Note that callers must always:
930 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
931 * use the flock64 struct rather than unsuffixed flock
932 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
935 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
937 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
940 static inline int host_to_target_sock_type(int host_type
)
944 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
946 target_type
= TARGET_SOCK_DGRAM
;
949 target_type
= TARGET_SOCK_STREAM
;
952 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
956 #if defined(SOCK_CLOEXEC)
957 if (host_type
& SOCK_CLOEXEC
) {
958 target_type
|= TARGET_SOCK_CLOEXEC
;
962 #if defined(SOCK_NONBLOCK)
963 if (host_type
& SOCK_NONBLOCK
) {
964 target_type
|= TARGET_SOCK_NONBLOCK
;
971 static abi_ulong target_brk
;
972 static abi_ulong target_original_brk
;
973 static abi_ulong brk_page
;
975 void target_set_brk(abi_ulong new_brk
)
977 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
978 brk_page
= HOST_PAGE_ALIGN(target_brk
);
981 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
982 #define DEBUGF_BRK(message, args...)
984 /* do_brk() must return target values and target errnos. */
985 abi_long
do_brk(abi_ulong new_brk
)
987 abi_long mapped_addr
;
988 abi_ulong new_alloc_size
;
990 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
993 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
996 if (new_brk
< target_original_brk
) {
997 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1002 /* If the new brk is less than the highest page reserved to the
1003 * target heap allocation, set it and we're almost done... */
1004 if (new_brk
<= brk_page
) {
1005 /* Heap contents are initialized to zero, as for anonymous
1007 if (new_brk
> target_brk
) {
1008 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1010 target_brk
= new_brk
;
1011 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1015 /* We need to allocate more memory after the brk... Note that
1016 * we don't use MAP_FIXED because that will map over the top of
1017 * any existing mapping (like the one with the host libc or qemu
1018 * itself); instead we treat "mapped but at wrong address" as
1019 * a failure and unmap again.
1021 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1022 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1023 PROT_READ
|PROT_WRITE
,
1024 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1026 if (mapped_addr
== brk_page
) {
1027 /* Heap contents are initialized to zero, as for anonymous
1028 * mapped pages. Technically the new pages are already
1029 * initialized to zero since they *are* anonymous mapped
1030 * pages, however we have to take care with the contents that
1031 * come from the remaining part of the previous page: it may
1032 * contains garbage data due to a previous heap usage (grown
1033 * then shrunken). */
1034 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1036 target_brk
= new_brk
;
1037 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1038 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1041 } else if (mapped_addr
!= -1) {
1042 /* Mapped but at wrong address, meaning there wasn't actually
1043 * enough space for this brk.
1045 target_munmap(mapped_addr
, new_alloc_size
);
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1050 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1053 #if defined(TARGET_ALPHA)
1054 /* We (partially) emulate OSF/1 on Alpha, which requires we
1055 return a proper errno, not an unchanged brk value. */
1056 return -TARGET_ENOMEM
;
1058 /* For everything else, return the previous break. */
1062 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1063 abi_ulong target_fds_addr
,
1067 abi_ulong b
, *target_fds
;
1069 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1070 if (!(target_fds
= lock_user(VERIFY_READ
,
1072 sizeof(abi_ulong
) * nw
,
1074 return -TARGET_EFAULT
;
1078 for (i
= 0; i
< nw
; i
++) {
1079 /* grab the abi_ulong */
1080 __get_user(b
, &target_fds
[i
]);
1081 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1082 /* check the bit inside the abi_ulong */
1089 unlock_user(target_fds
, target_fds_addr
, 0);
1094 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1095 abi_ulong target_fds_addr
,
1098 if (target_fds_addr
) {
1099 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1100 return -TARGET_EFAULT
;
1108 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1114 abi_ulong
*target_fds
;
1116 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1117 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1119 sizeof(abi_ulong
) * nw
,
1121 return -TARGET_EFAULT
;
1124 for (i
= 0; i
< nw
; i
++) {
1126 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1127 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1130 __put_user(v
, &target_fds
[i
]);
1133 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1138 #if defined(__alpha__)
1139 #define HOST_HZ 1024
1144 static inline abi_long
host_to_target_clock_t(long ticks
)
1146 #if HOST_HZ == TARGET_HZ
1149 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1153 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1154 const struct rusage
*rusage
)
1156 struct target_rusage
*target_rusage
;
1158 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1159 return -TARGET_EFAULT
;
1160 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1161 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1162 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1163 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1164 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1165 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1166 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1167 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1168 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1169 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1170 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1171 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1172 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1173 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1174 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1175 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1176 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1177 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1178 unlock_user_struct(target_rusage
, target_addr
, 1);
1183 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1185 abi_ulong target_rlim_swap
;
1188 target_rlim_swap
= tswapal(target_rlim
);
1189 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1190 return RLIM_INFINITY
;
1192 result
= target_rlim_swap
;
1193 if (target_rlim_swap
!= (rlim_t
)result
)
1194 return RLIM_INFINITY
;
1199 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1201 abi_ulong target_rlim_swap
;
1204 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1205 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1207 target_rlim_swap
= rlim
;
1208 result
= tswapal(target_rlim_swap
);
1213 static inline int target_to_host_resource(int code
)
1216 case TARGET_RLIMIT_AS
:
1218 case TARGET_RLIMIT_CORE
:
1220 case TARGET_RLIMIT_CPU
:
1222 case TARGET_RLIMIT_DATA
:
1224 case TARGET_RLIMIT_FSIZE
:
1225 return RLIMIT_FSIZE
;
1226 case TARGET_RLIMIT_LOCKS
:
1227 return RLIMIT_LOCKS
;
1228 case TARGET_RLIMIT_MEMLOCK
:
1229 return RLIMIT_MEMLOCK
;
1230 case TARGET_RLIMIT_MSGQUEUE
:
1231 return RLIMIT_MSGQUEUE
;
1232 case TARGET_RLIMIT_NICE
:
1234 case TARGET_RLIMIT_NOFILE
:
1235 return RLIMIT_NOFILE
;
1236 case TARGET_RLIMIT_NPROC
:
1237 return RLIMIT_NPROC
;
1238 case TARGET_RLIMIT_RSS
:
1240 case TARGET_RLIMIT_RTPRIO
:
1241 return RLIMIT_RTPRIO
;
1242 case TARGET_RLIMIT_SIGPENDING
:
1243 return RLIMIT_SIGPENDING
;
1244 case TARGET_RLIMIT_STACK
:
1245 return RLIMIT_STACK
;
1251 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1252 abi_ulong target_tv_addr
)
1254 struct target_timeval
*target_tv
;
1256 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1257 return -TARGET_EFAULT
;
1259 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1260 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1262 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1267 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1268 const struct timeval
*tv
)
1270 struct target_timeval
*target_tv
;
1272 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1273 return -TARGET_EFAULT
;
1275 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1276 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1278 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1283 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1284 abi_ulong target_tz_addr
)
1286 struct target_timezone
*target_tz
;
1288 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1289 return -TARGET_EFAULT
;
1292 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1293 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1295 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1300 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1303 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1304 abi_ulong target_mq_attr_addr
)
1306 struct target_mq_attr
*target_mq_attr
;
1308 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1309 target_mq_attr_addr
, 1))
1310 return -TARGET_EFAULT
;
1312 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1313 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1314 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1315 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1317 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1322 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1323 const struct mq_attr
*attr
)
1325 struct target_mq_attr
*target_mq_attr
;
1327 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1328 target_mq_attr_addr
, 0))
1329 return -TARGET_EFAULT
;
1331 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1332 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1333 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1334 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1336 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1342 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1343 /* do_select() must return target values and target errnos. */
1344 static abi_long
do_select(int n
,
1345 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1346 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1348 fd_set rfds
, wfds
, efds
;
1349 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1351 struct timespec ts
, *ts_ptr
;
1354 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1358 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1362 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1367 if (target_tv_addr
) {
1368 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1369 return -TARGET_EFAULT
;
1370 ts
.tv_sec
= tv
.tv_sec
;
1371 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1377 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1380 if (!is_error(ret
)) {
1381 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1382 return -TARGET_EFAULT
;
1383 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1384 return -TARGET_EFAULT
;
1385 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1386 return -TARGET_EFAULT
;
1388 if (target_tv_addr
) {
1389 tv
.tv_sec
= ts
.tv_sec
;
1390 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1391 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1392 return -TARGET_EFAULT
;
1401 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1404 return pipe2(host_pipe
, flags
);
1410 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1411 int flags
, int is_pipe2
)
1415 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1418 return get_errno(ret
);
1420 /* Several targets have special calling conventions for the original
1421 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1423 #if defined(TARGET_ALPHA)
1424 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1425 return host_pipe
[0];
1426 #elif defined(TARGET_MIPS)
1427 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1428 return host_pipe
[0];
1429 #elif defined(TARGET_SH4)
1430 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1431 return host_pipe
[0];
1432 #elif defined(TARGET_SPARC)
1433 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1434 return host_pipe
[0];
1438 if (put_user_s32(host_pipe
[0], pipedes
)
1439 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1440 return -TARGET_EFAULT
;
1441 return get_errno(ret
);
1444 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1445 abi_ulong target_addr
,
1448 struct target_ip_mreqn
*target_smreqn
;
1450 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1452 return -TARGET_EFAULT
;
1453 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1454 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1455 if (len
== sizeof(struct target_ip_mreqn
))
1456 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1457 unlock_user(target_smreqn
, target_addr
, 0);
1462 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1463 abi_ulong target_addr
,
1466 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1467 sa_family_t sa_family
;
1468 struct target_sockaddr
*target_saddr
;
1470 if (fd_trans_target_to_host_addr(fd
)) {
1471 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1474 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1476 return -TARGET_EFAULT
;
1478 sa_family
= tswap16(target_saddr
->sa_family
);
1480 /* Oops. The caller might send a incomplete sun_path; sun_path
1481 * must be terminated by \0 (see the manual page), but
1482 * unfortunately it is quite common to specify sockaddr_un
1483 * length as "strlen(x->sun_path)" while it should be
1484 * "strlen(...) + 1". We'll fix that here if needed.
1485 * Linux kernel has a similar feature.
1488 if (sa_family
== AF_UNIX
) {
1489 if (len
< unix_maxlen
&& len
> 0) {
1490 char *cp
= (char*)target_saddr
;
1492 if ( cp
[len
-1] && !cp
[len
] )
1495 if (len
> unix_maxlen
)
1499 memcpy(addr
, target_saddr
, len
);
1500 addr
->sa_family
= sa_family
;
1501 if (sa_family
== AF_NETLINK
) {
1502 struct sockaddr_nl
*nladdr
;
1504 nladdr
= (struct sockaddr_nl
*)addr
;
1505 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1506 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1507 } else if (sa_family
== AF_PACKET
) {
1508 struct target_sockaddr_ll
*lladdr
;
1510 lladdr
= (struct target_sockaddr_ll
*)addr
;
1511 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1512 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1514 unlock_user(target_saddr
, target_addr
, 0);
1519 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1520 struct sockaddr
*addr
,
1523 struct target_sockaddr
*target_saddr
;
1529 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1531 return -TARGET_EFAULT
;
1532 memcpy(target_saddr
, addr
, len
);
1533 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1534 sizeof(target_saddr
->sa_family
)) {
1535 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1537 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1538 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1539 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1540 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1541 } else if (addr
->sa_family
== AF_PACKET
) {
1542 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1543 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1544 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1546 unlock_user(target_saddr
, target_addr
, len
);
1551 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1552 struct target_msghdr
*target_msgh
)
1554 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1555 abi_long msg_controllen
;
1556 abi_ulong target_cmsg_addr
;
1557 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1558 socklen_t space
= 0;
1560 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1561 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1563 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1564 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1565 target_cmsg_start
= target_cmsg
;
1567 return -TARGET_EFAULT
;
1569 while (cmsg
&& target_cmsg
) {
1570 void *data
= CMSG_DATA(cmsg
);
1571 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1573 int len
= tswapal(target_cmsg
->cmsg_len
)
1574 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1576 space
+= CMSG_SPACE(len
);
1577 if (space
> msgh
->msg_controllen
) {
1578 space
-= CMSG_SPACE(len
);
1579 /* This is a QEMU bug, since we allocated the payload
1580 * area ourselves (unlike overflow in host-to-target
1581 * conversion, which is just the guest giving us a buffer
1582 * that's too small). It can't happen for the payload types
1583 * we currently support; if it becomes an issue in future
1584 * we would need to improve our allocation strategy to
1585 * something more intelligent than "twice the size of the
1586 * target buffer we're reading from".
1588 gemu_log("Host cmsg overflow\n");
1592 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1593 cmsg
->cmsg_level
= SOL_SOCKET
;
1595 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1597 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1598 cmsg
->cmsg_len
= CMSG_LEN(len
);
1600 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1601 int *fd
= (int *)data
;
1602 int *target_fd
= (int *)target_data
;
1603 int i
, numfds
= len
/ sizeof(int);
1605 for (i
= 0; i
< numfds
; i
++) {
1606 __get_user(fd
[i
], target_fd
+ i
);
1608 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1609 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1610 struct ucred
*cred
= (struct ucred
*)data
;
1611 struct target_ucred
*target_cred
=
1612 (struct target_ucred
*)target_data
;
1614 __get_user(cred
->pid
, &target_cred
->pid
);
1615 __get_user(cred
->uid
, &target_cred
->uid
);
1616 __get_user(cred
->gid
, &target_cred
->gid
);
1618 gemu_log("Unsupported ancillary data: %d/%d\n",
1619 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1620 memcpy(data
, target_data
, len
);
1623 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1624 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1627 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1629 msgh
->msg_controllen
= space
;
1633 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1634 struct msghdr
*msgh
)
1636 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1637 abi_long msg_controllen
;
1638 abi_ulong target_cmsg_addr
;
1639 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1640 socklen_t space
= 0;
1642 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1643 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1645 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1646 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1647 target_cmsg_start
= target_cmsg
;
1649 return -TARGET_EFAULT
;
1651 while (cmsg
&& target_cmsg
) {
1652 void *data
= CMSG_DATA(cmsg
);
1653 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1655 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1656 int tgt_len
, tgt_space
;
1658 /* We never copy a half-header but may copy half-data;
1659 * this is Linux's behaviour in put_cmsg(). Note that
1660 * truncation here is a guest problem (which we report
1661 * to the guest via the CTRUNC bit), unlike truncation
1662 * in target_to_host_cmsg, which is a QEMU bug.
1664 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1665 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1669 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1670 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1672 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1674 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1676 tgt_len
= TARGET_CMSG_LEN(len
);
1678 /* Payload types which need a different size of payload on
1679 * the target must adjust tgt_len here.
1681 switch (cmsg
->cmsg_level
) {
1683 switch (cmsg
->cmsg_type
) {
1685 tgt_len
= sizeof(struct target_timeval
);
1694 if (msg_controllen
< tgt_len
) {
1695 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1696 tgt_len
= msg_controllen
;
1699 /* We must now copy-and-convert len bytes of payload
1700 * into tgt_len bytes of destination space. Bear in mind
1701 * that in both source and destination we may be dealing
1702 * with a truncated value!
1704 switch (cmsg
->cmsg_level
) {
1706 switch (cmsg
->cmsg_type
) {
1709 int *fd
= (int *)data
;
1710 int *target_fd
= (int *)target_data
;
1711 int i
, numfds
= tgt_len
/ sizeof(int);
1713 for (i
= 0; i
< numfds
; i
++) {
1714 __put_user(fd
[i
], target_fd
+ i
);
1720 struct timeval
*tv
= (struct timeval
*)data
;
1721 struct target_timeval
*target_tv
=
1722 (struct target_timeval
*)target_data
;
1724 if (len
!= sizeof(struct timeval
) ||
1725 tgt_len
!= sizeof(struct target_timeval
)) {
1729 /* copy struct timeval to target */
1730 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1731 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1734 case SCM_CREDENTIALS
:
1736 struct ucred
*cred
= (struct ucred
*)data
;
1737 struct target_ucred
*target_cred
=
1738 (struct target_ucred
*)target_data
;
1740 __put_user(cred
->pid
, &target_cred
->pid
);
1741 __put_user(cred
->uid
, &target_cred
->uid
);
1742 __put_user(cred
->gid
, &target_cred
->gid
);
1752 gemu_log("Unsupported ancillary data: %d/%d\n",
1753 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1754 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1755 if (tgt_len
> len
) {
1756 memset(target_data
+ len
, 0, tgt_len
- len
);
1760 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1761 tgt_space
= TARGET_CMSG_SPACE(len
);
1762 if (msg_controllen
< tgt_space
) {
1763 tgt_space
= msg_controllen
;
1765 msg_controllen
-= tgt_space
;
1767 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1768 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1771 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1773 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place.  The swap
 * is its own inverse, so this serves both host-to-target and
 * target-to-host directions. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1786 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1788 abi_long (*host_to_target_nlmsg
)
1789 (struct nlmsghdr
*))
1794 while (len
> sizeof(struct nlmsghdr
)) {
1796 nlmsg_len
= nlh
->nlmsg_len
;
1797 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1802 switch (nlh
->nlmsg_type
) {
1804 tswap_nlmsghdr(nlh
);
1810 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1811 e
->error
= tswap32(e
->error
);
1812 tswap_nlmsghdr(&e
->msg
);
1813 tswap_nlmsghdr(nlh
);
1817 ret
= host_to_target_nlmsg(nlh
);
1819 tswap_nlmsghdr(nlh
);
1824 tswap_nlmsghdr(nlh
);
1825 len
-= NLMSG_ALIGN(nlmsg_len
);
1826 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1831 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1833 abi_long (*target_to_host_nlmsg
)
1834 (struct nlmsghdr
*))
1838 while (len
> sizeof(struct nlmsghdr
)) {
1839 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1840 tswap32(nlh
->nlmsg_len
) > len
) {
1843 tswap_nlmsghdr(nlh
);
1844 switch (nlh
->nlmsg_type
) {
1851 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1852 e
->error
= tswap32(e
->error
);
1853 tswap_nlmsghdr(&e
->msg
);
1857 ret
= target_to_host_nlmsg(nlh
);
1862 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1863 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1868 #ifdef CONFIG_RTNETLINK
1869 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1870 size_t len
, void *context
,
1871 abi_long (*host_to_target_nlattr
)
1875 unsigned short nla_len
;
1878 while (len
> sizeof(struct nlattr
)) {
1879 nla_len
= nlattr
->nla_len
;
1880 if (nla_len
< sizeof(struct nlattr
) ||
1884 ret
= host_to_target_nlattr(nlattr
, context
);
1885 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1886 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1890 len
-= NLA_ALIGN(nla_len
);
1891 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1896 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1898 abi_long (*host_to_target_rtattr
)
1901 unsigned short rta_len
;
1904 while (len
> sizeof(struct rtattr
)) {
1905 rta_len
= rtattr
->rta_len
;
1906 if (rta_len
< sizeof(struct rtattr
) ||
1910 ret
= host_to_target_rtattr(rtattr
);
1911 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1912 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1916 len
-= RTA_ALIGN(rta_len
);
1917 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1922 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1924 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
1931 switch (nlattr
->nla_type
) {
1933 case QEMU_IFLA_BR_FDB_FLUSH
:
1936 case QEMU_IFLA_BR_GROUP_ADDR
:
1939 case QEMU_IFLA_BR_VLAN_FILTERING
:
1940 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
1941 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
1942 case QEMU_IFLA_BR_MCAST_ROUTER
:
1943 case QEMU_IFLA_BR_MCAST_SNOOPING
:
1944 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
1945 case QEMU_IFLA_BR_MCAST_QUERIER
:
1946 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
1947 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
1948 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
1951 case QEMU_IFLA_BR_PRIORITY
:
1952 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
1953 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
1954 case QEMU_IFLA_BR_ROOT_PORT
:
1955 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
1956 u16
= NLA_DATA(nlattr
);
1957 *u16
= tswap16(*u16
);
1960 case QEMU_IFLA_BR_FORWARD_DELAY
:
1961 case QEMU_IFLA_BR_HELLO_TIME
:
1962 case QEMU_IFLA_BR_MAX_AGE
:
1963 case QEMU_IFLA_BR_AGEING_TIME
:
1964 case QEMU_IFLA_BR_STP_STATE
:
1965 case QEMU_IFLA_BR_ROOT_PATH_COST
:
1966 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
1967 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
1968 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
1969 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
1970 u32
= NLA_DATA(nlattr
);
1971 *u32
= tswap32(*u32
);
1974 case QEMU_IFLA_BR_HELLO_TIMER
:
1975 case QEMU_IFLA_BR_TCN_TIMER
:
1976 case QEMU_IFLA_BR_GC_TIMER
:
1977 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
1978 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
1979 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
1980 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
1981 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
1982 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
1983 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
1984 u64
= NLA_DATA(nlattr
);
1985 *u64
= tswap64(*u64
);
1987 /* ifla_bridge_id: uin8_t[] */
1988 case QEMU_IFLA_BR_ROOT_ID
:
1989 case QEMU_IFLA_BR_BRIDGE_ID
:
1992 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
1998 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2005 switch (nlattr
->nla_type
) {
2007 case QEMU_IFLA_BRPORT_STATE
:
2008 case QEMU_IFLA_BRPORT_MODE
:
2009 case QEMU_IFLA_BRPORT_GUARD
:
2010 case QEMU_IFLA_BRPORT_PROTECT
:
2011 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2012 case QEMU_IFLA_BRPORT_LEARNING
:
2013 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2014 case QEMU_IFLA_BRPORT_PROXYARP
:
2015 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2016 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2017 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2018 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2019 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2022 case QEMU_IFLA_BRPORT_PRIORITY
:
2023 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2024 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2025 case QEMU_IFLA_BRPORT_ID
:
2026 case QEMU_IFLA_BRPORT_NO
:
2027 u16
= NLA_DATA(nlattr
);
2028 *u16
= tswap16(*u16
);
2031 case QEMU_IFLA_BRPORT_COST
:
2032 u32
= NLA_DATA(nlattr
);
2033 *u32
= tswap32(*u32
);
2036 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2037 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2038 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2039 u64
= NLA_DATA(nlattr
);
2040 *u64
= tswap64(*u64
);
2042 /* ifla_bridge_id: uint8_t[] */
2043 case QEMU_IFLA_BRPORT_ROOT_ID
:
2044 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2047 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2053 struct linkinfo_context
{
2060 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2063 struct linkinfo_context
*li_context
= context
;
2065 switch (nlattr
->nla_type
) {
2067 case QEMU_IFLA_INFO_KIND
:
2068 li_context
->name
= NLA_DATA(nlattr
);
2069 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2071 case QEMU_IFLA_INFO_SLAVE_KIND
:
2072 li_context
->slave_name
= NLA_DATA(nlattr
);
2073 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2076 case QEMU_IFLA_INFO_XSTATS
:
2077 /* FIXME: only used by CAN */
2080 case QEMU_IFLA_INFO_DATA
:
2081 if (strncmp(li_context
->name
, "bridge",
2082 li_context
->len
) == 0) {
2083 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2086 host_to_target_data_bridge_nlattr
);
2088 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2091 case QEMU_IFLA_INFO_SLAVE_DATA
:
2092 if (strncmp(li_context
->slave_name
, "bridge",
2093 li_context
->slave_len
) == 0) {
2094 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2097 host_to_target_slave_data_bridge_nlattr
);
2099 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2100 li_context
->slave_name
);
2104 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2111 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2117 switch (nlattr
->nla_type
) {
2118 case QEMU_IFLA_INET_CONF
:
2119 u32
= NLA_DATA(nlattr
);
2120 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2122 u32
[i
] = tswap32(u32
[i
]);
2126 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2131 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2136 struct ifla_cacheinfo
*ci
;
2139 switch (nlattr
->nla_type
) {
2141 case QEMU_IFLA_INET6_TOKEN
:
2144 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2147 case QEMU_IFLA_INET6_FLAGS
:
2148 u32
= NLA_DATA(nlattr
);
2149 *u32
= tswap32(*u32
);
2152 case QEMU_IFLA_INET6_CONF
:
2153 u32
= NLA_DATA(nlattr
);
2154 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2156 u32
[i
] = tswap32(u32
[i
]);
2159 /* ifla_cacheinfo */
2160 case QEMU_IFLA_INET6_CACHEINFO
:
2161 ci
= NLA_DATA(nlattr
);
2162 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2163 ci
->tstamp
= tswap32(ci
->tstamp
);
2164 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2165 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2168 case QEMU_IFLA_INET6_STATS
:
2169 case QEMU_IFLA_INET6_ICMP6STATS
:
2170 u64
= NLA_DATA(nlattr
);
2171 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2173 u64
[i
] = tswap64(u64
[i
]);
2177 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2182 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2185 switch (nlattr
->nla_type
) {
2187 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2189 host_to_target_data_inet_nlattr
);
2191 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2193 host_to_target_data_inet6_nlattr
);
2195 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2201 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2204 struct rtnl_link_stats
*st
;
2205 struct rtnl_link_stats64
*st64
;
2206 struct rtnl_link_ifmap
*map
;
2207 struct linkinfo_context li_context
;
2209 switch (rtattr
->rta_type
) {
2211 case QEMU_IFLA_ADDRESS
:
2212 case QEMU_IFLA_BROADCAST
:
2214 case QEMU_IFLA_IFNAME
:
2215 case QEMU_IFLA_QDISC
:
2218 case QEMU_IFLA_OPERSTATE
:
2219 case QEMU_IFLA_LINKMODE
:
2220 case QEMU_IFLA_CARRIER
:
2221 case QEMU_IFLA_PROTO_DOWN
:
2225 case QEMU_IFLA_LINK
:
2226 case QEMU_IFLA_WEIGHT
:
2227 case QEMU_IFLA_TXQLEN
:
2228 case QEMU_IFLA_CARRIER_CHANGES
:
2229 case QEMU_IFLA_NUM_RX_QUEUES
:
2230 case QEMU_IFLA_NUM_TX_QUEUES
:
2231 case QEMU_IFLA_PROMISCUITY
:
2232 case QEMU_IFLA_EXT_MASK
:
2233 case QEMU_IFLA_LINK_NETNSID
:
2234 case QEMU_IFLA_GROUP
:
2235 case QEMU_IFLA_MASTER
:
2236 case QEMU_IFLA_NUM_VF
:
2237 u32
= RTA_DATA(rtattr
);
2238 *u32
= tswap32(*u32
);
2240 /* struct rtnl_link_stats */
2241 case QEMU_IFLA_STATS
:
2242 st
= RTA_DATA(rtattr
);
2243 st
->rx_packets
= tswap32(st
->rx_packets
);
2244 st
->tx_packets
= tswap32(st
->tx_packets
);
2245 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2246 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2247 st
->rx_errors
= tswap32(st
->rx_errors
);
2248 st
->tx_errors
= tswap32(st
->tx_errors
);
2249 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2250 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2251 st
->multicast
= tswap32(st
->multicast
);
2252 st
->collisions
= tswap32(st
->collisions
);
2254 /* detailed rx_errors: */
2255 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2256 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2257 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2258 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2259 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2260 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2262 /* detailed tx_errors */
2263 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2264 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2265 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2266 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2267 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2270 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2271 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2273 /* struct rtnl_link_stats64 */
2274 case QEMU_IFLA_STATS64
:
2275 st64
= RTA_DATA(rtattr
);
2276 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2277 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2278 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2279 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2280 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2281 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2282 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2283 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2284 st64
->multicast
= tswap64(st64
->multicast
);
2285 st64
->collisions
= tswap64(st64
->collisions
);
2287 /* detailed rx_errors: */
2288 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2289 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2290 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2291 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2292 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2293 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2295 /* detailed tx_errors */
2296 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2297 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2298 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2299 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2300 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2303 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2304 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2306 /* struct rtnl_link_ifmap */
2308 map
= RTA_DATA(rtattr
);
2309 map
->mem_start
= tswap64(map
->mem_start
);
2310 map
->mem_end
= tswap64(map
->mem_end
);
2311 map
->base_addr
= tswap64(map
->base_addr
);
2312 map
->irq
= tswap16(map
->irq
);
2315 case QEMU_IFLA_LINKINFO
:
2316 memset(&li_context
, 0, sizeof(li_context
));
2317 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2319 host_to_target_data_linkinfo_nlattr
);
2320 case QEMU_IFLA_AF_SPEC
:
2321 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2323 host_to_target_data_spec_nlattr
);
2325 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2331 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2334 struct ifa_cacheinfo
*ci
;
2336 switch (rtattr
->rta_type
) {
2337 /* binary: depends on family type */
2347 u32
= RTA_DATA(rtattr
);
2348 *u32
= tswap32(*u32
);
2350 /* struct ifa_cacheinfo */
2352 ci
= RTA_DATA(rtattr
);
2353 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2354 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2355 ci
->cstamp
= tswap32(ci
->cstamp
);
2356 ci
->tstamp
= tswap32(ci
->tstamp
);
2359 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2365 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2368 switch (rtattr
->rta_type
) {
2369 /* binary: depends on family type */
2378 u32
= RTA_DATA(rtattr
);
2379 *u32
= tswap32(*u32
);
2382 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2388 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2389 uint32_t rtattr_len
)
2391 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2392 host_to_target_data_link_rtattr
);
2395 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2396 uint32_t rtattr_len
)
2398 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2399 host_to_target_data_addr_rtattr
);
2402 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2403 uint32_t rtattr_len
)
2405 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2406 host_to_target_data_route_rtattr
);
2409 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2412 struct ifinfomsg
*ifi
;
2413 struct ifaddrmsg
*ifa
;
2416 nlmsg_len
= nlh
->nlmsg_len
;
2417 switch (nlh
->nlmsg_type
) {
2421 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2422 ifi
= NLMSG_DATA(nlh
);
2423 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2424 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2425 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2426 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2427 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2428 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2434 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2435 ifa
= NLMSG_DATA(nlh
);
2436 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2437 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2438 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2444 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2445 rtm
= NLMSG_DATA(nlh
);
2446 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2447 host_to_target_route_rtattr(RTM_RTA(rtm
),
2448 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2452 return -TARGET_EINVAL
;
2457 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2460 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2463 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2465 abi_long (*target_to_host_rtattr
)
2470 while (len
>= sizeof(struct rtattr
)) {
2471 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2472 tswap16(rtattr
->rta_len
) > len
) {
2475 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2476 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2477 ret
= target_to_host_rtattr(rtattr
);
2481 len
-= RTA_ALIGN(rtattr
->rta_len
);
2482 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2483 RTA_ALIGN(rtattr
->rta_len
));
2488 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2490 switch (rtattr
->rta_type
) {
2492 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2498 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2500 switch (rtattr
->rta_type
) {
2501 /* binary: depends on family type */
2506 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2512 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2515 switch (rtattr
->rta_type
) {
2516 /* binary: depends on family type */
2523 u32
= RTA_DATA(rtattr
);
2524 *u32
= tswap32(*u32
);
2527 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2533 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2534 uint32_t rtattr_len
)
2536 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2537 target_to_host_data_link_rtattr
);
2540 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2541 uint32_t rtattr_len
)
2543 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2544 target_to_host_data_addr_rtattr
);
2547 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2548 uint32_t rtattr_len
)
2550 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2551 target_to_host_data_route_rtattr
);
2554 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2556 struct ifinfomsg
*ifi
;
2557 struct ifaddrmsg
*ifa
;
2560 switch (nlh
->nlmsg_type
) {
2565 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2566 ifi
= NLMSG_DATA(nlh
);
2567 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2568 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2569 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2570 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2571 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2572 NLMSG_LENGTH(sizeof(*ifi
)));
2578 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2579 ifa
= NLMSG_DATA(nlh
);
2580 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2581 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2582 NLMSG_LENGTH(sizeof(*ifa
)));
2589 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2590 rtm
= NLMSG_DATA(nlh
);
2591 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2592 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2593 NLMSG_LENGTH(sizeof(*rtm
)));
2597 return -TARGET_EOPNOTSUPP
;
2602 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2604 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2606 #endif /* CONFIG_RTNETLINK */
2608 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2610 switch (nlh
->nlmsg_type
) {
2612 gemu_log("Unknown host audit message type %d\n",
2614 return -TARGET_EINVAL
;
2619 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2622 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2625 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2627 switch (nlh
->nlmsg_type
) {
2629 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2630 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2633 gemu_log("Unknown target audit message type %d\n",
2635 return -TARGET_EINVAL
;
2641 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2643 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2646 /* do_setsockopt() Must return target values and target errnos. */
2647 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2648 abi_ulong optval_addr
, socklen_t optlen
)
2652 struct ip_mreqn
*ip_mreq
;
2653 struct ip_mreq_source
*ip_mreq_source
;
2657 /* TCP options all take an 'int' value. */
2658 if (optlen
< sizeof(uint32_t))
2659 return -TARGET_EINVAL
;
2661 if (get_user_u32(val
, optval_addr
))
2662 return -TARGET_EFAULT
;
2663 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2670 case IP_ROUTER_ALERT
:
2674 case IP_MTU_DISCOVER
:
2680 case IP_MULTICAST_TTL
:
2681 case IP_MULTICAST_LOOP
:
2683 if (optlen
>= sizeof(uint32_t)) {
2684 if (get_user_u32(val
, optval_addr
))
2685 return -TARGET_EFAULT
;
2686 } else if (optlen
>= 1) {
2687 if (get_user_u8(val
, optval_addr
))
2688 return -TARGET_EFAULT
;
2690 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2692 case IP_ADD_MEMBERSHIP
:
2693 case IP_DROP_MEMBERSHIP
:
2694 if (optlen
< sizeof (struct target_ip_mreq
) ||
2695 optlen
> sizeof (struct target_ip_mreqn
))
2696 return -TARGET_EINVAL
;
2698 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2699 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2700 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2703 case IP_BLOCK_SOURCE
:
2704 case IP_UNBLOCK_SOURCE
:
2705 case IP_ADD_SOURCE_MEMBERSHIP
:
2706 case IP_DROP_SOURCE_MEMBERSHIP
:
2707 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2708 return -TARGET_EINVAL
;
2710 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2711 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2712 unlock_user (ip_mreq_source
, optval_addr
, 0);
2721 case IPV6_MTU_DISCOVER
:
2724 case IPV6_RECVPKTINFO
:
2726 if (optlen
< sizeof(uint32_t)) {
2727 return -TARGET_EINVAL
;
2729 if (get_user_u32(val
, optval_addr
)) {
2730 return -TARGET_EFAULT
;
2732 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2733 &val
, sizeof(val
)));
2742 /* struct icmp_filter takes an u32 value */
2743 if (optlen
< sizeof(uint32_t)) {
2744 return -TARGET_EINVAL
;
2747 if (get_user_u32(val
, optval_addr
)) {
2748 return -TARGET_EFAULT
;
2750 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2751 &val
, sizeof(val
)));
2758 case TARGET_SOL_SOCKET
:
2760 case TARGET_SO_RCVTIMEO
:
2764 optname
= SO_RCVTIMEO
;
2767 if (optlen
!= sizeof(struct target_timeval
)) {
2768 return -TARGET_EINVAL
;
2771 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2772 return -TARGET_EFAULT
;
2775 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2779 case TARGET_SO_SNDTIMEO
:
2780 optname
= SO_SNDTIMEO
;
2782 case TARGET_SO_ATTACH_FILTER
:
2784 struct target_sock_fprog
*tfprog
;
2785 struct target_sock_filter
*tfilter
;
2786 struct sock_fprog fprog
;
2787 struct sock_filter
*filter
;
2790 if (optlen
!= sizeof(*tfprog
)) {
2791 return -TARGET_EINVAL
;
2793 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2794 return -TARGET_EFAULT
;
2796 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2797 tswapal(tfprog
->filter
), 0)) {
2798 unlock_user_struct(tfprog
, optval_addr
, 1);
2799 return -TARGET_EFAULT
;
2802 fprog
.len
= tswap16(tfprog
->len
);
2803 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2804 if (filter
== NULL
) {
2805 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2806 unlock_user_struct(tfprog
, optval_addr
, 1);
2807 return -TARGET_ENOMEM
;
2809 for (i
= 0; i
< fprog
.len
; i
++) {
2810 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2811 filter
[i
].jt
= tfilter
[i
].jt
;
2812 filter
[i
].jf
= tfilter
[i
].jf
;
2813 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2815 fprog
.filter
= filter
;
2817 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2818 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2821 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2822 unlock_user_struct(tfprog
, optval_addr
, 1);
2825 case TARGET_SO_BINDTODEVICE
:
2827 char *dev_ifname
, *addr_ifname
;
2829 if (optlen
> IFNAMSIZ
- 1) {
2830 optlen
= IFNAMSIZ
- 1;
2832 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2834 return -TARGET_EFAULT
;
2836 optname
= SO_BINDTODEVICE
;
2837 addr_ifname
= alloca(IFNAMSIZ
);
2838 memcpy(addr_ifname
, dev_ifname
, optlen
);
2839 addr_ifname
[optlen
] = 0;
2840 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2841 addr_ifname
, optlen
));
2842 unlock_user (dev_ifname
, optval_addr
, 0);
2845 /* Options with 'int' argument. */
2846 case TARGET_SO_DEBUG
:
2849 case TARGET_SO_REUSEADDR
:
2850 optname
= SO_REUSEADDR
;
2852 case TARGET_SO_TYPE
:
2855 case TARGET_SO_ERROR
:
2858 case TARGET_SO_DONTROUTE
:
2859 optname
= SO_DONTROUTE
;
2861 case TARGET_SO_BROADCAST
:
2862 optname
= SO_BROADCAST
;
2864 case TARGET_SO_SNDBUF
:
2865 optname
= SO_SNDBUF
;
2867 case TARGET_SO_SNDBUFFORCE
:
2868 optname
= SO_SNDBUFFORCE
;
2870 case TARGET_SO_RCVBUF
:
2871 optname
= SO_RCVBUF
;
2873 case TARGET_SO_RCVBUFFORCE
:
2874 optname
= SO_RCVBUFFORCE
;
2876 case TARGET_SO_KEEPALIVE
:
2877 optname
= SO_KEEPALIVE
;
2879 case TARGET_SO_OOBINLINE
:
2880 optname
= SO_OOBINLINE
;
2882 case TARGET_SO_NO_CHECK
:
2883 optname
= SO_NO_CHECK
;
2885 case TARGET_SO_PRIORITY
:
2886 optname
= SO_PRIORITY
;
2889 case TARGET_SO_BSDCOMPAT
:
2890 optname
= SO_BSDCOMPAT
;
2893 case TARGET_SO_PASSCRED
:
2894 optname
= SO_PASSCRED
;
2896 case TARGET_SO_PASSSEC
:
2897 optname
= SO_PASSSEC
;
2899 case TARGET_SO_TIMESTAMP
:
2900 optname
= SO_TIMESTAMP
;
2902 case TARGET_SO_RCVLOWAT
:
2903 optname
= SO_RCVLOWAT
;
2909 if (optlen
< sizeof(uint32_t))
2910 return -TARGET_EINVAL
;
2912 if (get_user_u32(val
, optval_addr
))
2913 return -TARGET_EFAULT
;
2914 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2918 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2919 ret
= -TARGET_ENOPROTOOPT
;
2924 /* do_getsockopt() Must return target values and target errnos. */
2925 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2926 abi_ulong optval_addr
, abi_ulong optlen
)
2933 case TARGET_SOL_SOCKET
:
2936 /* These don't just return a single integer */
2937 case TARGET_SO_LINGER
:
2938 case TARGET_SO_RCVTIMEO
:
2939 case TARGET_SO_SNDTIMEO
:
2940 case TARGET_SO_PEERNAME
:
2942 case TARGET_SO_PEERCRED
: {
2945 struct target_ucred
*tcr
;
2947 if (get_user_u32(len
, optlen
)) {
2948 return -TARGET_EFAULT
;
2951 return -TARGET_EINVAL
;
2955 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2963 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2964 return -TARGET_EFAULT
;
2966 __put_user(cr
.pid
, &tcr
->pid
);
2967 __put_user(cr
.uid
, &tcr
->uid
);
2968 __put_user(cr
.gid
, &tcr
->gid
);
2969 unlock_user_struct(tcr
, optval_addr
, 1);
2970 if (put_user_u32(len
, optlen
)) {
2971 return -TARGET_EFAULT
;
2975 /* Options with 'int' argument. */
2976 case TARGET_SO_DEBUG
:
2979 case TARGET_SO_REUSEADDR
:
2980 optname
= SO_REUSEADDR
;
2982 case TARGET_SO_TYPE
:
2985 case TARGET_SO_ERROR
:
2988 case TARGET_SO_DONTROUTE
:
2989 optname
= SO_DONTROUTE
;
2991 case TARGET_SO_BROADCAST
:
2992 optname
= SO_BROADCAST
;
2994 case TARGET_SO_SNDBUF
:
2995 optname
= SO_SNDBUF
;
2997 case TARGET_SO_RCVBUF
:
2998 optname
= SO_RCVBUF
;
3000 case TARGET_SO_KEEPALIVE
:
3001 optname
= SO_KEEPALIVE
;
3003 case TARGET_SO_OOBINLINE
:
3004 optname
= SO_OOBINLINE
;
3006 case TARGET_SO_NO_CHECK
:
3007 optname
= SO_NO_CHECK
;
3009 case TARGET_SO_PRIORITY
:
3010 optname
= SO_PRIORITY
;
3013 case TARGET_SO_BSDCOMPAT
:
3014 optname
= SO_BSDCOMPAT
;
3017 case TARGET_SO_PASSCRED
:
3018 optname
= SO_PASSCRED
;
3020 case TARGET_SO_TIMESTAMP
:
3021 optname
= SO_TIMESTAMP
;
3023 case TARGET_SO_RCVLOWAT
:
3024 optname
= SO_RCVLOWAT
;
3026 case TARGET_SO_ACCEPTCONN
:
3027 optname
= SO_ACCEPTCONN
;
3034 /* TCP options all take an 'int' value. */
3036 if (get_user_u32(len
, optlen
))
3037 return -TARGET_EFAULT
;
3039 return -TARGET_EINVAL
;
3041 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3044 if (optname
== SO_TYPE
) {
3045 val
= host_to_target_sock_type(val
);
3050 if (put_user_u32(val
, optval_addr
))
3051 return -TARGET_EFAULT
;
3053 if (put_user_u8(val
, optval_addr
))
3054 return -TARGET_EFAULT
;
3056 if (put_user_u32(len
, optlen
))
3057 return -TARGET_EFAULT
;
3064 case IP_ROUTER_ALERT
:
3068 case IP_MTU_DISCOVER
:
3074 case IP_MULTICAST_TTL
:
3075 case IP_MULTICAST_LOOP
:
3076 if (get_user_u32(len
, optlen
))
3077 return -TARGET_EFAULT
;
3079 return -TARGET_EINVAL
;
3081 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3084 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3086 if (put_user_u32(len
, optlen
)
3087 || put_user_u8(val
, optval_addr
))
3088 return -TARGET_EFAULT
;
3090 if (len
> sizeof(int))
3092 if (put_user_u32(len
, optlen
)
3093 || put_user_u32(val
, optval_addr
))
3094 return -TARGET_EFAULT
;
3098 ret
= -TARGET_ENOPROTOOPT
;
3104 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3106 ret
= -TARGET_EOPNOTSUPP
;
3112 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3113 abi_ulong count
, int copy
)
3115 struct target_iovec
*target_vec
;
3117 abi_ulong total_len
, max_len
;
3120 bool bad_address
= false;
3126 if (count
> IOV_MAX
) {
3131 vec
= g_try_new0(struct iovec
, count
);
3137 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3138 count
* sizeof(struct target_iovec
), 1);
3139 if (target_vec
== NULL
) {
3144 /* ??? If host page size > target page size, this will result in a
3145 value larger than what we can actually support. */
3146 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3149 for (i
= 0; i
< count
; i
++) {
3150 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3151 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3156 } else if (len
== 0) {
3157 /* Zero length pointer is ignored. */
3158 vec
[i
].iov_base
= 0;
3160 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3161 /* If the first buffer pointer is bad, this is a fault. But
3162 * subsequent bad buffers will result in a partial write; this
3163 * is realized by filling the vector with null pointers and
3165 if (!vec
[i
].iov_base
) {
3176 if (len
> max_len
- total_len
) {
3177 len
= max_len
- total_len
;
3180 vec
[i
].iov_len
= len
;
3184 unlock_user(target_vec
, target_addr
, 0);
3189 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3190 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3193 unlock_user(target_vec
, target_addr
, 0);
3200 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3201 abi_ulong count
, int copy
)
3203 struct target_iovec
*target_vec
;
3206 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3207 count
* sizeof(struct target_iovec
), 1);
3209 for (i
= 0; i
< count
; i
++) {
3210 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3211 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3215 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3217 unlock_user(target_vec
, target_addr
, 0);
3223 static inline int target_to_host_sock_type(int *type
)
3226 int target_type
= *type
;
3228 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3229 case TARGET_SOCK_DGRAM
:
3230 host_type
= SOCK_DGRAM
;
3232 case TARGET_SOCK_STREAM
:
3233 host_type
= SOCK_STREAM
;
3236 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3239 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3240 #if defined(SOCK_CLOEXEC)
3241 host_type
|= SOCK_CLOEXEC
;
3243 return -TARGET_EINVAL
;
3246 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3247 #if defined(SOCK_NONBLOCK)
3248 host_type
|= SOCK_NONBLOCK
;
3249 #elif !defined(O_NONBLOCK)
3250 return -TARGET_EINVAL
;
3257 /* Try to emulate socket type flags after socket creation. */
3258 static int sock_flags_fixup(int fd
, int target_type
)
3260 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3261 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3262 int flags
= fcntl(fd
, F_GETFL
);
3263 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3265 return -TARGET_EINVAL
;
3272 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3273 abi_ulong target_addr
,
3276 struct sockaddr
*addr
= host_addr
;
3277 struct target_sockaddr
*target_saddr
;
3279 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3280 if (!target_saddr
) {
3281 return -TARGET_EFAULT
;
3284 memcpy(addr
, target_saddr
, len
);
3285 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3286 /* spkt_protocol is big-endian */
3288 unlock_user(target_saddr
, target_addr
, 0);
3292 static TargetFdTrans target_packet_trans
= {
3293 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3296 #ifdef CONFIG_RTNETLINK
3297 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3301 ret
= target_to_host_nlmsg_route(buf
, len
);
3309 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3313 ret
= host_to_target_nlmsg_route(buf
, len
);
3321 static TargetFdTrans target_netlink_route_trans
= {
3322 .target_to_host_data
= netlink_route_target_to_host
,
3323 .host_to_target_data
= netlink_route_host_to_target
,
3325 #endif /* CONFIG_RTNETLINK */
3327 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3331 ret
= target_to_host_nlmsg_audit(buf
, len
);
3339 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3343 ret
= host_to_target_nlmsg_audit(buf
, len
);
3351 static TargetFdTrans target_netlink_audit_trans
= {
3352 .target_to_host_data
= netlink_audit_target_to_host
,
3353 .host_to_target_data
= netlink_audit_host_to_target
,
3356 /* do_socket() Must return target values and target errnos. */
3357 static abi_long
do_socket(int domain
, int type
, int protocol
)
3359 int target_type
= type
;
3362 ret
= target_to_host_sock_type(&type
);
3367 if (domain
== PF_NETLINK
&& !(
3368 #ifdef CONFIG_RTNETLINK
3369 protocol
== NETLINK_ROUTE
||
3371 protocol
== NETLINK_KOBJECT_UEVENT
||
3372 protocol
== NETLINK_AUDIT
)) {
3373 return -EPFNOSUPPORT
;
3376 if (domain
== AF_PACKET
||
3377 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3378 protocol
= tswap16(protocol
);
3381 ret
= get_errno(socket(domain
, type
, protocol
));
3383 ret
= sock_flags_fixup(ret
, target_type
);
3384 if (type
== SOCK_PACKET
) {
3385 /* Manage an obsolete case :
3386 * if socket type is SOCK_PACKET, bind by name
3388 fd_trans_register(ret
, &target_packet_trans
);
3389 } else if (domain
== PF_NETLINK
) {
3391 #ifdef CONFIG_RTNETLINK
3393 fd_trans_register(ret
, &target_netlink_route_trans
);
3396 case NETLINK_KOBJECT_UEVENT
:
3397 /* nothing to do: messages are strings */
3400 fd_trans_register(ret
, &target_netlink_audit_trans
);
3403 g_assert_not_reached();
3410 /* do_bind() Must return target values and target errnos. */
3411 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3417 if ((int)addrlen
< 0) {
3418 return -TARGET_EINVAL
;
3421 addr
= alloca(addrlen
+1);
3423 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3427 return get_errno(bind(sockfd
, addr
, addrlen
));
3430 /* do_connect() Must return target values and target errnos. */
3431 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3437 if ((int)addrlen
< 0) {
3438 return -TARGET_EINVAL
;
3441 addr
= alloca(addrlen
+1);
3443 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3447 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3450 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3451 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3452 int flags
, int send
)
3458 abi_ulong target_vec
;
3460 if (msgp
->msg_name
) {
3461 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3462 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3463 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3464 tswapal(msgp
->msg_name
),
3466 if (ret
== -TARGET_EFAULT
) {
3467 /* For connected sockets msg_name and msg_namelen must
3468 * be ignored, so returning EFAULT immediately is wrong.
3469 * Instead, pass a bad msg_name to the host kernel, and
3470 * let it decide whether to return EFAULT or not.
3472 msg
.msg_name
= (void *)-1;
3477 msg
.msg_name
= NULL
;
3478 msg
.msg_namelen
= 0;
3480 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3481 msg
.msg_control
= alloca(msg
.msg_controllen
);
3482 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3484 count
= tswapal(msgp
->msg_iovlen
);
3485 target_vec
= tswapal(msgp
->msg_iov
);
3487 if (count
> IOV_MAX
) {
3488 /* sendrcvmsg returns a different errno for this condition than
3489 * readv/writev, so we must catch it here before lock_iovec() does.
3491 ret
= -TARGET_EMSGSIZE
;
3495 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3496 target_vec
, count
, send
);
3498 ret
= -host_to_target_errno(errno
);
3501 msg
.msg_iovlen
= count
;
3505 if (fd_trans_target_to_host_data(fd
)) {
3508 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3509 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3510 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3511 msg
.msg_iov
->iov_len
);
3513 msg
.msg_iov
->iov_base
= host_msg
;
3514 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3518 ret
= target_to_host_cmsg(&msg
, msgp
);
3520 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3524 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3525 if (!is_error(ret
)) {
3527 if (fd_trans_host_to_target_data(fd
)) {
3528 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3531 ret
= host_to_target_cmsg(msgp
, &msg
);
3533 if (!is_error(ret
)) {
3534 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3535 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3536 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3537 msg
.msg_name
, msg
.msg_namelen
);
3549 unlock_iovec(vec
, target_vec
, count
, !send
);
3554 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3555 int flags
, int send
)
3558 struct target_msghdr
*msgp
;
3560 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3564 return -TARGET_EFAULT
;
3566 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3567 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3571 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3572 * so it might not have this *mmsg-specific flag either.
3574 #ifndef MSG_WAITFORONE
3575 #define MSG_WAITFORONE 0x10000
3578 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3579 unsigned int vlen
, unsigned int flags
,
3582 struct target_mmsghdr
*mmsgp
;
3586 if (vlen
> UIO_MAXIOV
) {
3590 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3592 return -TARGET_EFAULT
;
3595 for (i
= 0; i
< vlen
; i
++) {
3596 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3597 if (is_error(ret
)) {
3600 mmsgp
[i
].msg_len
= tswap32(ret
);
3601 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3602 if (flags
& MSG_WAITFORONE
) {
3603 flags
|= MSG_DONTWAIT
;
3607 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3609 /* Return number of datagrams sent if we sent any at all;
3610 * otherwise return the error.
3618 /* do_accept4() Must return target values and target errnos. */
3619 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3620 abi_ulong target_addrlen_addr
, int flags
)
3627 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3629 if (target_addr
== 0) {
3630 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3633 /* linux returns EINVAL if addrlen pointer is invalid */
3634 if (get_user_u32(addrlen
, target_addrlen_addr
))
3635 return -TARGET_EINVAL
;
3637 if ((int)addrlen
< 0) {
3638 return -TARGET_EINVAL
;
3641 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3642 return -TARGET_EINVAL
;
3644 addr
= alloca(addrlen
);
3646 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3647 if (!is_error(ret
)) {
3648 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3649 if (put_user_u32(addrlen
, target_addrlen_addr
))
3650 ret
= -TARGET_EFAULT
;
3655 /* do_getpeername() Must return target values and target errnos. */
3656 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3657 abi_ulong target_addrlen_addr
)
3663 if (get_user_u32(addrlen
, target_addrlen_addr
))
3664 return -TARGET_EFAULT
;
3666 if ((int)addrlen
< 0) {
3667 return -TARGET_EINVAL
;
3670 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3671 return -TARGET_EFAULT
;
3673 addr
= alloca(addrlen
);
3675 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3676 if (!is_error(ret
)) {
3677 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3678 if (put_user_u32(addrlen
, target_addrlen_addr
))
3679 ret
= -TARGET_EFAULT
;
3684 /* do_getsockname() Must return target values and target errnos. */
3685 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3686 abi_ulong target_addrlen_addr
)
3692 if (get_user_u32(addrlen
, target_addrlen_addr
))
3693 return -TARGET_EFAULT
;
3695 if ((int)addrlen
< 0) {
3696 return -TARGET_EINVAL
;
3699 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3700 return -TARGET_EFAULT
;
3702 addr
= alloca(addrlen
);
3704 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3705 if (!is_error(ret
)) {
3706 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3707 if (put_user_u32(addrlen
, target_addrlen_addr
))
3708 ret
= -TARGET_EFAULT
;
3713 /* do_socketpair() Must return target values and target errnos. */
3714 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3715 abi_ulong target_tab_addr
)
3720 target_to_host_sock_type(&type
);
3722 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3723 if (!is_error(ret
)) {
3724 if (put_user_s32(tab
[0], target_tab_addr
)
3725 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3726 ret
= -TARGET_EFAULT
;
3731 /* do_sendto() Must return target values and target errnos. */
3732 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3733 abi_ulong target_addr
, socklen_t addrlen
)
3737 void *copy_msg
= NULL
;
3740 if ((int)addrlen
< 0) {
3741 return -TARGET_EINVAL
;
3744 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3746 return -TARGET_EFAULT
;
3747 if (fd_trans_target_to_host_data(fd
)) {
3748 copy_msg
= host_msg
;
3749 host_msg
= g_malloc(len
);
3750 memcpy(host_msg
, copy_msg
, len
);
3751 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3757 addr
= alloca(addrlen
+1);
3758 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3762 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3764 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3769 host_msg
= copy_msg
;
3771 unlock_user(host_msg
, msg
, 0);
3775 /* do_recvfrom() Must return target values and target errnos. */
3776 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3777 abi_ulong target_addr
,
3778 abi_ulong target_addrlen
)
3785 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3787 return -TARGET_EFAULT
;
3789 if (get_user_u32(addrlen
, target_addrlen
)) {
3790 ret
= -TARGET_EFAULT
;
3793 if ((int)addrlen
< 0) {
3794 ret
= -TARGET_EINVAL
;
3797 addr
= alloca(addrlen
);
3798 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3801 addr
= NULL
; /* To keep compiler quiet. */
3802 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3804 if (!is_error(ret
)) {
3805 if (fd_trans_host_to_target_data(fd
)) {
3806 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3809 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3810 if (put_user_u32(addrlen
, target_addrlen
)) {
3811 ret
= -TARGET_EFAULT
;
3815 unlock_user(host_msg
, msg
, len
);
3818 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, dispatch to the per-call helper */
    switch (num) {
    case SOCKOP_socket:      /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind:        /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect:     /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen:      /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept:      /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4:     /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair:  /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send:        /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv:        /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto:      /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom:    /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown:    /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg:     /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg:     /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg:    /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg:    /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt:  /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt:  /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3911 #define N_SHM_REGIONS 32
3913 static struct shm_region
{
3917 } shm_regions
[N_SHM_REGIONS
];
3919 #ifndef TARGET_SEMID64_DS
3920 /* asm-generic version of this struct */
3921 struct target_semid64_ds
3923 struct target_ipc_perm sem_perm
;
3924 abi_ulong sem_otime
;
3925 #if TARGET_ABI_BITS == 32
3926 abi_ulong __unused1
;
3928 abi_ulong sem_ctime
;
3929 #if TARGET_ABI_BITS == 32
3930 abi_ulong __unused2
;
3932 abi_ulong sem_nsems
;
3933 abi_ulong __unused3
;
3934 abi_ulong __unused4
;
3938 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3939 abi_ulong target_addr
)
3941 struct target_ipc_perm
*target_ip
;
3942 struct target_semid64_ds
*target_sd
;
3944 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3945 return -TARGET_EFAULT
;
3946 target_ip
= &(target_sd
->sem_perm
);
3947 host_ip
->__key
= tswap32(target_ip
->__key
);
3948 host_ip
->uid
= tswap32(target_ip
->uid
);
3949 host_ip
->gid
= tswap32(target_ip
->gid
);
3950 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3951 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3952 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3953 host_ip
->mode
= tswap32(target_ip
->mode
);
3955 host_ip
->mode
= tswap16(target_ip
->mode
);
3957 #if defined(TARGET_PPC)
3958 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3960 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3962 unlock_user_struct(target_sd
, target_addr
, 0);
3966 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3967 struct ipc_perm
*host_ip
)
3969 struct target_ipc_perm
*target_ip
;
3970 struct target_semid64_ds
*target_sd
;
3972 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3973 return -TARGET_EFAULT
;
3974 target_ip
= &(target_sd
->sem_perm
);
3975 target_ip
->__key
= tswap32(host_ip
->__key
);
3976 target_ip
->uid
= tswap32(host_ip
->uid
);
3977 target_ip
->gid
= tswap32(host_ip
->gid
);
3978 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3979 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3980 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3981 target_ip
->mode
= tswap32(host_ip
->mode
);
3983 target_ip
->mode
= tswap16(host_ip
->mode
);
3985 #if defined(TARGET_PPC)
3986 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3988 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3990 unlock_user_struct(target_sd
, target_addr
, 1);
3994 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3995 abi_ulong target_addr
)
3997 struct target_semid64_ds
*target_sd
;
3999 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4000 return -TARGET_EFAULT
;
4001 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4002 return -TARGET_EFAULT
;
4003 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4004 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4005 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4006 unlock_user_struct(target_sd
, target_addr
, 0);
4010 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4011 struct semid_ds
*host_sd
)
4013 struct target_semid64_ds
*target_sd
;
4015 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4016 return -TARGET_EFAULT
;
4017 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4018 return -TARGET_EFAULT
;
4019 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4020 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4021 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4022 unlock_user_struct(target_sd
, target_addr
, 1);
4026 struct target_seminfo
{
4039 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4040 struct seminfo
*host_seminfo
)
4042 struct target_seminfo
*target_seminfo
;
4043 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4044 return -TARGET_EFAULT
;
4045 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4046 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4047 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4048 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4049 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4050 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4051 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4052 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4053 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4054 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4055 unlock_user_struct(target_seminfo
, target_addr
, 1);
4061 struct semid_ds
*buf
;
4062 unsigned short *array
;
4063 struct seminfo
*__buf
;
4066 union target_semun
{
4073 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4074 abi_ulong target_addr
)
4077 unsigned short *array
;
4079 struct semid_ds semid_ds
;
4082 semun
.buf
= &semid_ds
;
4084 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4086 return get_errno(ret
);
4088 nsems
= semid_ds
.sem_nsems
;
4090 *host_array
= g_try_new(unsigned short, nsems
);
4092 return -TARGET_ENOMEM
;
4094 array
= lock_user(VERIFY_READ
, target_addr
,
4095 nsems
*sizeof(unsigned short), 1);
4097 g_free(*host_array
);
4098 return -TARGET_EFAULT
;
4101 for(i
=0; i
<nsems
; i
++) {
4102 __get_user((*host_array
)[i
], &array
[i
]);
4104 unlock_user(array
, target_addr
, 0);
4109 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4110 unsigned short **host_array
)
4113 unsigned short *array
;
4115 struct semid_ds semid_ds
;
4118 semun
.buf
= &semid_ds
;
4120 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4122 return get_errno(ret
);
4124 nsems
= semid_ds
.sem_nsems
;
4126 array
= lock_user(VERIFY_WRITE
, target_addr
,
4127 nsems
*sizeof(unsigned short), 0);
4129 return -TARGET_EFAULT
;
4131 for(i
=0; i
<nsems
; i
++) {
4132 __put_user((*host_array
)[i
], &array
[i
]);
4134 g_free(*host_array
);
4135 unlock_user(array
, target_addr
, 1);
4140 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4141 abi_ulong target_arg
)
4143 union target_semun target_su
= { .buf
= target_arg
};
4145 struct semid_ds dsarg
;
4146 unsigned short *array
= NULL
;
4147 struct seminfo seminfo
;
4148 abi_long ret
= -TARGET_EINVAL
;
4155 /* In 64 bit cross-endian situations, we will erroneously pick up
4156 * the wrong half of the union for the "val" element. To rectify
4157 * this, the entire 8-byte structure is byteswapped, followed by
4158 * a swap of the 4 byte val field. In other cases, the data is
4159 * already in proper host byte order. */
4160 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4161 target_su
.buf
= tswapal(target_su
.buf
);
4162 arg
.val
= tswap32(target_su
.val
);
4164 arg
.val
= target_su
.val
;
4166 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4170 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4174 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4175 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4182 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4186 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4187 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4193 arg
.__buf
= &seminfo
;
4194 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4195 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4203 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4210 struct target_sembuf
{
4211 unsigned short sem_num
;
4216 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4217 abi_ulong target_addr
,
4220 struct target_sembuf
*target_sembuf
;
4223 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4224 nsops
*sizeof(struct target_sembuf
), 1);
4226 return -TARGET_EFAULT
;
4228 for(i
=0; i
<nsops
; i
++) {
4229 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4230 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4231 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4234 unlock_user(target_sembuf
, target_addr
, 0);
4239 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4241 struct sembuf sops
[nsops
];
4243 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4244 return -TARGET_EFAULT
;
4246 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4249 struct target_msqid_ds
4251 struct target_ipc_perm msg_perm
;
4252 abi_ulong msg_stime
;
4253 #if TARGET_ABI_BITS == 32
4254 abi_ulong __unused1
;
4256 abi_ulong msg_rtime
;
4257 #if TARGET_ABI_BITS == 32
4258 abi_ulong __unused2
;
4260 abi_ulong msg_ctime
;
4261 #if TARGET_ABI_BITS == 32
4262 abi_ulong __unused3
;
4264 abi_ulong __msg_cbytes
;
4266 abi_ulong msg_qbytes
;
4267 abi_ulong msg_lspid
;
4268 abi_ulong msg_lrpid
;
4269 abi_ulong __unused4
;
4270 abi_ulong __unused5
;
4273 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4274 abi_ulong target_addr
)
4276 struct target_msqid_ds
*target_md
;
4278 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4279 return -TARGET_EFAULT
;
4280 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4281 return -TARGET_EFAULT
;
4282 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4283 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4284 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4285 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4286 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4287 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4288 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4289 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4290 unlock_user_struct(target_md
, target_addr
, 0);
4294 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4295 struct msqid_ds
*host_md
)
4297 struct target_msqid_ds
*target_md
;
4299 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4300 return -TARGET_EFAULT
;
4301 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4302 return -TARGET_EFAULT
;
4303 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4304 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4305 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4306 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4307 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4308 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4309 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4310 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4311 unlock_user_struct(target_md
, target_addr
, 1);
4315 struct target_msginfo
{
4323 unsigned short int msgseg
;
4326 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4327 struct msginfo
*host_msginfo
)
4329 struct target_msginfo
*target_msginfo
;
4330 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4331 return -TARGET_EFAULT
;
4332 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4333 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4334 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4335 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4336 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4337 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4338 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4339 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4340 unlock_user_struct(target_msginfo
, target_addr
, 1);
4344 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4346 struct msqid_ds dsarg
;
4347 struct msginfo msginfo
;
4348 abi_long ret
= -TARGET_EINVAL
;
4356 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4357 return -TARGET_EFAULT
;
4358 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4359 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4360 return -TARGET_EFAULT
;
4363 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4367 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4368 if (host_to_target_msginfo(ptr
, &msginfo
))
4369 return -TARGET_EFAULT
;
4376 struct target_msgbuf
{
4381 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4382 ssize_t msgsz
, int msgflg
)
4384 struct target_msgbuf
*target_mb
;
4385 struct msgbuf
*host_mb
;
4389 return -TARGET_EINVAL
;
4392 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4393 return -TARGET_EFAULT
;
4394 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4396 unlock_user_struct(target_mb
, msgp
, 0);
4397 return -TARGET_ENOMEM
;
4399 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4400 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4401 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4403 unlock_user_struct(target_mb
, msgp
, 0);
4408 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4409 ssize_t msgsz
, abi_long msgtyp
,
4412 struct target_msgbuf
*target_mb
;
4414 struct msgbuf
*host_mb
;
4418 return -TARGET_EINVAL
;
4421 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4422 return -TARGET_EFAULT
;
4424 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4426 ret
= -TARGET_ENOMEM
;
4429 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4432 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4433 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4434 if (!target_mtext
) {
4435 ret
= -TARGET_EFAULT
;
4438 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4439 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4442 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4446 unlock_user_struct(target_mb
, msgp
, 1);
4451 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4452 abi_ulong target_addr
)
4454 struct target_shmid_ds
*target_sd
;
4456 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4457 return -TARGET_EFAULT
;
4458 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4459 return -TARGET_EFAULT
;
4460 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4461 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4462 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4463 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4464 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4465 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4466 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4467 unlock_user_struct(target_sd
, target_addr
, 0);
4471 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4472 struct shmid_ds
*host_sd
)
4474 struct target_shmid_ds
*target_sd
;
4476 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4477 return -TARGET_EFAULT
;
4478 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4479 return -TARGET_EFAULT
;
4480 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4481 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4482 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4483 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4484 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4485 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4486 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4487 unlock_user_struct(target_sd
, target_addr
, 1);
4491 struct target_shminfo
{
4499 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4500 struct shminfo
*host_shminfo
)
4502 struct target_shminfo
*target_shminfo
;
4503 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4504 return -TARGET_EFAULT
;
4505 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4506 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4507 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4508 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4509 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4510 unlock_user_struct(target_shminfo
, target_addr
, 1);
4514 struct target_shm_info
{
4519 abi_ulong swap_attempts
;
4520 abi_ulong swap_successes
;
4523 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4524 struct shm_info
*host_shm_info
)
4526 struct target_shm_info
*target_shm_info
;
4527 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4528 return -TARGET_EFAULT
;
4529 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4530 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4531 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4532 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4533 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4534 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4535 unlock_user_struct(target_shm_info
, target_addr
, 1);
4539 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4541 struct shmid_ds dsarg
;
4542 struct shminfo shminfo
;
4543 struct shm_info shm_info
;
4544 abi_long ret
= -TARGET_EINVAL
;
4552 if (target_to_host_shmid_ds(&dsarg
, buf
))
4553 return -TARGET_EFAULT
;
4554 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4555 if (host_to_target_shmid_ds(buf
, &dsarg
))
4556 return -TARGET_EFAULT
;
4559 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4560 if (host_to_target_shminfo(buf
, &shminfo
))
4561 return -TARGET_EFAULT
;
4564 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4565 if (host_to_target_shm_info(buf
, &shm_info
))
4566 return -TARGET_EFAULT
;
4571 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4578 #ifndef TARGET_FORCE_SHMLBA
4579 /* For most architectures, SHMLBA is the same as the page size;
4580 * some architectures have larger values, in which case they should
4581 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4582 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4583 * and defining its own value for SHMLBA.
4585 * The kernel also permits SHMLBA to be set by the architecture to a
4586 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4587 * this means that addresses are rounded to the large size if
4588 * SHM_RND is set but addresses not aligned to that size are not rejected
4589 * as long as they are at least page-aligned. Since the only architecture
4590 * which uses this is ia64 this code doesn't provide for that oddity.
4592 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4594 return TARGET_PAGE_SIZE
;
4598 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4599 int shmid
, abi_ulong shmaddr
, int shmflg
)
4603 struct shmid_ds shm_info
;
4607 /* find out the length of the shared memory segment */
4608 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4609 if (is_error(ret
)) {
4610 /* can't get length, bail out */
4614 shmlba
= target_shmlba(cpu_env
);
4616 if (shmaddr
& (shmlba
- 1)) {
4617 if (shmflg
& SHM_RND
) {
4618 shmaddr
&= ~(shmlba
- 1);
4620 return -TARGET_EINVAL
;
4627 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4629 abi_ulong mmap_start
;
4631 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4633 if (mmap_start
== -1) {
4635 host_raddr
= (void *)-1;
4637 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4640 if (host_raddr
== (void *)-1) {
4642 return get_errno((long)host_raddr
);
4644 raddr
=h2g((unsigned long)host_raddr
);
4646 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4647 PAGE_VALID
| PAGE_READ
|
4648 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4650 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4651 if (!shm_regions
[i
].in_use
) {
4652 shm_regions
[i
].in_use
= true;
4653 shm_regions
[i
].start
= raddr
;
4654 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4664 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4668 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4669 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4670 shm_regions
[i
].in_use
= false;
4671 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4676 return get_errno(shmdt(g2h(shmaddr
)));
4679 #ifdef TARGET_NR_ipc
4680 /* ??? This only works with linear mappings. */
4681 /* do_ipc() must return target values and target errnos. */
4682 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4683 unsigned int call
, abi_long first
,
4684 abi_long second
, abi_long third
,
4685 abi_long ptr
, abi_long fifth
)
4690 version
= call
>> 16;
4695 ret
= do_semop(first
, ptr
, second
);
4699 ret
= get_errno(semget(first
, second
, third
));
4702 case IPCOP_semctl
: {
4703 /* The semun argument to semctl is passed by value, so dereference the
4706 get_user_ual(atptr
, ptr
);
4707 ret
= do_semctl(first
, second
, third
, atptr
);
4712 ret
= get_errno(msgget(first
, second
));
4716 ret
= do_msgsnd(first
, ptr
, second
, third
);
4720 ret
= do_msgctl(first
, second
, ptr
);
4727 struct target_ipc_kludge
{
4732 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4733 ret
= -TARGET_EFAULT
;
4737 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4739 unlock_user_struct(tmp
, ptr
, 0);
4743 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4752 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4753 if (is_error(raddr
))
4754 return get_errno(raddr
);
4755 if (put_user_ual(raddr
, third
))
4756 return -TARGET_EFAULT
;
4760 ret
= -TARGET_EINVAL
;
4765 ret
= do_shmdt(ptr
);
4769 /* IPC_* flag values are the same on all linux platforms */
4770 ret
= get_errno(shmget(first
, second
, third
));
4773 /* IPC_* and SHM_* command values are the same on all linux platforms */
4775 ret
= do_shmctl(first
, second
, ptr
);
4778 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4779 ret
= -TARGET_ENOSYS
;
4786 /* kernel structure types definitions */
4788 #define STRUCT(name, ...) STRUCT_ ## name,
4789 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4791 #include "syscall_types.h"
4795 #undef STRUCT_SPECIAL
4797 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4798 #define STRUCT_SPECIAL(name)
4799 #include "syscall_types.h"
4801 #undef STRUCT_SPECIAL
4803 typedef struct IOCTLEntry IOCTLEntry
;
4805 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4806 int fd
, int cmd
, abi_long arg
);
4810 unsigned int host_cmd
;
4813 do_ioctl_fn
*do_ioctl
;
4814 const argtype arg_type
[5];
4817 #define IOC_R 0x0001
4818 #define IOC_W 0x0002
4819 #define IOC_RW (IOC_R | IOC_W)
4821 #define MAX_STRUCT_SIZE 4096
4823 #ifdef CONFIG_FIEMAP
4824 /* So fiemap access checks don't overflow on 32 bit systems.
4825 * This is very slightly smaller than the limit imposed by
4826 * the underlying kernel.
4828 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4829 / sizeof(struct fiemap_extent))
4831 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4832 int fd
, int cmd
, abi_long arg
)
4834 /* The parameter for this ioctl is a struct fiemap followed
4835 * by an array of struct fiemap_extent whose size is set
4836 * in fiemap->fm_extent_count. The array is filled in by the
4839 int target_size_in
, target_size_out
;
4841 const argtype
*arg_type
= ie
->arg_type
;
4842 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4845 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4849 assert(arg_type
[0] == TYPE_PTR
);
4850 assert(ie
->access
== IOC_RW
);
4852 target_size_in
= thunk_type_size(arg_type
, 0);
4853 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4855 return -TARGET_EFAULT
;
4857 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4858 unlock_user(argptr
, arg
, 0);
4859 fm
= (struct fiemap
*)buf_temp
;
4860 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4861 return -TARGET_EINVAL
;
4864 outbufsz
= sizeof (*fm
) +
4865 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4867 if (outbufsz
> MAX_STRUCT_SIZE
) {
4868 /* We can't fit all the extents into the fixed size buffer.
4869 * Allocate one that is large enough and use it instead.
4871 fm
= g_try_malloc(outbufsz
);
4873 return -TARGET_ENOMEM
;
4875 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4878 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4879 if (!is_error(ret
)) {
4880 target_size_out
= target_size_in
;
4881 /* An extent_count of 0 means we were only counting the extents
4882 * so there are no structs to copy
4884 if (fm
->fm_extent_count
!= 0) {
4885 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4887 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4889 ret
= -TARGET_EFAULT
;
4891 /* Convert the struct fiemap */
4892 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4893 if (fm
->fm_extent_count
!= 0) {
4894 p
= argptr
+ target_size_in
;
4895 /* ...and then all the struct fiemap_extents */
4896 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4897 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4902 unlock_user(argptr
, arg
, target_size_out
);
4912 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4913 int fd
, int cmd
, abi_long arg
)
4915 const argtype
*arg_type
= ie
->arg_type
;
4919 struct ifconf
*host_ifconf
;
4921 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4922 int target_ifreq_size
;
4927 abi_long target_ifc_buf
;
4931 assert(arg_type
[0] == TYPE_PTR
);
4932 assert(ie
->access
== IOC_RW
);
4935 target_size
= thunk_type_size(arg_type
, 0);
4937 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4939 return -TARGET_EFAULT
;
4940 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4941 unlock_user(argptr
, arg
, 0);
4943 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4944 target_ifc_len
= host_ifconf
->ifc_len
;
4945 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4947 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4948 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4949 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4951 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4952 if (outbufsz
> MAX_STRUCT_SIZE
) {
4953 /* We can't fit all the extents into the fixed size buffer.
4954 * Allocate one that is large enough and use it instead.
4956 host_ifconf
= malloc(outbufsz
);
4958 return -TARGET_ENOMEM
;
4960 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4963 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4965 host_ifconf
->ifc_len
= host_ifc_len
;
4966 host_ifconf
->ifc_buf
= host_ifc_buf
;
4968 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4969 if (!is_error(ret
)) {
4970 /* convert host ifc_len to target ifc_len */
4972 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4973 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4974 host_ifconf
->ifc_len
= target_ifc_len
;
4976 /* restore target ifc_buf */
4978 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4980 /* copy struct ifconf to target user */
4982 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4984 return -TARGET_EFAULT
;
4985 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4986 unlock_user(argptr
, arg
, target_size
);
4988 /* copy ifreq[] to target user */
4990 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4991 for (i
= 0; i
< nb_ifreq
; i
++) {
4992 thunk_convert(argptr
+ i
* target_ifreq_size
,
4993 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4994 ifreq_arg_type
, THUNK_TARGET
);
4996 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5006 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5007 int cmd
, abi_long arg
)
5010 struct dm_ioctl
*host_dm
;
5011 abi_long guest_data
;
5012 uint32_t guest_data_size
;
5014 const argtype
*arg_type
= ie
->arg_type
;
5016 void *big_buf
= NULL
;
5020 target_size
= thunk_type_size(arg_type
, 0);
5021 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5023 ret
= -TARGET_EFAULT
;
5026 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5027 unlock_user(argptr
, arg
, 0);
5029 /* buf_temp is too small, so fetch things into a bigger buffer */
5030 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5031 memcpy(big_buf
, buf_temp
, target_size
);
5035 guest_data
= arg
+ host_dm
->data_start
;
5036 if ((guest_data
- arg
) < 0) {
5037 ret
= -TARGET_EINVAL
;
5040 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5041 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5043 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5045 ret
= -TARGET_EFAULT
;
5049 switch (ie
->host_cmd
) {
5051 case DM_LIST_DEVICES
:
5054 case DM_DEV_SUSPEND
:
5057 case DM_TABLE_STATUS
:
5058 case DM_TABLE_CLEAR
:
5060 case DM_LIST_VERSIONS
:
5064 case DM_DEV_SET_GEOMETRY
:
5065 /* data contains only strings */
5066 memcpy(host_data
, argptr
, guest_data_size
);
5069 memcpy(host_data
, argptr
, guest_data_size
);
5070 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5074 void *gspec
= argptr
;
5075 void *cur_data
= host_data
;
5076 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5077 int spec_size
= thunk_type_size(arg_type
, 0);
5080 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5081 struct dm_target_spec
*spec
= cur_data
;
5085 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5086 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5088 spec
->next
= sizeof(*spec
) + slen
;
5089 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5091 cur_data
+= spec
->next
;
5096 ret
= -TARGET_EINVAL
;
5097 unlock_user(argptr
, guest_data
, 0);
5100 unlock_user(argptr
, guest_data
, 0);
5102 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5103 if (!is_error(ret
)) {
5104 guest_data
= arg
+ host_dm
->data_start
;
5105 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5106 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5107 switch (ie
->host_cmd
) {
5112 case DM_DEV_SUSPEND
:
5115 case DM_TABLE_CLEAR
:
5117 case DM_DEV_SET_GEOMETRY
:
5118 /* no return data */
5120 case DM_LIST_DEVICES
:
5122 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5123 uint32_t remaining_data
= guest_data_size
;
5124 void *cur_data
= argptr
;
5125 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5126 int nl_size
= 12; /* can't use thunk_size due to alignment */
5129 uint32_t next
= nl
->next
;
5131 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5133 if (remaining_data
< nl
->next
) {
5134 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5137 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5138 strcpy(cur_data
+ nl_size
, nl
->name
);
5139 cur_data
+= nl
->next
;
5140 remaining_data
-= nl
->next
;
5144 nl
= (void*)nl
+ next
;
5149 case DM_TABLE_STATUS
:
5151 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5152 void *cur_data
= argptr
;
5153 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5154 int spec_size
= thunk_type_size(arg_type
, 0);
5157 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5158 uint32_t next
= spec
->next
;
5159 int slen
= strlen((char*)&spec
[1]) + 1;
5160 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5161 if (guest_data_size
< spec
->next
) {
5162 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5165 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5166 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5167 cur_data
= argptr
+ spec
->next
;
5168 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5174 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5175 int count
= *(uint32_t*)hdata
;
5176 uint64_t *hdev
= hdata
+ 8;
5177 uint64_t *gdev
= argptr
+ 8;
5180 *(uint32_t*)argptr
= tswap32(count
);
5181 for (i
= 0; i
< count
; i
++) {
5182 *gdev
= tswap64(*hdev
);
5188 case DM_LIST_VERSIONS
:
5190 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5191 uint32_t remaining_data
= guest_data_size
;
5192 void *cur_data
= argptr
;
5193 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5194 int vers_size
= thunk_type_size(arg_type
, 0);
5197 uint32_t next
= vers
->next
;
5199 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5201 if (remaining_data
< vers
->next
) {
5202 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5205 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5206 strcpy(cur_data
+ vers_size
, vers
->name
);
5207 cur_data
+= vers
->next
;
5208 remaining_data
-= vers
->next
;
5212 vers
= (void*)vers
+ next
;
5217 unlock_user(argptr
, guest_data
, 0);
5218 ret
= -TARGET_EINVAL
;
5221 unlock_user(argptr
, guest_data
, guest_data_size
);
5223 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5225 ret
= -TARGET_EFAULT
;
5228 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5229 unlock_user(argptr
, arg
, target_size
);
5236 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5237 int cmd
, abi_long arg
)
5241 const argtype
*arg_type
= ie
->arg_type
;
5242 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5245 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5246 struct blkpg_partition host_part
;
5248 /* Read and convert blkpg */
5250 target_size
= thunk_type_size(arg_type
, 0);
5251 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5253 ret
= -TARGET_EFAULT
;
5256 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5257 unlock_user(argptr
, arg
, 0);
5259 switch (host_blkpg
->op
) {
5260 case BLKPG_ADD_PARTITION
:
5261 case BLKPG_DEL_PARTITION
:
5262 /* payload is struct blkpg_partition */
5265 /* Unknown opcode */
5266 ret
= -TARGET_EINVAL
;
5270 /* Read and convert blkpg->data */
5271 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5272 target_size
= thunk_type_size(part_arg_type
, 0);
5273 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5275 ret
= -TARGET_EFAULT
;
5278 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5279 unlock_user(argptr
, arg
, 0);
5281 /* Swizzle the data pointer to our local copy and call! */
5282 host_blkpg
->data
= &host_part
;
5283 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5289 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5290 int fd
, int cmd
, abi_long arg
)
5292 const argtype
*arg_type
= ie
->arg_type
;
5293 const StructEntry
*se
;
5294 const argtype
*field_types
;
5295 const int *dst_offsets
, *src_offsets
;
5298 abi_ulong
*target_rt_dev_ptr
;
5299 unsigned long *host_rt_dev_ptr
;
5303 assert(ie
->access
== IOC_W
);
5304 assert(*arg_type
== TYPE_PTR
);
5306 assert(*arg_type
== TYPE_STRUCT
);
5307 target_size
= thunk_type_size(arg_type
, 0);
5308 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5310 return -TARGET_EFAULT
;
5313 assert(*arg_type
== (int)STRUCT_rtentry
);
5314 se
= struct_entries
+ *arg_type
++;
5315 assert(se
->convert
[0] == NULL
);
5316 /* convert struct here to be able to catch rt_dev string */
5317 field_types
= se
->field_types
;
5318 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5319 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5320 for (i
= 0; i
< se
->nb_fields
; i
++) {
5321 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5322 assert(*field_types
== TYPE_PTRVOID
);
5323 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5324 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5325 if (*target_rt_dev_ptr
!= 0) {
5326 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5327 tswapal(*target_rt_dev_ptr
));
5328 if (!*host_rt_dev_ptr
) {
5329 unlock_user(argptr
, arg
, 0);
5330 return -TARGET_EFAULT
;
5333 *host_rt_dev_ptr
= 0;
5338 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5339 argptr
+ src_offsets
[i
],
5340 field_types
, THUNK_HOST
);
5342 unlock_user(argptr
, arg
, 0);
5344 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5345 if (*host_rt_dev_ptr
!= 0) {
5346 unlock_user((void *)*host_rt_dev_ptr
,
5347 *target_rt_dev_ptr
, 0);
5352 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5353 int fd
, int cmd
, abi_long arg
)
5355 int sig
= target_to_host_signal(arg
);
5356 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5359 static IOCTLEntry ioctl_entries
[] = {
5360 #define IOCTL(cmd, access, ...) \
5361 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5362 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5363 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5368 /* ??? Implement proper locking for ioctls. */
5369 /* do_ioctl() Must return target values and target errnos. */
5370 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5372 const IOCTLEntry
*ie
;
5373 const argtype
*arg_type
;
5375 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5381 if (ie
->target_cmd
== 0) {
5382 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5383 return -TARGET_ENOSYS
;
5385 if (ie
->target_cmd
== cmd
)
5389 arg_type
= ie
->arg_type
;
5391 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5394 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5397 switch(arg_type
[0]) {
5400 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5404 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5408 target_size
= thunk_type_size(arg_type
, 0);
5409 switch(ie
->access
) {
5411 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5412 if (!is_error(ret
)) {
5413 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5415 return -TARGET_EFAULT
;
5416 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5417 unlock_user(argptr
, arg
, target_size
);
5421 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5423 return -TARGET_EFAULT
;
5424 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5425 unlock_user(argptr
, arg
, 0);
5426 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5430 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5432 return -TARGET_EFAULT
;
5433 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5434 unlock_user(argptr
, arg
, 0);
5435 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5436 if (!is_error(ret
)) {
5437 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5439 return -TARGET_EFAULT
;
5440 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5441 unlock_user(argptr
, arg
, target_size
);
5447 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5448 (long)cmd
, arg_type
[0]);
5449 ret
= -TARGET_ENOSYS
;
5455 static const bitmask_transtbl iflag_tbl
[] = {
5456 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5457 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5458 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5459 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5460 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5461 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5462 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5463 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5464 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5465 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5466 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5467 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5468 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5469 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5473 static const bitmask_transtbl oflag_tbl
[] = {
5474 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5475 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5476 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5477 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5478 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5479 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5480 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5481 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5482 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5483 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5484 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5485 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5486 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5487 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5488 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5489 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5490 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5491 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5492 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5493 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5494 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5495 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5496 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5497 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5501 static const bitmask_transtbl cflag_tbl
[] = {
5502 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5503 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5504 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5505 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5506 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5507 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5508 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5509 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5510 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5511 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5512 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5513 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5514 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5515 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5516 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5517 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5518 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5519 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5520 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5521 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5522 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5523 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5524 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5525 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5526 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5527 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5528 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5529 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5530 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5531 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5532 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5536 static const bitmask_transtbl lflag_tbl
[] = {
5537 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5538 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5539 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5540 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5541 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5542 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5543 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5544 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5545 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5546 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5547 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5548 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5549 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5550 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5551 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5555 static void target_to_host_termios (void *dst
, const void *src
)
5557 struct host_termios
*host
= dst
;
5558 const struct target_termios
*target
= src
;
5561 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5563 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5565 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5567 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5568 host
->c_line
= target
->c_line
;
5570 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5571 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5572 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5573 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5574 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5575 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5576 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5577 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5578 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5579 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5580 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5581 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5582 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5583 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5584 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5585 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5586 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5587 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5590 static void host_to_target_termios (void *dst
, const void *src
)
5592 struct target_termios
*target
= dst
;
5593 const struct host_termios
*host
= src
;
5596 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5598 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5600 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5602 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5603 target
->c_line
= host
->c_line
;
5605 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5606 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5607 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5608 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5609 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5610 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5611 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5612 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5613 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5614 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5615 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5616 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5617 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5618 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5619 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5620 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5621 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5622 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5625 static const StructEntry struct_termios_def
= {
5626 .convert
= { host_to_target_termios
, target_to_host_termios
},
5627 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5628 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5631 static bitmask_transtbl mmap_flags_tbl
[] = {
5632 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5633 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5634 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5635 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5636 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5637 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5638 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5639 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5640 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5645 #if defined(TARGET_I386)
5647 /* NOTE: there is really one LDT for all the threads */
5648 static uint8_t *ldt_table
;
5650 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5657 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5658 if (size
> bytecount
)
5660 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5662 return -TARGET_EFAULT
;
5663 /* ??? Should this by byteswapped? */
5664 memcpy(p
, ldt_table
, size
);
5665 unlock_user(p
, ptr
, size
);
5669 /* XXX: add locking support */
5670 static abi_long
write_ldt(CPUX86State
*env
,
5671 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5673 struct target_modify_ldt_ldt_s ldt_info
;
5674 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5675 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5676 int seg_not_present
, useable
, lm
;
5677 uint32_t *lp
, entry_1
, entry_2
;
5679 if (bytecount
!= sizeof(ldt_info
))
5680 return -TARGET_EINVAL
;
5681 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5682 return -TARGET_EFAULT
;
5683 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5684 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5685 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5686 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5687 unlock_user_struct(target_ldt_info
, ptr
, 0);
5689 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5690 return -TARGET_EINVAL
;
5691 seg_32bit
= ldt_info
.flags
& 1;
5692 contents
= (ldt_info
.flags
>> 1) & 3;
5693 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5694 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5695 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5696 useable
= (ldt_info
.flags
>> 6) & 1;
5700 lm
= (ldt_info
.flags
>> 7) & 1;
5702 if (contents
== 3) {
5704 return -TARGET_EINVAL
;
5705 if (seg_not_present
== 0)
5706 return -TARGET_EINVAL
;
5708 /* allocate the LDT */
5710 env
->ldt
.base
= target_mmap(0,
5711 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5712 PROT_READ
|PROT_WRITE
,
5713 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5714 if (env
->ldt
.base
== -1)
5715 return -TARGET_ENOMEM
;
5716 memset(g2h(env
->ldt
.base
), 0,
5717 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5718 env
->ldt
.limit
= 0xffff;
5719 ldt_table
= g2h(env
->ldt
.base
);
5722 /* NOTE: same code as Linux kernel */
5723 /* Allow LDTs to be cleared by the user. */
5724 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5727 read_exec_only
== 1 &&
5729 limit_in_pages
== 0 &&
5730 seg_not_present
== 1 &&
5738 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5739 (ldt_info
.limit
& 0x0ffff);
5740 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5741 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5742 (ldt_info
.limit
& 0xf0000) |
5743 ((read_exec_only
^ 1) << 9) |
5745 ((seg_not_present
^ 1) << 15) |
5747 (limit_in_pages
<< 23) |
5751 entry_2
|= (useable
<< 20);
5753 /* Install the new entry ... */
5755 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5756 lp
[0] = tswap32(entry_1
);
5757 lp
[1] = tswap32(entry_2
);
5761 /* specific and weird i386 syscalls */
5762 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5763 unsigned long bytecount
)
5769 ret
= read_ldt(ptr
, bytecount
);
5772 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5775 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5778 ret
= -TARGET_ENOSYS
;
5784 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5785 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5787 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5788 struct target_modify_ldt_ldt_s ldt_info
;
5789 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5790 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5791 int seg_not_present
, useable
, lm
;
5792 uint32_t *lp
, entry_1
, entry_2
;
5795 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5796 if (!target_ldt_info
)
5797 return -TARGET_EFAULT
;
5798 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5799 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5800 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5801 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5802 if (ldt_info
.entry_number
== -1) {
5803 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5804 if (gdt_table
[i
] == 0) {
5805 ldt_info
.entry_number
= i
;
5806 target_ldt_info
->entry_number
= tswap32(i
);
5811 unlock_user_struct(target_ldt_info
, ptr
, 1);
5813 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5814 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5815 return -TARGET_EINVAL
;
5816 seg_32bit
= ldt_info
.flags
& 1;
5817 contents
= (ldt_info
.flags
>> 1) & 3;
5818 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5819 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5820 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5821 useable
= (ldt_info
.flags
>> 6) & 1;
5825 lm
= (ldt_info
.flags
>> 7) & 1;
5828 if (contents
== 3) {
5829 if (seg_not_present
== 0)
5830 return -TARGET_EINVAL
;
5833 /* NOTE: same code as Linux kernel */
5834 /* Allow LDTs to be cleared by the user. */
5835 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5836 if ((contents
== 0 &&
5837 read_exec_only
== 1 &&
5839 limit_in_pages
== 0 &&
5840 seg_not_present
== 1 &&
5848 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5849 (ldt_info
.limit
& 0x0ffff);
5850 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5851 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5852 (ldt_info
.limit
& 0xf0000) |
5853 ((read_exec_only
^ 1) << 9) |
5855 ((seg_not_present
^ 1) << 15) |
5857 (limit_in_pages
<< 23) |
5862 /* Install the new entry ... */
5864 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5865 lp
[0] = tswap32(entry_1
);
5866 lp
[1] = tswap32(entry_2
);
5870 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5872 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5873 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5874 uint32_t base_addr
, limit
, flags
;
5875 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5876 int seg_not_present
, useable
, lm
;
5877 uint32_t *lp
, entry_1
, entry_2
;
5879 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5880 if (!target_ldt_info
)
5881 return -TARGET_EFAULT
;
5882 idx
= tswap32(target_ldt_info
->entry_number
);
5883 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5884 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5885 unlock_user_struct(target_ldt_info
, ptr
, 1);
5886 return -TARGET_EINVAL
;
5888 lp
= (uint32_t *)(gdt_table
+ idx
);
5889 entry_1
= tswap32(lp
[0]);
5890 entry_2
= tswap32(lp
[1]);
5892 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5893 contents
= (entry_2
>> 10) & 3;
5894 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5895 seg_32bit
= (entry_2
>> 22) & 1;
5896 limit_in_pages
= (entry_2
>> 23) & 1;
5897 useable
= (entry_2
>> 20) & 1;
5901 lm
= (entry_2
>> 21) & 1;
5903 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5904 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5905 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5906 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5907 base_addr
= (entry_1
>> 16) |
5908 (entry_2
& 0xff000000) |
5909 ((entry_2
& 0xff) << 16);
5910 target_ldt_info
->base_addr
= tswapal(base_addr
);
5911 target_ldt_info
->limit
= tswap32(limit
);
5912 target_ldt_info
->flags
= tswap32(flags
);
5913 unlock_user_struct(target_ldt_info
, ptr
, 1);
5916 #endif /* TARGET_I386 && TARGET_ABI32 */
5918 #ifndef TARGET_ABI32
5919 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5926 case TARGET_ARCH_SET_GS
:
5927 case TARGET_ARCH_SET_FS
:
5928 if (code
== TARGET_ARCH_SET_GS
)
5932 cpu_x86_load_seg(env
, idx
, 0);
5933 env
->segs
[idx
].base
= addr
;
5935 case TARGET_ARCH_GET_GS
:
5936 case TARGET_ARCH_GET_FS
:
5937 if (code
== TARGET_ARCH_GET_GS
)
5941 val
= env
->segs
[idx
].base
;
5942 if (put_user(val
, addr
, abi_ulong
))
5943 ret
= -TARGET_EFAULT
;
5946 ret
= -TARGET_EINVAL
;
5953 #endif /* defined(TARGET_I386) */
5955 #define NEW_STACK_SIZE 0x40000
5958 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5961 pthread_mutex_t mutex
;
5962 pthread_cond_t cond
;
5965 abi_ulong child_tidptr
;
5966 abi_ulong parent_tidptr
;
5970 static void *clone_func(void *arg
)
5972 new_thread_info
*info
= arg
;
5977 rcu_register_thread();
5979 cpu
= ENV_GET_CPU(env
);
5981 ts
= (TaskState
*)cpu
->opaque
;
5982 info
->tid
= gettid();
5983 cpu
->host_tid
= info
->tid
;
5985 if (info
->child_tidptr
)
5986 put_user_u32(info
->tid
, info
->child_tidptr
);
5987 if (info
->parent_tidptr
)
5988 put_user_u32(info
->tid
, info
->parent_tidptr
);
5989 /* Enable signals. */
5990 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5991 /* Signal to the parent that we're ready. */
5992 pthread_mutex_lock(&info
->mutex
);
5993 pthread_cond_broadcast(&info
->cond
);
5994 pthread_mutex_unlock(&info
->mutex
);
5995 /* Wait until the parent has finshed initializing the tls state. */
5996 pthread_mutex_lock(&clone_lock
);
5997 pthread_mutex_unlock(&clone_lock
);
6003 /* do_fork() Must return host values and target errnos (unlike most
6004 do_*() functions). */
6005 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6006 abi_ulong parent_tidptr
, target_ulong newtls
,
6007 abi_ulong child_tidptr
)
6009 CPUState
*cpu
= ENV_GET_CPU(env
);
6013 CPUArchState
*new_env
;
6014 unsigned int nptl_flags
;
6017 /* Emulate vfork() with fork() */
6018 if (flags
& CLONE_VFORK
)
6019 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6021 if (flags
& CLONE_VM
) {
6022 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6023 new_thread_info info
;
6024 pthread_attr_t attr
;
6026 ts
= g_new0(TaskState
, 1);
6027 init_task_state(ts
);
6028 /* we create a new CPU instance. */
6029 new_env
= cpu_copy(env
);
6030 /* Init regs that differ from the parent. */
6031 cpu_clone_regs(new_env
, newsp
);
6032 new_cpu
= ENV_GET_CPU(new_env
);
6033 new_cpu
->opaque
= ts
;
6034 ts
->bprm
= parent_ts
->bprm
;
6035 ts
->info
= parent_ts
->info
;
6036 ts
->signal_mask
= parent_ts
->signal_mask
;
6038 flags
&= ~CLONE_NPTL_FLAGS2
;
6040 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
6041 ts
->child_tidptr
= child_tidptr
;
6044 if (nptl_flags
& CLONE_SETTLS
)
6045 cpu_set_tls (new_env
, newtls
);
6047 /* Grab a mutex so that thread setup appears atomic. */
6048 pthread_mutex_lock(&clone_lock
);
6050 memset(&info
, 0, sizeof(info
));
6051 pthread_mutex_init(&info
.mutex
, NULL
);
6052 pthread_mutex_lock(&info
.mutex
);
6053 pthread_cond_init(&info
.cond
, NULL
);
6055 if (nptl_flags
& CLONE_CHILD_SETTID
)
6056 info
.child_tidptr
= child_tidptr
;
6057 if (nptl_flags
& CLONE_PARENT_SETTID
)
6058 info
.parent_tidptr
= parent_tidptr
;
6060 ret
= pthread_attr_init(&attr
);
6061 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6062 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6063 /* It is not safe to deliver signals until the child has finished
6064 initializing, so temporarily block all signals. */
6065 sigfillset(&sigmask
);
6066 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6068 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6069 /* TODO: Free new CPU state if thread creation failed. */
6071 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6072 pthread_attr_destroy(&attr
);
6074 /* Wait for the child to initialize. */
6075 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6077 if (flags
& CLONE_PARENT_SETTID
)
6078 put_user_u32(ret
, parent_tidptr
);
6082 pthread_mutex_unlock(&info
.mutex
);
6083 pthread_cond_destroy(&info
.cond
);
6084 pthread_mutex_destroy(&info
.mutex
);
6085 pthread_mutex_unlock(&clone_lock
);
6087 /* if no CLONE_VM, we consider it is a fork */
6088 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
6089 return -TARGET_EINVAL
;
6092 if (block_signals()) {
6093 return -TARGET_ERESTARTSYS
;
6099 /* Child Process. */
6101 cpu_clone_regs(env
, newsp
);
6103 /* There is a race condition here. The parent process could
6104 theoretically read the TID in the child process before the child
6105 tid is set. This would require using either ptrace
6106 (not implemented) or having *_tidptr to point at a shared memory
6107 mapping. We can't repeat the spinlock hack used above because
6108 the child process gets its own copy of the lock. */
6109 if (flags
& CLONE_CHILD_SETTID
)
6110 put_user_u32(gettid(), child_tidptr
);
6111 if (flags
& CLONE_PARENT_SETTID
)
6112 put_user_u32(gettid(), parent_tidptr
);
6113 ts
= (TaskState
*)cpu
->opaque
;
6114 if (flags
& CLONE_SETTLS
)
6115 cpu_set_tls (env
, newtls
);
6116 if (flags
& CLONE_CHILD_CLEARTID
)
6117 ts
->child_tidptr
= child_tidptr
;
6125 /* warning : doesn't handle linux specific flags... */
6126 static int target_to_host_fcntl_cmd(int cmd
)
6129 case TARGET_F_DUPFD
:
6130 case TARGET_F_GETFD
:
6131 case TARGET_F_SETFD
:
6132 case TARGET_F_GETFL
:
6133 case TARGET_F_SETFL
:
6135 case TARGET_F_GETLK
:
6137 case TARGET_F_SETLK
:
6139 case TARGET_F_SETLKW
:
6141 case TARGET_F_GETOWN
:
6143 case TARGET_F_SETOWN
:
6145 case TARGET_F_GETSIG
:
6147 case TARGET_F_SETSIG
:
6149 #if TARGET_ABI_BITS == 32
6150 case TARGET_F_GETLK64
:
6152 case TARGET_F_SETLK64
:
6154 case TARGET_F_SETLKW64
:
6157 case TARGET_F_SETLEASE
:
6159 case TARGET_F_GETLEASE
:
6161 #ifdef F_DUPFD_CLOEXEC
6162 case TARGET_F_DUPFD_CLOEXEC
:
6163 return F_DUPFD_CLOEXEC
;
6165 case TARGET_F_NOTIFY
:
6168 case TARGET_F_GETOWN_EX
:
6172 case TARGET_F_SETOWN_EX
:
6176 case TARGET_F_SETPIPE_SZ
:
6177 return F_SETPIPE_SZ
;
6178 case TARGET_F_GETPIPE_SZ
:
6179 return F_GETPIPE_SZ
;
6182 return -TARGET_EINVAL
;
6184 return -TARGET_EINVAL
;
6187 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6188 static const bitmask_transtbl flock_tbl
[] = {
6189 TRANSTBL_CONVERT(F_RDLCK
),
6190 TRANSTBL_CONVERT(F_WRLCK
),
6191 TRANSTBL_CONVERT(F_UNLCK
),
6192 TRANSTBL_CONVERT(F_EXLCK
),
6193 TRANSTBL_CONVERT(F_SHLCK
),
6197 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6198 abi_ulong target_flock_addr
)
6200 struct target_flock
*target_fl
;
6203 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6204 return -TARGET_EFAULT
;
6207 __get_user(l_type
, &target_fl
->l_type
);
6208 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6209 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6210 __get_user(fl
->l_start
, &target_fl
->l_start
);
6211 __get_user(fl
->l_len
, &target_fl
->l_len
);
6212 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6213 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6217 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6218 const struct flock64
*fl
)
6220 struct target_flock
*target_fl
;
6223 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6224 return -TARGET_EFAULT
;
6227 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6228 __put_user(l_type
, &target_fl
->l_type
);
6229 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6230 __put_user(fl
->l_start
, &target_fl
->l_start
);
6231 __put_user(fl
->l_len
, &target_fl
->l_len
);
6232 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6233 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6237 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6238 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6240 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6241 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6242 abi_ulong target_flock_addr
)
6244 struct target_eabi_flock64
*target_fl
;
6247 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6248 return -TARGET_EFAULT
;
6251 __get_user(l_type
, &target_fl
->l_type
);
6252 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6253 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6254 __get_user(fl
->l_start
, &target_fl
->l_start
);
6255 __get_user(fl
->l_len
, &target_fl
->l_len
);
6256 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6257 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6261 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6262 const struct flock64
*fl
)
6264 struct target_eabi_flock64
*target_fl
;
6267 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6268 return -TARGET_EFAULT
;
6271 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6272 __put_user(l_type
, &target_fl
->l_type
);
6273 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6274 __put_user(fl
->l_start
, &target_fl
->l_start
);
6275 __put_user(fl
->l_len
, &target_fl
->l_len
);
6276 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6277 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6282 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6283 abi_ulong target_flock_addr
)
6285 struct target_flock64
*target_fl
;
6288 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6289 return -TARGET_EFAULT
;
6292 __get_user(l_type
, &target_fl
->l_type
);
6293 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6294 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6295 __get_user(fl
->l_start
, &target_fl
->l_start
);
6296 __get_user(fl
->l_len
, &target_fl
->l_len
);
6297 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6298 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6302 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6303 const struct flock64
*fl
)
6305 struct target_flock64
*target_fl
;
6308 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6309 return -TARGET_EFAULT
;
6312 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6313 __put_user(l_type
, &target_fl
->l_type
);
6314 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6315 __put_user(fl
->l_start
, &target_fl
->l_start
);
6316 __put_user(fl
->l_len
, &target_fl
->l_len
);
6317 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6318 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6322 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6324 struct flock64 fl64
;
6326 struct f_owner_ex fox
;
6327 struct target_f_owner_ex
*target_fox
;
6330 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6332 if (host_cmd
== -TARGET_EINVAL
)
6336 case TARGET_F_GETLK
:
6337 ret
= copy_from_user_flock(&fl64
, arg
);
6341 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6343 ret
= copy_to_user_flock(arg
, &fl64
);
6347 case TARGET_F_SETLK
:
6348 case TARGET_F_SETLKW
:
6349 ret
= copy_from_user_flock(&fl64
, arg
);
6353 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6356 case TARGET_F_GETLK64
:
6357 ret
= copy_from_user_flock64(&fl64
, arg
);
6361 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6363 ret
= copy_to_user_flock64(arg
, &fl64
);
6366 case TARGET_F_SETLK64
:
6367 case TARGET_F_SETLKW64
:
6368 ret
= copy_from_user_flock64(&fl64
, arg
);
6372 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6375 case TARGET_F_GETFL
:
6376 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6378 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6382 case TARGET_F_SETFL
:
6383 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6384 target_to_host_bitmask(arg
,
6389 case TARGET_F_GETOWN_EX
:
6390 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6392 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6393 return -TARGET_EFAULT
;
6394 target_fox
->type
= tswap32(fox
.type
);
6395 target_fox
->pid
= tswap32(fox
.pid
);
6396 unlock_user_struct(target_fox
, arg
, 1);
6402 case TARGET_F_SETOWN_EX
:
6403 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6404 return -TARGET_EFAULT
;
6405 fox
.type
= tswap32(target_fox
->type
);
6406 fox
.pid
= tswap32(target_fox
->pid
);
6407 unlock_user_struct(target_fox
, arg
, 0);
6408 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6412 case TARGET_F_SETOWN
:
6413 case TARGET_F_GETOWN
:
6414 case TARGET_F_SETSIG
:
6415 case TARGET_F_GETSIG
:
6416 case TARGET_F_SETLEASE
:
6417 case TARGET_F_GETLEASE
:
6418 case TARGET_F_SETPIPE_SZ
:
6419 case TARGET_F_GETPIPE_SZ
:
6420 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6424 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6432 static inline int high2lowuid(int uid
)
6440 static inline int high2lowgid(int gid
)
6448 static inline int low2highuid(int uid
)
6450 if ((int16_t)uid
== -1)
6456 static inline int low2highgid(int gid
)
6458 if ((int16_t)gid
== -1)
6463 static inline int tswapid(int id
)
6468 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6470 #else /* !USE_UID16 */
6471 static inline int high2lowuid(int uid
)
6475 static inline int high2lowgid(int gid
)
6479 static inline int low2highuid(int uid
)
6483 static inline int low2highgid(int gid
)
6487 static inline int tswapid(int id
)
6492 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6494 #endif /* USE_UID16 */
6496 /* We must do direct syscalls for setting UID/GID, because we want to
6497 * implement the Linux system call semantics of "change only for this thread",
6498 * not the libc/POSIX semantics of "change for all threads in process".
6499 * (See http://ewontfix.com/17/ for more details.)
6500 * We use the 32-bit version of the syscalls if present; if it is not
6501 * then either the host architecture supports 32-bit UIDs natively with
6502 * the standard syscall, or the 16-bit UID is the best we can do.
6504 #ifdef __NR_setuid32
6505 #define __NR_sys_setuid __NR_setuid32
6507 #define __NR_sys_setuid __NR_setuid
6509 #ifdef __NR_setgid32
6510 #define __NR_sys_setgid __NR_setgid32
6512 #define __NR_sys_setgid __NR_setgid
6514 #ifdef __NR_setresuid32
6515 #define __NR_sys_setresuid __NR_setresuid32
6517 #define __NR_sys_setresuid __NR_setresuid
6519 #ifdef __NR_setresgid32
6520 #define __NR_sys_setresgid __NR_setresgid32
6522 #define __NR_sys_setresgid __NR_setresgid
6525 _syscall1(int, sys_setuid
, uid_t
, uid
)
6526 _syscall1(int, sys_setgid
, gid_t
, gid
)
6527 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6528 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6530 void syscall_init(void)
6533 const argtype
*arg_type
;
6537 thunk_init(STRUCT_MAX
);
6539 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6540 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6541 #include "syscall_types.h"
6543 #undef STRUCT_SPECIAL
6545 /* Build target_to_host_errno_table[] table from
6546 * host_to_target_errno_table[]. */
6547 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6548 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6551 /* we patch the ioctl size if necessary. We rely on the fact that
6552 no ioctl has all the bits at '1' in the size field */
6554 while (ie
->target_cmd
!= 0) {
6555 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6556 TARGET_IOC_SIZEMASK
) {
6557 arg_type
= ie
->arg_type
;
6558 if (arg_type
[0] != TYPE_PTR
) {
6559 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6564 size
= thunk_type_size(arg_type
, 0);
6565 ie
->target_cmd
= (ie
->target_cmd
&
6566 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6567 (size
<< TARGET_IOC_SIZESHIFT
);
6570 /* automatic consistency check if same arch */
6571 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6572 (defined(__x86_64__) && defined(TARGET_X86_64))
6573 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6574 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6575 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6582 #if TARGET_ABI_BITS == 32
6583 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6585 #ifdef TARGET_WORDS_BIGENDIAN
6586 return ((uint64_t)word0
<< 32) | word1
;
6588 return ((uint64_t)word1
<< 32) | word0
;
6591 #else /* TARGET_ABI_BITS == 32 */
6592 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6596 #endif /* TARGET_ABI_BITS != 32 */
6598 #ifdef TARGET_NR_truncate64
6599 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6604 if (regpairs_aligned(cpu_env
)) {
6608 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6612 #ifdef TARGET_NR_ftruncate64
6613 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6618 if (regpairs_aligned(cpu_env
)) {
6622 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6626 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6627 abi_ulong target_addr
)
6629 struct target_timespec
*target_ts
;
6631 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6632 return -TARGET_EFAULT
;
6633 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6634 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6635 unlock_user_struct(target_ts
, target_addr
, 0);
6639 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6640 struct timespec
*host_ts
)
6642 struct target_timespec
*target_ts
;
6644 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6645 return -TARGET_EFAULT
;
6646 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6647 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6648 unlock_user_struct(target_ts
, target_addr
, 1);
6652 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6653 abi_ulong target_addr
)
6655 struct target_itimerspec
*target_itspec
;
6657 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6658 return -TARGET_EFAULT
;
6661 host_itspec
->it_interval
.tv_sec
=
6662 tswapal(target_itspec
->it_interval
.tv_sec
);
6663 host_itspec
->it_interval
.tv_nsec
=
6664 tswapal(target_itspec
->it_interval
.tv_nsec
);
6665 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6666 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6668 unlock_user_struct(target_itspec
, target_addr
, 1);
6672 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6673 struct itimerspec
*host_its
)
6675 struct target_itimerspec
*target_itspec
;
6677 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6678 return -TARGET_EFAULT
;
6681 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6682 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6684 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6685 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6687 unlock_user_struct(target_itspec
, target_addr
, 0);
6691 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6692 abi_ulong target_addr
)
6694 struct target_sigevent
*target_sevp
;
6696 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6697 return -TARGET_EFAULT
;
6700 /* This union is awkward on 64 bit systems because it has a 32 bit
6701 * integer and a pointer in it; we follow the conversion approach
6702 * used for handling sigval types in signal.c so the guest should get
6703 * the correct value back even if we did a 64 bit byteswap and it's
6704 * using the 32 bit integer.
6706 host_sevp
->sigev_value
.sival_ptr
=
6707 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6708 host_sevp
->sigev_signo
=
6709 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6710 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6711 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6713 unlock_user_struct(target_sevp
, target_addr
, 1);
6717 #if defined(TARGET_NR_mlockall)
6718 static inline int target_to_host_mlockall_arg(int arg
)
6722 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6723 result
|= MCL_CURRENT
;
6725 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6726 result
|= MCL_FUTURE
;
6732 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6733 abi_ulong target_addr
,
6734 struct stat
*host_st
)
6736 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6737 if (((CPUARMState
*)cpu_env
)->eabi
) {
6738 struct target_eabi_stat64
*target_st
;
6740 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6741 return -TARGET_EFAULT
;
6742 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6743 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6744 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6745 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6746 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6748 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6749 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6750 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6751 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6752 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6753 __put_user(host_st
->st_size
, &target_st
->st_size
);
6754 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6755 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6756 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6757 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6758 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6759 unlock_user_struct(target_st
, target_addr
, 1);
6763 #if defined(TARGET_HAS_STRUCT_STAT64)
6764 struct target_stat64
*target_st
;
6766 struct target_stat
*target_st
;
6769 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6770 return -TARGET_EFAULT
;
6771 memset(target_st
, 0, sizeof(*target_st
));
6772 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6773 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6774 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6775 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6777 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6778 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6779 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6780 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6781 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6782 /* XXX: better use of kernel struct */
6783 __put_user(host_st
->st_size
, &target_st
->st_size
);
6784 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6785 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6786 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6787 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6788 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6789 unlock_user_struct(target_st
, target_addr
, 1);
6795 /* ??? Using host futex calls even when target atomic operations
6796 are not really atomic probably breaks things. However implementing
6797 futexes locally would make futexes shared between multiple processes
6798 tricky. However they're probably useless because guest atomic
6799 operations won't work either. */
6800 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6801 target_ulong uaddr2
, int val3
)
6803 struct timespec ts
, *pts
;
6806 /* ??? We assume FUTEX_* constants are the same on both host
6808 #ifdef FUTEX_CMD_MASK
6809 base_op
= op
& FUTEX_CMD_MASK
;
6815 case FUTEX_WAIT_BITSET
:
6818 target_to_host_timespec(pts
, timeout
);
6822 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6825 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6827 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6829 case FUTEX_CMP_REQUEUE
:
6831 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6832 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6833 But the prototype takes a `struct timespec *'; insert casts
6834 to satisfy the compiler. We do not need to tswap TIMEOUT
6835 since it's not compared to guest memory. */
6836 pts
= (struct timespec
*)(uintptr_t) timeout
;
6837 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6839 (base_op
== FUTEX_CMP_REQUEUE
6843 return -TARGET_ENOSYS
;
6846 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6847 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6848 abi_long handle
, abi_long mount_id
,
6851 struct file_handle
*target_fh
;
6852 struct file_handle
*fh
;
6856 unsigned int size
, total_size
;
6858 if (get_user_s32(size
, handle
)) {
6859 return -TARGET_EFAULT
;
6862 name
= lock_user_string(pathname
);
6864 return -TARGET_EFAULT
;
6867 total_size
= sizeof(struct file_handle
) + size
;
6868 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6870 unlock_user(name
, pathname
, 0);
6871 return -TARGET_EFAULT
;
6874 fh
= g_malloc0(total_size
);
6875 fh
->handle_bytes
= size
;
6877 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6878 unlock_user(name
, pathname
, 0);
6880 /* man name_to_handle_at(2):
6881 * Other than the use of the handle_bytes field, the caller should treat
6882 * the file_handle structure as an opaque data type
6885 memcpy(target_fh
, fh
, total_size
);
6886 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6887 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6889 unlock_user(target_fh
, handle
, total_size
);
6891 if (put_user_s32(mid
, mount_id
)) {
6892 return -TARGET_EFAULT
;
6900 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6901 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6904 struct file_handle
*target_fh
;
6905 struct file_handle
*fh
;
6906 unsigned int size
, total_size
;
6909 if (get_user_s32(size
, handle
)) {
6910 return -TARGET_EFAULT
;
6913 total_size
= sizeof(struct file_handle
) + size
;
6914 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6916 return -TARGET_EFAULT
;
6919 fh
= g_memdup(target_fh
, total_size
);
6920 fh
->handle_bytes
= size
;
6921 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6923 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6924 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6928 unlock_user(target_fh
, handle
, total_size
);
6934 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6936 /* signalfd siginfo conversion */
6939 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
6940 const struct signalfd_siginfo
*info
)
6942 int sig
= host_to_target_signal(info
->ssi_signo
);
6944 /* linux/signalfd.h defines a ssi_addr_lsb
6945 * not defined in sys/signalfd.h but used by some kernels
6948 #ifdef BUS_MCEERR_AO
6949 if (tinfo
->ssi_signo
== SIGBUS
&&
6950 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
6951 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
6952 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
6953 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
6954 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
6958 tinfo
->ssi_signo
= tswap32(sig
);
6959 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
6960 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
6961 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
6962 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
6963 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
6964 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
6965 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
6966 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
6967 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
6968 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
6969 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
6970 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
6971 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
6972 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
6973 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
6976 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6980 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6981 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
6987 static TargetFdTrans target_signalfd_trans
= {
6988 .host_to_target_data
= host_to_target_data_signalfd
,
6991 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6994 target_sigset_t
*target_mask
;
6998 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6999 return -TARGET_EINVAL
;
7001 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7002 return -TARGET_EFAULT
;
7005 target_to_host_sigset(&host_mask
, target_mask
);
7007 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7009 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7011 fd_trans_register(ret
, &target_signalfd_trans
);
7014 unlock_user_struct(target_mask
, mask
, 0);
7020 /* Map host to target signal numbers for the wait family of syscalls.
7021 Assume all other status bits are the same. */
7022 int host_to_target_waitstatus(int status
)
7024 if (WIFSIGNALED(status
)) {
7025 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
7027 if (WIFSTOPPED(status
)) {
7028 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
7034 static int open_self_cmdline(void *cpu_env
, int fd
)
7037 bool word_skipped
= false;
7039 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
7049 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
7052 fd_orig
= close(fd_orig
);
7055 } else if (nb_read
== 0) {
7059 if (!word_skipped
) {
7060 /* Skip the first string, which is the path to qemu-*-static
7061 instead of the actual command. */
7062 cp_buf
= memchr(buf
, 0, nb_read
);
7064 /* Null byte found, skip one string */
7066 nb_read
-= cp_buf
- buf
;
7067 word_skipped
= true;
7072 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
7081 return close(fd_orig
);
7084 static int open_self_maps(void *cpu_env
, int fd
)
7086 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7087 TaskState
*ts
= cpu
->opaque
;
7093 fp
= fopen("/proc/self/maps", "r");
7098 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7099 int fields
, dev_maj
, dev_min
, inode
;
7100 uint64_t min
, max
, offset
;
7101 char flag_r
, flag_w
, flag_x
, flag_p
;
7102 char path
[512] = "";
7103 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7104 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7105 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7107 if ((fields
< 10) || (fields
> 11)) {
7110 if (h2g_valid(min
)) {
7111 int flags
= page_get_flags(h2g(min
));
7112 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7113 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7116 if (h2g(min
) == ts
->info
->stack_limit
) {
7117 pstrcpy(path
, sizeof(path
), " [stack]");
7119 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7120 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7121 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7122 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7123 path
[0] ? " " : "", path
);
7133 static int open_self_stat(void *cpu_env
, int fd
)
7135 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7136 TaskState
*ts
= cpu
->opaque
;
7137 abi_ulong start_stack
= ts
->info
->start_stack
;
7140 for (i
= 0; i
< 44; i
++) {
7148 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7149 } else if (i
== 1) {
7151 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7152 } else if (i
== 27) {
7155 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7157 /* for the rest, there is MasterCard */
7158 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7162 if (write(fd
, buf
, len
) != len
) {
7170 static int open_self_auxv(void *cpu_env
, int fd
)
7172 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7173 TaskState
*ts
= cpu
->opaque
;
7174 abi_ulong auxv
= ts
->info
->saved_auxv
;
7175 abi_ulong len
= ts
->info
->auxv_len
;
7179 * Auxiliary vector is stored in target process stack.
7180 * read in whole auxv vector and copy it to file
7182 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7186 r
= write(fd
, ptr
, len
);
7193 lseek(fd
, 0, SEEK_SET
);
7194 unlock_user(ptr
, auxv
, len
);
7200 static int is_proc_myself(const char *filename
, const char *entry
)
7202 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7203 filename
+= strlen("/proc/");
7204 if (!strncmp(filename
, "self/", strlen("self/"))) {
7205 filename
+= strlen("self/");
7206 } else if (*filename
>= '1' && *filename
<= '9') {
7208 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7209 if (!strncmp(filename
, myself
, strlen(myself
))) {
7210 filename
+= strlen(myself
);
7217 if (!strcmp(filename
, entry
)) {
7224 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7225 static int is_proc(const char *filename
, const char *entry
)
7227 return strcmp(filename
, entry
) == 0;
7230 static int open_net_route(void *cpu_env
, int fd
)
7237 fp
= fopen("/proc/net/route", "r");
7244 read
= getline(&line
, &len
, fp
);
7245 dprintf(fd
, "%s", line
);
7249 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7251 uint32_t dest
, gw
, mask
;
7252 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7253 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7254 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7255 &mask
, &mtu
, &window
, &irtt
);
7256 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7257 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7258 metric
, tswap32(mask
), mtu
, window
, irtt
);
7268 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7271 const char *filename
;
7272 int (*fill
)(void *cpu_env
, int fd
);
7273 int (*cmp
)(const char *s1
, const char *s2
);
7275 const struct fake_open
*fake_open
;
7276 static const struct fake_open fakes
[] = {
7277 { "maps", open_self_maps
, is_proc_myself
},
7278 { "stat", open_self_stat
, is_proc_myself
},
7279 { "auxv", open_self_auxv
, is_proc_myself
},
7280 { "cmdline", open_self_cmdline
, is_proc_myself
},
7281 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7282 { "/proc/net/route", open_net_route
, is_proc
},
7284 { NULL
, NULL
, NULL
}
7287 if (is_proc_myself(pathname
, "exe")) {
7288 int execfd
= qemu_getauxval(AT_EXECFD
);
7289 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7292 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7293 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7298 if (fake_open
->filename
) {
7300 char filename
[PATH_MAX
];
7303 /* create temporary file to map stat to */
7304 tmpdir
= getenv("TMPDIR");
7307 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7308 fd
= mkstemp(filename
);
7314 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7320 lseek(fd
, 0, SEEK_SET
);
7325 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7328 #define TIMER_MAGIC 0x0caf0000
7329 #define TIMER_MAGIC_MASK 0xffff0000
7331 /* Convert QEMU provided timer ID back to internal 16bit index format */
7332 static target_timer_t
get_timer_id(abi_long arg
)
7334 target_timer_t timerid
= arg
;
7336 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7337 return -TARGET_EINVAL
;
7342 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7343 return -TARGET_EINVAL
;
7349 /* do_syscall() should always have a single exit point at the end so
7350 that actions, such as logging of syscall results, can be performed.
7351 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7352 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7353 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7354 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7357 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7363 #if defined(DEBUG_ERESTARTSYS)
7364 /* Debug-only code for exercising the syscall-restart code paths
7365 * in the per-architecture cpu main loops: restart every syscall
7366 * the guest makes once before letting it through.
7373 return -TARGET_ERESTARTSYS
;
7379 gemu_log("syscall %d", num
);
7381 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7383 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7386 case TARGET_NR_exit
:
7387 /* In old applications this may be used to implement _exit(2).
7388 However in threaded applictions it is used for thread termination,
7389 and _exit_group is used for application termination.
7390 Do thread termination if we have more then one thread. */
7392 if (block_signals()) {
7393 ret
= -TARGET_ERESTARTSYS
;
7397 if (CPU_NEXT(first_cpu
)) {
7401 /* Remove the CPU from the list. */
7402 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7405 if (ts
->child_tidptr
) {
7406 put_user_u32(0, ts
->child_tidptr
);
7407 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7411 object_unref(OBJECT(cpu
));
7413 rcu_unregister_thread();
7419 gdb_exit(cpu_env
, arg1
);
7421 ret
= 0; /* avoid warning */
7423 case TARGET_NR_read
:
7427 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7429 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7431 fd_trans_host_to_target_data(arg1
)) {
7432 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7434 unlock_user(p
, arg2
, ret
);
7437 case TARGET_NR_write
:
7438 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7440 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7441 unlock_user(p
, arg2
, 0);
7443 #ifdef TARGET_NR_open
7444 case TARGET_NR_open
:
7445 if (!(p
= lock_user_string(arg1
)))
7447 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7448 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7450 fd_trans_unregister(ret
);
7451 unlock_user(p
, arg1
, 0);
7454 case TARGET_NR_openat
:
7455 if (!(p
= lock_user_string(arg2
)))
7457 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7458 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7460 fd_trans_unregister(ret
);
7461 unlock_user(p
, arg2
, 0);
7463 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7464 case TARGET_NR_name_to_handle_at
:
7465 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7468 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7469 case TARGET_NR_open_by_handle_at
:
7470 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7471 fd_trans_unregister(ret
);
7474 case TARGET_NR_close
:
7475 fd_trans_unregister(arg1
);
7476 ret
= get_errno(close(arg1
));
7481 #ifdef TARGET_NR_fork
7482 case TARGET_NR_fork
:
7483 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7486 #ifdef TARGET_NR_waitpid
7487 case TARGET_NR_waitpid
:
7490 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7491 if (!is_error(ret
) && arg2
&& ret
7492 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7497 #ifdef TARGET_NR_waitid
7498 case TARGET_NR_waitid
:
7502 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7503 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7504 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7506 host_to_target_siginfo(p
, &info
);
7507 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7512 #ifdef TARGET_NR_creat /* not on alpha */
7513 case TARGET_NR_creat
:
7514 if (!(p
= lock_user_string(arg1
)))
7516 ret
= get_errno(creat(p
, arg2
));
7517 fd_trans_unregister(ret
);
7518 unlock_user(p
, arg1
, 0);
7521 #ifdef TARGET_NR_link
7522 case TARGET_NR_link
:
7525 p
= lock_user_string(arg1
);
7526 p2
= lock_user_string(arg2
);
7528 ret
= -TARGET_EFAULT
;
7530 ret
= get_errno(link(p
, p2
));
7531 unlock_user(p2
, arg2
, 0);
7532 unlock_user(p
, arg1
, 0);
7536 #if defined(TARGET_NR_linkat)
7537 case TARGET_NR_linkat
:
7542 p
= lock_user_string(arg2
);
7543 p2
= lock_user_string(arg4
);
7545 ret
= -TARGET_EFAULT
;
7547 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7548 unlock_user(p
, arg2
, 0);
7549 unlock_user(p2
, arg4
, 0);
7553 #ifdef TARGET_NR_unlink
7554 case TARGET_NR_unlink
:
7555 if (!(p
= lock_user_string(arg1
)))
7557 ret
= get_errno(unlink(p
));
7558 unlock_user(p
, arg1
, 0);
7561 #if defined(TARGET_NR_unlinkat)
7562 case TARGET_NR_unlinkat
:
7563 if (!(p
= lock_user_string(arg2
)))
7565 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7566 unlock_user(p
, arg2
, 0);
7569 case TARGET_NR_execve
:
7571 char **argp
, **envp
;
7574 abi_ulong guest_argp
;
7575 abi_ulong guest_envp
;
7582 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7583 if (get_user_ual(addr
, gp
))
7591 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7592 if (get_user_ual(addr
, gp
))
7599 argp
= alloca((argc
+ 1) * sizeof(void *));
7600 envp
= alloca((envc
+ 1) * sizeof(void *));
7602 for (gp
= guest_argp
, q
= argp
; gp
;
7603 gp
+= sizeof(abi_ulong
), q
++) {
7604 if (get_user_ual(addr
, gp
))
7608 if (!(*q
= lock_user_string(addr
)))
7610 total_size
+= strlen(*q
) + 1;
7614 for (gp
= guest_envp
, q
= envp
; gp
;
7615 gp
+= sizeof(abi_ulong
), q
++) {
7616 if (get_user_ual(addr
, gp
))
7620 if (!(*q
= lock_user_string(addr
)))
7622 total_size
+= strlen(*q
) + 1;
7626 if (!(p
= lock_user_string(arg1
)))
7628 /* Although execve() is not an interruptible syscall it is
7629 * a special case where we must use the safe_syscall wrapper:
7630 * if we allow a signal to happen before we make the host
7631 * syscall then we will 'lose' it, because at the point of
7632 * execve the process leaves QEMU's control. So we use the
7633 * safe syscall wrapper to ensure that we either take the
7634 * signal as a guest signal, or else it does not happen
7635 * before the execve completes and makes it the other
7636 * program's problem.
7638 ret
= get_errno(safe_execve(p
, argp
, envp
));
7639 unlock_user(p
, arg1
, 0);
7644 ret
= -TARGET_EFAULT
;
7647 for (gp
= guest_argp
, q
= argp
; *q
;
7648 gp
+= sizeof(abi_ulong
), q
++) {
7649 if (get_user_ual(addr
, gp
)
7652 unlock_user(*q
, addr
, 0);
7654 for (gp
= guest_envp
, q
= envp
; *q
;
7655 gp
+= sizeof(abi_ulong
), q
++) {
7656 if (get_user_ual(addr
, gp
)
7659 unlock_user(*q
, addr
, 0);
7663 case TARGET_NR_chdir
:
7664 if (!(p
= lock_user_string(arg1
)))
7666 ret
= get_errno(chdir(p
));
7667 unlock_user(p
, arg1
, 0);
7669 #ifdef TARGET_NR_time
7670 case TARGET_NR_time
:
7673 ret
= get_errno(time(&host_time
));
7676 && put_user_sal(host_time
, arg1
))
7681 #ifdef TARGET_NR_mknod
7682 case TARGET_NR_mknod
:
7683 if (!(p
= lock_user_string(arg1
)))
7685 ret
= get_errno(mknod(p
, arg2
, arg3
));
7686 unlock_user(p
, arg1
, 0);
7689 #if defined(TARGET_NR_mknodat)
7690 case TARGET_NR_mknodat
:
7691 if (!(p
= lock_user_string(arg2
)))
7693 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7694 unlock_user(p
, arg2
, 0);
7697 #ifdef TARGET_NR_chmod
7698 case TARGET_NR_chmod
:
7699 if (!(p
= lock_user_string(arg1
)))
7701 ret
= get_errno(chmod(p
, arg2
));
7702 unlock_user(p
, arg1
, 0);
7705 #ifdef TARGET_NR_break
7706 case TARGET_NR_break
:
7709 #ifdef TARGET_NR_oldstat
7710 case TARGET_NR_oldstat
:
7713 case TARGET_NR_lseek
:
7714 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7716 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7717 /* Alpha specific */
7718 case TARGET_NR_getxpid
:
7719 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7720 ret
= get_errno(getpid());
7723 #ifdef TARGET_NR_getpid
7724 case TARGET_NR_getpid
:
7725 ret
= get_errno(getpid());
7728 case TARGET_NR_mount
:
7730 /* need to look at the data field */
7734 p
= lock_user_string(arg1
);
7742 p2
= lock_user_string(arg2
);
7745 unlock_user(p
, arg1
, 0);
7751 p3
= lock_user_string(arg3
);
7754 unlock_user(p
, arg1
, 0);
7756 unlock_user(p2
, arg2
, 0);
7763 /* FIXME - arg5 should be locked, but it isn't clear how to
7764 * do that since it's not guaranteed to be a NULL-terminated
7768 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7770 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7772 ret
= get_errno(ret
);
7775 unlock_user(p
, arg1
, 0);
7777 unlock_user(p2
, arg2
, 0);
7779 unlock_user(p3
, arg3
, 0);
7783 #ifdef TARGET_NR_umount
7784 case TARGET_NR_umount
:
7785 if (!(p
= lock_user_string(arg1
)))
7787 ret
= get_errno(umount(p
));
7788 unlock_user(p
, arg1
, 0);
7791 #ifdef TARGET_NR_stime /* not on alpha */
7792 case TARGET_NR_stime
:
7795 if (get_user_sal(host_time
, arg1
))
7797 ret
= get_errno(stime(&host_time
));
7801 case TARGET_NR_ptrace
:
7803 #ifdef TARGET_NR_alarm /* not on alpha */
7804 case TARGET_NR_alarm
:
7808 #ifdef TARGET_NR_oldfstat
7809 case TARGET_NR_oldfstat
:
7812 #ifdef TARGET_NR_pause /* not on alpha */
7813 case TARGET_NR_pause
:
7814 if (!block_signals()) {
7815 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7817 ret
= -TARGET_EINTR
;
7820 #ifdef TARGET_NR_utime
7821 case TARGET_NR_utime
:
7823 struct utimbuf tbuf
, *host_tbuf
;
7824 struct target_utimbuf
*target_tbuf
;
7826 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7828 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7829 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7830 unlock_user_struct(target_tbuf
, arg2
, 0);
7835 if (!(p
= lock_user_string(arg1
)))
7837 ret
= get_errno(utime(p
, host_tbuf
));
7838 unlock_user(p
, arg1
, 0);
7842 #ifdef TARGET_NR_utimes
7843 case TARGET_NR_utimes
:
7845 struct timeval
*tvp
, tv
[2];
7847 if (copy_from_user_timeval(&tv
[0], arg2
)
7848 || copy_from_user_timeval(&tv
[1],
7849 arg2
+ sizeof(struct target_timeval
)))
7855 if (!(p
= lock_user_string(arg1
)))
7857 ret
= get_errno(utimes(p
, tvp
));
7858 unlock_user(p
, arg1
, 0);
7862 #if defined(TARGET_NR_futimesat)
7863 case TARGET_NR_futimesat
:
7865 struct timeval
*tvp
, tv
[2];
7867 if (copy_from_user_timeval(&tv
[0], arg3
)
7868 || copy_from_user_timeval(&tv
[1],
7869 arg3
+ sizeof(struct target_timeval
)))
7875 if (!(p
= lock_user_string(arg2
)))
7877 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7878 unlock_user(p
, arg2
, 0);
7882 #ifdef TARGET_NR_stty
7883 case TARGET_NR_stty
:
7886 #ifdef TARGET_NR_gtty
7887 case TARGET_NR_gtty
:
7890 #ifdef TARGET_NR_access
7891 case TARGET_NR_access
:
7892 if (!(p
= lock_user_string(arg1
)))
7894 ret
= get_errno(access(path(p
), arg2
));
7895 unlock_user(p
, arg1
, 0);
7898 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7899 case TARGET_NR_faccessat
:
7900 if (!(p
= lock_user_string(arg2
)))
7902 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7903 unlock_user(p
, arg2
, 0);
7906 #ifdef TARGET_NR_nice /* not on alpha */
7907 case TARGET_NR_nice
:
7908 ret
= get_errno(nice(arg1
));
7911 #ifdef TARGET_NR_ftime
7912 case TARGET_NR_ftime
:
7915 case TARGET_NR_sync
:
7919 case TARGET_NR_kill
:
7920 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7922 #ifdef TARGET_NR_rename
7923 case TARGET_NR_rename
:
7926 p
= lock_user_string(arg1
);
7927 p2
= lock_user_string(arg2
);
7929 ret
= -TARGET_EFAULT
;
7931 ret
= get_errno(rename(p
, p2
));
7932 unlock_user(p2
, arg2
, 0);
7933 unlock_user(p
, arg1
, 0);
7937 #if defined(TARGET_NR_renameat)
7938 case TARGET_NR_renameat
:
7941 p
= lock_user_string(arg2
);
7942 p2
= lock_user_string(arg4
);
7944 ret
= -TARGET_EFAULT
;
7946 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7947 unlock_user(p2
, arg4
, 0);
7948 unlock_user(p
, arg2
, 0);
7952 #ifdef TARGET_NR_mkdir
7953 case TARGET_NR_mkdir
:
7954 if (!(p
= lock_user_string(arg1
)))
7956 ret
= get_errno(mkdir(p
, arg2
));
7957 unlock_user(p
, arg1
, 0);
7960 #if defined(TARGET_NR_mkdirat)
7961 case TARGET_NR_mkdirat
:
7962 if (!(p
= lock_user_string(arg2
)))
7964 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7965 unlock_user(p
, arg2
, 0);
7968 #ifdef TARGET_NR_rmdir
7969 case TARGET_NR_rmdir
:
7970 if (!(p
= lock_user_string(arg1
)))
7972 ret
= get_errno(rmdir(p
));
7973 unlock_user(p
, arg1
, 0);
7977 ret
= get_errno(dup(arg1
));
7979 fd_trans_dup(arg1
, ret
);
7982 #ifdef TARGET_NR_pipe
7983 case TARGET_NR_pipe
:
7984 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7987 #ifdef TARGET_NR_pipe2
7988 case TARGET_NR_pipe2
:
7989 ret
= do_pipe(cpu_env
, arg1
,
7990 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7993 case TARGET_NR_times
:
7995 struct target_tms
*tmsp
;
7997 ret
= get_errno(times(&tms
));
7999 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8002 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8003 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8004 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8005 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8008 ret
= host_to_target_clock_t(ret
);
8011 #ifdef TARGET_NR_prof
8012 case TARGET_NR_prof
:
8015 #ifdef TARGET_NR_signal
8016 case TARGET_NR_signal
:
8019 case TARGET_NR_acct
:
8021 ret
= get_errno(acct(NULL
));
8023 if (!(p
= lock_user_string(arg1
)))
8025 ret
= get_errno(acct(path(p
)));
8026 unlock_user(p
, arg1
, 0);
8029 #ifdef TARGET_NR_umount2
8030 case TARGET_NR_umount2
:
8031 if (!(p
= lock_user_string(arg1
)))
8033 ret
= get_errno(umount2(p
, arg2
));
8034 unlock_user(p
, arg1
, 0);
8037 #ifdef TARGET_NR_lock
8038 case TARGET_NR_lock
:
8041 case TARGET_NR_ioctl
:
8042 ret
= do_ioctl(arg1
, arg2
, arg3
);
8044 case TARGET_NR_fcntl
:
8045 ret
= do_fcntl(arg1
, arg2
, arg3
);
8047 #ifdef TARGET_NR_mpx
8051 case TARGET_NR_setpgid
:
8052 ret
= get_errno(setpgid(arg1
, arg2
));
8054 #ifdef TARGET_NR_ulimit
8055 case TARGET_NR_ulimit
:
8058 #ifdef TARGET_NR_oldolduname
8059 case TARGET_NR_oldolduname
:
8062 case TARGET_NR_umask
:
8063 ret
= get_errno(umask(arg1
));
8065 case TARGET_NR_chroot
:
8066 if (!(p
= lock_user_string(arg1
)))
8068 ret
= get_errno(chroot(p
));
8069 unlock_user(p
, arg1
, 0);
8071 #ifdef TARGET_NR_ustat
8072 case TARGET_NR_ustat
:
8075 #ifdef TARGET_NR_dup2
8076 case TARGET_NR_dup2
:
8077 ret
= get_errno(dup2(arg1
, arg2
));
8079 fd_trans_dup(arg1
, arg2
);
8083 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8084 case TARGET_NR_dup3
:
8085 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8087 fd_trans_dup(arg1
, arg2
);
8091 #ifdef TARGET_NR_getppid /* not on alpha */
8092 case TARGET_NR_getppid
:
8093 ret
= get_errno(getppid());
8096 #ifdef TARGET_NR_getpgrp
8097 case TARGET_NR_getpgrp
:
8098 ret
= get_errno(getpgrp());
8101 case TARGET_NR_setsid
:
8102 ret
= get_errno(setsid());
8104 #ifdef TARGET_NR_sigaction
8105 case TARGET_NR_sigaction
:
8107 #if defined(TARGET_ALPHA)
8108 struct target_sigaction act
, oact
, *pact
= 0;
8109 struct target_old_sigaction
*old_act
;
8111 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8113 act
._sa_handler
= old_act
->_sa_handler
;
8114 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8115 act
.sa_flags
= old_act
->sa_flags
;
8116 act
.sa_restorer
= 0;
8117 unlock_user_struct(old_act
, arg2
, 0);
8120 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8121 if (!is_error(ret
) && arg3
) {
8122 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8124 old_act
->_sa_handler
= oact
._sa_handler
;
8125 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8126 old_act
->sa_flags
= oact
.sa_flags
;
8127 unlock_user_struct(old_act
, arg3
, 1);
8129 #elif defined(TARGET_MIPS)
8130 struct target_sigaction act
, oact
, *pact
, *old_act
;
8133 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8135 act
._sa_handler
= old_act
->_sa_handler
;
8136 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8137 act
.sa_flags
= old_act
->sa_flags
;
8138 unlock_user_struct(old_act
, arg2
, 0);
8144 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8146 if (!is_error(ret
) && arg3
) {
8147 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8149 old_act
->_sa_handler
= oact
._sa_handler
;
8150 old_act
->sa_flags
= oact
.sa_flags
;
8151 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8152 old_act
->sa_mask
.sig
[1] = 0;
8153 old_act
->sa_mask
.sig
[2] = 0;
8154 old_act
->sa_mask
.sig
[3] = 0;
8155 unlock_user_struct(old_act
, arg3
, 1);
8158 struct target_old_sigaction
*old_act
;
8159 struct target_sigaction act
, oact
, *pact
;
8161 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8163 act
._sa_handler
= old_act
->_sa_handler
;
8164 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8165 act
.sa_flags
= old_act
->sa_flags
;
8166 act
.sa_restorer
= old_act
->sa_restorer
;
8167 unlock_user_struct(old_act
, arg2
, 0);
8172 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8173 if (!is_error(ret
) && arg3
) {
8174 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8176 old_act
->_sa_handler
= oact
._sa_handler
;
8177 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8178 old_act
->sa_flags
= oact
.sa_flags
;
8179 old_act
->sa_restorer
= oact
.sa_restorer
;
8180 unlock_user_struct(old_act
, arg3
, 1);
8186 case TARGET_NR_rt_sigaction
:
8188 #if defined(TARGET_ALPHA)
8189 struct target_sigaction act
, oact
, *pact
= 0;
8190 struct target_rt_sigaction
*rt_act
;
8192 if (arg4
!= sizeof(target_sigset_t
)) {
8193 ret
= -TARGET_EINVAL
;
8197 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8199 act
._sa_handler
= rt_act
->_sa_handler
;
8200 act
.sa_mask
= rt_act
->sa_mask
;
8201 act
.sa_flags
= rt_act
->sa_flags
;
8202 act
.sa_restorer
= arg5
;
8203 unlock_user_struct(rt_act
, arg2
, 0);
8206 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8207 if (!is_error(ret
) && arg3
) {
8208 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8210 rt_act
->_sa_handler
= oact
._sa_handler
;
8211 rt_act
->sa_mask
= oact
.sa_mask
;
8212 rt_act
->sa_flags
= oact
.sa_flags
;
8213 unlock_user_struct(rt_act
, arg3
, 1);
8216 struct target_sigaction
*act
;
8217 struct target_sigaction
*oact
;
8219 if (arg4
!= sizeof(target_sigset_t
)) {
8220 ret
= -TARGET_EINVAL
;
8224 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8229 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8230 ret
= -TARGET_EFAULT
;
8231 goto rt_sigaction_fail
;
8235 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8238 unlock_user_struct(act
, arg2
, 0);
8240 unlock_user_struct(oact
, arg3
, 1);
8244 #ifdef TARGET_NR_sgetmask /* not on alpha */
8245 case TARGET_NR_sgetmask
:
8248 abi_ulong target_set
;
8249 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8251 host_to_target_old_sigset(&target_set
, &cur_set
);
8257 #ifdef TARGET_NR_ssetmask /* not on alpha */
8258 case TARGET_NR_ssetmask
:
8260 sigset_t set
, oset
, cur_set
;
8261 abi_ulong target_set
= arg1
;
8262 /* We only have one word of the new mask so we must read
8263 * the rest of it with do_sigprocmask() and OR in this word.
8264 * We are guaranteed that a do_sigprocmask() that only queries
8265 * the signal mask will not fail.
8267 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8269 target_to_host_old_sigset(&set
, &target_set
);
8270 sigorset(&set
, &set
, &cur_set
);
8271 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8273 host_to_target_old_sigset(&target_set
, &oset
);
8279 #ifdef TARGET_NR_sigprocmask
8280 case TARGET_NR_sigprocmask
:
8282 #if defined(TARGET_ALPHA)
8283 sigset_t set
, oldset
;
8288 case TARGET_SIG_BLOCK
:
8291 case TARGET_SIG_UNBLOCK
:
8294 case TARGET_SIG_SETMASK
:
8298 ret
= -TARGET_EINVAL
;
8302 target_to_host_old_sigset(&set
, &mask
);
8304 ret
= do_sigprocmask(how
, &set
, &oldset
);
8305 if (!is_error(ret
)) {
8306 host_to_target_old_sigset(&mask
, &oldset
);
8308 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8311 sigset_t set
, oldset
, *set_ptr
;
8316 case TARGET_SIG_BLOCK
:
8319 case TARGET_SIG_UNBLOCK
:
8322 case TARGET_SIG_SETMASK
:
8326 ret
= -TARGET_EINVAL
;
8329 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8331 target_to_host_old_sigset(&set
, p
);
8332 unlock_user(p
, arg2
, 0);
8338 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8339 if (!is_error(ret
) && arg3
) {
8340 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8342 host_to_target_old_sigset(p
, &oldset
);
8343 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8349 case TARGET_NR_rt_sigprocmask
:
8352 sigset_t set
, oldset
, *set_ptr
;
8354 if (arg4
!= sizeof(target_sigset_t
)) {
8355 ret
= -TARGET_EINVAL
;
8361 case TARGET_SIG_BLOCK
:
8364 case TARGET_SIG_UNBLOCK
:
8367 case TARGET_SIG_SETMASK
:
8371 ret
= -TARGET_EINVAL
;
8374 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8376 target_to_host_sigset(&set
, p
);
8377 unlock_user(p
, arg2
, 0);
8383 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8384 if (!is_error(ret
) && arg3
) {
8385 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8387 host_to_target_sigset(p
, &oldset
);
8388 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8392 #ifdef TARGET_NR_sigpending
8393 case TARGET_NR_sigpending
:
8396 ret
= get_errno(sigpending(&set
));
8397 if (!is_error(ret
)) {
8398 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8400 host_to_target_old_sigset(p
, &set
);
8401 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8406 case TARGET_NR_rt_sigpending
:
8410 /* Yes, this check is >, not != like most. We follow the kernel's
8411 * logic and it does it like this because it implements
8412 * NR_sigpending through the same code path, and in that case
8413 * the old_sigset_t is smaller in size.
8415 if (arg2
> sizeof(target_sigset_t
)) {
8416 ret
= -TARGET_EINVAL
;
8420 ret
= get_errno(sigpending(&set
));
8421 if (!is_error(ret
)) {
8422 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8424 host_to_target_sigset(p
, &set
);
8425 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8429 #ifdef TARGET_NR_sigsuspend
8430 case TARGET_NR_sigsuspend
:
8432 TaskState
*ts
= cpu
->opaque
;
8433 #if defined(TARGET_ALPHA)
8434 abi_ulong mask
= arg1
;
8435 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8437 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8439 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8440 unlock_user(p
, arg1
, 0);
8442 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8444 if (ret
!= -TARGET_ERESTARTSYS
) {
8445 ts
->in_sigsuspend
= 1;
8450 case TARGET_NR_rt_sigsuspend
:
8452 TaskState
*ts
= cpu
->opaque
;
8454 if (arg2
!= sizeof(target_sigset_t
)) {
8455 ret
= -TARGET_EINVAL
;
8458 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8460 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8461 unlock_user(p
, arg1
, 0);
8462 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8464 if (ret
!= -TARGET_ERESTARTSYS
) {
8465 ts
->in_sigsuspend
= 1;
8469 case TARGET_NR_rt_sigtimedwait
:
8472 struct timespec uts
, *puts
;
8475 if (arg4
!= sizeof(target_sigset_t
)) {
8476 ret
= -TARGET_EINVAL
;
8480 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8482 target_to_host_sigset(&set
, p
);
8483 unlock_user(p
, arg1
, 0);
8486 target_to_host_timespec(puts
, arg3
);
8490 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8492 if (!is_error(ret
)) {
8494 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8499 host_to_target_siginfo(p
, &uinfo
);
8500 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8502 ret
= host_to_target_signal(ret
);
8506 case TARGET_NR_rt_sigqueueinfo
:
8510 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8514 target_to_host_siginfo(&uinfo
, p
);
8515 unlock_user(p
, arg1
, 0);
8516 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8519 #ifdef TARGET_NR_sigreturn
8520 case TARGET_NR_sigreturn
:
8521 if (block_signals()) {
8522 ret
= -TARGET_ERESTARTSYS
;
8524 ret
= do_sigreturn(cpu_env
);
8528 case TARGET_NR_rt_sigreturn
:
8529 if (block_signals()) {
8530 ret
= -TARGET_ERESTARTSYS
;
8532 ret
= do_rt_sigreturn(cpu_env
);
8535 case TARGET_NR_sethostname
:
8536 if (!(p
= lock_user_string(arg1
)))
8538 ret
= get_errno(sethostname(p
, arg2
));
8539 unlock_user(p
, arg1
, 0);
8541 case TARGET_NR_setrlimit
:
8543 int resource
= target_to_host_resource(arg1
);
8544 struct target_rlimit
*target_rlim
;
8546 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8548 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8549 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8550 unlock_user_struct(target_rlim
, arg2
, 0);
8551 ret
= get_errno(setrlimit(resource
, &rlim
));
8554 case TARGET_NR_getrlimit
:
8556 int resource
= target_to_host_resource(arg1
);
8557 struct target_rlimit
*target_rlim
;
8560 ret
= get_errno(getrlimit(resource
, &rlim
));
8561 if (!is_error(ret
)) {
8562 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8564 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8565 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8566 unlock_user_struct(target_rlim
, arg2
, 1);
8570 case TARGET_NR_getrusage
:
8572 struct rusage rusage
;
8573 ret
= get_errno(getrusage(arg1
, &rusage
));
8574 if (!is_error(ret
)) {
8575 ret
= host_to_target_rusage(arg2
, &rusage
);
8579 case TARGET_NR_gettimeofday
:
8582 ret
= get_errno(gettimeofday(&tv
, NULL
));
8583 if (!is_error(ret
)) {
8584 if (copy_to_user_timeval(arg1
, &tv
))
8589 case TARGET_NR_settimeofday
:
8591 struct timeval tv
, *ptv
= NULL
;
8592 struct timezone tz
, *ptz
= NULL
;
8595 if (copy_from_user_timeval(&tv
, arg1
)) {
8602 if (copy_from_user_timezone(&tz
, arg2
)) {
8608 ret
= get_errno(settimeofday(ptv
, ptz
));
8611 #if defined(TARGET_NR_select)
8612 case TARGET_NR_select
:
8613 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8614 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8617 struct target_sel_arg_struct
*sel
;
8618 abi_ulong inp
, outp
, exp
, tvp
;
8621 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
8623 nsel
= tswapal(sel
->n
);
8624 inp
= tswapal(sel
->inp
);
8625 outp
= tswapal(sel
->outp
);
8626 exp
= tswapal(sel
->exp
);
8627 tvp
= tswapal(sel
->tvp
);
8628 unlock_user_struct(sel
, arg1
, 0);
8629 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8634 #ifdef TARGET_NR_pselect6
8635 case TARGET_NR_pselect6
:
8637 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8638 fd_set rfds
, wfds
, efds
;
8639 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8640 struct timespec ts
, *ts_ptr
;
8643 * The 6th arg is actually two args smashed together,
8644 * so we cannot use the C library.
8652 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8653 target_sigset_t
*target_sigset
;
8661 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8665 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8669 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8675 * This takes a timespec, and not a timeval, so we cannot
8676 * use the do_select() helper ...
8679 if (target_to_host_timespec(&ts
, ts_addr
)) {
8687 /* Extract the two packed args for the sigset */
8690 sig
.size
= SIGSET_T_SIZE
;
8692 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8696 arg_sigset
= tswapal(arg7
[0]);
8697 arg_sigsize
= tswapal(arg7
[1]);
8698 unlock_user(arg7
, arg6
, 0);
8702 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8703 /* Like the kernel, we enforce correct size sigsets */
8704 ret
= -TARGET_EINVAL
;
8707 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8708 sizeof(*target_sigset
), 1);
8709 if (!target_sigset
) {
8712 target_to_host_sigset(&set
, target_sigset
);
8713 unlock_user(target_sigset
, arg_sigset
, 0);
8721 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8724 if (!is_error(ret
)) {
8725 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8727 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8729 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8732 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8738 #ifdef TARGET_NR_symlink
8739 case TARGET_NR_symlink
:
8742 p
= lock_user_string(arg1
);
8743 p2
= lock_user_string(arg2
);
8745 ret
= -TARGET_EFAULT
;
8747 ret
= get_errno(symlink(p
, p2
));
8748 unlock_user(p2
, arg2
, 0);
8749 unlock_user(p
, arg1
, 0);
8753 #if defined(TARGET_NR_symlinkat)
8754 case TARGET_NR_symlinkat
:
8757 p
= lock_user_string(arg1
);
8758 p2
= lock_user_string(arg3
);
8760 ret
= -TARGET_EFAULT
;
8762 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8763 unlock_user(p2
, arg3
, 0);
8764 unlock_user(p
, arg1
, 0);
8768 #ifdef TARGET_NR_oldlstat
8769 case TARGET_NR_oldlstat
:
8772 #ifdef TARGET_NR_readlink
8773 case TARGET_NR_readlink
:
8776 p
= lock_user_string(arg1
);
8777 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8779 ret
= -TARGET_EFAULT
;
8781 /* Short circuit this for the magic exe check. */
8782 ret
= -TARGET_EINVAL
;
8783 } else if (is_proc_myself((const char *)p
, "exe")) {
8784 char real
[PATH_MAX
], *temp
;
8785 temp
= realpath(exec_path
, real
);
8786 /* Return value is # of bytes that we wrote to the buffer. */
8788 ret
= get_errno(-1);
8790 /* Don't worry about sign mismatch as earlier mapping
8791 * logic would have thrown a bad address error. */
8792 ret
= MIN(strlen(real
), arg3
);
8793 /* We cannot NUL terminate the string. */
8794 memcpy(p2
, real
, ret
);
8797 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8799 unlock_user(p2
, arg2
, ret
);
8800 unlock_user(p
, arg1
, 0);
8804 #if defined(TARGET_NR_readlinkat)
8805 case TARGET_NR_readlinkat
:
8808 p
= lock_user_string(arg2
);
8809 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8811 ret
= -TARGET_EFAULT
;
8812 } else if (is_proc_myself((const char *)p
, "exe")) {
8813 char real
[PATH_MAX
], *temp
;
8814 temp
= realpath(exec_path
, real
);
8815 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8816 snprintf((char *)p2
, arg4
, "%s", real
);
8818 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8820 unlock_user(p2
, arg3
, ret
);
8821 unlock_user(p
, arg2
, 0);
8825 #ifdef TARGET_NR_uselib
8826 case TARGET_NR_uselib
:
8829 #ifdef TARGET_NR_swapon
8830 case TARGET_NR_swapon
:
8831 if (!(p
= lock_user_string(arg1
)))
8833 ret
= get_errno(swapon(p
, arg2
));
8834 unlock_user(p
, arg1
, 0);
8837 case TARGET_NR_reboot
:
8838 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8839 /* arg4 must be ignored in all other cases */
8840 p
= lock_user_string(arg4
);
8844 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8845 unlock_user(p
, arg4
, 0);
8847 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8850 #ifdef TARGET_NR_readdir
8851 case TARGET_NR_readdir
:
8854 #ifdef TARGET_NR_mmap
8855 case TARGET_NR_mmap
:
8856 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8857 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8858 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8859 || defined(TARGET_S390X)
8862 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8863 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8871 unlock_user(v
, arg1
, 0);
8872 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8873 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8877 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8878 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8884 #ifdef TARGET_NR_mmap2
8885 case TARGET_NR_mmap2
:
8887 #define MMAP_SHIFT 12
8889 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8890 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8892 arg6
<< MMAP_SHIFT
));
8895 case TARGET_NR_munmap
:
8896 ret
= get_errno(target_munmap(arg1
, arg2
));
8898 case TARGET_NR_mprotect
:
8900 TaskState
*ts
= cpu
->opaque
;
8901 /* Special hack to detect libc making the stack executable. */
8902 if ((arg3
& PROT_GROWSDOWN
)
8903 && arg1
>= ts
->info
->stack_limit
8904 && arg1
<= ts
->info
->start_stack
) {
8905 arg3
&= ~PROT_GROWSDOWN
;
8906 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8907 arg1
= ts
->info
->stack_limit
;
8910 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8912 #ifdef TARGET_NR_mremap
8913 case TARGET_NR_mremap
:
8914 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8917 /* ??? msync/mlock/munlock are broken for softmmu. */
8918 #ifdef TARGET_NR_msync
8919 case TARGET_NR_msync
:
8920 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8923 #ifdef TARGET_NR_mlock
8924 case TARGET_NR_mlock
:
8925 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8928 #ifdef TARGET_NR_munlock
8929 case TARGET_NR_munlock
:
8930 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8933 #ifdef TARGET_NR_mlockall
8934 case TARGET_NR_mlockall
:
8935 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8938 #ifdef TARGET_NR_munlockall
8939 case TARGET_NR_munlockall
:
8940 ret
= get_errno(munlockall());
8943 case TARGET_NR_truncate
:
8944 if (!(p
= lock_user_string(arg1
)))
8946 ret
= get_errno(truncate(p
, arg2
));
8947 unlock_user(p
, arg1
, 0);
8949 case TARGET_NR_ftruncate
:
8950 ret
= get_errno(ftruncate(arg1
, arg2
));
8952 case TARGET_NR_fchmod
:
8953 ret
= get_errno(fchmod(arg1
, arg2
));
8955 #if defined(TARGET_NR_fchmodat)
8956 case TARGET_NR_fchmodat
:
8957 if (!(p
= lock_user_string(arg2
)))
8959 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8960 unlock_user(p
, arg2
, 0);
8963 case TARGET_NR_getpriority
:
8964 /* Note that negative values are valid for getpriority, so we must
8965 differentiate based on errno settings. */
8967 ret
= getpriority(arg1
, arg2
);
8968 if (ret
== -1 && errno
!= 0) {
8969 ret
= -host_to_target_errno(errno
);
8973 /* Return value is the unbiased priority. Signal no error. */
8974 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8976 /* Return value is a biased priority to avoid negative numbers. */
8980 case TARGET_NR_setpriority
:
8981 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8983 #ifdef TARGET_NR_profil
8984 case TARGET_NR_profil
:
8987 case TARGET_NR_statfs
:
8988 if (!(p
= lock_user_string(arg1
)))
8990 ret
= get_errno(statfs(path(p
), &stfs
));
8991 unlock_user(p
, arg1
, 0);
8993 if (!is_error(ret
)) {
8994 struct target_statfs
*target_stfs
;
8996 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8998 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8999 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9000 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9001 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9002 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9003 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9004 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9005 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9006 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9007 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9008 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9009 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9010 unlock_user_struct(target_stfs
, arg2
, 1);
9013 case TARGET_NR_fstatfs
:
9014 ret
= get_errno(fstatfs(arg1
, &stfs
));
9015 goto convert_statfs
;
9016 #ifdef TARGET_NR_statfs64
9017 case TARGET_NR_statfs64
:
9018 if (!(p
= lock_user_string(arg1
)))
9020 ret
= get_errno(statfs(path(p
), &stfs
));
9021 unlock_user(p
, arg1
, 0);
9023 if (!is_error(ret
)) {
9024 struct target_statfs64
*target_stfs
;
9026 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9028 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9029 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9030 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9031 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9032 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9033 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9034 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9035 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9036 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9037 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9038 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9039 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9040 unlock_user_struct(target_stfs
, arg3
, 1);
9043 case TARGET_NR_fstatfs64
:
9044 ret
= get_errno(fstatfs(arg1
, &stfs
));
9045 goto convert_statfs64
;
9047 #ifdef TARGET_NR_ioperm
9048 case TARGET_NR_ioperm
:
9051 #ifdef TARGET_NR_socketcall
9052 case TARGET_NR_socketcall
:
9053 ret
= do_socketcall(arg1
, arg2
);
9056 #ifdef TARGET_NR_accept
9057 case TARGET_NR_accept
:
9058 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9061 #ifdef TARGET_NR_accept4
9062 case TARGET_NR_accept4
:
9063 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9066 #ifdef TARGET_NR_bind
9067 case TARGET_NR_bind
:
9068 ret
= do_bind(arg1
, arg2
, arg3
);
9071 #ifdef TARGET_NR_connect
9072 case TARGET_NR_connect
:
9073 ret
= do_connect(arg1
, arg2
, arg3
);
9076 #ifdef TARGET_NR_getpeername
9077 case TARGET_NR_getpeername
:
9078 ret
= do_getpeername(arg1
, arg2
, arg3
);
9081 #ifdef TARGET_NR_getsockname
9082 case TARGET_NR_getsockname
:
9083 ret
= do_getsockname(arg1
, arg2
, arg3
);
9086 #ifdef TARGET_NR_getsockopt
9087 case TARGET_NR_getsockopt
:
9088 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9091 #ifdef TARGET_NR_listen
9092 case TARGET_NR_listen
:
9093 ret
= get_errno(listen(arg1
, arg2
));
9096 #ifdef TARGET_NR_recv
9097 case TARGET_NR_recv
:
9098 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9101 #ifdef TARGET_NR_recvfrom
9102 case TARGET_NR_recvfrom
:
9103 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9106 #ifdef TARGET_NR_recvmsg
9107 case TARGET_NR_recvmsg
:
9108 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9111 #ifdef TARGET_NR_send
9112 case TARGET_NR_send
:
9113 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9116 #ifdef TARGET_NR_sendmsg
9117 case TARGET_NR_sendmsg
:
9118 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9121 #ifdef TARGET_NR_sendmmsg
9122 case TARGET_NR_sendmmsg
:
9123 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9125 case TARGET_NR_recvmmsg
:
9126 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9129 #ifdef TARGET_NR_sendto
9130 case TARGET_NR_sendto
:
9131 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9134 #ifdef TARGET_NR_shutdown
9135 case TARGET_NR_shutdown
:
9136 ret
= get_errno(shutdown(arg1
, arg2
));
9139 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9140 case TARGET_NR_getrandom
:
9141 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9145 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9146 unlock_user(p
, arg1
, ret
);
9149 #ifdef TARGET_NR_socket
9150 case TARGET_NR_socket
:
9151 ret
= do_socket(arg1
, arg2
, arg3
);
9152 fd_trans_unregister(ret
);
9155 #ifdef TARGET_NR_socketpair
9156 case TARGET_NR_socketpair
:
9157 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9160 #ifdef TARGET_NR_setsockopt
9161 case TARGET_NR_setsockopt
:
9162 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9166 case TARGET_NR_syslog
:
9167 if (!(p
= lock_user_string(arg2
)))
9169 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9170 unlock_user(p
, arg2
, 0);
9173 case TARGET_NR_setitimer
:
9175 struct itimerval value
, ovalue
, *pvalue
;
9179 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9180 || copy_from_user_timeval(&pvalue
->it_value
,
9181 arg2
+ sizeof(struct target_timeval
)))
9186 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9187 if (!is_error(ret
) && arg3
) {
9188 if (copy_to_user_timeval(arg3
,
9189 &ovalue
.it_interval
)
9190 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9196 case TARGET_NR_getitimer
:
9198 struct itimerval value
;
9200 ret
= get_errno(getitimer(arg1
, &value
));
9201 if (!is_error(ret
) && arg2
) {
9202 if (copy_to_user_timeval(arg2
,
9204 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9210 #ifdef TARGET_NR_stat
9211 case TARGET_NR_stat
:
9212 if (!(p
= lock_user_string(arg1
)))
9214 ret
= get_errno(stat(path(p
), &st
));
9215 unlock_user(p
, arg1
, 0);
9218 #ifdef TARGET_NR_lstat
9219 case TARGET_NR_lstat
:
9220 if (!(p
= lock_user_string(arg1
)))
9222 ret
= get_errno(lstat(path(p
), &st
));
9223 unlock_user(p
, arg1
, 0);
9226 case TARGET_NR_fstat
:
9228 ret
= get_errno(fstat(arg1
, &st
));
9229 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9232 if (!is_error(ret
)) {
9233 struct target_stat
*target_st
;
9235 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9237 memset(target_st
, 0, sizeof(*target_st
));
9238 __put_user(st
.st_dev
, &target_st
->st_dev
);
9239 __put_user(st
.st_ino
, &target_st
->st_ino
);
9240 __put_user(st
.st_mode
, &target_st
->st_mode
);
9241 __put_user(st
.st_uid
, &target_st
->st_uid
);
9242 __put_user(st
.st_gid
, &target_st
->st_gid
);
9243 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9244 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9245 __put_user(st
.st_size
, &target_st
->st_size
);
9246 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9247 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9248 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9249 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9250 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9251 unlock_user_struct(target_st
, arg2
, 1);
9255 #ifdef TARGET_NR_olduname
9256 case TARGET_NR_olduname
:
9259 #ifdef TARGET_NR_iopl
9260 case TARGET_NR_iopl
:
9263 case TARGET_NR_vhangup
:
9264 ret
= get_errno(vhangup());
9266 #ifdef TARGET_NR_idle
9267 case TARGET_NR_idle
:
9270 #ifdef TARGET_NR_syscall
9271 case TARGET_NR_syscall
:
9272 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9273 arg6
, arg7
, arg8
, 0);
9276 case TARGET_NR_wait4
:
9279 abi_long status_ptr
= arg2
;
9280 struct rusage rusage
, *rusage_ptr
;
9281 abi_ulong target_rusage
= arg4
;
9282 abi_long rusage_err
;
9284 rusage_ptr
= &rusage
;
9287 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9288 if (!is_error(ret
)) {
9289 if (status_ptr
&& ret
) {
9290 status
= host_to_target_waitstatus(status
);
9291 if (put_user_s32(status
, status_ptr
))
9294 if (target_rusage
) {
9295 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9303 #ifdef TARGET_NR_swapoff
9304 case TARGET_NR_swapoff
:
9305 if (!(p
= lock_user_string(arg1
)))
9307 ret
= get_errno(swapoff(p
));
9308 unlock_user(p
, arg1
, 0);
9311 case TARGET_NR_sysinfo
:
9313 struct target_sysinfo
*target_value
;
9314 struct sysinfo value
;
9315 ret
= get_errno(sysinfo(&value
));
9316 if (!is_error(ret
) && arg1
)
9318 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9320 __put_user(value
.uptime
, &target_value
->uptime
);
9321 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9322 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9323 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9324 __put_user(value
.totalram
, &target_value
->totalram
);
9325 __put_user(value
.freeram
, &target_value
->freeram
);
9326 __put_user(value
.sharedram
, &target_value
->sharedram
);
9327 __put_user(value
.bufferram
, &target_value
->bufferram
);
9328 __put_user(value
.totalswap
, &target_value
->totalswap
);
9329 __put_user(value
.freeswap
, &target_value
->freeswap
);
9330 __put_user(value
.procs
, &target_value
->procs
);
9331 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9332 __put_user(value
.freehigh
, &target_value
->freehigh
);
9333 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9334 unlock_user_struct(target_value
, arg1
, 1);
9338 #ifdef TARGET_NR_ipc
9340 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9343 #ifdef TARGET_NR_semget
9344 case TARGET_NR_semget
:
9345 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9348 #ifdef TARGET_NR_semop
9349 case TARGET_NR_semop
:
9350 ret
= do_semop(arg1
, arg2
, arg3
);
9353 #ifdef TARGET_NR_semctl
9354 case TARGET_NR_semctl
:
9355 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9358 #ifdef TARGET_NR_msgctl
9359 case TARGET_NR_msgctl
:
9360 ret
= do_msgctl(arg1
, arg2
, arg3
);
9363 #ifdef TARGET_NR_msgget
9364 case TARGET_NR_msgget
:
9365 ret
= get_errno(msgget(arg1
, arg2
));
9368 #ifdef TARGET_NR_msgrcv
9369 case TARGET_NR_msgrcv
:
9370 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9373 #ifdef TARGET_NR_msgsnd
9374 case TARGET_NR_msgsnd
:
9375 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9378 #ifdef TARGET_NR_shmget
9379 case TARGET_NR_shmget
:
9380 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9383 #ifdef TARGET_NR_shmctl
9384 case TARGET_NR_shmctl
:
9385 ret
= do_shmctl(arg1
, arg2
, arg3
);
9388 #ifdef TARGET_NR_shmat
9389 case TARGET_NR_shmat
:
9390 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9393 #ifdef TARGET_NR_shmdt
9394 case TARGET_NR_shmdt
:
9395 ret
= do_shmdt(arg1
);
9398 case TARGET_NR_fsync
:
9399 ret
= get_errno(fsync(arg1
));
9401 case TARGET_NR_clone
:
9402 /* Linux manages to have three different orderings for its
9403 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9404 * match the kernel's CONFIG_CLONE_* settings.
9405 * Microblaze is further special in that it uses a sixth
9406 * implicit argument to clone for the TLS pointer.
9408 #if defined(TARGET_MICROBLAZE)
9409 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9410 #elif defined(TARGET_CLONE_BACKWARDS)
9411 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9412 #elif defined(TARGET_CLONE_BACKWARDS2)
9413 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9415 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9418 #ifdef __NR_exit_group
9419 /* new thread calls */
9420 case TARGET_NR_exit_group
:
9424 gdb_exit(cpu_env
, arg1
);
9425 ret
= get_errno(exit_group(arg1
));
9428 case TARGET_NR_setdomainname
:
9429 if (!(p
= lock_user_string(arg1
)))
9431 ret
= get_errno(setdomainname(p
, arg2
));
9432 unlock_user(p
, arg1
, 0);
9434 case TARGET_NR_uname
:
9435 /* no need to transcode because we use the linux syscall */
9437 struct new_utsname
* buf
;
9439 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9441 ret
= get_errno(sys_uname(buf
));
9442 if (!is_error(ret
)) {
9443 /* Overwrite the native machine name with whatever is being
9445 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9446 /* Allow the user to override the reported release. */
9447 if (qemu_uname_release
&& *qemu_uname_release
) {
9448 g_strlcpy(buf
->release
, qemu_uname_release
,
9449 sizeof(buf
->release
));
9452 unlock_user_struct(buf
, arg1
, 1);
9456 case TARGET_NR_modify_ldt
:
9457 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9459 #if !defined(TARGET_X86_64)
9460 case TARGET_NR_vm86old
:
9462 case TARGET_NR_vm86
:
9463 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9467 case TARGET_NR_adjtimex
:
9469 #ifdef TARGET_NR_create_module
9470 case TARGET_NR_create_module
:
9472 case TARGET_NR_init_module
:
9473 case TARGET_NR_delete_module
:
9474 #ifdef TARGET_NR_get_kernel_syms
9475 case TARGET_NR_get_kernel_syms
:
9478 case TARGET_NR_quotactl
:
9480 case TARGET_NR_getpgid
:
9481 ret
= get_errno(getpgid(arg1
));
9483 case TARGET_NR_fchdir
:
9484 ret
= get_errno(fchdir(arg1
));
9486 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9487 case TARGET_NR_bdflush
:
9490 #ifdef TARGET_NR_sysfs
9491 case TARGET_NR_sysfs
:
9494 case TARGET_NR_personality
:
9495 ret
= get_errno(personality(arg1
));
9497 #ifdef TARGET_NR_afs_syscall
9498 case TARGET_NR_afs_syscall
:
9501 #ifdef TARGET_NR__llseek /* Not on alpha */
9502 case TARGET_NR__llseek
:
9505 #if !defined(__NR_llseek)
9506 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9508 ret
= get_errno(res
);
9513 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9515 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9521 #ifdef TARGET_NR_getdents
9522 case TARGET_NR_getdents
:
9523 #ifdef __NR_getdents
9524 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9526 struct target_dirent
*target_dirp
;
9527 struct linux_dirent
*dirp
;
9528 abi_long count
= arg3
;
9530 dirp
= g_try_malloc(count
);
9532 ret
= -TARGET_ENOMEM
;
9536 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9537 if (!is_error(ret
)) {
9538 struct linux_dirent
*de
;
9539 struct target_dirent
*tde
;
9541 int reclen
, treclen
;
9542 int count1
, tnamelen
;
9546 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9550 reclen
= de
->d_reclen
;
9551 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9552 assert(tnamelen
>= 0);
9553 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9554 assert(count1
+ treclen
<= count
);
9555 tde
->d_reclen
= tswap16(treclen
);
9556 tde
->d_ino
= tswapal(de
->d_ino
);
9557 tde
->d_off
= tswapal(de
->d_off
);
9558 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9559 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9561 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9565 unlock_user(target_dirp
, arg2
, ret
);
9571 struct linux_dirent
*dirp
;
9572 abi_long count
= arg3
;
9574 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9576 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9577 if (!is_error(ret
)) {
9578 struct linux_dirent
*de
;
9583 reclen
= de
->d_reclen
;
9586 de
->d_reclen
= tswap16(reclen
);
9587 tswapls(&de
->d_ino
);
9588 tswapls(&de
->d_off
);
9589 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9593 unlock_user(dirp
, arg2
, ret
);
9597 /* Implement getdents in terms of getdents64 */
9599 struct linux_dirent64
*dirp
;
9600 abi_long count
= arg3
;
9602 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9606 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9607 if (!is_error(ret
)) {
9608 /* Convert the dirent64 structs to target dirent. We do this
9609 * in-place, since we can guarantee that a target_dirent is no
9610 * larger than a dirent64; however this means we have to be
9611 * careful to read everything before writing in the new format.
9613 struct linux_dirent64
*de
;
9614 struct target_dirent
*tde
;
9619 tde
= (struct target_dirent
*)dirp
;
9621 int namelen
, treclen
;
9622 int reclen
= de
->d_reclen
;
9623 uint64_t ino
= de
->d_ino
;
9624 int64_t off
= de
->d_off
;
9625 uint8_t type
= de
->d_type
;
9627 namelen
= strlen(de
->d_name
);
9628 treclen
= offsetof(struct target_dirent
, d_name
)
9630 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9632 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9633 tde
->d_ino
= tswapal(ino
);
9634 tde
->d_off
= tswapal(off
);
9635 tde
->d_reclen
= tswap16(treclen
);
9636 /* The target_dirent type is in what was formerly a padding
9637 * byte at the end of the structure:
9639 *(((char *)tde
) + treclen
- 1) = type
;
9641 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9642 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9648 unlock_user(dirp
, arg2
, ret
);
9652 #endif /* TARGET_NR_getdents */
9653 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9654 case TARGET_NR_getdents64
:
9656 struct linux_dirent64
*dirp
;
9657 abi_long count
= arg3
;
9658 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9660 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9661 if (!is_error(ret
)) {
9662 struct linux_dirent64
*de
;
9667 reclen
= de
->d_reclen
;
9670 de
->d_reclen
= tswap16(reclen
);
9671 tswap64s((uint64_t *)&de
->d_ino
);
9672 tswap64s((uint64_t *)&de
->d_off
);
9673 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9677 unlock_user(dirp
, arg2
, ret
);
9680 #endif /* TARGET_NR_getdents64 */
9681 #if defined(TARGET_NR__newselect)
9682 case TARGET_NR__newselect
:
9683 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9686 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9687 # ifdef TARGET_NR_poll
9688 case TARGET_NR_poll
:
9690 # ifdef TARGET_NR_ppoll
9691 case TARGET_NR_ppoll
:
9694 struct target_pollfd
*target_pfd
;
9695 unsigned int nfds
= arg2
;
9702 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9703 ret
= -TARGET_EINVAL
;
9707 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9708 sizeof(struct target_pollfd
) * nfds
, 1);
9713 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9714 for (i
= 0; i
< nfds
; i
++) {
9715 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9716 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9721 # ifdef TARGET_NR_ppoll
9722 case TARGET_NR_ppoll
:
9724 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9725 target_sigset_t
*target_set
;
9726 sigset_t _set
, *set
= &_set
;
9729 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9730 unlock_user(target_pfd
, arg1
, 0);
9738 if (arg5
!= sizeof(target_sigset_t
)) {
9739 unlock_user(target_pfd
, arg1
, 0);
9740 ret
= -TARGET_EINVAL
;
9744 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9746 unlock_user(target_pfd
, arg1
, 0);
9749 target_to_host_sigset(set
, target_set
);
9754 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9755 set
, SIGSET_T_SIZE
));
9757 if (!is_error(ret
) && arg3
) {
9758 host_to_target_timespec(arg3
, timeout_ts
);
9761 unlock_user(target_set
, arg4
, 0);
9766 # ifdef TARGET_NR_poll
9767 case TARGET_NR_poll
:
9769 struct timespec ts
, *pts
;
9772 /* Convert ms to secs, ns */
9773 ts
.tv_sec
= arg3
/ 1000;
9774 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9777 /* -ve poll() timeout means "infinite" */
9780 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9785 g_assert_not_reached();
9788 if (!is_error(ret
)) {
9789 for(i
= 0; i
< nfds
; i
++) {
9790 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9793 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9797 case TARGET_NR_flock
:
9798 /* NOTE: the flock constant seems to be the same for every
9800 ret
= get_errno(safe_flock(arg1
, arg2
));
9802 case TARGET_NR_readv
:
9804 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9806 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9807 unlock_iovec(vec
, arg2
, arg3
, 1);
9809 ret
= -host_to_target_errno(errno
);
9813 case TARGET_NR_writev
:
9815 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9817 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9818 unlock_iovec(vec
, arg2
, arg3
, 0);
9820 ret
= -host_to_target_errno(errno
);
9824 case TARGET_NR_getsid
:
9825 ret
= get_errno(getsid(arg1
));
9827 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9828 case TARGET_NR_fdatasync
:
9829 ret
= get_errno(fdatasync(arg1
));
9832 #ifdef TARGET_NR__sysctl
9833 case TARGET_NR__sysctl
:
9834 /* We don't implement this, but ENOTDIR is always a safe
9836 ret
= -TARGET_ENOTDIR
;
9839 case TARGET_NR_sched_getaffinity
:
9841 unsigned int mask_size
;
9842 unsigned long *mask
;
9845 * sched_getaffinity needs multiples of ulong, so need to take
9846 * care of mismatches between target ulong and host ulong sizes.
9848 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9849 ret
= -TARGET_EINVAL
;
9852 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9854 mask
= alloca(mask_size
);
9855 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9857 if (!is_error(ret
)) {
9859 /* More data returned than the caller's buffer will fit.
9860 * This only happens if sizeof(abi_long) < sizeof(long)
9861 * and the caller passed us a buffer holding an odd number
9862 * of abi_longs. If the host kernel is actually using the
9863 * extra 4 bytes then fail EINVAL; otherwise we can just
9864 * ignore them and only copy the interesting part.
9866 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9867 if (numcpus
> arg2
* 8) {
9868 ret
= -TARGET_EINVAL
;
9874 if (copy_to_user(arg3
, mask
, ret
)) {
9880 case TARGET_NR_sched_setaffinity
:
9882 unsigned int mask_size
;
9883 unsigned long *mask
;
9886 * sched_setaffinity needs multiples of ulong, so need to take
9887 * care of mismatches between target ulong and host ulong sizes.
9889 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9890 ret
= -TARGET_EINVAL
;
9893 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9895 mask
= alloca(mask_size
);
9896 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9899 memcpy(mask
, p
, arg2
);
9900 unlock_user_struct(p
, arg2
, 0);
9902 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9905 case TARGET_NR_sched_setparam
:
9907 struct sched_param
*target_schp
;
9908 struct sched_param schp
;
9911 return -TARGET_EINVAL
;
9913 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9915 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9916 unlock_user_struct(target_schp
, arg2
, 0);
9917 ret
= get_errno(sched_setparam(arg1
, &schp
));
9920 case TARGET_NR_sched_getparam
:
9922 struct sched_param
*target_schp
;
9923 struct sched_param schp
;
9926 return -TARGET_EINVAL
;
9928 ret
= get_errno(sched_getparam(arg1
, &schp
));
9929 if (!is_error(ret
)) {
9930 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9932 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9933 unlock_user_struct(target_schp
, arg2
, 1);
9937 case TARGET_NR_sched_setscheduler
:
9939 struct sched_param
*target_schp
;
9940 struct sched_param schp
;
9942 return -TARGET_EINVAL
;
9944 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9946 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9947 unlock_user_struct(target_schp
, arg3
, 0);
9948 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9951 case TARGET_NR_sched_getscheduler
:
9952 ret
= get_errno(sched_getscheduler(arg1
));
9954 case TARGET_NR_sched_yield
:
9955 ret
= get_errno(sched_yield());
9957 case TARGET_NR_sched_get_priority_max
:
9958 ret
= get_errno(sched_get_priority_max(arg1
));
9960 case TARGET_NR_sched_get_priority_min
:
9961 ret
= get_errno(sched_get_priority_min(arg1
));
9963 case TARGET_NR_sched_rr_get_interval
:
9966 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9967 if (!is_error(ret
)) {
9968 ret
= host_to_target_timespec(arg2
, &ts
);
9972 case TARGET_NR_nanosleep
:
9974 struct timespec req
, rem
;
9975 target_to_host_timespec(&req
, arg1
);
9976 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9977 if (is_error(ret
) && arg2
) {
9978 host_to_target_timespec(arg2
, &rem
);
9982 #ifdef TARGET_NR_query_module
9983 case TARGET_NR_query_module
:
9986 #ifdef TARGET_NR_nfsservctl
9987 case TARGET_NR_nfsservctl
:
9990 case TARGET_NR_prctl
:
9992 case PR_GET_PDEATHSIG
:
9995 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9996 if (!is_error(ret
) && arg2
9997 && put_user_ual(deathsig
, arg2
)) {
10005 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10009 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10010 arg3
, arg4
, arg5
));
10011 unlock_user(name
, arg2
, 16);
10016 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10020 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10021 arg3
, arg4
, arg5
));
10022 unlock_user(name
, arg2
, 0);
10027 /* Most prctl options have no pointer arguments */
10028 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10032 #ifdef TARGET_NR_arch_prctl
10033 case TARGET_NR_arch_prctl
:
10034 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10035 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10038 goto unimplemented
;
10041 #ifdef TARGET_NR_pread64
10042 case TARGET_NR_pread64
:
10043 if (regpairs_aligned(cpu_env
)) {
10047 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10049 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10050 unlock_user(p
, arg2
, ret
);
10052 case TARGET_NR_pwrite64
:
10053 if (regpairs_aligned(cpu_env
)) {
10057 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10059 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10060 unlock_user(p
, arg2
, 0);
10063 case TARGET_NR_getcwd
:
10064 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10066 ret
= get_errno(sys_getcwd1(p
, arg2
));
10067 unlock_user(p
, arg1
, ret
);
10069 case TARGET_NR_capget
:
10070 case TARGET_NR_capset
:
10072 struct target_user_cap_header
*target_header
;
10073 struct target_user_cap_data
*target_data
= NULL
;
10074 struct __user_cap_header_struct header
;
10075 struct __user_cap_data_struct data
[2];
10076 struct __user_cap_data_struct
*dataptr
= NULL
;
10077 int i
, target_datalen
;
10078 int data_items
= 1;
10080 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10083 header
.version
= tswap32(target_header
->version
);
10084 header
.pid
= tswap32(target_header
->pid
);
10086 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10087 /* Version 2 and up takes pointer to two user_data structs */
10091 target_datalen
= sizeof(*target_data
) * data_items
;
10094 if (num
== TARGET_NR_capget
) {
10095 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10097 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10099 if (!target_data
) {
10100 unlock_user_struct(target_header
, arg1
, 0);
10104 if (num
== TARGET_NR_capset
) {
10105 for (i
= 0; i
< data_items
; i
++) {
10106 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10107 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10108 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10115 if (num
== TARGET_NR_capget
) {
10116 ret
= get_errno(capget(&header
, dataptr
));
10118 ret
= get_errno(capset(&header
, dataptr
));
10121 /* The kernel always updates version for both capget and capset */
10122 target_header
->version
= tswap32(header
.version
);
10123 unlock_user_struct(target_header
, arg1
, 1);
10126 if (num
== TARGET_NR_capget
) {
10127 for (i
= 0; i
< data_items
; i
++) {
10128 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10129 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10130 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10132 unlock_user(target_data
, arg2
, target_datalen
);
10134 unlock_user(target_data
, arg2
, 0);
10139 case TARGET_NR_sigaltstack
:
10140 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10143 #ifdef CONFIG_SENDFILE
10144 case TARGET_NR_sendfile
:
10146 off_t
*offp
= NULL
;
10149 ret
= get_user_sal(off
, arg3
);
10150 if (is_error(ret
)) {
10155 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10156 if (!is_error(ret
) && arg3
) {
10157 abi_long ret2
= put_user_sal(off
, arg3
);
10158 if (is_error(ret2
)) {
10164 #ifdef TARGET_NR_sendfile64
10165 case TARGET_NR_sendfile64
:
10167 off_t
*offp
= NULL
;
10170 ret
= get_user_s64(off
, arg3
);
10171 if (is_error(ret
)) {
10176 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10177 if (!is_error(ret
) && arg3
) {
10178 abi_long ret2
= put_user_s64(off
, arg3
);
10179 if (is_error(ret2
)) {
10187 case TARGET_NR_sendfile
:
10188 #ifdef TARGET_NR_sendfile64
10189 case TARGET_NR_sendfile64
:
10191 goto unimplemented
;
10194 #ifdef TARGET_NR_getpmsg
10195 case TARGET_NR_getpmsg
:
10196 goto unimplemented
;
10198 #ifdef TARGET_NR_putpmsg
10199 case TARGET_NR_putpmsg
:
10200 goto unimplemented
;
10202 #ifdef TARGET_NR_vfork
10203 case TARGET_NR_vfork
:
10204 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10208 #ifdef TARGET_NR_ugetrlimit
10209 case TARGET_NR_ugetrlimit
:
10211 struct rlimit rlim
;
10212 int resource
= target_to_host_resource(arg1
);
10213 ret
= get_errno(getrlimit(resource
, &rlim
));
10214 if (!is_error(ret
)) {
10215 struct target_rlimit
*target_rlim
;
10216 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10218 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10219 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10220 unlock_user_struct(target_rlim
, arg2
, 1);
10225 #ifdef TARGET_NR_truncate64
10226 case TARGET_NR_truncate64
:
10227 if (!(p
= lock_user_string(arg1
)))
10229 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10230 unlock_user(p
, arg1
, 0);
10233 #ifdef TARGET_NR_ftruncate64
10234 case TARGET_NR_ftruncate64
:
10235 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10238 #ifdef TARGET_NR_stat64
10239 case TARGET_NR_stat64
:
10240 if (!(p
= lock_user_string(arg1
)))
10242 ret
= get_errno(stat(path(p
), &st
));
10243 unlock_user(p
, arg1
, 0);
10244 if (!is_error(ret
))
10245 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10248 #ifdef TARGET_NR_lstat64
10249 case TARGET_NR_lstat64
:
10250 if (!(p
= lock_user_string(arg1
)))
10252 ret
= get_errno(lstat(path(p
), &st
));
10253 unlock_user(p
, arg1
, 0);
10254 if (!is_error(ret
))
10255 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10258 #ifdef TARGET_NR_fstat64
10259 case TARGET_NR_fstat64
:
10260 ret
= get_errno(fstat(arg1
, &st
));
10261 if (!is_error(ret
))
10262 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10265 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10266 #ifdef TARGET_NR_fstatat64
10267 case TARGET_NR_fstatat64
:
10269 #ifdef TARGET_NR_newfstatat
10270 case TARGET_NR_newfstatat
:
10272 if (!(p
= lock_user_string(arg2
)))
10274 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10275 if (!is_error(ret
))
10276 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10279 #ifdef TARGET_NR_lchown
10280 case TARGET_NR_lchown
:
10281 if (!(p
= lock_user_string(arg1
)))
10283 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10284 unlock_user(p
, arg1
, 0);
10287 #ifdef TARGET_NR_getuid
10288 case TARGET_NR_getuid
:
10289 ret
= get_errno(high2lowuid(getuid()));
10292 #ifdef TARGET_NR_getgid
10293 case TARGET_NR_getgid
:
10294 ret
= get_errno(high2lowgid(getgid()));
10297 #ifdef TARGET_NR_geteuid
10298 case TARGET_NR_geteuid
:
10299 ret
= get_errno(high2lowuid(geteuid()));
10302 #ifdef TARGET_NR_getegid
10303 case TARGET_NR_getegid
:
10304 ret
= get_errno(high2lowgid(getegid()));
10307 case TARGET_NR_setreuid
:
10308 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10310 case TARGET_NR_setregid
:
10311 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10313 case TARGET_NR_getgroups
:
10315 int gidsetsize
= arg1
;
10316 target_id
*target_grouplist
;
10320 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10321 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10322 if (gidsetsize
== 0)
10324 if (!is_error(ret
)) {
10325 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10326 if (!target_grouplist
)
10328 for(i
= 0;i
< ret
; i
++)
10329 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10330 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10334 case TARGET_NR_setgroups
:
10336 int gidsetsize
= arg1
;
10337 target_id
*target_grouplist
;
10338 gid_t
*grouplist
= NULL
;
10341 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10342 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10343 if (!target_grouplist
) {
10344 ret
= -TARGET_EFAULT
;
10347 for (i
= 0; i
< gidsetsize
; i
++) {
10348 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10350 unlock_user(target_grouplist
, arg2
, 0);
10352 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10355 case TARGET_NR_fchown
:
10356 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10358 #if defined(TARGET_NR_fchownat)
10359 case TARGET_NR_fchownat
:
10360 if (!(p
= lock_user_string(arg2
)))
10362 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10363 low2highgid(arg4
), arg5
));
10364 unlock_user(p
, arg2
, 0);
10367 #ifdef TARGET_NR_setresuid
10368 case TARGET_NR_setresuid
:
10369 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10371 low2highuid(arg3
)));
10374 #ifdef TARGET_NR_getresuid
10375 case TARGET_NR_getresuid
:
10377 uid_t ruid
, euid
, suid
;
10378 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10379 if (!is_error(ret
)) {
10380 if (put_user_id(high2lowuid(ruid
), arg1
)
10381 || put_user_id(high2lowuid(euid
), arg2
)
10382 || put_user_id(high2lowuid(suid
), arg3
))
10388 #ifdef TARGET_NR_getresgid
10389 case TARGET_NR_setresgid
:
10390 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10392 low2highgid(arg3
)));
10395 #ifdef TARGET_NR_getresgid
10396 case TARGET_NR_getresgid
:
10398 gid_t rgid
, egid
, sgid
;
10399 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10400 if (!is_error(ret
)) {
10401 if (put_user_id(high2lowgid(rgid
), arg1
)
10402 || put_user_id(high2lowgid(egid
), arg2
)
10403 || put_user_id(high2lowgid(sgid
), arg3
))
10409 #ifdef TARGET_NR_chown
10410 case TARGET_NR_chown
:
10411 if (!(p
= lock_user_string(arg1
)))
10413 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10414 unlock_user(p
, arg1
, 0);
10417 case TARGET_NR_setuid
:
10418 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10420 case TARGET_NR_setgid
:
10421 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10423 case TARGET_NR_setfsuid
:
10424 ret
= get_errno(setfsuid(arg1
));
10426 case TARGET_NR_setfsgid
:
10427 ret
= get_errno(setfsgid(arg1
));
10430 #ifdef TARGET_NR_lchown32
10431 case TARGET_NR_lchown32
:
10432 if (!(p
= lock_user_string(arg1
)))
10434 ret
= get_errno(lchown(p
, arg2
, arg3
));
10435 unlock_user(p
, arg1
, 0);
10438 #ifdef TARGET_NR_getuid32
10439 case TARGET_NR_getuid32
:
10440 ret
= get_errno(getuid());
10444 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10445 /* Alpha specific */
10446 case TARGET_NR_getxuid
:
10450 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10452 ret
= get_errno(getuid());
10455 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10456 /* Alpha specific */
10457 case TARGET_NR_getxgid
:
10461 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10463 ret
= get_errno(getgid());
10466 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10467 /* Alpha specific */
10468 case TARGET_NR_osf_getsysinfo
:
10469 ret
= -TARGET_EOPNOTSUPP
;
10471 case TARGET_GSI_IEEE_FP_CONTROL
:
10473 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10475 /* Copied from linux ieee_fpcr_to_swcr. */
10476 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10477 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10478 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10479 | SWCR_TRAP_ENABLE_DZE
10480 | SWCR_TRAP_ENABLE_OVF
);
10481 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10482 | SWCR_TRAP_ENABLE_INE
);
10483 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10484 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10486 if (put_user_u64 (swcr
, arg2
))
10492 /* case GSI_IEEE_STATE_AT_SIGNAL:
10493 -- Not implemented in linux kernel.
10495 -- Retrieves current unaligned access state; not much used.
10496 case GSI_PROC_TYPE:
10497 -- Retrieves implver information; surely not used.
10498 case GSI_GET_HWRPB:
10499 -- Grabs a copy of the HWRPB; surely not used.
10504 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10505 /* Alpha specific */
10506 case TARGET_NR_osf_setsysinfo
:
10507 ret
= -TARGET_EOPNOTSUPP
;
10509 case TARGET_SSI_IEEE_FP_CONTROL
:
10511 uint64_t swcr
, fpcr
, orig_fpcr
;
10513 if (get_user_u64 (swcr
, arg2
)) {
10516 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10517 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10519 /* Copied from linux ieee_swcr_to_fpcr. */
10520 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10521 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10522 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10523 | SWCR_TRAP_ENABLE_DZE
10524 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10525 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10526 | SWCR_TRAP_ENABLE_INE
)) << 57;
10527 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10528 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10530 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10535 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10537 uint64_t exc
, fpcr
, orig_fpcr
;
10540 if (get_user_u64(exc
, arg2
)) {
10544 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10546 /* We only add to the exception status here. */
10547 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10549 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10552 /* Old exceptions are not signaled. */
10553 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10555 /* If any exceptions set by this call,
10556 and are unmasked, send a signal. */
10558 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10559 si_code
= TARGET_FPE_FLTRES
;
10561 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10562 si_code
= TARGET_FPE_FLTUND
;
10564 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10565 si_code
= TARGET_FPE_FLTOVF
;
10567 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10568 si_code
= TARGET_FPE_FLTDIV
;
10570 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10571 si_code
= TARGET_FPE_FLTINV
;
10573 if (si_code
!= 0) {
10574 target_siginfo_t info
;
10575 info
.si_signo
= SIGFPE
;
10577 info
.si_code
= si_code
;
10578 info
._sifields
._sigfault
._addr
10579 = ((CPUArchState
*)cpu_env
)->pc
;
10580 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10585 /* case SSI_NVPAIRS:
10586 -- Used with SSIN_UACPROC to enable unaligned accesses.
10587 case SSI_IEEE_STATE_AT_SIGNAL:
10588 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10589 -- Not implemented in linux kernel
10594 #ifdef TARGET_NR_osf_sigprocmask
10595 /* Alpha specific. */
10596 case TARGET_NR_osf_sigprocmask
:
10600 sigset_t set
, oldset
;
10603 case TARGET_SIG_BLOCK
:
10606 case TARGET_SIG_UNBLOCK
:
10609 case TARGET_SIG_SETMASK
:
10613 ret
= -TARGET_EINVAL
;
10617 target_to_host_old_sigset(&set
, &mask
);
10618 ret
= do_sigprocmask(how
, &set
, &oldset
);
10620 host_to_target_old_sigset(&mask
, &oldset
);
10627 #ifdef TARGET_NR_getgid32
10628 case TARGET_NR_getgid32
:
10629 ret
= get_errno(getgid());
10632 #ifdef TARGET_NR_geteuid32
10633 case TARGET_NR_geteuid32
:
10634 ret
= get_errno(geteuid());
10637 #ifdef TARGET_NR_getegid32
10638 case TARGET_NR_getegid32
:
10639 ret
= get_errno(getegid());
10642 #ifdef TARGET_NR_setreuid32
10643 case TARGET_NR_setreuid32
:
10644 ret
= get_errno(setreuid(arg1
, arg2
));
10647 #ifdef TARGET_NR_setregid32
10648 case TARGET_NR_setregid32
:
10649 ret
= get_errno(setregid(arg1
, arg2
));
10652 #ifdef TARGET_NR_getgroups32
10653 case TARGET_NR_getgroups32
:
10655 int gidsetsize
= arg1
;
10656 uint32_t *target_grouplist
;
10660 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10661 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10662 if (gidsetsize
== 0)
10664 if (!is_error(ret
)) {
10665 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10666 if (!target_grouplist
) {
10667 ret
= -TARGET_EFAULT
;
10670 for(i
= 0;i
< ret
; i
++)
10671 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10672 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10677 #ifdef TARGET_NR_setgroups32
10678 case TARGET_NR_setgroups32
:
10680 int gidsetsize
= arg1
;
10681 uint32_t *target_grouplist
;
10685 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10686 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10687 if (!target_grouplist
) {
10688 ret
= -TARGET_EFAULT
;
10691 for(i
= 0;i
< gidsetsize
; i
++)
10692 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10693 unlock_user(target_grouplist
, arg2
, 0);
10694 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10698 #ifdef TARGET_NR_fchown32
10699 case TARGET_NR_fchown32
:
10700 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10703 #ifdef TARGET_NR_setresuid32
10704 case TARGET_NR_setresuid32
:
10705 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10708 #ifdef TARGET_NR_getresuid32
10709 case TARGET_NR_getresuid32
:
10711 uid_t ruid
, euid
, suid
;
10712 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10713 if (!is_error(ret
)) {
10714 if (put_user_u32(ruid
, arg1
)
10715 || put_user_u32(euid
, arg2
)
10716 || put_user_u32(suid
, arg3
))
10722 #ifdef TARGET_NR_setresgid32
10723 case TARGET_NR_setresgid32
:
10724 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10727 #ifdef TARGET_NR_getresgid32
10728 case TARGET_NR_getresgid32
:
10730 gid_t rgid
, egid
, sgid
;
10731 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10732 if (!is_error(ret
)) {
10733 if (put_user_u32(rgid
, arg1
)
10734 || put_user_u32(egid
, arg2
)
10735 || put_user_u32(sgid
, arg3
))
10741 #ifdef TARGET_NR_chown32
10742 case TARGET_NR_chown32
:
10743 if (!(p
= lock_user_string(arg1
)))
10745 ret
= get_errno(chown(p
, arg2
, arg3
));
10746 unlock_user(p
, arg1
, 0);
10749 #ifdef TARGET_NR_setuid32
10750 case TARGET_NR_setuid32
:
10751 ret
= get_errno(sys_setuid(arg1
));
10754 #ifdef TARGET_NR_setgid32
10755 case TARGET_NR_setgid32
:
10756 ret
= get_errno(sys_setgid(arg1
));
10759 #ifdef TARGET_NR_setfsuid32
10760 case TARGET_NR_setfsuid32
:
10761 ret
= get_errno(setfsuid(arg1
));
10764 #ifdef TARGET_NR_setfsgid32
10765 case TARGET_NR_setfsgid32
:
10766 ret
= get_errno(setfsgid(arg1
));
10770 case TARGET_NR_pivot_root
:
10771 goto unimplemented
;
10772 #ifdef TARGET_NR_mincore
10773 case TARGET_NR_mincore
:
10776 ret
= -TARGET_EFAULT
;
10777 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10779 if (!(p
= lock_user_string(arg3
)))
10781 ret
= get_errno(mincore(a
, arg2
, p
));
10782 unlock_user(p
, arg3
, ret
);
10784 unlock_user(a
, arg1
, 0);
10788 #ifdef TARGET_NR_arm_fadvise64_64
10789 case TARGET_NR_arm_fadvise64_64
:
10790 /* arm_fadvise64_64 looks like fadvise64_64 but
10791 * with different argument order: fd, advice, offset, len
10792 * rather than the usual fd, offset, len, advice.
10793 * Note that offset and len are both 64-bit so appear as
10794 * pairs of 32-bit registers.
10796 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10797 target_offset64(arg5
, arg6
), arg2
);
10798 ret
= -host_to_target_errno(ret
);
10802 #if TARGET_ABI_BITS == 32
10804 #ifdef TARGET_NR_fadvise64_64
10805 case TARGET_NR_fadvise64_64
:
10806 /* 6 args: fd, offset (high, low), len (high, low), advice */
10807 if (regpairs_aligned(cpu_env
)) {
10808 /* offset is in (3,4), len in (5,6) and advice in 7 */
10815 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10816 target_offset64(arg2
, arg3
),
10817 target_offset64(arg4
, arg5
),
10822 #ifdef TARGET_NR_fadvise64
10823 case TARGET_NR_fadvise64
:
10824 /* 5 args: fd, offset (high, low), len, advice */
10825 if (regpairs_aligned(cpu_env
)) {
10826 /* offset is in (3,4), len in 5 and advice in 6 */
10832 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10833 target_offset64(arg2
, arg3
),
10838 #else /* not a 32-bit ABI */
10839 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10840 #ifdef TARGET_NR_fadvise64_64
10841 case TARGET_NR_fadvise64_64
:
10843 #ifdef TARGET_NR_fadvise64
10844 case TARGET_NR_fadvise64
:
10846 #ifdef TARGET_S390X
10848 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10849 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10850 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10851 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10855 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10858 #endif /* end of 64-bit ABI fadvise handling */
10860 #ifdef TARGET_NR_madvise
10861 case TARGET_NR_madvise
:
10862 /* A straight passthrough may not be safe because qemu sometimes
10863 turns private file-backed mappings into anonymous mappings.
10864 This will break MADV_DONTNEED.
10865 This is a hint, so ignoring and returning success is ok. */
10866 ret
= get_errno(0);
10869 #if TARGET_ABI_BITS == 32
10870 case TARGET_NR_fcntl64
:
10874 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10875 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10878 if (((CPUARMState
*)cpu_env
)->eabi
) {
10879 copyfrom
= copy_from_user_eabi_flock64
;
10880 copyto
= copy_to_user_eabi_flock64
;
10884 cmd
= target_to_host_fcntl_cmd(arg2
);
10885 if (cmd
== -TARGET_EINVAL
) {
10891 case TARGET_F_GETLK64
:
10892 ret
= copyfrom(&fl
, arg3
);
10896 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10898 ret
= copyto(arg3
, &fl
);
10902 case TARGET_F_SETLK64
:
10903 case TARGET_F_SETLKW64
:
10904 ret
= copyfrom(&fl
, arg3
);
10908 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10911 ret
= do_fcntl(arg1
, arg2
, arg3
);
10917 #ifdef TARGET_NR_cacheflush
10918 case TARGET_NR_cacheflush
:
10919 /* self-modifying code is handled automatically, so nothing needed */
10923 #ifdef TARGET_NR_security
10924 case TARGET_NR_security
:
10925 goto unimplemented
;
10927 #ifdef TARGET_NR_getpagesize
10928 case TARGET_NR_getpagesize
:
10929 ret
= TARGET_PAGE_SIZE
;
10932 case TARGET_NR_gettid
:
10933 ret
= get_errno(gettid());
10935 #ifdef TARGET_NR_readahead
10936 case TARGET_NR_readahead
:
10937 #if TARGET_ABI_BITS == 32
10938 if (regpairs_aligned(cpu_env
)) {
10943 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10945 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10950 #ifdef TARGET_NR_setxattr
10951 case TARGET_NR_listxattr
:
10952 case TARGET_NR_llistxattr
:
10956 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10958 ret
= -TARGET_EFAULT
;
10962 p
= lock_user_string(arg1
);
10964 if (num
== TARGET_NR_listxattr
) {
10965 ret
= get_errno(listxattr(p
, b
, arg3
));
10967 ret
= get_errno(llistxattr(p
, b
, arg3
));
10970 ret
= -TARGET_EFAULT
;
10972 unlock_user(p
, arg1
, 0);
10973 unlock_user(b
, arg2
, arg3
);
10976 case TARGET_NR_flistxattr
:
10980 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10982 ret
= -TARGET_EFAULT
;
10986 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10987 unlock_user(b
, arg2
, arg3
);
10990 case TARGET_NR_setxattr
:
10991 case TARGET_NR_lsetxattr
:
10993 void *p
, *n
, *v
= 0;
10995 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10997 ret
= -TARGET_EFAULT
;
11001 p
= lock_user_string(arg1
);
11002 n
= lock_user_string(arg2
);
11004 if (num
== TARGET_NR_setxattr
) {
11005 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11007 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11010 ret
= -TARGET_EFAULT
;
11012 unlock_user(p
, arg1
, 0);
11013 unlock_user(n
, arg2
, 0);
11014 unlock_user(v
, arg3
, 0);
11017 case TARGET_NR_fsetxattr
:
11021 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11023 ret
= -TARGET_EFAULT
;
11027 n
= lock_user_string(arg2
);
11029 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11031 ret
= -TARGET_EFAULT
;
11033 unlock_user(n
, arg2
, 0);
11034 unlock_user(v
, arg3
, 0);
11037 case TARGET_NR_getxattr
:
11038 case TARGET_NR_lgetxattr
:
11040 void *p
, *n
, *v
= 0;
11042 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11044 ret
= -TARGET_EFAULT
;
11048 p
= lock_user_string(arg1
);
11049 n
= lock_user_string(arg2
);
11051 if (num
== TARGET_NR_getxattr
) {
11052 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11054 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11057 ret
= -TARGET_EFAULT
;
11059 unlock_user(p
, arg1
, 0);
11060 unlock_user(n
, arg2
, 0);
11061 unlock_user(v
, arg3
, arg4
);
11064 case TARGET_NR_fgetxattr
:
11068 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11070 ret
= -TARGET_EFAULT
;
11074 n
= lock_user_string(arg2
);
11076 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11078 ret
= -TARGET_EFAULT
;
11080 unlock_user(n
, arg2
, 0);
11081 unlock_user(v
, arg3
, arg4
);
11084 case TARGET_NR_removexattr
:
11085 case TARGET_NR_lremovexattr
:
11088 p
= lock_user_string(arg1
);
11089 n
= lock_user_string(arg2
);
11091 if (num
== TARGET_NR_removexattr
) {
11092 ret
= get_errno(removexattr(p
, n
));
11094 ret
= get_errno(lremovexattr(p
, n
));
11097 ret
= -TARGET_EFAULT
;
11099 unlock_user(p
, arg1
, 0);
11100 unlock_user(n
, arg2
, 0);
11103 case TARGET_NR_fremovexattr
:
11106 n
= lock_user_string(arg2
);
11108 ret
= get_errno(fremovexattr(arg1
, n
));
11110 ret
= -TARGET_EFAULT
;
11112 unlock_user(n
, arg2
, 0);
11116 #endif /* CONFIG_ATTR */
11117 #ifdef TARGET_NR_set_thread_area
11118 case TARGET_NR_set_thread_area
:
11119 #if defined(TARGET_MIPS)
11120 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11123 #elif defined(TARGET_CRIS)
11125 ret
= -TARGET_EINVAL
;
11127 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11131 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11132 ret
= do_set_thread_area(cpu_env
, arg1
);
11134 #elif defined(TARGET_M68K)
11136 TaskState
*ts
= cpu
->opaque
;
11137 ts
->tp_value
= arg1
;
11142 goto unimplemented_nowarn
;
11145 #ifdef TARGET_NR_get_thread_area
11146 case TARGET_NR_get_thread_area
:
11147 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11148 ret
= do_get_thread_area(cpu_env
, arg1
);
11150 #elif defined(TARGET_M68K)
11152 TaskState
*ts
= cpu
->opaque
;
11153 ret
= ts
->tp_value
;
11157 goto unimplemented_nowarn
;
11160 #ifdef TARGET_NR_getdomainname
11161 case TARGET_NR_getdomainname
:
11162 goto unimplemented_nowarn
;
11165 #ifdef TARGET_NR_clock_gettime
11166 case TARGET_NR_clock_gettime
:
11168 struct timespec ts
;
11169 ret
= get_errno(clock_gettime(arg1
, &ts
));
11170 if (!is_error(ret
)) {
11171 host_to_target_timespec(arg2
, &ts
);
11176 #ifdef TARGET_NR_clock_getres
11177 case TARGET_NR_clock_getres
:
11179 struct timespec ts
;
11180 ret
= get_errno(clock_getres(arg1
, &ts
));
11181 if (!is_error(ret
)) {
11182 host_to_target_timespec(arg2
, &ts
);
11187 #ifdef TARGET_NR_clock_nanosleep
11188 case TARGET_NR_clock_nanosleep
:
11190 struct timespec ts
;
11191 target_to_host_timespec(&ts
, arg3
);
11192 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11193 &ts
, arg4
? &ts
: NULL
));
11195 host_to_target_timespec(arg4
, &ts
);
11197 #if defined(TARGET_PPC)
11198 /* clock_nanosleep is odd in that it returns positive errno values.
11199 * On PPC, CR0 bit 3 should be set in such a situation. */
11200 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11201 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11208 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11209 case TARGET_NR_set_tid_address
:
11210 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11214 case TARGET_NR_tkill
:
11215 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11218 case TARGET_NR_tgkill
:
11219 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11220 target_to_host_signal(arg3
)));
11223 #ifdef TARGET_NR_set_robust_list
11224 case TARGET_NR_set_robust_list
:
11225 case TARGET_NR_get_robust_list
:
11226 /* The ABI for supporting robust futexes has userspace pass
11227 * the kernel a pointer to a linked list which is updated by
11228 * userspace after the syscall; the list is walked by the kernel
11229 * when the thread exits. Since the linked list in QEMU guest
11230 * memory isn't a valid linked list for the host and we have
11231 * no way to reliably intercept the thread-death event, we can't
11232 * support these. Silently return ENOSYS so that guest userspace
11233 * falls back to a non-robust futex implementation (which should
11234 * be OK except in the corner case of the guest crashing while
11235 * holding a mutex that is shared with another process via
11238 goto unimplemented_nowarn
;
11241 #if defined(TARGET_NR_utimensat)
11242 case TARGET_NR_utimensat
:
11244 struct timespec
*tsp
, ts
[2];
11248 target_to_host_timespec(ts
, arg3
);
11249 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11253 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11255 if (!(p
= lock_user_string(arg2
))) {
11256 ret
= -TARGET_EFAULT
;
11259 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11260 unlock_user(p
, arg2
, 0);
11265 case TARGET_NR_futex
:
11266 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11268 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11269 case TARGET_NR_inotify_init
:
11270 ret
= get_errno(sys_inotify_init());
11273 #ifdef CONFIG_INOTIFY1
11274 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11275 case TARGET_NR_inotify_init1
:
11276 ret
= get_errno(sys_inotify_init1(arg1
));
11280 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11281 case TARGET_NR_inotify_add_watch
:
11282 p
= lock_user_string(arg2
);
11283 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11284 unlock_user(p
, arg2
, 0);
11287 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11288 case TARGET_NR_inotify_rm_watch
:
11289 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11293 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11294 case TARGET_NR_mq_open
:
11296 struct mq_attr posix_mq_attr
, *attrp
;
11298 p
= lock_user_string(arg1
- 1);
11300 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11301 attrp
= &posix_mq_attr
;
11305 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11306 unlock_user (p
, arg1
, 0);
11310 case TARGET_NR_mq_unlink
:
11311 p
= lock_user_string(arg1
- 1);
11313 ret
= -TARGET_EFAULT
;
11316 ret
= get_errno(mq_unlink(p
));
11317 unlock_user (p
, arg1
, 0);
11320 case TARGET_NR_mq_timedsend
:
11322 struct timespec ts
;
11324 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11326 target_to_host_timespec(&ts
, arg5
);
11327 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11328 host_to_target_timespec(arg5
, &ts
);
11330 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11332 unlock_user (p
, arg2
, arg3
);
11336 case TARGET_NR_mq_timedreceive
:
11338 struct timespec ts
;
11341 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11343 target_to_host_timespec(&ts
, arg5
);
11344 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11346 host_to_target_timespec(arg5
, &ts
);
11348 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11351 unlock_user (p
, arg2
, arg3
);
11353 put_user_u32(prio
, arg4
);
11357 /* Not implemented for now... */
11358 /* case TARGET_NR_mq_notify: */
11361 case TARGET_NR_mq_getsetattr
:
11363 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11366 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11367 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11370 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11371 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11378 #ifdef CONFIG_SPLICE
11379 #ifdef TARGET_NR_tee
11380 case TARGET_NR_tee
:
11382 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11386 #ifdef TARGET_NR_splice
11387 case TARGET_NR_splice
:
11389 loff_t loff_in
, loff_out
;
11390 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11392 if (get_user_u64(loff_in
, arg2
)) {
11395 ploff_in
= &loff_in
;
11398 if (get_user_u64(loff_out
, arg4
)) {
11401 ploff_out
= &loff_out
;
11403 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11405 if (put_user_u64(loff_in
, arg2
)) {
11410 if (put_user_u64(loff_out
, arg4
)) {
11417 #ifdef TARGET_NR_vmsplice
11418 case TARGET_NR_vmsplice
:
11420 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11422 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11423 unlock_iovec(vec
, arg2
, arg3
, 0);
11425 ret
= -host_to_target_errno(errno
);
11430 #endif /* CONFIG_SPLICE */
11431 #ifdef CONFIG_EVENTFD
11432 #if defined(TARGET_NR_eventfd)
11433 case TARGET_NR_eventfd
:
11434 ret
= get_errno(eventfd(arg1
, 0));
11435 fd_trans_unregister(ret
);
11438 #if defined(TARGET_NR_eventfd2)
11439 case TARGET_NR_eventfd2
:
11441 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11442 if (arg2
& TARGET_O_NONBLOCK
) {
11443 host_flags
|= O_NONBLOCK
;
11445 if (arg2
& TARGET_O_CLOEXEC
) {
11446 host_flags
|= O_CLOEXEC
;
11448 ret
= get_errno(eventfd(arg1
, host_flags
));
11449 fd_trans_unregister(ret
);
11453 #endif /* CONFIG_EVENTFD */
11454 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11455 case TARGET_NR_fallocate
:
11456 #if TARGET_ABI_BITS == 32
11457 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11458 target_offset64(arg5
, arg6
)));
11460 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11464 #if defined(CONFIG_SYNC_FILE_RANGE)
11465 #if defined(TARGET_NR_sync_file_range)
11466 case TARGET_NR_sync_file_range
:
11467 #if TARGET_ABI_BITS == 32
11468 #if defined(TARGET_MIPS)
11469 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11470 target_offset64(arg5
, arg6
), arg7
));
11472 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11473 target_offset64(arg4
, arg5
), arg6
));
11474 #endif /* !TARGET_MIPS */
11476 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11480 #if defined(TARGET_NR_sync_file_range2)
11481 case TARGET_NR_sync_file_range2
:
11482 /* This is like sync_file_range but the arguments are reordered */
11483 #if TARGET_ABI_BITS == 32
11484 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11485 target_offset64(arg5
, arg6
), arg2
));
11487 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11492 #if defined(TARGET_NR_signalfd4)
11493 case TARGET_NR_signalfd4
:
11494 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11497 #if defined(TARGET_NR_signalfd)
11498 case TARGET_NR_signalfd
:
11499 ret
= do_signalfd4(arg1
, arg2
, 0);
11502 #if defined(CONFIG_EPOLL)
11503 #if defined(TARGET_NR_epoll_create)
11504 case TARGET_NR_epoll_create
:
11505 ret
= get_errno(epoll_create(arg1
));
11508 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11509 case TARGET_NR_epoll_create1
:
11510 ret
= get_errno(epoll_create1(arg1
));
11513 #if defined(TARGET_NR_epoll_ctl)
11514 case TARGET_NR_epoll_ctl
:
11516 struct epoll_event ep
;
11517 struct epoll_event
*epp
= 0;
11519 struct target_epoll_event
*target_ep
;
11520 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11523 ep
.events
= tswap32(target_ep
->events
);
11524 /* The epoll_data_t union is just opaque data to the kernel,
11525 * so we transfer all 64 bits across and need not worry what
11526 * actual data type it is.
11528 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11529 unlock_user_struct(target_ep
, arg4
, 0);
11532 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11537 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11538 #if defined(TARGET_NR_epoll_wait)
11539 case TARGET_NR_epoll_wait
:
11541 #if defined(TARGET_NR_epoll_pwait)
11542 case TARGET_NR_epoll_pwait
:
11545 struct target_epoll_event
*target_ep
;
11546 struct epoll_event
*ep
;
11548 int maxevents
= arg3
;
11549 int timeout
= arg4
;
11551 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11552 ret
= -TARGET_EINVAL
;
11556 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11557 maxevents
* sizeof(struct target_epoll_event
), 1);
11562 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11565 #if defined(TARGET_NR_epoll_pwait)
11566 case TARGET_NR_epoll_pwait
:
11568 target_sigset_t
*target_set
;
11569 sigset_t _set
, *set
= &_set
;
11572 if (arg6
!= sizeof(target_sigset_t
)) {
11573 ret
= -TARGET_EINVAL
;
11577 target_set
= lock_user(VERIFY_READ
, arg5
,
11578 sizeof(target_sigset_t
), 1);
11580 unlock_user(target_ep
, arg2
, 0);
11583 target_to_host_sigset(set
, target_set
);
11584 unlock_user(target_set
, arg5
, 0);
11589 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11590 set
, SIGSET_T_SIZE
));
11594 #if defined(TARGET_NR_epoll_wait)
11595 case TARGET_NR_epoll_wait
:
11596 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11601 ret
= -TARGET_ENOSYS
;
11603 if (!is_error(ret
)) {
11605 for (i
= 0; i
< ret
; i
++) {
11606 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11607 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11610 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11615 #ifdef TARGET_NR_prlimit64
11616 case TARGET_NR_prlimit64
:
11618 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11619 struct target_rlimit64
*target_rnew
, *target_rold
;
11620 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11621 int resource
= target_to_host_resource(arg2
);
11623 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11626 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11627 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11628 unlock_user_struct(target_rnew
, arg3
, 0);
11632 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11633 if (!is_error(ret
) && arg4
) {
11634 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11637 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11638 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11639 unlock_user_struct(target_rold
, arg4
, 1);
11644 #ifdef TARGET_NR_gethostname
11645 case TARGET_NR_gethostname
:
11647 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11649 ret
= get_errno(gethostname(name
, arg2
));
11650 unlock_user(name
, arg1
, arg2
);
11652 ret
= -TARGET_EFAULT
;
11657 #ifdef TARGET_NR_atomic_cmpxchg_32
11658 case TARGET_NR_atomic_cmpxchg_32
:
11660 /* should use start_exclusive from main.c */
11661 abi_ulong mem_value
;
11662 if (get_user_u32(mem_value
, arg6
)) {
11663 target_siginfo_t info
;
11664 info
.si_signo
= SIGSEGV
;
11666 info
.si_code
= TARGET_SEGV_MAPERR
;
11667 info
._sifields
._sigfault
._addr
= arg6
;
11668 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11672 if (mem_value
== arg2
)
11673 put_user_u32(arg1
, arg6
);
11678 #ifdef TARGET_NR_atomic_barrier
11679 case TARGET_NR_atomic_barrier
:
11681 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11687 #ifdef TARGET_NR_timer_create
11688 case TARGET_NR_timer_create
:
11690 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11692 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11695 int timer_index
= next_free_host_timer();
11697 if (timer_index
< 0) {
11698 ret
= -TARGET_EAGAIN
;
11700 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11703 phost_sevp
= &host_sevp
;
11704 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11710 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11714 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11723 #ifdef TARGET_NR_timer_settime
11724 case TARGET_NR_timer_settime
:
11726 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11727 * struct itimerspec * old_value */
11728 target_timer_t timerid
= get_timer_id(arg1
);
11732 } else if (arg3
== 0) {
11733 ret
= -TARGET_EINVAL
;
11735 timer_t htimer
= g_posix_timers
[timerid
];
11736 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11738 target_to_host_itimerspec(&hspec_new
, arg3
);
11740 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11741 host_to_target_itimerspec(arg2
, &hspec_old
);
11747 #ifdef TARGET_NR_timer_gettime
11748 case TARGET_NR_timer_gettime
:
11750 /* args: timer_t timerid, struct itimerspec *curr_value */
11751 target_timer_t timerid
= get_timer_id(arg1
);
11755 } else if (!arg2
) {
11756 ret
= -TARGET_EFAULT
;
11758 timer_t htimer
= g_posix_timers
[timerid
];
11759 struct itimerspec hspec
;
11760 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11762 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11763 ret
= -TARGET_EFAULT
;
11770 #ifdef TARGET_NR_timer_getoverrun
11771 case TARGET_NR_timer_getoverrun
:
11773 /* args: timer_t timerid */
11774 target_timer_t timerid
= get_timer_id(arg1
);
11779 timer_t htimer
= g_posix_timers
[timerid
];
11780 ret
= get_errno(timer_getoverrun(htimer
));
11782 fd_trans_unregister(ret
);
11787 #ifdef TARGET_NR_timer_delete
11788 case TARGET_NR_timer_delete
:
11790 /* args: timer_t timerid */
11791 target_timer_t timerid
= get_timer_id(arg1
);
11796 timer_t htimer
= g_posix_timers
[timerid
];
11797 ret
= get_errno(timer_delete(htimer
));
11798 g_posix_timers
[timerid
] = 0;
11804 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11805 case TARGET_NR_timerfd_create
:
11806 ret
= get_errno(timerfd_create(arg1
,
11807 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11811 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11812 case TARGET_NR_timerfd_gettime
:
11814 struct itimerspec its_curr
;
11816 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11818 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11825 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11826 case TARGET_NR_timerfd_settime
:
11828 struct itimerspec its_new
, its_old
, *p_new
;
11831 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11839 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11841 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11848 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11849 case TARGET_NR_ioprio_get
:
11850 ret
= get_errno(ioprio_get(arg1
, arg2
));
11854 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11855 case TARGET_NR_ioprio_set
:
11856 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11860 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11861 case TARGET_NR_setns
:
11862 ret
= get_errno(setns(arg1
, arg2
));
11865 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11866 case TARGET_NR_unshare
:
11867 ret
= get_errno(unshare(arg1
));
11873 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11874 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11875 unimplemented_nowarn
:
11877 ret
= -TARGET_ENOSYS
;
11882 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11885 print_syscall_ret(num
, ret
);
11886 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11889 ret
= -TARGET_EFAULT
;