4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
/* Prototype for the ia64 glibc clone entry point, which takes a stack
 * base and size instead of a single stack pointer.  Declared by hand
 * here — presumably because glibc does not export a prototype for it
 * (NOTE(review): confirm against the glibc headers in use).
 */
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include "qemu-common.h"
62 #include <sys/timerfd.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #include "qemu/xattr.h"
76 #ifdef CONFIG_SENDFILE
77 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
95 #if defined(CONFIG_FIEMAP)
96 #include <linux/fiemap.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #ifdef CONFIG_RTNETLINK
108 #include <linux/rtnetlink.h>
109 #include <linux/if_bridge.h>
111 #include <linux/audit.h>
112 #include "linux_loop.h"
118 #define CLONE_IO 0x80000000 /* Clone io context */
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
186 #define _syscall0(type,name) \
187 static type name (void) \
189 return syscall(__NR_##name); \
192 #define _syscall1(type,name,type1,arg1) \
193 static type name (type1 arg1) \
195 return syscall(__NR_##name, arg1); \
198 #define _syscall2(type,name,type1,arg1,type2,arg2) \
199 static type name (type1 arg1,type2 arg2) \
201 return syscall(__NR_##name, arg1, arg2); \
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
205 static type name (type1 arg1,type2 arg2,type3 arg3) \
207 return syscall(__NR_##name, arg1, arg2, arg3); \
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 type5,arg5,type6,arg6) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
229 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
247 #define __NR__llseek __NR_lseek
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
256 _syscall0(int, gettid
)
258 /* This is a replacement for the host gettid() and must return a host
260 static int gettid(void) {
264 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
265 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
267 #if !defined(__NR_getdents) || \
268 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
273 loff_t
*, res
, uint
, wh
);
275 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
276 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group
,int,error_code
)
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address
,int *,tidptr
)
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
285 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
289 unsigned long *, user_mask_ptr
);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
292 unsigned long *, user_mask_ptr
);
293 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
295 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
296 struct __user_cap_data_struct
*, data
);
297 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
298 struct __user_cap_data_struct
*, data
);
299 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
300 _syscall2(int, ioprio_get
, int, which
, int, who
)
302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
303 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
305 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
306 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
309 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
310 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
311 unsigned long, idx1
, unsigned long, idx2
)
314 static bitmask_transtbl fcntl_flags_tbl
[] = {
315 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
316 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
317 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
318 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
319 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
320 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
321 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
322 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
323 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
324 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
325 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
326 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
327 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
328 #if defined(O_DIRECT)
329 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
331 #if defined(O_NOATIME)
332 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
334 #if defined(O_CLOEXEC)
335 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
338 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
340 /* Don't terminate the list prematurely on 64-bit host+guest. */
341 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
342 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
349 QEMU_IFLA_BR_FORWARD_DELAY
,
350 QEMU_IFLA_BR_HELLO_TIME
,
351 QEMU_IFLA_BR_MAX_AGE
,
352 QEMU_IFLA_BR_AGEING_TIME
,
353 QEMU_IFLA_BR_STP_STATE
,
354 QEMU_IFLA_BR_PRIORITY
,
355 QEMU_IFLA_BR_VLAN_FILTERING
,
356 QEMU_IFLA_BR_VLAN_PROTOCOL
,
357 QEMU_IFLA_BR_GROUP_FWD_MASK
,
358 QEMU_IFLA_BR_ROOT_ID
,
359 QEMU_IFLA_BR_BRIDGE_ID
,
360 QEMU_IFLA_BR_ROOT_PORT
,
361 QEMU_IFLA_BR_ROOT_PATH_COST
,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
363 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
364 QEMU_IFLA_BR_HELLO_TIMER
,
365 QEMU_IFLA_BR_TCN_TIMER
,
366 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
367 QEMU_IFLA_BR_GC_TIMER
,
368 QEMU_IFLA_BR_GROUP_ADDR
,
369 QEMU_IFLA_BR_FDB_FLUSH
,
370 QEMU_IFLA_BR_MCAST_ROUTER
,
371 QEMU_IFLA_BR_MCAST_SNOOPING
,
372 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
373 QEMU_IFLA_BR_MCAST_QUERIER
,
374 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
375 QEMU_IFLA_BR_MCAST_HASH_MAX
,
376 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
379 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
380 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
381 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
382 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
384 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
385 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
386 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
387 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
389 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
390 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
414 QEMU_IFLA_NET_NS_PID
,
417 QEMU_IFLA_VFINFO_LIST
,
425 QEMU_IFLA_PROMISCUITY
,
426 QEMU_IFLA_NUM_TX_QUEUES
,
427 QEMU_IFLA_NUM_RX_QUEUES
,
429 QEMU_IFLA_PHYS_PORT_ID
,
430 QEMU_IFLA_CARRIER_CHANGES
,
431 QEMU_IFLA_PHYS_SWITCH_ID
,
432 QEMU_IFLA_LINK_NETNSID
,
433 QEMU_IFLA_PHYS_PORT_NAME
,
434 QEMU_IFLA_PROTO_DOWN
,
435 QEMU_IFLA_GSO_MAX_SEGS
,
436 QEMU_IFLA_GSO_MAX_SIZE
,
443 QEMU_IFLA_BRPORT_UNSPEC
,
444 QEMU_IFLA_BRPORT_STATE
,
445 QEMU_IFLA_BRPORT_PRIORITY
,
446 QEMU_IFLA_BRPORT_COST
,
447 QEMU_IFLA_BRPORT_MODE
,
448 QEMU_IFLA_BRPORT_GUARD
,
449 QEMU_IFLA_BRPORT_PROTECT
,
450 QEMU_IFLA_BRPORT_FAST_LEAVE
,
451 QEMU_IFLA_BRPORT_LEARNING
,
452 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
453 QEMU_IFLA_BRPORT_PROXYARP
,
454 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
455 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
456 QEMU_IFLA_BRPORT_ROOT_ID
,
457 QEMU_IFLA_BRPORT_BRIDGE_ID
,
458 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
459 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
462 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
463 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
464 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
465 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
466 QEMU_IFLA_BRPORT_HOLD_TIMER
,
467 QEMU_IFLA_BRPORT_FLUSH
,
468 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
469 QEMU_IFLA_BRPORT_PAD
,
470 QEMU___IFLA_BRPORT_MAX
474 QEMU_IFLA_INFO_UNSPEC
,
477 QEMU_IFLA_INFO_XSTATS
,
478 QEMU_IFLA_INFO_SLAVE_KIND
,
479 QEMU_IFLA_INFO_SLAVE_DATA
,
480 QEMU___IFLA_INFO_MAX
,
484 QEMU_IFLA_INET_UNSPEC
,
486 QEMU___IFLA_INET_MAX
,
490 QEMU_IFLA_INET6_UNSPEC
,
491 QEMU_IFLA_INET6_FLAGS
,
492 QEMU_IFLA_INET6_CONF
,
493 QEMU_IFLA_INET6_STATS
,
494 QEMU_IFLA_INET6_MCAST
,
495 QEMU_IFLA_INET6_CACHEINFO
,
496 QEMU_IFLA_INET6_ICMP6STATS
,
497 QEMU_IFLA_INET6_TOKEN
,
498 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
499 QEMU___IFLA_INET6_MAX
502 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
503 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
504 typedef struct TargetFdTrans
{
505 TargetFdDataFunc host_to_target_data
;
506 TargetFdDataFunc target_to_host_data
;
507 TargetFdAddrFunc target_to_host_addr
;
/* Per-fd table of translation descriptors, indexed by host fd.
 * Entries are NULL when no translation is registered; the table is
 * grown on demand by fd_trans_register(). */
static TargetFdTrans **target_fd_trans;

/* Number of entries currently allocated in target_fd_trans. */
static unsigned int target_fd_max;
514 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
516 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
517 return target_fd_trans
[fd
]->target_to_host_data
;
522 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
524 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
525 return target_fd_trans
[fd
]->host_to_target_data
;
530 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
532 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
533 return target_fd_trans
[fd
]->target_to_host_addr
;
538 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
542 if (fd
>= target_fd_max
) {
543 oldmax
= target_fd_max
;
544 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
545 target_fd_trans
= g_renew(TargetFdTrans
*,
546 target_fd_trans
, target_fd_max
);
547 memset((void *)(target_fd_trans
+ oldmax
), 0,
548 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
550 target_fd_trans
[fd
] = trans
;
553 static void fd_trans_unregister(int fd
)
555 if (fd
>= 0 && fd
< target_fd_max
) {
556 target_fd_trans
[fd
] = NULL
;
560 static void fd_trans_dup(int oldfd
, int newfd
)
562 fd_trans_unregister(newfd
);
563 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
564 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper matching the kernel syscall convention: on success
 * return the number of bytes written including the trailing NUL, on
 * failure return -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
577 #ifdef TARGET_NR_utimensat
578 #if defined(__NR_utimensat)
579 #define __NR_sys_utimensat __NR_utimensat
580 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
581 const struct timespec
*,tsp
,int,flags
)
583 static int sys_utimensat(int dirfd
, const char *pathname
,
584 const struct timespec times
[2], int flags
)
590 #endif /* TARGET_NR_utimensat */
592 #ifdef CONFIG_INOTIFY
593 #include <sys/inotify.h>
595 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
596 static int sys_inotify_init(void)
598 return (inotify_init());
601 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
602 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
604 return (inotify_add_watch(fd
, pathname
, mask
));
607 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
608 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
610 return (inotify_rm_watch(fd
, wd
));
613 #ifdef CONFIG_INOTIFY1
614 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
615 static int sys_inotify_init1(int flags
)
617 return (inotify_init1(flags
));
622 /* Userspace can usually survive runtime without inotify */
623 #undef TARGET_NR_inotify_init
624 #undef TARGET_NR_inotify_init1
625 #undef TARGET_NR_inotify_add_watch
626 #undef TARGET_NR_inotify_rm_watch
627 #endif /* CONFIG_INOTIFY */
629 #if defined(TARGET_NR_prlimit64)
630 #ifndef __NR_prlimit64
631 # define __NR_prlimit64 -1
633 #define __NR_sys_prlimit64 __NR_prlimit64
634 /* The glibc rlimit structure may not be that used by the underlying syscall */
635 struct host_rlimit64
{
639 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
640 const struct host_rlimit64
*, new_limit
,
641 struct host_rlimit64
*, old_limit
)
645 #if defined(TARGET_NR_timer_create)
646 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
647 static timer_t g_posix_timers
[32] = { 0, } ;
649 static inline int next_free_host_timer(void)
652 /* FIXME: Does finding the next free slot require a lock? */
653 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
654 if (g_posix_timers
[k
] == 0) {
655 g_posix_timers
[k
] = (timer_t
) 1;
663 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
665 static inline int regpairs_aligned(void *cpu_env
) {
666 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
668 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
669 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
670 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
671 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
672 * of registers which translates to the same as ARM/MIPS, because we start with
674 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
676 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
679 #define ERRNO_TABLE_SIZE 1200
681 /* target_to_host_errno_table[] is initialized from
682 * host_to_target_errno_table[] in syscall_init(). */
683 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
687 * This list is the union of errno values overridden in asm-<arch>/errno.h
688 * minus the errnos that are not actually generic to all archs.
690 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
691 [EAGAIN
] = TARGET_EAGAIN
,
692 [EIDRM
] = TARGET_EIDRM
,
693 [ECHRNG
] = TARGET_ECHRNG
,
694 [EL2NSYNC
] = TARGET_EL2NSYNC
,
695 [EL3HLT
] = TARGET_EL3HLT
,
696 [EL3RST
] = TARGET_EL3RST
,
697 [ELNRNG
] = TARGET_ELNRNG
,
698 [EUNATCH
] = TARGET_EUNATCH
,
699 [ENOCSI
] = TARGET_ENOCSI
,
700 [EL2HLT
] = TARGET_EL2HLT
,
701 [EDEADLK
] = TARGET_EDEADLK
,
702 [ENOLCK
] = TARGET_ENOLCK
,
703 [EBADE
] = TARGET_EBADE
,
704 [EBADR
] = TARGET_EBADR
,
705 [EXFULL
] = TARGET_EXFULL
,
706 [ENOANO
] = TARGET_ENOANO
,
707 [EBADRQC
] = TARGET_EBADRQC
,
708 [EBADSLT
] = TARGET_EBADSLT
,
709 [EBFONT
] = TARGET_EBFONT
,
710 [ENOSTR
] = TARGET_ENOSTR
,
711 [ENODATA
] = TARGET_ENODATA
,
712 [ETIME
] = TARGET_ETIME
,
713 [ENOSR
] = TARGET_ENOSR
,
714 [ENONET
] = TARGET_ENONET
,
715 [ENOPKG
] = TARGET_ENOPKG
,
716 [EREMOTE
] = TARGET_EREMOTE
,
717 [ENOLINK
] = TARGET_ENOLINK
,
718 [EADV
] = TARGET_EADV
,
719 [ESRMNT
] = TARGET_ESRMNT
,
720 [ECOMM
] = TARGET_ECOMM
,
721 [EPROTO
] = TARGET_EPROTO
,
722 [EDOTDOT
] = TARGET_EDOTDOT
,
723 [EMULTIHOP
] = TARGET_EMULTIHOP
,
724 [EBADMSG
] = TARGET_EBADMSG
,
725 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
726 [EOVERFLOW
] = TARGET_EOVERFLOW
,
727 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
728 [EBADFD
] = TARGET_EBADFD
,
729 [EREMCHG
] = TARGET_EREMCHG
,
730 [ELIBACC
] = TARGET_ELIBACC
,
731 [ELIBBAD
] = TARGET_ELIBBAD
,
732 [ELIBSCN
] = TARGET_ELIBSCN
,
733 [ELIBMAX
] = TARGET_ELIBMAX
,
734 [ELIBEXEC
] = TARGET_ELIBEXEC
,
735 [EILSEQ
] = TARGET_EILSEQ
,
736 [ENOSYS
] = TARGET_ENOSYS
,
737 [ELOOP
] = TARGET_ELOOP
,
738 [ERESTART
] = TARGET_ERESTART
,
739 [ESTRPIPE
] = TARGET_ESTRPIPE
,
740 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
741 [EUSERS
] = TARGET_EUSERS
,
742 [ENOTSOCK
] = TARGET_ENOTSOCK
,
743 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
744 [EMSGSIZE
] = TARGET_EMSGSIZE
,
745 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
746 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
747 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
748 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
749 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
750 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
751 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
752 [EADDRINUSE
] = TARGET_EADDRINUSE
,
753 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
754 [ENETDOWN
] = TARGET_ENETDOWN
,
755 [ENETUNREACH
] = TARGET_ENETUNREACH
,
756 [ENETRESET
] = TARGET_ENETRESET
,
757 [ECONNABORTED
] = TARGET_ECONNABORTED
,
758 [ECONNRESET
] = TARGET_ECONNRESET
,
759 [ENOBUFS
] = TARGET_ENOBUFS
,
760 [EISCONN
] = TARGET_EISCONN
,
761 [ENOTCONN
] = TARGET_ENOTCONN
,
762 [EUCLEAN
] = TARGET_EUCLEAN
,
763 [ENOTNAM
] = TARGET_ENOTNAM
,
764 [ENAVAIL
] = TARGET_ENAVAIL
,
765 [EISNAM
] = TARGET_EISNAM
,
766 [EREMOTEIO
] = TARGET_EREMOTEIO
,
767 [EDQUOT
] = TARGET_EDQUOT
,
768 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
769 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
770 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
771 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
772 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
773 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
774 [EALREADY
] = TARGET_EALREADY
,
775 [EINPROGRESS
] = TARGET_EINPROGRESS
,
776 [ESTALE
] = TARGET_ESTALE
,
777 [ECANCELED
] = TARGET_ECANCELED
,
778 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
779 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
781 [ENOKEY
] = TARGET_ENOKEY
,
784 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
787 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
790 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
793 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
795 #ifdef ENOTRECOVERABLE
796 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
799 [ENOMSG
] = TARGET_ENOMSG
,
803 static inline int host_to_target_errno(int err
)
805 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
806 host_to_target_errno_table
[err
]) {
807 return host_to_target_errno_table
[err
];
812 static inline int target_to_host_errno(int err
)
814 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
815 target_to_host_errno_table
[err
]) {
816 return target_to_host_errno_table
[err
];
821 static inline abi_long
get_errno(abi_long ret
)
824 return -host_to_target_errno(errno
);
829 static inline int is_error(abi_long ret
)
831 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
834 const char *target_strerror(int err
)
836 if (err
== TARGET_ERESTARTSYS
) {
837 return "To be restarted";
839 if (err
== TARGET_QEMU_ESIGRETURN
) {
840 return "Successful exit from sigreturn";
843 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
846 return strerror(target_to_host_errno(err
));
849 #define safe_syscall0(type, name) \
850 static type safe_##name(void) \
852 return safe_syscall(__NR_##name); \
855 #define safe_syscall1(type, name, type1, arg1) \
856 static type safe_##name(type1 arg1) \
858 return safe_syscall(__NR_##name, arg1); \
861 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
862 static type safe_##name(type1 arg1, type2 arg2) \
864 return safe_syscall(__NR_##name, arg1, arg2); \
867 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
868 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
870 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
873 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
875 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
877 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
880 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
881 type4, arg4, type5, arg5) \
882 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
888 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4, type5, arg5, type6, arg6) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
891 type5 arg5, type6 arg6) \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
896 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
897 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
898 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
899 int, flags
, mode_t
, mode
)
900 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
901 struct rusage
*, rusage
)
902 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
903 int, options
, struct rusage
*, rusage
)
904 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
905 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
906 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
907 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
908 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
910 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
911 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
913 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
914 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
915 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
916 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
917 safe_syscall2(int, tkill
, int, tid
, int, sig
)
918 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
919 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
920 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
921 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
922 unsigned long, pos_l
, unsigned long, pos_h
)
923 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
924 unsigned long, pos_l
, unsigned long, pos_h
)
925 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
927 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
928 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
929 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
930 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
931 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
932 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
933 safe_syscall2(int, flock
, int, fd
, int, operation
)
934 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
935 const struct timespec
*, uts
, size_t, sigsetsize
)
936 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
938 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
939 struct timespec
*, rem
)
940 #ifdef TARGET_NR_clock_nanosleep
941 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
942 const struct timespec
*, req
, struct timespec
*, rem
)
945 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
947 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
948 long, msgtype
, int, flags
)
949 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
950 unsigned, nsops
, const struct timespec
*, timeout
)
952 /* This host kernel architecture uses a single ipc syscall; fake up
953 * wrappers for the sub-operations to hide this implementation detail.
954 * Annoyingly we can't include linux/ipc.h to get the constant definitions
955 * for the call parameter because some structs in there conflict with the
956 * sys/ipc.h ones. So we just define them here, and rely on them being
957 * the same for all host architectures.
959 #define Q_SEMTIMEDOP 4
962 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
964 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
965 void *, ptr
, long, fifth
)
966 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
968 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
970 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
972 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
974 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
975 const struct timespec
*timeout
)
977 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
981 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
982 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
983 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
984 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
985 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
987 /* We do ioctl like this rather than via safe_syscall3 to preserve the
988 * "third argument might be integer or pointer or not present" behaviour of
991 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
992 /* Similarly for fcntl. Note that callers must always:
993 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
994 * use the flock64 struct rather than unsuffixed flock
995 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
998 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1000 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1003 static inline int host_to_target_sock_type(int host_type
)
1007 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1009 target_type
= TARGET_SOCK_DGRAM
;
1012 target_type
= TARGET_SOCK_STREAM
;
1015 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1019 #if defined(SOCK_CLOEXEC)
1020 if (host_type
& SOCK_CLOEXEC
) {
1021 target_type
|= TARGET_SOCK_CLOEXEC
;
1025 #if defined(SOCK_NONBLOCK)
1026 if (host_type
& SOCK_NONBLOCK
) {
1027 target_type
|= TARGET_SOCK_NONBLOCK
;
/* Current program break of the guest, maintained by do_brk(). */
static abi_ulong target_brk;
/* Initial break set at load time; do_brk() refuses to go below it. */
static abi_ulong target_original_brk;
/* First page boundary above the memory already reserved for the heap. */
static abi_ulong brk_page;
1038 void target_set_brk(abi_ulong new_brk
)
1040 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1041 brk_page
= HOST_PAGE_ALIGN(target_brk
);
/* Debug tracing for do_brk(); enable the first definition to log every
 * brk decision to stderr, leave the second (no-op) one for normal builds. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
1047 /* do_brk() must return target values and target errnos. */
1048 abi_long
do_brk(abi_ulong new_brk
)
1050 abi_long mapped_addr
;
1051 abi_ulong new_alloc_size
;
1053 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1056 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1059 if (new_brk
< target_original_brk
) {
1060 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1065 /* If the new brk is less than the highest page reserved to the
1066 * target heap allocation, set it and we're almost done... */
1067 if (new_brk
<= brk_page
) {
1068 /* Heap contents are initialized to zero, as for anonymous
1070 if (new_brk
> target_brk
) {
1071 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1073 target_brk
= new_brk
;
1074 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1078 /* We need to allocate more memory after the brk... Note that
1079 * we don't use MAP_FIXED because that will map over the top of
1080 * any existing mapping (like the one with the host libc or qemu
1081 * itself); instead we treat "mapped but at wrong address" as
1082 * a failure and unmap again.
1084 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1085 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1086 PROT_READ
|PROT_WRITE
,
1087 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1089 if (mapped_addr
== brk_page
) {
1090 /* Heap contents are initialized to zero, as for anonymous
1091 * mapped pages. Technically the new pages are already
1092 * initialized to zero since they *are* anonymous mapped
1093 * pages, however we have to take care with the contents that
1094 * come from the remaining part of the previous page: it may
1095 * contains garbage data due to a previous heap usage (grown
1096 * then shrunken). */
1097 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1099 target_brk
= new_brk
;
1100 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1101 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1104 } else if (mapped_addr
!= -1) {
1105 /* Mapped but at wrong address, meaning there wasn't actually
1106 * enough space for this brk.
1108 target_munmap(mapped_addr
, new_alloc_size
);
1110 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1113 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1116 #if defined(TARGET_ALPHA)
1117 /* We (partially) emulate OSF/1 on Alpha, which requires we
1118 return a proper errno, not an unchanged brk value. */
1119 return -TARGET_ENOMEM
;
1121 /* For everything else, return the previous break. */
1125 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1126 abi_ulong target_fds_addr
,
1130 abi_ulong b
, *target_fds
;
1132 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1133 if (!(target_fds
= lock_user(VERIFY_READ
,
1135 sizeof(abi_ulong
) * nw
,
1137 return -TARGET_EFAULT
;
1141 for (i
= 0; i
< nw
; i
++) {
1142 /* grab the abi_ulong */
1143 __get_user(b
, &target_fds
[i
]);
1144 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1145 /* check the bit inside the abi_ulong */
1152 unlock_user(target_fds
, target_fds_addr
, 0);
1157 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1158 abi_ulong target_fds_addr
,
1161 if (target_fds_addr
) {
1162 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1163 return -TARGET_EFAULT
;
1171 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1177 abi_ulong
*target_fds
;
1179 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1180 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1182 sizeof(abi_ulong
) * nw
,
1184 return -TARGET_EFAULT
;
1187 for (i
= 0; i
< nw
; i
++) {
1189 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1190 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1193 __put_user(v
, &target_fds
[i
]);
1196 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
/* Host clock tick rate used for clock_t conversion; Alpha Linux uses
 * 1024 Hz (other hosts are handled in the out-of-view #else branch). */
#if defined(__alpha__)
#define HOST_HZ 1024
1207 static inline abi_long
host_to_target_clock_t(long ticks
)
1209 #if HOST_HZ == TARGET_HZ
1212 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1216 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1217 const struct rusage
*rusage
)
1219 struct target_rusage
*target_rusage
;
1221 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1222 return -TARGET_EFAULT
;
1223 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1224 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1225 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1226 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1227 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1228 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1229 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1230 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1231 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1232 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1233 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1234 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1235 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1236 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1237 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1238 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1239 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1240 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1241 unlock_user_struct(target_rusage
, target_addr
, 1);
1246 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1248 abi_ulong target_rlim_swap
;
1251 target_rlim_swap
= tswapal(target_rlim
);
1252 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1253 return RLIM_INFINITY
;
1255 result
= target_rlim_swap
;
1256 if (target_rlim_swap
!= (rlim_t
)result
)
1257 return RLIM_INFINITY
;
1262 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1264 abi_ulong target_rlim_swap
;
1267 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1268 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1270 target_rlim_swap
= rlim
;
1271 result
= tswapal(target_rlim_swap
);
1276 static inline int target_to_host_resource(int code
)
1279 case TARGET_RLIMIT_AS
:
1281 case TARGET_RLIMIT_CORE
:
1283 case TARGET_RLIMIT_CPU
:
1285 case TARGET_RLIMIT_DATA
:
1287 case TARGET_RLIMIT_FSIZE
:
1288 return RLIMIT_FSIZE
;
1289 case TARGET_RLIMIT_LOCKS
:
1290 return RLIMIT_LOCKS
;
1291 case TARGET_RLIMIT_MEMLOCK
:
1292 return RLIMIT_MEMLOCK
;
1293 case TARGET_RLIMIT_MSGQUEUE
:
1294 return RLIMIT_MSGQUEUE
;
1295 case TARGET_RLIMIT_NICE
:
1297 case TARGET_RLIMIT_NOFILE
:
1298 return RLIMIT_NOFILE
;
1299 case TARGET_RLIMIT_NPROC
:
1300 return RLIMIT_NPROC
;
1301 case TARGET_RLIMIT_RSS
:
1303 case TARGET_RLIMIT_RTPRIO
:
1304 return RLIMIT_RTPRIO
;
1305 case TARGET_RLIMIT_SIGPENDING
:
1306 return RLIMIT_SIGPENDING
;
1307 case TARGET_RLIMIT_STACK
:
1308 return RLIMIT_STACK
;
1314 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1315 abi_ulong target_tv_addr
)
1317 struct target_timeval
*target_tv
;
1319 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1320 return -TARGET_EFAULT
;
1322 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1323 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1325 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1330 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1331 const struct timeval
*tv
)
1333 struct target_timeval
*target_tv
;
1335 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1336 return -TARGET_EFAULT
;
1338 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1339 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1341 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1346 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1347 abi_ulong target_tz_addr
)
1349 struct target_timezone
*target_tz
;
1351 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1352 return -TARGET_EFAULT
;
1355 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1356 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1358 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1363 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1366 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1367 abi_ulong target_mq_attr_addr
)
1369 struct target_mq_attr
*target_mq_attr
;
1371 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1372 target_mq_attr_addr
, 1))
1373 return -TARGET_EFAULT
;
1375 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1376 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1377 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1378 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1380 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1385 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1386 const struct mq_attr
*attr
)
1388 struct target_mq_attr
*target_mq_attr
;
1390 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1391 target_mq_attr_addr
, 0))
1392 return -TARGET_EFAULT
;
1394 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1395 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1396 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1397 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1399 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1405 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1406 /* do_select() must return target values and target errnos. */
1407 static abi_long
do_select(int n
,
1408 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1409 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1411 fd_set rfds
, wfds
, efds
;
1412 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1414 struct timespec ts
, *ts_ptr
;
1417 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1421 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1425 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1430 if (target_tv_addr
) {
1431 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1432 return -TARGET_EFAULT
;
1433 ts
.tv_sec
= tv
.tv_sec
;
1434 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1440 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1443 if (!is_error(ret
)) {
1444 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1445 return -TARGET_EFAULT
;
1446 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1447 return -TARGET_EFAULT
;
1448 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1449 return -TARGET_EFAULT
;
1451 if (target_tv_addr
) {
1452 tv
.tv_sec
= ts
.tv_sec
;
1453 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1454 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1455 return -TARGET_EFAULT
;
1463 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1464 static abi_long
do_old_select(abi_ulong arg1
)
1466 struct target_sel_arg_struct
*sel
;
1467 abi_ulong inp
, outp
, exp
, tvp
;
1470 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1471 return -TARGET_EFAULT
;
1474 nsel
= tswapal(sel
->n
);
1475 inp
= tswapal(sel
->inp
);
1476 outp
= tswapal(sel
->outp
);
1477 exp
= tswapal(sel
->exp
);
1478 tvp
= tswapal(sel
->tvp
);
1480 unlock_user_struct(sel
, arg1
, 0);
1482 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1487 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1490 return pipe2(host_pipe
, flags
);
1496 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1497 int flags
, int is_pipe2
)
1501 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1504 return get_errno(ret
);
1506 /* Several targets have special calling conventions for the original
1507 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1509 #if defined(TARGET_ALPHA)
1510 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1511 return host_pipe
[0];
1512 #elif defined(TARGET_MIPS)
1513 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1514 return host_pipe
[0];
1515 #elif defined(TARGET_SH4)
1516 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1517 return host_pipe
[0];
1518 #elif defined(TARGET_SPARC)
1519 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1520 return host_pipe
[0];
1524 if (put_user_s32(host_pipe
[0], pipedes
)
1525 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1526 return -TARGET_EFAULT
;
1527 return get_errno(ret
);
1530 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1531 abi_ulong target_addr
,
1534 struct target_ip_mreqn
*target_smreqn
;
1536 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1538 return -TARGET_EFAULT
;
1539 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1540 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1541 if (len
== sizeof(struct target_ip_mreqn
))
1542 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1543 unlock_user(target_smreqn
, target_addr
, 0);
1548 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1549 abi_ulong target_addr
,
1552 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1553 sa_family_t sa_family
;
1554 struct target_sockaddr
*target_saddr
;
1556 if (fd_trans_target_to_host_addr(fd
)) {
1557 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1560 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1562 return -TARGET_EFAULT
;
1564 sa_family
= tswap16(target_saddr
->sa_family
);
1566 /* Oops. The caller might send a incomplete sun_path; sun_path
1567 * must be terminated by \0 (see the manual page), but
1568 * unfortunately it is quite common to specify sockaddr_un
1569 * length as "strlen(x->sun_path)" while it should be
1570 * "strlen(...) + 1". We'll fix that here if needed.
1571 * Linux kernel has a similar feature.
1574 if (sa_family
== AF_UNIX
) {
1575 if (len
< unix_maxlen
&& len
> 0) {
1576 char *cp
= (char*)target_saddr
;
1578 if ( cp
[len
-1] && !cp
[len
] )
1581 if (len
> unix_maxlen
)
1585 memcpy(addr
, target_saddr
, len
);
1586 addr
->sa_family
= sa_family
;
1587 if (sa_family
== AF_NETLINK
) {
1588 struct sockaddr_nl
*nladdr
;
1590 nladdr
= (struct sockaddr_nl
*)addr
;
1591 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1592 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1593 } else if (sa_family
== AF_PACKET
) {
1594 struct target_sockaddr_ll
*lladdr
;
1596 lladdr
= (struct target_sockaddr_ll
*)addr
;
1597 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1598 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1600 unlock_user(target_saddr
, target_addr
, 0);
1605 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1606 struct sockaddr
*addr
,
1609 struct target_sockaddr
*target_saddr
;
1615 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1617 return -TARGET_EFAULT
;
1618 memcpy(target_saddr
, addr
, len
);
1619 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1620 sizeof(target_saddr
->sa_family
)) {
1621 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1623 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1624 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1625 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1626 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1627 } else if (addr
->sa_family
== AF_PACKET
) {
1628 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1629 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1630 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1632 unlock_user(target_saddr
, target_addr
, len
);
1637 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1638 struct target_msghdr
*target_msgh
)
1640 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1641 abi_long msg_controllen
;
1642 abi_ulong target_cmsg_addr
;
1643 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1644 socklen_t space
= 0;
1646 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1647 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1649 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1650 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1651 target_cmsg_start
= target_cmsg
;
1653 return -TARGET_EFAULT
;
1655 while (cmsg
&& target_cmsg
) {
1656 void *data
= CMSG_DATA(cmsg
);
1657 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1659 int len
= tswapal(target_cmsg
->cmsg_len
)
1660 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1662 space
+= CMSG_SPACE(len
);
1663 if (space
> msgh
->msg_controllen
) {
1664 space
-= CMSG_SPACE(len
);
1665 /* This is a QEMU bug, since we allocated the payload
1666 * area ourselves (unlike overflow in host-to-target
1667 * conversion, which is just the guest giving us a buffer
1668 * that's too small). It can't happen for the payload types
1669 * we currently support; if it becomes an issue in future
1670 * we would need to improve our allocation strategy to
1671 * something more intelligent than "twice the size of the
1672 * target buffer we're reading from".
1674 gemu_log("Host cmsg overflow\n");
1678 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1679 cmsg
->cmsg_level
= SOL_SOCKET
;
1681 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1683 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1684 cmsg
->cmsg_len
= CMSG_LEN(len
);
1686 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1687 int *fd
= (int *)data
;
1688 int *target_fd
= (int *)target_data
;
1689 int i
, numfds
= len
/ sizeof(int);
1691 for (i
= 0; i
< numfds
; i
++) {
1692 __get_user(fd
[i
], target_fd
+ i
);
1694 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1695 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1696 struct ucred
*cred
= (struct ucred
*)data
;
1697 struct target_ucred
*target_cred
=
1698 (struct target_ucred
*)target_data
;
1700 __get_user(cred
->pid
, &target_cred
->pid
);
1701 __get_user(cred
->uid
, &target_cred
->uid
);
1702 __get_user(cred
->gid
, &target_cred
->gid
);
1704 gemu_log("Unsupported ancillary data: %d/%d\n",
1705 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1706 memcpy(data
, target_data
, len
);
1709 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1710 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1713 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1715 msgh
->msg_controllen
= space
;
1719 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1720 struct msghdr
*msgh
)
1722 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1723 abi_long msg_controllen
;
1724 abi_ulong target_cmsg_addr
;
1725 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1726 socklen_t space
= 0;
1728 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1729 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1731 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1732 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1733 target_cmsg_start
= target_cmsg
;
1735 return -TARGET_EFAULT
;
1737 while (cmsg
&& target_cmsg
) {
1738 void *data
= CMSG_DATA(cmsg
);
1739 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1741 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1742 int tgt_len
, tgt_space
;
1744 /* We never copy a half-header but may copy half-data;
1745 * this is Linux's behaviour in put_cmsg(). Note that
1746 * truncation here is a guest problem (which we report
1747 * to the guest via the CTRUNC bit), unlike truncation
1748 * in target_to_host_cmsg, which is a QEMU bug.
1750 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1751 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1755 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1756 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1758 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1760 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1762 tgt_len
= TARGET_CMSG_LEN(len
);
1764 /* Payload types which need a different size of payload on
1765 * the target must adjust tgt_len here.
1767 switch (cmsg
->cmsg_level
) {
1769 switch (cmsg
->cmsg_type
) {
1771 tgt_len
= sizeof(struct target_timeval
);
1780 if (msg_controllen
< tgt_len
) {
1781 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1782 tgt_len
= msg_controllen
;
1785 /* We must now copy-and-convert len bytes of payload
1786 * into tgt_len bytes of destination space. Bear in mind
1787 * that in both source and destination we may be dealing
1788 * with a truncated value!
1790 switch (cmsg
->cmsg_level
) {
1792 switch (cmsg
->cmsg_type
) {
1795 int *fd
= (int *)data
;
1796 int *target_fd
= (int *)target_data
;
1797 int i
, numfds
= tgt_len
/ sizeof(int);
1799 for (i
= 0; i
< numfds
; i
++) {
1800 __put_user(fd
[i
], target_fd
+ i
);
1806 struct timeval
*tv
= (struct timeval
*)data
;
1807 struct target_timeval
*target_tv
=
1808 (struct target_timeval
*)target_data
;
1810 if (len
!= sizeof(struct timeval
) ||
1811 tgt_len
!= sizeof(struct target_timeval
)) {
1815 /* copy struct timeval to target */
1816 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1817 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1820 case SCM_CREDENTIALS
:
1822 struct ucred
*cred
= (struct ucred
*)data
;
1823 struct target_ucred
*target_cred
=
1824 (struct target_ucred
*)target_data
;
1826 __put_user(cred
->pid
, &target_cred
->pid
);
1827 __put_user(cred
->uid
, &target_cred
->uid
);
1828 __put_user(cred
->gid
, &target_cred
->gid
);
1838 gemu_log("Unsupported ancillary data: %d/%d\n",
1839 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1840 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1841 if (tgt_len
> len
) {
1842 memset(target_data
+ len
, 0, tgt_len
- len
);
1846 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1847 tgt_space
= TARGET_CMSG_SPACE(len
);
1848 if (msg_controllen
< tgt_space
) {
1849 tgt_space
= msg_controllen
;
1851 msg_controllen
-= tgt_space
;
1853 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1854 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1857 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1859 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1872 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1874 abi_long (*host_to_target_nlmsg
)
1875 (struct nlmsghdr
*))
1880 while (len
> sizeof(struct nlmsghdr
)) {
1882 nlmsg_len
= nlh
->nlmsg_len
;
1883 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1888 switch (nlh
->nlmsg_type
) {
1890 tswap_nlmsghdr(nlh
);
1896 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1897 e
->error
= tswap32(e
->error
);
1898 tswap_nlmsghdr(&e
->msg
);
1899 tswap_nlmsghdr(nlh
);
1903 ret
= host_to_target_nlmsg(nlh
);
1905 tswap_nlmsghdr(nlh
);
1910 tswap_nlmsghdr(nlh
);
1911 len
-= NLMSG_ALIGN(nlmsg_len
);
1912 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1917 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1919 abi_long (*target_to_host_nlmsg
)
1920 (struct nlmsghdr
*))
1924 while (len
> sizeof(struct nlmsghdr
)) {
1925 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1926 tswap32(nlh
->nlmsg_len
) > len
) {
1929 tswap_nlmsghdr(nlh
);
1930 switch (nlh
->nlmsg_type
) {
1937 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1938 e
->error
= tswap32(e
->error
);
1939 tswap_nlmsghdr(&e
->msg
);
1943 ret
= target_to_host_nlmsg(nlh
);
1948 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1949 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1954 #ifdef CONFIG_RTNETLINK
1955 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1956 size_t len
, void *context
,
1957 abi_long (*host_to_target_nlattr
)
1961 unsigned short nla_len
;
1964 while (len
> sizeof(struct nlattr
)) {
1965 nla_len
= nlattr
->nla_len
;
1966 if (nla_len
< sizeof(struct nlattr
) ||
1970 ret
= host_to_target_nlattr(nlattr
, context
);
1971 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1972 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1976 len
-= NLA_ALIGN(nla_len
);
1977 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1982 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1984 abi_long (*host_to_target_rtattr
)
1987 unsigned short rta_len
;
1990 while (len
> sizeof(struct rtattr
)) {
1991 rta_len
= rtattr
->rta_len
;
1992 if (rta_len
< sizeof(struct rtattr
) ||
1996 ret
= host_to_target_rtattr(rtattr
);
1997 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1998 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2002 len
-= RTA_ALIGN(rta_len
);
2003 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
/* Pointer to the payload of a netlink attribute (skips the header). */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2010 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2017 switch (nlattr
->nla_type
) {
2019 case QEMU_IFLA_BR_FDB_FLUSH
:
2022 case QEMU_IFLA_BR_GROUP_ADDR
:
2025 case QEMU_IFLA_BR_VLAN_FILTERING
:
2026 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2027 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2028 case QEMU_IFLA_BR_MCAST_ROUTER
:
2029 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2030 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2031 case QEMU_IFLA_BR_MCAST_QUERIER
:
2032 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2033 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2034 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2037 case QEMU_IFLA_BR_PRIORITY
:
2038 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2039 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2040 case QEMU_IFLA_BR_ROOT_PORT
:
2041 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2042 u16
= NLA_DATA(nlattr
);
2043 *u16
= tswap16(*u16
);
2046 case QEMU_IFLA_BR_FORWARD_DELAY
:
2047 case QEMU_IFLA_BR_HELLO_TIME
:
2048 case QEMU_IFLA_BR_MAX_AGE
:
2049 case QEMU_IFLA_BR_AGEING_TIME
:
2050 case QEMU_IFLA_BR_STP_STATE
:
2051 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2052 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2053 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2054 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2055 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2056 u32
= NLA_DATA(nlattr
);
2057 *u32
= tswap32(*u32
);
2060 case QEMU_IFLA_BR_HELLO_TIMER
:
2061 case QEMU_IFLA_BR_TCN_TIMER
:
2062 case QEMU_IFLA_BR_GC_TIMER
:
2063 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2064 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2065 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2066 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2067 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2068 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2069 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2070 u64
= NLA_DATA(nlattr
);
2071 *u64
= tswap64(*u64
);
2073 /* ifla_bridge_id: uin8_t[] */
2074 case QEMU_IFLA_BR_ROOT_ID
:
2075 case QEMU_IFLA_BR_BRIDGE_ID
:
2078 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2084 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2091 switch (nlattr
->nla_type
) {
2093 case QEMU_IFLA_BRPORT_STATE
:
2094 case QEMU_IFLA_BRPORT_MODE
:
2095 case QEMU_IFLA_BRPORT_GUARD
:
2096 case QEMU_IFLA_BRPORT_PROTECT
:
2097 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2098 case QEMU_IFLA_BRPORT_LEARNING
:
2099 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2100 case QEMU_IFLA_BRPORT_PROXYARP
:
2101 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2102 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2103 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2104 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2105 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2108 case QEMU_IFLA_BRPORT_PRIORITY
:
2109 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2110 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2111 case QEMU_IFLA_BRPORT_ID
:
2112 case QEMU_IFLA_BRPORT_NO
:
2113 u16
= NLA_DATA(nlattr
);
2114 *u16
= tswap16(*u16
);
2117 case QEMU_IFLA_BRPORT_COST
:
2118 u32
= NLA_DATA(nlattr
);
2119 *u32
= tswap32(*u32
);
2122 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2123 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2124 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2125 u64
= NLA_DATA(nlattr
);
2126 *u64
= tswap64(*u64
);
2128 /* ifla_bridge_id: uint8_t[] */
2129 case QEMU_IFLA_BRPORT_ROOT_ID
:
2130 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2133 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Context passed through the IFLA_LINKINFO attribute walk: records the
 * link kind strings (and their lengths) seen in IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND so nested INFO_DATA can be dispatched by kind.
 * NOTE(review): field set reconstructed from usage in
 * host_to_target_data_linkinfo_nlattr — confirm against upstream.
 */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2146 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2149 struct linkinfo_context
*li_context
= context
;
2151 switch (nlattr
->nla_type
) {
2153 case QEMU_IFLA_INFO_KIND
:
2154 li_context
->name
= NLA_DATA(nlattr
);
2155 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2157 case QEMU_IFLA_INFO_SLAVE_KIND
:
2158 li_context
->slave_name
= NLA_DATA(nlattr
);
2159 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2162 case QEMU_IFLA_INFO_XSTATS
:
2163 /* FIXME: only used by CAN */
2166 case QEMU_IFLA_INFO_DATA
:
2167 if (strncmp(li_context
->name
, "bridge",
2168 li_context
->len
) == 0) {
2169 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2172 host_to_target_data_bridge_nlattr
);
2174 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2177 case QEMU_IFLA_INFO_SLAVE_DATA
:
2178 if (strncmp(li_context
->slave_name
, "bridge",
2179 li_context
->slave_len
) == 0) {
2180 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2183 host_to_target_slave_data_bridge_nlattr
);
2185 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2186 li_context
->slave_name
);
2190 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2197 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2203 switch (nlattr
->nla_type
) {
2204 case QEMU_IFLA_INET_CONF
:
2205 u32
= NLA_DATA(nlattr
);
2206 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2208 u32
[i
] = tswap32(u32
[i
]);
2212 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2217 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2222 struct ifla_cacheinfo
*ci
;
2225 switch (nlattr
->nla_type
) {
2227 case QEMU_IFLA_INET6_TOKEN
:
2230 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2233 case QEMU_IFLA_INET6_FLAGS
:
2234 u32
= NLA_DATA(nlattr
);
2235 *u32
= tswap32(*u32
);
2238 case QEMU_IFLA_INET6_CONF
:
2239 u32
= NLA_DATA(nlattr
);
2240 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2242 u32
[i
] = tswap32(u32
[i
]);
2245 /* ifla_cacheinfo */
2246 case QEMU_IFLA_INET6_CACHEINFO
:
2247 ci
= NLA_DATA(nlattr
);
2248 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2249 ci
->tstamp
= tswap32(ci
->tstamp
);
2250 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2251 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2254 case QEMU_IFLA_INET6_STATS
:
2255 case QEMU_IFLA_INET6_ICMP6STATS
:
2256 u64
= NLA_DATA(nlattr
);
2257 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2259 u64
[i
] = tswap64(u64
[i
]);
2263 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2268 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2271 switch (nlattr
->nla_type
) {
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2275 host_to_target_data_inet_nlattr
);
2277 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2279 host_to_target_data_inet6_nlattr
);
2281 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2287 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2290 struct rtnl_link_stats
*st
;
2291 struct rtnl_link_stats64
*st64
;
2292 struct rtnl_link_ifmap
*map
;
2293 struct linkinfo_context li_context
;
2295 switch (rtattr
->rta_type
) {
2297 case QEMU_IFLA_ADDRESS
:
2298 case QEMU_IFLA_BROADCAST
:
2300 case QEMU_IFLA_IFNAME
:
2301 case QEMU_IFLA_QDISC
:
2304 case QEMU_IFLA_OPERSTATE
:
2305 case QEMU_IFLA_LINKMODE
:
2306 case QEMU_IFLA_CARRIER
:
2307 case QEMU_IFLA_PROTO_DOWN
:
2311 case QEMU_IFLA_LINK
:
2312 case QEMU_IFLA_WEIGHT
:
2313 case QEMU_IFLA_TXQLEN
:
2314 case QEMU_IFLA_CARRIER_CHANGES
:
2315 case QEMU_IFLA_NUM_RX_QUEUES
:
2316 case QEMU_IFLA_NUM_TX_QUEUES
:
2317 case QEMU_IFLA_PROMISCUITY
:
2318 case QEMU_IFLA_EXT_MASK
:
2319 case QEMU_IFLA_LINK_NETNSID
:
2320 case QEMU_IFLA_GROUP
:
2321 case QEMU_IFLA_MASTER
:
2322 case QEMU_IFLA_NUM_VF
:
2323 u32
= RTA_DATA(rtattr
);
2324 *u32
= tswap32(*u32
);
2326 /* struct rtnl_link_stats */
2327 case QEMU_IFLA_STATS
:
2328 st
= RTA_DATA(rtattr
);
2329 st
->rx_packets
= tswap32(st
->rx_packets
);
2330 st
->tx_packets
= tswap32(st
->tx_packets
);
2331 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2332 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2333 st
->rx_errors
= tswap32(st
->rx_errors
);
2334 st
->tx_errors
= tswap32(st
->tx_errors
);
2335 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2336 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2337 st
->multicast
= tswap32(st
->multicast
);
2338 st
->collisions
= tswap32(st
->collisions
);
2340 /* detailed rx_errors: */
2341 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2342 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2343 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2344 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2345 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2346 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2348 /* detailed tx_errors */
2349 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2350 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2351 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2352 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2353 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2356 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2357 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2359 /* struct rtnl_link_stats64 */
2360 case QEMU_IFLA_STATS64
:
2361 st64
= RTA_DATA(rtattr
);
2362 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2363 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2364 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2365 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2366 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2367 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2368 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2369 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2370 st64
->multicast
= tswap64(st64
->multicast
);
2371 st64
->collisions
= tswap64(st64
->collisions
);
2373 /* detailed rx_errors: */
2374 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2375 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2376 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2377 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2378 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2379 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2381 /* detailed tx_errors */
2382 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2383 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2384 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2385 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2386 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2389 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2390 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2392 /* struct rtnl_link_ifmap */
2394 map
= RTA_DATA(rtattr
);
2395 map
->mem_start
= tswap64(map
->mem_start
);
2396 map
->mem_end
= tswap64(map
->mem_end
);
2397 map
->base_addr
= tswap64(map
->base_addr
);
2398 map
->irq
= tswap16(map
->irq
);
2401 case QEMU_IFLA_LINKINFO
:
2402 memset(&li_context
, 0, sizeof(li_context
));
2403 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2405 host_to_target_data_linkinfo_nlattr
);
2406 case QEMU_IFLA_AF_SPEC
:
2407 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2409 host_to_target_data_spec_nlattr
);
2411 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2417 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2420 struct ifa_cacheinfo
*ci
;
2422 switch (rtattr
->rta_type
) {
2423 /* binary: depends on family type */
2433 u32
= RTA_DATA(rtattr
);
2434 *u32
= tswap32(*u32
);
2436 /* struct ifa_cacheinfo */
2438 ci
= RTA_DATA(rtattr
);
2439 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2440 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2441 ci
->cstamp
= tswap32(ci
->cstamp
);
2442 ci
->tstamp
= tswap32(ci
->tstamp
);
2445 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2451 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2454 switch (rtattr
->rta_type
) {
2455 /* binary: depends on family type */
2464 u32
= RTA_DATA(rtattr
);
2465 *u32
= tswap32(*u32
);
2468 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2474 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2475 uint32_t rtattr_len
)
2477 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2478 host_to_target_data_link_rtattr
);
2481 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2482 uint32_t rtattr_len
)
2484 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2485 host_to_target_data_addr_rtattr
);
2488 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2489 uint32_t rtattr_len
)
2491 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2492 host_to_target_data_route_rtattr
);
2495 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2498 struct ifinfomsg
*ifi
;
2499 struct ifaddrmsg
*ifa
;
2502 nlmsg_len
= nlh
->nlmsg_len
;
2503 switch (nlh
->nlmsg_type
) {
2507 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2508 ifi
= NLMSG_DATA(nlh
);
2509 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2510 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2511 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2512 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2513 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2514 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2520 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2521 ifa
= NLMSG_DATA(nlh
);
2522 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2523 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2524 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2530 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2531 rtm
= NLMSG_DATA(nlh
);
2532 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2533 host_to_target_route_rtattr(RTM_RTA(rtm
),
2534 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2538 return -TARGET_EINVAL
;
2543 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2546 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2549 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2551 abi_long (*target_to_host_rtattr
)
2556 while (len
>= sizeof(struct rtattr
)) {
2557 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2558 tswap16(rtattr
->rta_len
) > len
) {
2561 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2562 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2563 ret
= target_to_host_rtattr(rtattr
);
2567 len
-= RTA_ALIGN(rtattr
->rta_len
);
2568 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2569 RTA_ALIGN(rtattr
->rta_len
));
2574 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2576 switch (rtattr
->rta_type
) {
2578 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2584 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2586 switch (rtattr
->rta_type
) {
2587 /* binary: depends on family type */
2592 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2598 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2601 switch (rtattr
->rta_type
) {
2602 /* binary: depends on family type */
2610 u32
= RTA_DATA(rtattr
);
2611 *u32
= tswap32(*u32
);
2614 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2620 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2621 uint32_t rtattr_len
)
2623 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2624 target_to_host_data_link_rtattr
);
2627 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2628 uint32_t rtattr_len
)
2630 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2631 target_to_host_data_addr_rtattr
);
2634 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2635 uint32_t rtattr_len
)
2637 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2638 target_to_host_data_route_rtattr
);
2641 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2643 struct ifinfomsg
*ifi
;
2644 struct ifaddrmsg
*ifa
;
2647 switch (nlh
->nlmsg_type
) {
2652 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2653 ifi
= NLMSG_DATA(nlh
);
2654 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2655 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2656 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2657 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2658 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2659 NLMSG_LENGTH(sizeof(*ifi
)));
2665 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2666 ifa
= NLMSG_DATA(nlh
);
2667 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2668 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2669 NLMSG_LENGTH(sizeof(*ifa
)));
2676 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2677 rtm
= NLMSG_DATA(nlh
);
2678 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2679 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2680 NLMSG_LENGTH(sizeof(*rtm
)));
2684 return -TARGET_EOPNOTSUPP
;
2689 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2691 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2693 #endif /* CONFIG_RTNETLINK */
2695 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2697 switch (nlh
->nlmsg_type
) {
2699 gemu_log("Unknown host audit message type %d\n",
2701 return -TARGET_EINVAL
;
2706 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2709 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2712 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2714 switch (nlh
->nlmsg_type
) {
2716 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2717 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2720 gemu_log("Unknown target audit message type %d\n",
2722 return -TARGET_EINVAL
;
2728 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2730 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2733 /* do_setsockopt() Must return target values and target errnos. */
2734 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2735 abi_ulong optval_addr
, socklen_t optlen
)
2739 struct ip_mreqn
*ip_mreq
;
2740 struct ip_mreq_source
*ip_mreq_source
;
2744 /* TCP options all take an 'int' value. */
2745 if (optlen
< sizeof(uint32_t))
2746 return -TARGET_EINVAL
;
2748 if (get_user_u32(val
, optval_addr
))
2749 return -TARGET_EFAULT
;
2750 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2757 case IP_ROUTER_ALERT
:
2761 case IP_MTU_DISCOVER
:
2767 case IP_MULTICAST_TTL
:
2768 case IP_MULTICAST_LOOP
:
2770 if (optlen
>= sizeof(uint32_t)) {
2771 if (get_user_u32(val
, optval_addr
))
2772 return -TARGET_EFAULT
;
2773 } else if (optlen
>= 1) {
2774 if (get_user_u8(val
, optval_addr
))
2775 return -TARGET_EFAULT
;
2777 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2779 case IP_ADD_MEMBERSHIP
:
2780 case IP_DROP_MEMBERSHIP
:
2781 if (optlen
< sizeof (struct target_ip_mreq
) ||
2782 optlen
> sizeof (struct target_ip_mreqn
))
2783 return -TARGET_EINVAL
;
2785 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2786 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2787 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2790 case IP_BLOCK_SOURCE
:
2791 case IP_UNBLOCK_SOURCE
:
2792 case IP_ADD_SOURCE_MEMBERSHIP
:
2793 case IP_DROP_SOURCE_MEMBERSHIP
:
2794 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2795 return -TARGET_EINVAL
;
2797 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2798 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2799 unlock_user (ip_mreq_source
, optval_addr
, 0);
2808 case IPV6_MTU_DISCOVER
:
2811 case IPV6_RECVPKTINFO
:
2813 if (optlen
< sizeof(uint32_t)) {
2814 return -TARGET_EINVAL
;
2816 if (get_user_u32(val
, optval_addr
)) {
2817 return -TARGET_EFAULT
;
2819 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2820 &val
, sizeof(val
)));
2829 /* struct icmp_filter takes an u32 value */
2830 if (optlen
< sizeof(uint32_t)) {
2831 return -TARGET_EINVAL
;
2834 if (get_user_u32(val
, optval_addr
)) {
2835 return -TARGET_EFAULT
;
2837 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2838 &val
, sizeof(val
)));
2845 case TARGET_SOL_SOCKET
:
2847 case TARGET_SO_RCVTIMEO
:
2851 optname
= SO_RCVTIMEO
;
2854 if (optlen
!= sizeof(struct target_timeval
)) {
2855 return -TARGET_EINVAL
;
2858 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2859 return -TARGET_EFAULT
;
2862 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2866 case TARGET_SO_SNDTIMEO
:
2867 optname
= SO_SNDTIMEO
;
2869 case TARGET_SO_ATTACH_FILTER
:
2871 struct target_sock_fprog
*tfprog
;
2872 struct target_sock_filter
*tfilter
;
2873 struct sock_fprog fprog
;
2874 struct sock_filter
*filter
;
2877 if (optlen
!= sizeof(*tfprog
)) {
2878 return -TARGET_EINVAL
;
2880 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2881 return -TARGET_EFAULT
;
2883 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2884 tswapal(tfprog
->filter
), 0)) {
2885 unlock_user_struct(tfprog
, optval_addr
, 1);
2886 return -TARGET_EFAULT
;
2889 fprog
.len
= tswap16(tfprog
->len
);
2890 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2891 if (filter
== NULL
) {
2892 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2893 unlock_user_struct(tfprog
, optval_addr
, 1);
2894 return -TARGET_ENOMEM
;
2896 for (i
= 0; i
< fprog
.len
; i
++) {
2897 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2898 filter
[i
].jt
= tfilter
[i
].jt
;
2899 filter
[i
].jf
= tfilter
[i
].jf
;
2900 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2902 fprog
.filter
= filter
;
2904 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2905 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2908 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2909 unlock_user_struct(tfprog
, optval_addr
, 1);
2912 case TARGET_SO_BINDTODEVICE
:
2914 char *dev_ifname
, *addr_ifname
;
2916 if (optlen
> IFNAMSIZ
- 1) {
2917 optlen
= IFNAMSIZ
- 1;
2919 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2921 return -TARGET_EFAULT
;
2923 optname
= SO_BINDTODEVICE
;
2924 addr_ifname
= alloca(IFNAMSIZ
);
2925 memcpy(addr_ifname
, dev_ifname
, optlen
);
2926 addr_ifname
[optlen
] = 0;
2927 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2928 addr_ifname
, optlen
));
2929 unlock_user (dev_ifname
, optval_addr
, 0);
2932 /* Options with 'int' argument. */
2933 case TARGET_SO_DEBUG
:
2936 case TARGET_SO_REUSEADDR
:
2937 optname
= SO_REUSEADDR
;
2939 case TARGET_SO_TYPE
:
2942 case TARGET_SO_ERROR
:
2945 case TARGET_SO_DONTROUTE
:
2946 optname
= SO_DONTROUTE
;
2948 case TARGET_SO_BROADCAST
:
2949 optname
= SO_BROADCAST
;
2951 case TARGET_SO_SNDBUF
:
2952 optname
= SO_SNDBUF
;
2954 case TARGET_SO_SNDBUFFORCE
:
2955 optname
= SO_SNDBUFFORCE
;
2957 case TARGET_SO_RCVBUF
:
2958 optname
= SO_RCVBUF
;
2960 case TARGET_SO_RCVBUFFORCE
:
2961 optname
= SO_RCVBUFFORCE
;
2963 case TARGET_SO_KEEPALIVE
:
2964 optname
= SO_KEEPALIVE
;
2966 case TARGET_SO_OOBINLINE
:
2967 optname
= SO_OOBINLINE
;
2969 case TARGET_SO_NO_CHECK
:
2970 optname
= SO_NO_CHECK
;
2972 case TARGET_SO_PRIORITY
:
2973 optname
= SO_PRIORITY
;
2976 case TARGET_SO_BSDCOMPAT
:
2977 optname
= SO_BSDCOMPAT
;
2980 case TARGET_SO_PASSCRED
:
2981 optname
= SO_PASSCRED
;
2983 case TARGET_SO_PASSSEC
:
2984 optname
= SO_PASSSEC
;
2986 case TARGET_SO_TIMESTAMP
:
2987 optname
= SO_TIMESTAMP
;
2989 case TARGET_SO_RCVLOWAT
:
2990 optname
= SO_RCVLOWAT
;
2996 if (optlen
< sizeof(uint32_t))
2997 return -TARGET_EINVAL
;
2999 if (get_user_u32(val
, optval_addr
))
3000 return -TARGET_EFAULT
;
3001 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3005 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3006 ret
= -TARGET_ENOPROTOOPT
;
3011 /* do_getsockopt() Must return target values and target errnos. */
3012 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3013 abi_ulong optval_addr
, abi_ulong optlen
)
3020 case TARGET_SOL_SOCKET
:
3023 /* These don't just return a single integer */
3024 case TARGET_SO_LINGER
:
3025 case TARGET_SO_RCVTIMEO
:
3026 case TARGET_SO_SNDTIMEO
:
3027 case TARGET_SO_PEERNAME
:
3029 case TARGET_SO_PEERCRED
: {
3032 struct target_ucred
*tcr
;
3034 if (get_user_u32(len
, optlen
)) {
3035 return -TARGET_EFAULT
;
3038 return -TARGET_EINVAL
;
3042 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3050 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3051 return -TARGET_EFAULT
;
3053 __put_user(cr
.pid
, &tcr
->pid
);
3054 __put_user(cr
.uid
, &tcr
->uid
);
3055 __put_user(cr
.gid
, &tcr
->gid
);
3056 unlock_user_struct(tcr
, optval_addr
, 1);
3057 if (put_user_u32(len
, optlen
)) {
3058 return -TARGET_EFAULT
;
3062 /* Options with 'int' argument. */
3063 case TARGET_SO_DEBUG
:
3066 case TARGET_SO_REUSEADDR
:
3067 optname
= SO_REUSEADDR
;
3069 case TARGET_SO_TYPE
:
3072 case TARGET_SO_ERROR
:
3075 case TARGET_SO_DONTROUTE
:
3076 optname
= SO_DONTROUTE
;
3078 case TARGET_SO_BROADCAST
:
3079 optname
= SO_BROADCAST
;
3081 case TARGET_SO_SNDBUF
:
3082 optname
= SO_SNDBUF
;
3084 case TARGET_SO_RCVBUF
:
3085 optname
= SO_RCVBUF
;
3087 case TARGET_SO_KEEPALIVE
:
3088 optname
= SO_KEEPALIVE
;
3090 case TARGET_SO_OOBINLINE
:
3091 optname
= SO_OOBINLINE
;
3093 case TARGET_SO_NO_CHECK
:
3094 optname
= SO_NO_CHECK
;
3096 case TARGET_SO_PRIORITY
:
3097 optname
= SO_PRIORITY
;
3100 case TARGET_SO_BSDCOMPAT
:
3101 optname
= SO_BSDCOMPAT
;
3104 case TARGET_SO_PASSCRED
:
3105 optname
= SO_PASSCRED
;
3107 case TARGET_SO_TIMESTAMP
:
3108 optname
= SO_TIMESTAMP
;
3110 case TARGET_SO_RCVLOWAT
:
3111 optname
= SO_RCVLOWAT
;
3113 case TARGET_SO_ACCEPTCONN
:
3114 optname
= SO_ACCEPTCONN
;
3121 /* TCP options all take an 'int' value. */
3123 if (get_user_u32(len
, optlen
))
3124 return -TARGET_EFAULT
;
3126 return -TARGET_EINVAL
;
3128 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3131 if (optname
== SO_TYPE
) {
3132 val
= host_to_target_sock_type(val
);
3137 if (put_user_u32(val
, optval_addr
))
3138 return -TARGET_EFAULT
;
3140 if (put_user_u8(val
, optval_addr
))
3141 return -TARGET_EFAULT
;
3143 if (put_user_u32(len
, optlen
))
3144 return -TARGET_EFAULT
;
3151 case IP_ROUTER_ALERT
:
3155 case IP_MTU_DISCOVER
:
3161 case IP_MULTICAST_TTL
:
3162 case IP_MULTICAST_LOOP
:
3163 if (get_user_u32(len
, optlen
))
3164 return -TARGET_EFAULT
;
3166 return -TARGET_EINVAL
;
3168 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3171 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3173 if (put_user_u32(len
, optlen
)
3174 || put_user_u8(val
, optval_addr
))
3175 return -TARGET_EFAULT
;
3177 if (len
> sizeof(int))
3179 if (put_user_u32(len
, optlen
)
3180 || put_user_u32(val
, optval_addr
))
3181 return -TARGET_EFAULT
;
3185 ret
= -TARGET_ENOPROTOOPT
;
3191 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3193 ret
= -TARGET_EOPNOTSUPP
;
3199 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3200 abi_ulong count
, int copy
)
3202 struct target_iovec
*target_vec
;
3204 abi_ulong total_len
, max_len
;
3207 bool bad_address
= false;
3213 if (count
> IOV_MAX
) {
3218 vec
= g_try_new0(struct iovec
, count
);
3224 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3225 count
* sizeof(struct target_iovec
), 1);
3226 if (target_vec
== NULL
) {
3231 /* ??? If host page size > target page size, this will result in a
3232 value larger than what we can actually support. */
3233 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3236 for (i
= 0; i
< count
; i
++) {
3237 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3238 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3243 } else if (len
== 0) {
3244 /* Zero length pointer is ignored. */
3245 vec
[i
].iov_base
= 0;
3247 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3248 /* If the first buffer pointer is bad, this is a fault. But
3249 * subsequent bad buffers will result in a partial write; this
3250 * is realized by filling the vector with null pointers and
3252 if (!vec
[i
].iov_base
) {
3263 if (len
> max_len
- total_len
) {
3264 len
= max_len
- total_len
;
3267 vec
[i
].iov_len
= len
;
3271 unlock_user(target_vec
, target_addr
, 0);
3276 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3277 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3280 unlock_user(target_vec
, target_addr
, 0);
3287 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3288 abi_ulong count
, int copy
)
3290 struct target_iovec
*target_vec
;
3293 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3294 count
* sizeof(struct target_iovec
), 1);
3296 for (i
= 0; i
< count
; i
++) {
3297 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3298 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3302 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3304 unlock_user(target_vec
, target_addr
, 0);
3310 static inline int target_to_host_sock_type(int *type
)
3313 int target_type
= *type
;
3315 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3316 case TARGET_SOCK_DGRAM
:
3317 host_type
= SOCK_DGRAM
;
3319 case TARGET_SOCK_STREAM
:
3320 host_type
= SOCK_STREAM
;
3323 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3326 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3327 #if defined(SOCK_CLOEXEC)
3328 host_type
|= SOCK_CLOEXEC
;
3330 return -TARGET_EINVAL
;
3333 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3334 #if defined(SOCK_NONBLOCK)
3335 host_type
|= SOCK_NONBLOCK
;
3336 #elif !defined(O_NONBLOCK)
3337 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, set O_NONBLOCK with fcntl instead.
 * Returns the fd on success, or -TARGET_EINVAL (closing the fd) on
 * failure.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            /* NOTE(review): close(fd) restored from upstream here;
             * the source line was dropped — confirm. */
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3359 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3360 abi_ulong target_addr
,
3363 struct sockaddr
*addr
= host_addr
;
3364 struct target_sockaddr
*target_saddr
;
3366 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3367 if (!target_saddr
) {
3368 return -TARGET_EFAULT
;
3371 memcpy(addr
, target_saddr
, len
);
3372 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3373 /* spkt_protocol is big-endian */
3375 unlock_user(target_saddr
, target_addr
, 0);
3379 static TargetFdTrans target_packet_trans
= {
3380 .target_to_host_addr
= packet_target_to_host_sockaddr
,
#ifdef CONFIG_RTNETLINK
/* fd-translator data hooks for NETLINK_ROUTE sockets: convert the
 * message stream in place and report the (unchanged) length on success.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3414 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3418 ret
= target_to_host_nlmsg_audit(buf
, len
);
3426 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3430 ret
= host_to_target_nlmsg_audit(buf
, len
);
3438 static TargetFdTrans target_netlink_audit_trans
= {
3439 .target_to_host_data
= netlink_audit_target_to_host
,
3440 .host_to_target_data
= netlink_audit_host_to_target
,
3443 /* do_socket() Must return target values and target errnos. */
3444 static abi_long
do_socket(int domain
, int type
, int protocol
)
3446 int target_type
= type
;
3449 ret
= target_to_host_sock_type(&type
);
3454 if (domain
== PF_NETLINK
&& !(
3455 #ifdef CONFIG_RTNETLINK
3456 protocol
== NETLINK_ROUTE
||
3458 protocol
== NETLINK_KOBJECT_UEVENT
||
3459 protocol
== NETLINK_AUDIT
)) {
3460 return -EPFNOSUPPORT
;
3463 if (domain
== AF_PACKET
||
3464 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3465 protocol
= tswap16(protocol
);
3468 ret
= get_errno(socket(domain
, type
, protocol
));
3470 ret
= sock_flags_fixup(ret
, target_type
);
3471 if (type
== SOCK_PACKET
) {
3472 /* Manage an obsolete case :
3473 * if socket type is SOCK_PACKET, bind by name
3475 fd_trans_register(ret
, &target_packet_trans
);
3476 } else if (domain
== PF_NETLINK
) {
3478 #ifdef CONFIG_RTNETLINK
3480 fd_trans_register(ret
, &target_netlink_route_trans
);
3483 case NETLINK_KOBJECT_UEVENT
:
3484 /* nothing to do: messages are strings */
3487 fd_trans_register(ret
, &target_netlink_audit_trans
);
3490 g_assert_not_reached();
3497 /* do_bind() Must return target values and target errnos. */
3498 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3504 if ((int)addrlen
< 0) {
3505 return -TARGET_EINVAL
;
3508 addr
= alloca(addrlen
+1);
3510 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3514 return get_errno(bind(sockfd
, addr
, addrlen
));
3517 /* do_connect() Must return target values and target errnos. */
3518 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3524 if ((int)addrlen
< 0) {
3525 return -TARGET_EINVAL
;
3528 addr
= alloca(addrlen
+1);
3530 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3534 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3537 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3538 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3539 int flags
, int send
)
3545 abi_ulong target_vec
;
3547 if (msgp
->msg_name
) {
3548 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3549 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3550 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3551 tswapal(msgp
->msg_name
),
3553 if (ret
== -TARGET_EFAULT
) {
3554 /* For connected sockets msg_name and msg_namelen must
3555 * be ignored, so returning EFAULT immediately is wrong.
3556 * Instead, pass a bad msg_name to the host kernel, and
3557 * let it decide whether to return EFAULT or not.
3559 msg
.msg_name
= (void *)-1;
3564 msg
.msg_name
= NULL
;
3565 msg
.msg_namelen
= 0;
3567 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3568 msg
.msg_control
= alloca(msg
.msg_controllen
);
3569 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3571 count
= tswapal(msgp
->msg_iovlen
);
3572 target_vec
= tswapal(msgp
->msg_iov
);
3574 if (count
> IOV_MAX
) {
3575 /* sendrcvmsg returns a different errno for this condition than
3576 * readv/writev, so we must catch it here before lock_iovec() does.
3578 ret
= -TARGET_EMSGSIZE
;
3582 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3583 target_vec
, count
, send
);
3585 ret
= -host_to_target_errno(errno
);
3588 msg
.msg_iovlen
= count
;
3592 if (fd_trans_target_to_host_data(fd
)) {
3595 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3596 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3597 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3598 msg
.msg_iov
->iov_len
);
3600 msg
.msg_iov
->iov_base
= host_msg
;
3601 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3605 ret
= target_to_host_cmsg(&msg
, msgp
);
3607 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3611 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3612 if (!is_error(ret
)) {
3614 if (fd_trans_host_to_target_data(fd
)) {
3615 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3618 ret
= host_to_target_cmsg(msgp
, &msg
);
3620 if (!is_error(ret
)) {
3621 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3622 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3623 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3624 msg
.msg_name
, msg
.msg_namelen
);
3636 unlock_iovec(vec
, target_vec
, count
, !send
);
3641 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3642 int flags
, int send
)
3645 struct target_msghdr
*msgp
;
3647 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3651 return -TARGET_EFAULT
;
3653 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3654 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3658 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3659 * so it might not have this *mmsg-specific flag either.
3661 #ifndef MSG_WAITFORONE
3662 #define MSG_WAITFORONE 0x10000
3665 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3666 unsigned int vlen
, unsigned int flags
,
3669 struct target_mmsghdr
*mmsgp
;
3673 if (vlen
> UIO_MAXIOV
) {
3677 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3679 return -TARGET_EFAULT
;
3682 for (i
= 0; i
< vlen
; i
++) {
3683 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3684 if (is_error(ret
)) {
3687 mmsgp
[i
].msg_len
= tswap32(ret
);
3688 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3689 if (flags
& MSG_WAITFORONE
) {
3690 flags
|= MSG_DONTWAIT
;
3694 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3696 /* Return number of datagrams sent if we sent any at all;
3697 * otherwise return the error.
3705 /* do_accept4() Must return target values and target errnos. */
3706 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3707 abi_ulong target_addrlen_addr
, int flags
)
3714 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3716 if (target_addr
== 0) {
3717 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3720 /* linux returns EINVAL if addrlen pointer is invalid */
3721 if (get_user_u32(addrlen
, target_addrlen_addr
))
3722 return -TARGET_EINVAL
;
3724 if ((int)addrlen
< 0) {
3725 return -TARGET_EINVAL
;
3728 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3729 return -TARGET_EINVAL
;
3731 addr
= alloca(addrlen
);
3733 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3734 if (!is_error(ret
)) {
3735 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3736 if (put_user_u32(addrlen
, target_addrlen_addr
))
3737 ret
= -TARGET_EFAULT
;
3742 /* do_getpeername() Must return target values and target errnos. */
3743 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3744 abi_ulong target_addrlen_addr
)
3750 if (get_user_u32(addrlen
, target_addrlen_addr
))
3751 return -TARGET_EFAULT
;
3753 if ((int)addrlen
< 0) {
3754 return -TARGET_EINVAL
;
3757 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3758 return -TARGET_EFAULT
;
3760 addr
= alloca(addrlen
);
3762 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3763 if (!is_error(ret
)) {
3764 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3765 if (put_user_u32(addrlen
, target_addrlen_addr
))
3766 ret
= -TARGET_EFAULT
;
3771 /* do_getsockname() Must return target values and target errnos. */
3772 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3773 abi_ulong target_addrlen_addr
)
3779 if (get_user_u32(addrlen
, target_addrlen_addr
))
3780 return -TARGET_EFAULT
;
3782 if ((int)addrlen
< 0) {
3783 return -TARGET_EINVAL
;
3786 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3787 return -TARGET_EFAULT
;
3789 addr
= alloca(addrlen
);
3791 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3792 if (!is_error(ret
)) {
3793 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3794 if (put_user_u32(addrlen
, target_addrlen_addr
))
3795 ret
= -TARGET_EFAULT
;
3800 /* do_socketpair() Must return target values and target errnos. */
3801 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3802 abi_ulong target_tab_addr
)
3807 target_to_host_sock_type(&type
);
3809 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3810 if (!is_error(ret
)) {
3811 if (put_user_s32(tab
[0], target_tab_addr
)
3812 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3813 ret
= -TARGET_EFAULT
;
3818 /* do_sendto() Must return target values and target errnos. */
3819 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3820 abi_ulong target_addr
, socklen_t addrlen
)
3824 void *copy_msg
= NULL
;
3827 if ((int)addrlen
< 0) {
3828 return -TARGET_EINVAL
;
3831 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3833 return -TARGET_EFAULT
;
3834 if (fd_trans_target_to_host_data(fd
)) {
3835 copy_msg
= host_msg
;
3836 host_msg
= g_malloc(len
);
3837 memcpy(host_msg
, copy_msg
, len
);
3838 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3844 addr
= alloca(addrlen
+1);
3845 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3849 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3851 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3856 host_msg
= copy_msg
;
3858 unlock_user(host_msg
, msg
, 0);
3862 /* do_recvfrom() Must return target values and target errnos. */
3863 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3864 abi_ulong target_addr
,
3865 abi_ulong target_addrlen
)
3872 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3874 return -TARGET_EFAULT
;
3876 if (get_user_u32(addrlen
, target_addrlen
)) {
3877 ret
= -TARGET_EFAULT
;
3880 if ((int)addrlen
< 0) {
3881 ret
= -TARGET_EINVAL
;
3884 addr
= alloca(addrlen
);
3885 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3888 addr
= NULL
; /* To keep compiler quiet. */
3889 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3891 if (!is_error(ret
)) {
3892 if (fd_trans_host_to_target_data(fd
)) {
3893 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3896 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3897 if (put_user_u32(addrlen
, target_addrlen
)) {
3898 ret
= -TARGET_EFAULT
;
3902 unlock_user(host_msg
, msg
, len
);
3905 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /* Argument count for each multiplexed socket operation. */
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4003 #define N_SHM_REGIONS 32
4005 static struct shm_region
{
4009 } shm_regions
[N_SHM_REGIONS
];
4011 #ifndef TARGET_SEMID64_DS
4012 /* asm-generic version of this struct */
4013 struct target_semid64_ds
4015 struct target_ipc_perm sem_perm
;
4016 abi_ulong sem_otime
;
4017 #if TARGET_ABI_BITS == 32
4018 abi_ulong __unused1
;
4020 abi_ulong sem_ctime
;
4021 #if TARGET_ABI_BITS == 32
4022 abi_ulong __unused2
;
4024 abi_ulong sem_nsems
;
4025 abi_ulong __unused3
;
4026 abi_ulong __unused4
;
4030 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4031 abi_ulong target_addr
)
4033 struct target_ipc_perm
*target_ip
;
4034 struct target_semid64_ds
*target_sd
;
4036 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4037 return -TARGET_EFAULT
;
4038 target_ip
= &(target_sd
->sem_perm
);
4039 host_ip
->__key
= tswap32(target_ip
->__key
);
4040 host_ip
->uid
= tswap32(target_ip
->uid
);
4041 host_ip
->gid
= tswap32(target_ip
->gid
);
4042 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4043 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4044 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4045 host_ip
->mode
= tswap32(target_ip
->mode
);
4047 host_ip
->mode
= tswap16(target_ip
->mode
);
4049 #if defined(TARGET_PPC)
4050 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4052 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4054 unlock_user_struct(target_sd
, target_addr
, 0);
4058 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4059 struct ipc_perm
*host_ip
)
4061 struct target_ipc_perm
*target_ip
;
4062 struct target_semid64_ds
*target_sd
;
4064 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4065 return -TARGET_EFAULT
;
4066 target_ip
= &(target_sd
->sem_perm
);
4067 target_ip
->__key
= tswap32(host_ip
->__key
);
4068 target_ip
->uid
= tswap32(host_ip
->uid
);
4069 target_ip
->gid
= tswap32(host_ip
->gid
);
4070 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4071 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4072 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4073 target_ip
->mode
= tswap32(host_ip
->mode
);
4075 target_ip
->mode
= tswap16(host_ip
->mode
);
4077 #if defined(TARGET_PPC)
4078 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4080 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4082 unlock_user_struct(target_sd
, target_addr
, 1);
4086 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4087 abi_ulong target_addr
)
4089 struct target_semid64_ds
*target_sd
;
4091 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4092 return -TARGET_EFAULT
;
4093 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4094 return -TARGET_EFAULT
;
4095 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4096 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4097 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4098 unlock_user_struct(target_sd
, target_addr
, 0);
4102 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4103 struct semid_ds
*host_sd
)
4105 struct target_semid64_ds
*target_sd
;
4107 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4108 return -TARGET_EFAULT
;
4109 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4110 return -TARGET_EFAULT
;
4111 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4112 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4113 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4114 unlock_user_struct(target_sd
, target_addr
, 1);
4118 struct target_seminfo
{
4131 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4132 struct seminfo
*host_seminfo
)
4134 struct target_seminfo
*target_seminfo
;
4135 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4136 return -TARGET_EFAULT
;
4137 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4138 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4139 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4140 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4141 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4142 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4143 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4144 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4145 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4146 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4147 unlock_user_struct(target_seminfo
, target_addr
, 1);
4153 struct semid_ds
*buf
;
4154 unsigned short *array
;
4155 struct seminfo
*__buf
;
4158 union target_semun
{
4165 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4166 abi_ulong target_addr
)
4169 unsigned short *array
;
4171 struct semid_ds semid_ds
;
4174 semun
.buf
= &semid_ds
;
4176 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4178 return get_errno(ret
);
4180 nsems
= semid_ds
.sem_nsems
;
4182 *host_array
= g_try_new(unsigned short, nsems
);
4184 return -TARGET_ENOMEM
;
4186 array
= lock_user(VERIFY_READ
, target_addr
,
4187 nsems
*sizeof(unsigned short), 1);
4189 g_free(*host_array
);
4190 return -TARGET_EFAULT
;
4193 for(i
=0; i
<nsems
; i
++) {
4194 __get_user((*host_array
)[i
], &array
[i
]);
4196 unlock_user(array
, target_addr
, 0);
4201 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4202 unsigned short **host_array
)
4205 unsigned short *array
;
4207 struct semid_ds semid_ds
;
4210 semun
.buf
= &semid_ds
;
4212 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4214 return get_errno(ret
);
4216 nsems
= semid_ds
.sem_nsems
;
4218 array
= lock_user(VERIFY_WRITE
, target_addr
,
4219 nsems
*sizeof(unsigned short), 0);
4221 return -TARGET_EFAULT
;
4223 for(i
=0; i
<nsems
; i
++) {
4224 __put_user((*host_array
)[i
], &array
[i
]);
4226 g_free(*host_array
);
4227 unlock_user(array
, target_addr
, 1);
4232 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4233 abi_ulong target_arg
)
4235 union target_semun target_su
= { .buf
= target_arg
};
4237 struct semid_ds dsarg
;
4238 unsigned short *array
= NULL
;
4239 struct seminfo seminfo
;
4240 abi_long ret
= -TARGET_EINVAL
;
4247 /* In 64 bit cross-endian situations, we will erroneously pick up
4248 * the wrong half of the union for the "val" element. To rectify
4249 * this, the entire 8-byte structure is byteswapped, followed by
4250 * a swap of the 4 byte val field. In other cases, the data is
4251 * already in proper host byte order. */
4252 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4253 target_su
.buf
= tswapal(target_su
.buf
);
4254 arg
.val
= tswap32(target_su
.val
);
4256 arg
.val
= target_su
.val
;
4258 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4262 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4266 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4267 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4274 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4278 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4279 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4285 arg
.__buf
= &seminfo
;
4286 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4287 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4295 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4302 struct target_sembuf
{
4303 unsigned short sem_num
;
4308 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4309 abi_ulong target_addr
,
4312 struct target_sembuf
*target_sembuf
;
4315 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4316 nsops
*sizeof(struct target_sembuf
), 1);
4318 return -TARGET_EFAULT
;
4320 for(i
=0; i
<nsops
; i
++) {
4321 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4322 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4323 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4326 unlock_user(target_sembuf
, target_addr
, 0);
4331 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4333 struct sembuf sops
[nsops
];
4335 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4336 return -TARGET_EFAULT
;
4338 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4341 struct target_msqid_ds
4343 struct target_ipc_perm msg_perm
;
4344 abi_ulong msg_stime
;
4345 #if TARGET_ABI_BITS == 32
4346 abi_ulong __unused1
;
4348 abi_ulong msg_rtime
;
4349 #if TARGET_ABI_BITS == 32
4350 abi_ulong __unused2
;
4352 abi_ulong msg_ctime
;
4353 #if TARGET_ABI_BITS == 32
4354 abi_ulong __unused3
;
4356 abi_ulong __msg_cbytes
;
4358 abi_ulong msg_qbytes
;
4359 abi_ulong msg_lspid
;
4360 abi_ulong msg_lrpid
;
4361 abi_ulong __unused4
;
4362 abi_ulong __unused5
;
4365 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4366 abi_ulong target_addr
)
4368 struct target_msqid_ds
*target_md
;
4370 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4371 return -TARGET_EFAULT
;
4372 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4373 return -TARGET_EFAULT
;
4374 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4375 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4376 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4377 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4378 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4379 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4380 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4381 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4382 unlock_user_struct(target_md
, target_addr
, 0);
4386 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4387 struct msqid_ds
*host_md
)
4389 struct target_msqid_ds
*target_md
;
4391 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4392 return -TARGET_EFAULT
;
4393 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4394 return -TARGET_EFAULT
;
4395 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4396 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4397 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4398 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4399 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4400 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4401 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4402 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4403 unlock_user_struct(target_md
, target_addr
, 1);
4407 struct target_msginfo
{
4415 unsigned short int msgseg
;
4418 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4419 struct msginfo
*host_msginfo
)
4421 struct target_msginfo
*target_msginfo
;
4422 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4423 return -TARGET_EFAULT
;
4424 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4425 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4426 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4427 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4428 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4429 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4430 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4431 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4432 unlock_user_struct(target_msginfo
, target_addr
, 1);
4436 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4438 struct msqid_ds dsarg
;
4439 struct msginfo msginfo
;
4440 abi_long ret
= -TARGET_EINVAL
;
4448 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4449 return -TARGET_EFAULT
;
4450 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4451 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4452 return -TARGET_EFAULT
;
4455 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4459 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4460 if (host_to_target_msginfo(ptr
, &msginfo
))
4461 return -TARGET_EFAULT
;
4468 struct target_msgbuf
{
4473 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4474 ssize_t msgsz
, int msgflg
)
4476 struct target_msgbuf
*target_mb
;
4477 struct msgbuf
*host_mb
;
4481 return -TARGET_EINVAL
;
4484 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4485 return -TARGET_EFAULT
;
4486 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4488 unlock_user_struct(target_mb
, msgp
, 0);
4489 return -TARGET_ENOMEM
;
4491 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4492 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4493 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4495 unlock_user_struct(target_mb
, msgp
, 0);
4500 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4501 ssize_t msgsz
, abi_long msgtyp
,
4504 struct target_msgbuf
*target_mb
;
4506 struct msgbuf
*host_mb
;
4510 return -TARGET_EINVAL
;
4513 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4514 return -TARGET_EFAULT
;
4516 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4518 ret
= -TARGET_ENOMEM
;
4521 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4524 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4525 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4526 if (!target_mtext
) {
4527 ret
= -TARGET_EFAULT
;
4530 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4531 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4534 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4538 unlock_user_struct(target_mb
, msgp
, 1);
4543 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4544 abi_ulong target_addr
)
4546 struct target_shmid_ds
*target_sd
;
4548 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4549 return -TARGET_EFAULT
;
4550 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4551 return -TARGET_EFAULT
;
4552 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4553 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4554 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4555 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4556 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4557 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4558 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4559 unlock_user_struct(target_sd
, target_addr
, 0);
4563 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4564 struct shmid_ds
*host_sd
)
4566 struct target_shmid_ds
*target_sd
;
4568 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4569 return -TARGET_EFAULT
;
4570 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4571 return -TARGET_EFAULT
;
4572 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4573 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4574 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4575 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4576 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4577 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4578 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4579 unlock_user_struct(target_sd
, target_addr
, 1);
4583 struct target_shminfo
{
4591 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4592 struct shminfo
*host_shminfo
)
4594 struct target_shminfo
*target_shminfo
;
4595 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4596 return -TARGET_EFAULT
;
4597 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4598 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4599 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4600 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4601 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4602 unlock_user_struct(target_shminfo
, target_addr
, 1);
4606 struct target_shm_info
{
4611 abi_ulong swap_attempts
;
4612 abi_ulong swap_successes
;
4615 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4616 struct shm_info
*host_shm_info
)
4618 struct target_shm_info
*target_shm_info
;
4619 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4620 return -TARGET_EFAULT
;
4621 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4622 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4623 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4624 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4625 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4626 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4627 unlock_user_struct(target_shm_info
, target_addr
, 1);
4631 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4633 struct shmid_ds dsarg
;
4634 struct shminfo shminfo
;
4635 struct shm_info shm_info
;
4636 abi_long ret
= -TARGET_EINVAL
;
4644 if (target_to_host_shmid_ds(&dsarg
, buf
))
4645 return -TARGET_EFAULT
;
4646 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4647 if (host_to_target_shmid_ds(buf
, &dsarg
))
4648 return -TARGET_EFAULT
;
4651 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4652 if (host_to_target_shminfo(buf
, &shminfo
))
4653 return -TARGET_EFAULT
;
4656 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4657 if (host_to_target_shm_info(buf
, &shm_info
))
4658 return -TARGET_EFAULT
;
4663 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4670 #ifndef TARGET_FORCE_SHMLBA
4671 /* For most architectures, SHMLBA is the same as the page size;
4672 * some architectures have larger values, in which case they should
4673 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4674 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4675 * and defining its own value for SHMLBA.
4677 * The kernel also permits SHMLBA to be set by the architecture to a
4678 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4679 * this means that addresses are rounded to the large size if
4680 * SHM_RND is set but addresses not aligned to that size are not rejected
4681 * as long as they are at least page-aligned. Since the only architecture
4682 * which uses this is ia64 this code doesn't provide for that oddity.
4684 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4686 return TARGET_PAGE_SIZE
;
4690 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4691 int shmid
, abi_ulong shmaddr
, int shmflg
)
4695 struct shmid_ds shm_info
;
4699 /* find out the length of the shared memory segment */
4700 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4701 if (is_error(ret
)) {
4702 /* can't get length, bail out */
4706 shmlba
= target_shmlba(cpu_env
);
4708 if (shmaddr
& (shmlba
- 1)) {
4709 if (shmflg
& SHM_RND
) {
4710 shmaddr
&= ~(shmlba
- 1);
4712 return -TARGET_EINVAL
;
4719 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4721 abi_ulong mmap_start
;
4723 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4725 if (mmap_start
== -1) {
4727 host_raddr
= (void *)-1;
4729 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4732 if (host_raddr
== (void *)-1) {
4734 return get_errno((long)host_raddr
);
4736 raddr
=h2g((unsigned long)host_raddr
);
4738 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4739 PAGE_VALID
| PAGE_READ
|
4740 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4742 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4743 if (!shm_regions
[i
].in_use
) {
4744 shm_regions
[i
].in_use
= true;
4745 shm_regions
[i
].start
= raddr
;
4746 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4756 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4760 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4761 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4762 shm_regions
[i
].in_use
= false;
4763 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4768 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The high 16 bits encode the ABI variant of the sub-call. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: msgp/msgtyp are passed via an indirection. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4878 /* kernel structure types definitions */
4880 #define STRUCT(name, ...) STRUCT_ ## name,
4881 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4883 #include "syscall_types.h"
4887 #undef STRUCT_SPECIAL
4889 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4890 #define STRUCT_SPECIAL(name)
4891 #include "syscall_types.h"
4893 #undef STRUCT_SPECIAL
4895 typedef struct IOCTLEntry IOCTLEntry
;
4897 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4898 int fd
, int cmd
, abi_long arg
);
4902 unsigned int host_cmd
;
4905 do_ioctl_fn
*do_ioctl
;
4906 const argtype arg_type
[5];
4909 #define IOC_R 0x0001
4910 #define IOC_W 0x0002
4911 #define IOC_RW (IOC_R | IOC_W)
4913 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;

    /* Convert the fixed-size header from guest to host form. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5004 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5005 int fd
, int cmd
, abi_long arg
)
5007 const argtype
*arg_type
= ie
->arg_type
;
5011 struct ifconf
*host_ifconf
;
5013 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5014 int target_ifreq_size
;
5019 abi_long target_ifc_buf
;
5023 assert(arg_type
[0] == TYPE_PTR
);
5024 assert(ie
->access
== IOC_RW
);
5027 target_size
= thunk_type_size(arg_type
, 0);
5029 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5031 return -TARGET_EFAULT
;
5032 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5033 unlock_user(argptr
, arg
, 0);
5035 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5036 target_ifc_len
= host_ifconf
->ifc_len
;
5037 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5039 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5040 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5041 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5043 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5044 if (outbufsz
> MAX_STRUCT_SIZE
) {
5045 /* We can't fit all the extents into the fixed size buffer.
5046 * Allocate one that is large enough and use it instead.
5048 host_ifconf
= malloc(outbufsz
);
5050 return -TARGET_ENOMEM
;
5052 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5055 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5057 host_ifconf
->ifc_len
= host_ifc_len
;
5058 host_ifconf
->ifc_buf
= host_ifc_buf
;
5060 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5061 if (!is_error(ret
)) {
5062 /* convert host ifc_len to target ifc_len */
5064 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5065 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5066 host_ifconf
->ifc_len
= target_ifc_len
;
5068 /* restore target ifc_buf */
5070 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5072 /* copy struct ifconf to target user */
5074 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5076 return -TARGET_EFAULT
;
5077 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5078 unlock_user(argptr
, arg
, target_size
);
5080 /* copy ifreq[] to target user */
5082 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5083 for (i
= 0; i
< nb_ifreq
; i
++) {
5084 thunk_convert(argptr
+ i
* target_ifreq_size
,
5085 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5086 ifreq_arg_type
, THUNK_TARGET
);
5088 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5098 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5099 int cmd
, abi_long arg
)
5102 struct dm_ioctl
*host_dm
;
5103 abi_long guest_data
;
5104 uint32_t guest_data_size
;
5106 const argtype
*arg_type
= ie
->arg_type
;
5108 void *big_buf
= NULL
;
5112 target_size
= thunk_type_size(arg_type
, 0);
5113 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5115 ret
= -TARGET_EFAULT
;
5118 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5119 unlock_user(argptr
, arg
, 0);
5121 /* buf_temp is too small, so fetch things into a bigger buffer */
5122 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5123 memcpy(big_buf
, buf_temp
, target_size
);
5127 guest_data
= arg
+ host_dm
->data_start
;
5128 if ((guest_data
- arg
) < 0) {
5129 ret
= -TARGET_EINVAL
;
5132 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5133 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5135 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5137 ret
= -TARGET_EFAULT
;
5141 switch (ie
->host_cmd
) {
5143 case DM_LIST_DEVICES
:
5146 case DM_DEV_SUSPEND
:
5149 case DM_TABLE_STATUS
:
5150 case DM_TABLE_CLEAR
:
5152 case DM_LIST_VERSIONS
:
5156 case DM_DEV_SET_GEOMETRY
:
5157 /* data contains only strings */
5158 memcpy(host_data
, argptr
, guest_data_size
);
5161 memcpy(host_data
, argptr
, guest_data_size
);
5162 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5166 void *gspec
= argptr
;
5167 void *cur_data
= host_data
;
5168 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5169 int spec_size
= thunk_type_size(arg_type
, 0);
5172 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5173 struct dm_target_spec
*spec
= cur_data
;
5177 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5178 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5180 spec
->next
= sizeof(*spec
) + slen
;
5181 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5183 cur_data
+= spec
->next
;
5188 ret
= -TARGET_EINVAL
;
5189 unlock_user(argptr
, guest_data
, 0);
5192 unlock_user(argptr
, guest_data
, 0);
5194 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5195 if (!is_error(ret
)) {
5196 guest_data
= arg
+ host_dm
->data_start
;
5197 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5198 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5199 switch (ie
->host_cmd
) {
5204 case DM_DEV_SUSPEND
:
5207 case DM_TABLE_CLEAR
:
5209 case DM_DEV_SET_GEOMETRY
:
5210 /* no return data */
5212 case DM_LIST_DEVICES
:
5214 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5215 uint32_t remaining_data
= guest_data_size
;
5216 void *cur_data
= argptr
;
5217 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5218 int nl_size
= 12; /* can't use thunk_size due to alignment */
5221 uint32_t next
= nl
->next
;
5223 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5225 if (remaining_data
< nl
->next
) {
5226 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5229 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5230 strcpy(cur_data
+ nl_size
, nl
->name
);
5231 cur_data
+= nl
->next
;
5232 remaining_data
-= nl
->next
;
5236 nl
= (void*)nl
+ next
;
5241 case DM_TABLE_STATUS
:
5243 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5244 void *cur_data
= argptr
;
5245 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5246 int spec_size
= thunk_type_size(arg_type
, 0);
5249 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5250 uint32_t next
= spec
->next
;
5251 int slen
= strlen((char*)&spec
[1]) + 1;
5252 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5253 if (guest_data_size
< spec
->next
) {
5254 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5257 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5258 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5259 cur_data
= argptr
+ spec
->next
;
5260 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5266 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5267 int count
= *(uint32_t*)hdata
;
5268 uint64_t *hdev
= hdata
+ 8;
5269 uint64_t *gdev
= argptr
+ 8;
5272 *(uint32_t*)argptr
= tswap32(count
);
5273 for (i
= 0; i
< count
; i
++) {
5274 *gdev
= tswap64(*hdev
);
5280 case DM_LIST_VERSIONS
:
5282 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5283 uint32_t remaining_data
= guest_data_size
;
5284 void *cur_data
= argptr
;
5285 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5286 int vers_size
= thunk_type_size(arg_type
, 0);
5289 uint32_t next
= vers
->next
;
5291 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5293 if (remaining_data
< vers
->next
) {
5294 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5297 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5298 strcpy(cur_data
+ vers_size
, vers
->name
);
5299 cur_data
+= vers
->next
;
5300 remaining_data
-= vers
->next
;
5304 vers
= (void*)vers
+ next
;
5309 unlock_user(argptr
, guest_data
, 0);
5310 ret
= -TARGET_EINVAL
;
5313 unlock_user(argptr
, guest_data
, guest_data_size
);
5315 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5317 ret
= -TARGET_EFAULT
;
5320 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5321 unlock_user(argptr
, arg
, target_size
);
5328 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5329 int cmd
, abi_long arg
)
5333 const argtype
*arg_type
= ie
->arg_type
;
5334 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5337 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5338 struct blkpg_partition host_part
;
5340 /* Read and convert blkpg */
5342 target_size
= thunk_type_size(arg_type
, 0);
5343 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5345 ret
= -TARGET_EFAULT
;
5348 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5349 unlock_user(argptr
, arg
, 0);
5351 switch (host_blkpg
->op
) {
5352 case BLKPG_ADD_PARTITION
:
5353 case BLKPG_DEL_PARTITION
:
5354 /* payload is struct blkpg_partition */
5357 /* Unknown opcode */
5358 ret
= -TARGET_EINVAL
;
5362 /* Read and convert blkpg->data */
5363 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5364 target_size
= thunk_type_size(part_arg_type
, 0);
5365 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5367 ret
= -TARGET_EFAULT
;
5370 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5371 unlock_user(argptr
, arg
, 0);
5373 /* Swizzle the data pointer to our local copy and call! */
5374 host_blkpg
->data
= &host_part
;
5375 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5381 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5382 int fd
, int cmd
, abi_long arg
)
5384 const argtype
*arg_type
= ie
->arg_type
;
5385 const StructEntry
*se
;
5386 const argtype
*field_types
;
5387 const int *dst_offsets
, *src_offsets
;
5390 abi_ulong
*target_rt_dev_ptr
;
5391 unsigned long *host_rt_dev_ptr
;
5395 assert(ie
->access
== IOC_W
);
5396 assert(*arg_type
== TYPE_PTR
);
5398 assert(*arg_type
== TYPE_STRUCT
);
5399 target_size
= thunk_type_size(arg_type
, 0);
5400 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5402 return -TARGET_EFAULT
;
5405 assert(*arg_type
== (int)STRUCT_rtentry
);
5406 se
= struct_entries
+ *arg_type
++;
5407 assert(se
->convert
[0] == NULL
);
5408 /* convert struct here to be able to catch rt_dev string */
5409 field_types
= se
->field_types
;
5410 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5411 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5412 for (i
= 0; i
< se
->nb_fields
; i
++) {
5413 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5414 assert(*field_types
== TYPE_PTRVOID
);
5415 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5416 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5417 if (*target_rt_dev_ptr
!= 0) {
5418 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5419 tswapal(*target_rt_dev_ptr
));
5420 if (!*host_rt_dev_ptr
) {
5421 unlock_user(argptr
, arg
, 0);
5422 return -TARGET_EFAULT
;
5425 *host_rt_dev_ptr
= 0;
5430 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5431 argptr
+ src_offsets
[i
],
5432 field_types
, THUNK_HOST
);
5434 unlock_user(argptr
, arg
, 0);
5436 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5437 if (*host_rt_dev_ptr
!= 0) {
5438 unlock_user((void *)*host_rt_dev_ptr
,
5439 *target_rt_dev_ptr
, 0);
5444 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5445 int fd
, int cmd
, abi_long arg
)
5447 int sig
= target_to_host_signal(arg
);
5448 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5451 static IOCTLEntry ioctl_entries
[] = {
5452 #define IOCTL(cmd, access, ...) \
5453 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5454 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5455 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5456 #define IOCTL_IGNORE(cmd) \
5457 { TARGET_ ## cmd, 0, #cmd },
5462 /* ??? Implement proper locking for ioctls. */
5463 /* do_ioctl() Must return target values and target errnos. */
5464 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5466 const IOCTLEntry
*ie
;
5467 const argtype
*arg_type
;
5469 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5475 if (ie
->target_cmd
== 0) {
5476 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5477 return -TARGET_ENOSYS
;
5479 if (ie
->target_cmd
== cmd
)
5483 arg_type
= ie
->arg_type
;
5485 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5488 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5489 } else if (!ie
->host_cmd
) {
5490 /* Some architectures define BSD ioctls in their headers
5491 that are not implemented in Linux. */
5492 return -TARGET_ENOSYS
;
5495 switch(arg_type
[0]) {
5498 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5502 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5506 target_size
= thunk_type_size(arg_type
, 0);
5507 switch(ie
->access
) {
5509 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5510 if (!is_error(ret
)) {
5511 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5513 return -TARGET_EFAULT
;
5514 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5515 unlock_user(argptr
, arg
, target_size
);
5519 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5521 return -TARGET_EFAULT
;
5522 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5523 unlock_user(argptr
, arg
, 0);
5524 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5528 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5530 return -TARGET_EFAULT
;
5531 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5532 unlock_user(argptr
, arg
, 0);
5533 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5534 if (!is_error(ret
)) {
5535 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5537 return -TARGET_EFAULT
;
5538 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5539 unlock_user(argptr
, arg
, target_size
);
5545 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5546 (long)cmd
, arg_type
[0]);
5547 ret
= -TARGET_ENOSYS
;
5553 static const bitmask_transtbl iflag_tbl
[] = {
5554 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5555 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5556 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5557 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5558 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5559 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5560 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5561 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5562 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5563 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5564 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5565 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5566 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5567 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5571 static const bitmask_transtbl oflag_tbl
[] = {
5572 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5573 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5574 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5575 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5576 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5577 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5578 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5579 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5580 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5581 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5582 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5583 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5584 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5585 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5586 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5587 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5588 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5589 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5590 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5591 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5592 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5593 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5594 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5595 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5599 static const bitmask_transtbl cflag_tbl
[] = {
5600 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5601 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5602 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5603 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5604 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5605 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5606 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5607 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5608 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5609 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5610 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5611 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5612 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5613 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5614 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5615 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5616 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5617 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5618 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5619 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5620 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5621 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5622 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5623 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5624 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5625 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5626 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5627 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5628 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5629 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5630 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5634 static const bitmask_transtbl lflag_tbl
[] = {
5635 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5636 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5637 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5638 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5639 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5640 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5641 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5642 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5643 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5644 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5645 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5646 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5647 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5648 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5649 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5653 static void target_to_host_termios (void *dst
, const void *src
)
5655 struct host_termios
*host
= dst
;
5656 const struct target_termios
*target
= src
;
5659 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5661 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5663 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5665 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5666 host
->c_line
= target
->c_line
;
5668 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5669 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5670 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5671 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5672 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5673 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5674 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5675 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5676 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5677 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5678 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5679 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5680 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5681 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5682 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5683 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5684 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5685 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5688 static void host_to_target_termios (void *dst
, const void *src
)
5690 struct target_termios
*target
= dst
;
5691 const struct host_termios
*host
= src
;
5694 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5696 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5698 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5700 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5701 target
->c_line
= host
->c_line
;
5703 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5704 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5705 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5706 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5707 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5708 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5709 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5710 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5711 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5712 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5713 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5714 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5715 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5716 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5717 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5718 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5719 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5720 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5723 static const StructEntry struct_termios_def
= {
5724 .convert
= { host_to_target_termios
, target_to_host_termios
},
5725 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5726 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5729 static bitmask_transtbl mmap_flags_tbl
[] = {
5730 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5731 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5732 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5733 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5734 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5735 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5736 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5737 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5738 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5743 #if defined(TARGET_I386)
5745 /* NOTE: there is really one LDT for all the threads */
5746 static uint8_t *ldt_table
;
5748 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5755 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5756 if (size
> bytecount
)
5758 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5760 return -TARGET_EFAULT
;
5761 /* ??? Should this by byteswapped? */
5762 memcpy(p
, ldt_table
, size
);
5763 unlock_user(p
, ptr
, size
);
5767 /* XXX: add locking support */
5768 static abi_long
write_ldt(CPUX86State
*env
,
5769 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5771 struct target_modify_ldt_ldt_s ldt_info
;
5772 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5773 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5774 int seg_not_present
, useable
, lm
;
5775 uint32_t *lp
, entry_1
, entry_2
;
5777 if (bytecount
!= sizeof(ldt_info
))
5778 return -TARGET_EINVAL
;
5779 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5780 return -TARGET_EFAULT
;
5781 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5782 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5783 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5784 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5785 unlock_user_struct(target_ldt_info
, ptr
, 0);
5787 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5788 return -TARGET_EINVAL
;
5789 seg_32bit
= ldt_info
.flags
& 1;
5790 contents
= (ldt_info
.flags
>> 1) & 3;
5791 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5792 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5793 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5794 useable
= (ldt_info
.flags
>> 6) & 1;
5798 lm
= (ldt_info
.flags
>> 7) & 1;
5800 if (contents
== 3) {
5802 return -TARGET_EINVAL
;
5803 if (seg_not_present
== 0)
5804 return -TARGET_EINVAL
;
5806 /* allocate the LDT */
5808 env
->ldt
.base
= target_mmap(0,
5809 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5810 PROT_READ
|PROT_WRITE
,
5811 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5812 if (env
->ldt
.base
== -1)
5813 return -TARGET_ENOMEM
;
5814 memset(g2h(env
->ldt
.base
), 0,
5815 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5816 env
->ldt
.limit
= 0xffff;
5817 ldt_table
= g2h(env
->ldt
.base
);
5820 /* NOTE: same code as Linux kernel */
5821 /* Allow LDTs to be cleared by the user. */
5822 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5825 read_exec_only
== 1 &&
5827 limit_in_pages
== 0 &&
5828 seg_not_present
== 1 &&
5836 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5837 (ldt_info
.limit
& 0x0ffff);
5838 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5839 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5840 (ldt_info
.limit
& 0xf0000) |
5841 ((read_exec_only
^ 1) << 9) |
5843 ((seg_not_present
^ 1) << 15) |
5845 (limit_in_pages
<< 23) |
5849 entry_2
|= (useable
<< 20);
5851 /* Install the new entry ... */
5853 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5854 lp
[0] = tswap32(entry_1
);
5855 lp
[1] = tswap32(entry_2
);
5859 /* specific and weird i386 syscalls */
5860 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5861 unsigned long bytecount
)
5867 ret
= read_ldt(ptr
, bytecount
);
5870 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5873 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5876 ret
= -TARGET_ENOSYS
;
5882 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5883 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5885 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5886 struct target_modify_ldt_ldt_s ldt_info
;
5887 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5888 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5889 int seg_not_present
, useable
, lm
;
5890 uint32_t *lp
, entry_1
, entry_2
;
5893 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5894 if (!target_ldt_info
)
5895 return -TARGET_EFAULT
;
5896 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5897 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5898 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5899 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5900 if (ldt_info
.entry_number
== -1) {
5901 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5902 if (gdt_table
[i
] == 0) {
5903 ldt_info
.entry_number
= i
;
5904 target_ldt_info
->entry_number
= tswap32(i
);
5909 unlock_user_struct(target_ldt_info
, ptr
, 1);
5911 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5912 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5913 return -TARGET_EINVAL
;
5914 seg_32bit
= ldt_info
.flags
& 1;
5915 contents
= (ldt_info
.flags
>> 1) & 3;
5916 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5917 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5918 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5919 useable
= (ldt_info
.flags
>> 6) & 1;
5923 lm
= (ldt_info
.flags
>> 7) & 1;
5926 if (contents
== 3) {
5927 if (seg_not_present
== 0)
5928 return -TARGET_EINVAL
;
5931 /* NOTE: same code as Linux kernel */
5932 /* Allow LDTs to be cleared by the user. */
5933 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5934 if ((contents
== 0 &&
5935 read_exec_only
== 1 &&
5937 limit_in_pages
== 0 &&
5938 seg_not_present
== 1 &&
5946 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5947 (ldt_info
.limit
& 0x0ffff);
5948 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5949 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5950 (ldt_info
.limit
& 0xf0000) |
5951 ((read_exec_only
^ 1) << 9) |
5953 ((seg_not_present
^ 1) << 15) |
5955 (limit_in_pages
<< 23) |
5960 /* Install the new entry ... */
5962 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5963 lp
[0] = tswap32(entry_1
);
5964 lp
[1] = tswap32(entry_2
);
5968 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5970 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5971 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5972 uint32_t base_addr
, limit
, flags
;
5973 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5974 int seg_not_present
, useable
, lm
;
5975 uint32_t *lp
, entry_1
, entry_2
;
5977 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5978 if (!target_ldt_info
)
5979 return -TARGET_EFAULT
;
5980 idx
= tswap32(target_ldt_info
->entry_number
);
5981 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5982 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5983 unlock_user_struct(target_ldt_info
, ptr
, 1);
5984 return -TARGET_EINVAL
;
5986 lp
= (uint32_t *)(gdt_table
+ idx
);
5987 entry_1
= tswap32(lp
[0]);
5988 entry_2
= tswap32(lp
[1]);
5990 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5991 contents
= (entry_2
>> 10) & 3;
5992 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5993 seg_32bit
= (entry_2
>> 22) & 1;
5994 limit_in_pages
= (entry_2
>> 23) & 1;
5995 useable
= (entry_2
>> 20) & 1;
5999 lm
= (entry_2
>> 21) & 1;
6001 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6002 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6003 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6004 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6005 base_addr
= (entry_1
>> 16) |
6006 (entry_2
& 0xff000000) |
6007 ((entry_2
& 0xff) << 16);
6008 target_ldt_info
->base_addr
= tswapal(base_addr
);
6009 target_ldt_info
->limit
= tswap32(limit
);
6010 target_ldt_info
->flags
= tswap32(flags
);
6011 unlock_user_struct(target_ldt_info
, ptr
, 1);
6014 #endif /* TARGET_I386 && TARGET_ABI32 */
6016 #ifndef TARGET_ABI32
6017 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6024 case TARGET_ARCH_SET_GS
:
6025 case TARGET_ARCH_SET_FS
:
6026 if (code
== TARGET_ARCH_SET_GS
)
6030 cpu_x86_load_seg(env
, idx
, 0);
6031 env
->segs
[idx
].base
= addr
;
6033 case TARGET_ARCH_GET_GS
:
6034 case TARGET_ARCH_GET_FS
:
6035 if (code
== TARGET_ARCH_GET_GS
)
6039 val
= env
->segs
[idx
].base
;
6040 if (put_user(val
, addr
, abi_ulong
))
6041 ret
= -TARGET_EFAULT
;
6044 ret
= -TARGET_EINVAL
;
6051 #endif /* defined(TARGET_I386) */
6053 #define NEW_STACK_SIZE 0x40000
6056 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6059 pthread_mutex_t mutex
;
6060 pthread_cond_t cond
;
6063 abi_ulong child_tidptr
;
6064 abi_ulong parent_tidptr
;
6068 static void *clone_func(void *arg
)
6070 new_thread_info
*info
= arg
;
6075 rcu_register_thread();
6077 cpu
= ENV_GET_CPU(env
);
6079 ts
= (TaskState
*)cpu
->opaque
;
6080 info
->tid
= gettid();
6081 cpu
->host_tid
= info
->tid
;
6083 if (info
->child_tidptr
)
6084 put_user_u32(info
->tid
, info
->child_tidptr
);
6085 if (info
->parent_tidptr
)
6086 put_user_u32(info
->tid
, info
->parent_tidptr
);
6087 /* Enable signals. */
6088 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6089 /* Signal to the parent that we're ready. */
6090 pthread_mutex_lock(&info
->mutex
);
6091 pthread_cond_broadcast(&info
->cond
);
6092 pthread_mutex_unlock(&info
->mutex
);
6093 /* Wait until the parent has finshed initializing the tls state. */
6094 pthread_mutex_lock(&clone_lock
);
6095 pthread_mutex_unlock(&clone_lock
);
6101 /* do_fork() Must return host values and target errnos (unlike most
6102 do_*() functions). */
6103 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6104 abi_ulong parent_tidptr
, target_ulong newtls
,
6105 abi_ulong child_tidptr
)
6107 CPUState
*cpu
= ENV_GET_CPU(env
);
6111 CPUArchState
*new_env
;
6114 flags
&= ~CLONE_IGNORED_FLAGS
;
6116 /* Emulate vfork() with fork() */
6117 if (flags
& CLONE_VFORK
)
6118 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6120 if (flags
& CLONE_VM
) {
6121 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6122 new_thread_info info
;
6123 pthread_attr_t attr
;
6125 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6126 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6127 return -TARGET_EINVAL
;
6130 ts
= g_new0(TaskState
, 1);
6131 init_task_state(ts
);
6132 /* we create a new CPU instance. */
6133 new_env
= cpu_copy(env
);
6134 /* Init regs that differ from the parent. */
6135 cpu_clone_regs(new_env
, newsp
);
6136 new_cpu
= ENV_GET_CPU(new_env
);
6137 new_cpu
->opaque
= ts
;
6138 ts
->bprm
= parent_ts
->bprm
;
6139 ts
->info
= parent_ts
->info
;
6140 ts
->signal_mask
= parent_ts
->signal_mask
;
6142 if (flags
& CLONE_CHILD_CLEARTID
) {
6143 ts
->child_tidptr
= child_tidptr
;
6146 if (flags
& CLONE_SETTLS
) {
6147 cpu_set_tls (new_env
, newtls
);
6150 /* Grab a mutex so that thread setup appears atomic. */
6151 pthread_mutex_lock(&clone_lock
);
6153 memset(&info
, 0, sizeof(info
));
6154 pthread_mutex_init(&info
.mutex
, NULL
);
6155 pthread_mutex_lock(&info
.mutex
);
6156 pthread_cond_init(&info
.cond
, NULL
);
6158 if (flags
& CLONE_CHILD_SETTID
) {
6159 info
.child_tidptr
= child_tidptr
;
6161 if (flags
& CLONE_PARENT_SETTID
) {
6162 info
.parent_tidptr
= parent_tidptr
;
6165 ret
= pthread_attr_init(&attr
);
6166 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6167 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6168 /* It is not safe to deliver signals until the child has finished
6169 initializing, so temporarily block all signals. */
6170 sigfillset(&sigmask
);
6171 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6173 /* If this is our first additional thread, we need to ensure we
6174 * generate code for parallel execution and flush old translations.
6176 if (!parallel_cpus
) {
6177 parallel_cpus
= true;
6181 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6182 /* TODO: Free new CPU state if thread creation failed. */
6184 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6185 pthread_attr_destroy(&attr
);
6187 /* Wait for the child to initialize. */
6188 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6193 pthread_mutex_unlock(&info
.mutex
);
6194 pthread_cond_destroy(&info
.cond
);
6195 pthread_mutex_destroy(&info
.mutex
);
6196 pthread_mutex_unlock(&clone_lock
);
6198 /* if no CLONE_VM, we consider it is a fork */
6199 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6200 return -TARGET_EINVAL
;
6203 /* We can't support custom termination signals */
6204 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6205 return -TARGET_EINVAL
;
6208 if (block_signals()) {
6209 return -TARGET_ERESTARTSYS
;
6215 /* Child Process. */
6217 cpu_clone_regs(env
, newsp
);
6219 /* There is a race condition here. The parent process could
6220 theoretically read the TID in the child process before the child
6221 tid is set. This would require using either ptrace
6222 (not implemented) or having *_tidptr to point at a shared memory
6223 mapping. We can't repeat the spinlock hack used above because
6224 the child process gets its own copy of the lock. */
6225 if (flags
& CLONE_CHILD_SETTID
)
6226 put_user_u32(gettid(), child_tidptr
);
6227 if (flags
& CLONE_PARENT_SETTID
)
6228 put_user_u32(gettid(), parent_tidptr
);
6229 ts
= (TaskState
*)cpu
->opaque
;
6230 if (flags
& CLONE_SETTLS
)
6231 cpu_set_tls (env
, newtls
);
6232 if (flags
& CLONE_CHILD_CLEARTID
)
6233 ts
->child_tidptr
= child_tidptr
;
6241 /* warning : doesn't handle linux specific flags... */
6242 static int target_to_host_fcntl_cmd(int cmd
)
6245 case TARGET_F_DUPFD
:
6246 case TARGET_F_GETFD
:
6247 case TARGET_F_SETFD
:
6248 case TARGET_F_GETFL
:
6249 case TARGET_F_SETFL
:
6251 case TARGET_F_GETLK
:
6253 case TARGET_F_SETLK
:
6255 case TARGET_F_SETLKW
:
6257 case TARGET_F_GETOWN
:
6259 case TARGET_F_SETOWN
:
6261 case TARGET_F_GETSIG
:
6263 case TARGET_F_SETSIG
:
6265 #if TARGET_ABI_BITS == 32
6266 case TARGET_F_GETLK64
:
6268 case TARGET_F_SETLK64
:
6270 case TARGET_F_SETLKW64
:
6273 case TARGET_F_SETLEASE
:
6275 case TARGET_F_GETLEASE
:
6277 #ifdef F_DUPFD_CLOEXEC
6278 case TARGET_F_DUPFD_CLOEXEC
:
6279 return F_DUPFD_CLOEXEC
;
6281 case TARGET_F_NOTIFY
:
6284 case TARGET_F_GETOWN_EX
:
6288 case TARGET_F_SETOWN_EX
:
6292 case TARGET_F_SETPIPE_SZ
:
6293 return F_SETPIPE_SZ
;
6294 case TARGET_F_GETPIPE_SZ
:
6295 return F_GETPIPE_SZ
;
6298 return -TARGET_EINVAL
;
6300 return -TARGET_EINVAL
;
6303 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6304 static const bitmask_transtbl flock_tbl
[] = {
6305 TRANSTBL_CONVERT(F_RDLCK
),
6306 TRANSTBL_CONVERT(F_WRLCK
),
6307 TRANSTBL_CONVERT(F_UNLCK
),
6308 TRANSTBL_CONVERT(F_EXLCK
),
6309 TRANSTBL_CONVERT(F_SHLCK
),
6313 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6314 abi_ulong target_flock_addr
)
6316 struct target_flock
*target_fl
;
6319 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6320 return -TARGET_EFAULT
;
6323 __get_user(l_type
, &target_fl
->l_type
);
6324 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6325 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6326 __get_user(fl
->l_start
, &target_fl
->l_start
);
6327 __get_user(fl
->l_len
, &target_fl
->l_len
);
6328 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6329 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6333 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6334 const struct flock64
*fl
)
6336 struct target_flock
*target_fl
;
6339 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6340 return -TARGET_EFAULT
;
6343 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6344 __put_user(l_type
, &target_fl
->l_type
);
6345 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6346 __put_user(fl
->l_start
, &target_fl
->l_start
);
6347 __put_user(fl
->l_len
, &target_fl
->l_len
);
6348 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6349 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6353 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6354 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI uses a differently-padded flock64 layout; these two helpers
 * mirror copy_{from,to}_user_flock64 for that layout.
 */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6398 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6399 abi_ulong target_flock_addr
)
6401 struct target_flock64
*target_fl
;
6404 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6405 return -TARGET_EFAULT
;
6408 __get_user(l_type
, &target_fl
->l_type
);
6409 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6410 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6411 __get_user(fl
->l_start
, &target_fl
->l_start
);
6412 __get_user(fl
->l_len
, &target_fl
->l_len
);
6413 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6414 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6418 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6419 const struct flock64
*fl
)
6421 struct target_flock64
*target_fl
;
6424 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6425 return -TARGET_EFAULT
;
6428 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6429 __put_user(l_type
, &target_fl
->l_type
);
6430 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6431 __put_user(fl
->l_start
, &target_fl
->l_start
);
6432 __put_user(fl
->l_len
, &target_fl
->l_len
);
6433 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6434 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6438 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6440 struct flock64 fl64
;
6442 struct f_owner_ex fox
;
6443 struct target_f_owner_ex
*target_fox
;
6446 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6448 if (host_cmd
== -TARGET_EINVAL
)
6452 case TARGET_F_GETLK
:
6453 ret
= copy_from_user_flock(&fl64
, arg
);
6457 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6459 ret
= copy_to_user_flock(arg
, &fl64
);
6463 case TARGET_F_SETLK
:
6464 case TARGET_F_SETLKW
:
6465 ret
= copy_from_user_flock(&fl64
, arg
);
6469 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6472 case TARGET_F_GETLK64
:
6473 ret
= copy_from_user_flock64(&fl64
, arg
);
6477 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6479 ret
= copy_to_user_flock64(arg
, &fl64
);
6482 case TARGET_F_SETLK64
:
6483 case TARGET_F_SETLKW64
:
6484 ret
= copy_from_user_flock64(&fl64
, arg
);
6488 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6491 case TARGET_F_GETFL
:
6492 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6494 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6498 case TARGET_F_SETFL
:
6499 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6500 target_to_host_bitmask(arg
,
6505 case TARGET_F_GETOWN_EX
:
6506 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6508 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6509 return -TARGET_EFAULT
;
6510 target_fox
->type
= tswap32(fox
.type
);
6511 target_fox
->pid
= tswap32(fox
.pid
);
6512 unlock_user_struct(target_fox
, arg
, 1);
6518 case TARGET_F_SETOWN_EX
:
6519 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6520 return -TARGET_EFAULT
;
6521 fox
.type
= tswap32(target_fox
->type
);
6522 fox
.pid
= tswap32(target_fox
->pid
);
6523 unlock_user_struct(target_fox
, arg
, 0);
6524 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6528 case TARGET_F_SETOWN
:
6529 case TARGET_F_GETOWN
:
6530 case TARGET_F_SETSIG
:
6531 case TARGET_F_GETSIG
:
6532 case TARGET_F_SETLEASE
:
6533 case TARGET_F_GETLEASE
:
6534 case TARGET_F_SETPIPE_SZ
:
6535 case TARGET_F_GETPIPE_SZ
:
6536 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6540 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID/GID ABIs: clamp 32-bit host IDs to the 16-bit range and
 * widen 16-bit guest IDs, preserving the -1 "no change" sentinel.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID/GID ABIs: identity conversions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif
6641 _syscall1(int, sys_setuid
, uid_t
, uid
)
6642 _syscall1(int, sys_setgid
, gid_t
, gid
)
6643 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6644 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6646 void syscall_init(void)
6649 const argtype
*arg_type
;
6653 thunk_init(STRUCT_MAX
);
6655 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6656 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6657 #include "syscall_types.h"
6659 #undef STRUCT_SPECIAL
6661 /* Build target_to_host_errno_table[] table from
6662 * host_to_target_errno_table[]. */
6663 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6664 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6667 /* we patch the ioctl size if necessary. We rely on the fact that
6668 no ioctl has all the bits at '1' in the size field */
6670 while (ie
->target_cmd
!= 0) {
6671 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6672 TARGET_IOC_SIZEMASK
) {
6673 arg_type
= ie
->arg_type
;
6674 if (arg_type
[0] != TYPE_PTR
) {
6675 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6680 size
= thunk_type_size(arg_type
, 0);
6681 ie
->target_cmd
= (ie
->target_cmd
&
6682 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6683 (size
<< TARGET_IOC_SIZESHIFT
);
6686 /* automatic consistency check if same arch */
6687 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6688 (defined(__x86_64__) && defined(TARGET_X86_64))
6689 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6690 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6691 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two guest registers that carry
 * its halves; register order depends on guest endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset whole in the first register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some ABIs pad the register pair carrying the 64-bit length
 * to an even register boundary; skip arg2 in that case.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6742 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6743 abi_ulong target_addr
)
6745 struct target_timespec
*target_ts
;
6747 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6748 return -TARGET_EFAULT
;
6749 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6750 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6751 unlock_user_struct(target_ts
, target_addr
, 0);
6755 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6756 struct timespec
*host_ts
)
6758 struct target_timespec
*target_ts
;
6760 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6761 return -TARGET_EFAULT
;
6762 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6763 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6764 unlock_user_struct(target_ts
, target_addr
, 1);
6768 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6769 abi_ulong target_addr
)
6771 struct target_itimerspec
*target_itspec
;
6773 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6774 return -TARGET_EFAULT
;
6777 host_itspec
->it_interval
.tv_sec
=
6778 tswapal(target_itspec
->it_interval
.tv_sec
);
6779 host_itspec
->it_interval
.tv_nsec
=
6780 tswapal(target_itspec
->it_interval
.tv_nsec
);
6781 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6782 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6784 unlock_user_struct(target_itspec
, target_addr
, 1);
6788 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6789 struct itimerspec
*host_its
)
6791 struct target_itimerspec
*target_itspec
;
6793 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6794 return -TARGET_EFAULT
;
6797 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6798 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6800 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6801 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6803 unlock_user_struct(target_itspec
, target_addr
, 0);
6807 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6808 abi_long target_addr
)
6810 struct target_timex
*target_tx
;
6812 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6813 return -TARGET_EFAULT
;
6816 __get_user(host_tx
->modes
, &target_tx
->modes
);
6817 __get_user(host_tx
->offset
, &target_tx
->offset
);
6818 __get_user(host_tx
->freq
, &target_tx
->freq
);
6819 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6820 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6821 __get_user(host_tx
->status
, &target_tx
->status
);
6822 __get_user(host_tx
->constant
, &target_tx
->constant
);
6823 __get_user(host_tx
->precision
, &target_tx
->precision
);
6824 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6825 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6826 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6827 __get_user(host_tx
->tick
, &target_tx
->tick
);
6828 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6829 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6830 __get_user(host_tx
->shift
, &target_tx
->shift
);
6831 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6832 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6833 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6834 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6835 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6836 __get_user(host_tx
->tai
, &target_tx
->tai
);
6838 unlock_user_struct(target_tx
, target_addr
, 0);
6842 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6843 struct timex
*host_tx
)
6845 struct target_timex
*target_tx
;
6847 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6848 return -TARGET_EFAULT
;
6851 __put_user(host_tx
->modes
, &target_tx
->modes
);
6852 __put_user(host_tx
->offset
, &target_tx
->offset
);
6853 __put_user(host_tx
->freq
, &target_tx
->freq
);
6854 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6855 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6856 __put_user(host_tx
->status
, &target_tx
->status
);
6857 __put_user(host_tx
->constant
, &target_tx
->constant
);
6858 __put_user(host_tx
->precision
, &target_tx
->precision
);
6859 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6860 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6861 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6862 __put_user(host_tx
->tick
, &target_tx
->tick
);
6863 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6864 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6865 __put_user(host_tx
->shift
, &target_tx
->shift
);
6866 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6867 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6868 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6869 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6870 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6871 __put_user(host_tx
->tai
, &target_tx
->tai
);
6873 unlock_user_struct(target_tx
, target_addr
, 1);
6878 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6879 abi_ulong target_addr
)
6881 struct target_sigevent
*target_sevp
;
6883 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6884 return -TARGET_EFAULT
;
6887 /* This union is awkward on 64 bit systems because it has a 32 bit
6888 * integer and a pointer in it; we follow the conversion approach
6889 * used for handling sigval types in signal.c so the guest should get
6890 * the correct value back even if we did a 64 bit byteswap and it's
6891 * using the 32 bit integer.
6893 host_sevp
->sigev_value
.sival_ptr
=
6894 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6895 host_sevp
->sigev_signo
=
6896 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6897 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6898 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6900 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits to host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
6919 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6920 abi_ulong target_addr
,
6921 struct stat
*host_st
)
6923 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6924 if (((CPUARMState
*)cpu_env
)->eabi
) {
6925 struct target_eabi_stat64
*target_st
;
6927 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6928 return -TARGET_EFAULT
;
6929 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6930 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6931 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6932 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6933 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6935 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6936 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6937 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6938 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6939 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6940 __put_user(host_st
->st_size
, &target_st
->st_size
);
6941 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6942 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6943 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6944 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6945 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6946 unlock_user_struct(target_st
, target_addr
, 1);
6950 #if defined(TARGET_HAS_STRUCT_STAT64)
6951 struct target_stat64
*target_st
;
6953 struct target_stat
*target_st
;
6956 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6957 return -TARGET_EFAULT
;
6958 memset(target_st
, 0, sizeof(*target_st
));
6959 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6960 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6961 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6962 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6964 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6965 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6966 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6967 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6968 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6969 /* XXX: better use of kernel struct */
6970 __put_user(host_st
->st_size
, &target_st
->st_size
);
6971 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6972 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6973 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6974 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6975 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6976 unlock_user_struct(target_st
, target_addr
, 1);
6982 /* ??? Using host futex calls even when target atomic operations
6983 are not really atomic probably breaks things. However implementing
6984 futexes locally would make futexes shared between multiple processes
6985 tricky. However they're probably useless because guest atomic
6986 operations won't work either. */
6987 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6988 target_ulong uaddr2
, int val3
)
6990 struct timespec ts
, *pts
;
6993 /* ??? We assume FUTEX_* constants are the same on both host
6995 #ifdef FUTEX_CMD_MASK
6996 base_op
= op
& FUTEX_CMD_MASK
;
7002 case FUTEX_WAIT_BITSET
:
7005 target_to_host_timespec(pts
, timeout
);
7009 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7012 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7014 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7016 case FUTEX_CMP_REQUEUE
:
7018 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7019 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7020 But the prototype takes a `struct timespec *'; insert casts
7021 to satisfy the compiler. We do not need to tswap TIMEOUT
7022 since it's not compared to guest memory. */
7023 pts
= (struct timespec
*)(uintptr_t) timeout
;
7024 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7026 (base_op
== FUTEX_CMP_REQUEUE
7030 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2): fill the guest's file_handle buffer and
 * write the mount id back. Returns target errno on failure.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): copy in the guest file_handle, fix up
 * byte order, and open on the host. Returns host fd or target errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7121 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one signalfd_siginfo record from host to target byte order in
 * place. NOTE: callers pass the same buffer as both tinfo and info (see
 * host_to_target_data_signalfd), so the aliasing here is intentional.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7163 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7167 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7168 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7174 static TargetFdTrans target_signalfd_trans
= {
7175 .host_to_target_data
= host_to_target_data_signalfd
,
7178 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7181 target_sigset_t
*target_mask
;
7185 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7186 return -TARGET_EINVAL
;
7188 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7189 return -TARGET_EFAULT
;
7192 target_to_host_sigset(&host_mask
, target_mask
);
7194 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7196 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7198 fd_trans_register(ret
, &target_signalfd_trans
);
7201 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Emulate reading /proc/self/cmdline: copy the host file to fd, skipping
 * the first NUL-terminated string (the qemu binary path) so the guest sees
 * its own argv[0] first. Returns 0 on success, -1 (errno set) on error.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7271 static int open_self_maps(void *cpu_env
, int fd
)
7273 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7274 TaskState
*ts
= cpu
->opaque
;
7280 fp
= fopen("/proc/self/maps", "r");
7285 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7286 int fields
, dev_maj
, dev_min
, inode
;
7287 uint64_t min
, max
, offset
;
7288 char flag_r
, flag_w
, flag_x
, flag_p
;
7289 char path
[512] = "";
7290 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7291 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7292 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7294 if ((fields
< 10) || (fields
> 11)) {
7297 if (h2g_valid(min
)) {
7298 int flags
= page_get_flags(h2g(min
));
7299 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7300 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7303 if (h2g(min
) == ts
->info
->stack_limit
) {
7304 pstrcpy(path
, sizeof(path
), " [stack]");
7306 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7307 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7308 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7309 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7310 path
[0] ? " " : "", path
);
/*
 * open_self_stat: synthesize /proc/self/stat for the guest.  Emits the
 * 44 space-separated fields; only a few are populated with real guest
 * values (field 1 = comm from argv[0], field 27 presumably the guest's
 * start_stack — TODO confirm, the assignment to 'val' is not visible),
 * everything else is written as a literal "0".
 * NOTE(review): partial extraction — some interior lines are missing.
 */
7320 static int open_self_stat(void *cpu_env
, int fd
)
7322 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7323 TaskState
*ts
= cpu
->opaque
;
/* Guest stack base recorded at image load time. */
7324 abi_ulong start_stack
= ts
->info
->start_stack
;
/* /proc/<pid>/stat has 44 fields; emit them one at a time. */
7327 for (i
= 0; i
< 44; i
++) {
/* Presumably field 0 (pid) formatted from 'val' — the branch header
 * and the assignment are missing from this view. */
7335 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
/* Field 1: the command name, in parentheses per stat(5) format. */
7336 } else if (i
== 1) {
7338 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
/* Field 27: startstack. */
7339 } else if (i
== 27) {
7342 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7344 /* for the rest, there is MasterCard */
/* All other fields: "0", space-separated, newline after the last. */
7345 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
/* Short writes are treated as errors. */
7349 if (write(fd
, buf
, len
) != len
) {
/*
 * open_self_auxv: synthesize /proc/self/auxv for the guest by copying
 * the auxiliary vector that was saved on the guest stack at exec time
 * (ts->info->saved_auxv / auxv_len) into 'fd', then rewinding the fd.
 * NOTE(review): partial extraction — error-handling lines are missing.
 */
7357 static int open_self_auxv(void *cpu_env
, int fd
)
7359 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7360 TaskState
*ts
= cpu
->opaque
;
/* Guest address and byte length of the saved auxv block. */
7361 abi_ulong auxv
= ts
->info
->saved_auxv
;
7362 abi_ulong len
= ts
->info
->auxv_len
;
7366 * Auxiliary vector is stored in target process stack.
7367 * read in whole auxv vector and copy it to file
/* Map the guest memory holding the auxv for host-side reading. */
7369 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7373 r
= write(fd
, ptr
, len
);
/* Rewind so the guest's subsequent read() starts at offset 0. */
7380 lseek(fd
, 0, SEEK_SET
);
7381 unlock_user(ptr
, auxv
, len
);
/*
 * is_proc_myself: return nonzero when 'filename' names the given
 * 'entry' of the current process's /proc directory — i.e. it matches
 * "/proc/self/<entry>" or "/proc/<getpid()>/<entry>".
 * NOTE(review): partial extraction — some closing braces / fallthrough
 * returns are missing from this view.
 */
7387 static int is_proc_myself(const char *filename
, const char *entry
)
/* Must start with the literal "/proc/" prefix. */
7389 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7390 filename
+= strlen("/proc/");
/* Accept the "self/" alias for the current process... */
7391 if (!strncmp(filename
, "self/", strlen("self/"))) {
7392 filename
+= strlen("self/");
/* ...or a literal numeric pid (pids never start with '0'). */
7393 } else if (*filename
>= '1' && *filename
<= '9') {
/* Build "<pid>/" for the emulator's own pid and compare. */
7395 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7396 if (!strncmp(filename
, myself
, strlen(myself
))) {
7397 filename
+= strlen(myself
);
/* Remaining component must be exactly the requested entry. */
7404 if (!strcmp(filename
, entry
)) {
7411 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* is_proc: exact-path comparator used by the fake_open table for
 * absolute /proc entries (contrast is_proc_myself, which matches
 * per-process entries).  Returns nonzero on an exact string match. */
7412 static int is_proc(const char *filename
, const char *entry
)
7414 return strcmp(filename
, entry
) == 0;
/*
 * open_net_route: synthesize /proc/net/route for a guest whose
 * endianness differs from the host (this function is only compiled
 * under the HOST/TARGET_WORDS_BIGENDIAN mismatch guard).  The header
 * line is copied verbatim; for each route entry the 32-bit address
 * fields (dest, gateway, mask) are byte-swapped with tswap32() so the
 * guest reads them in its own byte order.
 * NOTE(review): partial extraction — some interior lines are missing.
 */
7417 static int open_net_route(void *cpu_env
, int fd
)
7424 fp
= fopen("/proc/net/route", "r");
/* First line is the column-header row: pass it through unchanged. */
7431 read
= getline(&line
, &len
, fp
);
7432 dprintf(fd
, "%s", line
);
/* Then translate each route entry line. */
7436 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7438 uint32_t dest
, gw
, mask
;
7439 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
/* Parse the host-order fields out of the kernel's tab-separated row. */
7440 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7441 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7442 &mask
, &mtu
, &window
, &irtt
);
/* Re-emit with dest/gw/mask byte-swapped into guest byte order;
 * the scalar counters are plain text and need no swapping. */
7443 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7444 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7445 metric
, tswap32(mask
), mtu
, window
, irtt
);
/*
 * do_openat: openat(2) emulation with /proc interception.  Certain
 * /proc paths must show *guest* state, not the emulator's own, so this
 * routine: (1) redirects /proc/self/exe to the guest executable;
 * (2) for entries in the 'fakes' table, generates the content into a
 * temporary file via the entry's fill() callback and returns that fd;
 * (3) otherwise falls through to a real safe_openat() on the
 * (possibly sysroot-remapped) path.
 * NOTE(review): partial extraction — error branches and the return of
 * the faked fd are missing from this view.
 */
7455 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
/* struct fake_open members: the /proc name to match, the content
 * generator, and the path comparator to match it with. */
7458 const char *filename
;
7459 int (*fill
)(void *cpu_env
, int fd
);
7460 int (*cmp
)(const char *s1
, const char *s2
);
7462 const struct fake_open
*fake_open
;
/* Table of intercepted /proc entries; NULL-terminated. */
7463 static const struct fake_open fakes
[] = {
7464 { "maps", open_self_maps
, is_proc_myself
},
7465 { "stat", open_self_stat
, is_proc_myself
},
7466 { "auxv", open_self_auxv
, is_proc_myself
},
7467 { "cmdline", open_self_cmdline
, is_proc_myself
},
7468 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Only needed when host/guest endianness differ (see open_net_route). */
7469 { "/proc/net/route", open_net_route
, is_proc
},
7471 { NULL
, NULL
, NULL
}
/* /proc/self/exe: prefer the AT_EXECFD handed to us by the loader,
 * else open the recorded guest executable path. */
7474 if (is_proc_myself(pathname
, "exe")) {
7475 int execfd
= qemu_getauxval(AT_EXECFD
);
7476 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
/* Scan the interception table for a matching /proc entry. */
7479 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7480 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7485 if (fake_open
->filename
) {
7487 char filename
[PATH_MAX
];
7490 /* create temporary file to map stat to */
/* Respect $TMPDIR; the fallback default is not visible here. */
7491 tmpdir
= getenv("TMPDIR");
7494 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7495 fd
= mkstemp(filename
);
/* Generate the fake content; nonzero from fill() is an error. */
7501 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
/* Rewind so the guest reads from the beginning of the fake file. */
7507 lseek(fd
, 0, SEEK_SET
);
/* No interception: do a real openat through the path() remapper. */
7512 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7515 #define TIMER_MAGIC 0x0caf0000
7516 #define TIMER_MAGIC_MASK 0xffff0000
7518 /* Convert QEMU provided timer ID back to internal 16bit index format */
7519 static target_timer_t
get_timer_id(abi_long arg
)
7521 target_timer_t timerid
= arg
;
7523 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7524 return -TARGET_EINVAL
;
7529 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7530 return -TARGET_EINVAL
;
7536 /* do_syscall() should always have a single exit point at the end so
7537 that actions, such as logging of syscall results, can be performed.
7538 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7539 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7540 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7541 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7544 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7550 #if defined(DEBUG_ERESTARTSYS)
7551 /* Debug-only code for exercising the syscall-restart code paths
7552 * in the per-architecture cpu main loops: restart every syscall
7553 * the guest makes once before letting it through.
7560 return -TARGET_ERESTARTSYS
;
7566 gemu_log("syscall %d", num
);
7568 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7570 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7573 case TARGET_NR_exit
:
7574 /* In old applications this may be used to implement _exit(2).
7575 However in threaded applictions it is used for thread termination,
7576 and _exit_group is used for application termination.
7577 Do thread termination if we have more then one thread. */
7579 if (block_signals()) {
7580 ret
= -TARGET_ERESTARTSYS
;
7586 if (CPU_NEXT(first_cpu
)) {
7589 /* Remove the CPU from the list. */
7590 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7595 if (ts
->child_tidptr
) {
7596 put_user_u32(0, ts
->child_tidptr
);
7597 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7601 object_unref(OBJECT(cpu
));
7603 rcu_unregister_thread();
7611 gdb_exit(cpu_env
, arg1
);
7613 ret
= 0; /* avoid warning */
7615 case TARGET_NR_read
:
7619 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7621 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7623 fd_trans_host_to_target_data(arg1
)) {
7624 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7626 unlock_user(p
, arg2
, ret
);
7629 case TARGET_NR_write
:
7630 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7632 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7633 unlock_user(p
, arg2
, 0);
7635 #ifdef TARGET_NR_open
7636 case TARGET_NR_open
:
7637 if (!(p
= lock_user_string(arg1
)))
7639 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7640 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7642 fd_trans_unregister(ret
);
7643 unlock_user(p
, arg1
, 0);
7646 case TARGET_NR_openat
:
7647 if (!(p
= lock_user_string(arg2
)))
7649 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7650 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7652 fd_trans_unregister(ret
);
7653 unlock_user(p
, arg2
, 0);
7655 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7656 case TARGET_NR_name_to_handle_at
:
7657 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7660 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7661 case TARGET_NR_open_by_handle_at
:
7662 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7663 fd_trans_unregister(ret
);
7666 case TARGET_NR_close
:
7667 fd_trans_unregister(arg1
);
7668 ret
= get_errno(close(arg1
));
7673 #ifdef TARGET_NR_fork
7674 case TARGET_NR_fork
:
7675 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7678 #ifdef TARGET_NR_waitpid
7679 case TARGET_NR_waitpid
:
7682 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7683 if (!is_error(ret
) && arg2
&& ret
7684 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7689 #ifdef TARGET_NR_waitid
7690 case TARGET_NR_waitid
:
7694 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7695 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7696 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7698 host_to_target_siginfo(p
, &info
);
7699 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7704 #ifdef TARGET_NR_creat /* not on alpha */
7705 case TARGET_NR_creat
:
7706 if (!(p
= lock_user_string(arg1
)))
7708 ret
= get_errno(creat(p
, arg2
));
7709 fd_trans_unregister(ret
);
7710 unlock_user(p
, arg1
, 0);
7713 #ifdef TARGET_NR_link
7714 case TARGET_NR_link
:
7717 p
= lock_user_string(arg1
);
7718 p2
= lock_user_string(arg2
);
7720 ret
= -TARGET_EFAULT
;
7722 ret
= get_errno(link(p
, p2
));
7723 unlock_user(p2
, arg2
, 0);
7724 unlock_user(p
, arg1
, 0);
7728 #if defined(TARGET_NR_linkat)
7729 case TARGET_NR_linkat
:
7734 p
= lock_user_string(arg2
);
7735 p2
= lock_user_string(arg4
);
7737 ret
= -TARGET_EFAULT
;
7739 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7740 unlock_user(p
, arg2
, 0);
7741 unlock_user(p2
, arg4
, 0);
7745 #ifdef TARGET_NR_unlink
7746 case TARGET_NR_unlink
:
7747 if (!(p
= lock_user_string(arg1
)))
7749 ret
= get_errno(unlink(p
));
7750 unlock_user(p
, arg1
, 0);
7753 #if defined(TARGET_NR_unlinkat)
7754 case TARGET_NR_unlinkat
:
7755 if (!(p
= lock_user_string(arg2
)))
7757 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7758 unlock_user(p
, arg2
, 0);
7761 case TARGET_NR_execve
:
7763 char **argp
, **envp
;
7766 abi_ulong guest_argp
;
7767 abi_ulong guest_envp
;
7774 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7775 if (get_user_ual(addr
, gp
))
7783 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7784 if (get_user_ual(addr
, gp
))
7791 argp
= alloca((argc
+ 1) * sizeof(void *));
7792 envp
= alloca((envc
+ 1) * sizeof(void *));
7794 for (gp
= guest_argp
, q
= argp
; gp
;
7795 gp
+= sizeof(abi_ulong
), q
++) {
7796 if (get_user_ual(addr
, gp
))
7800 if (!(*q
= lock_user_string(addr
)))
7802 total_size
+= strlen(*q
) + 1;
7806 for (gp
= guest_envp
, q
= envp
; gp
;
7807 gp
+= sizeof(abi_ulong
), q
++) {
7808 if (get_user_ual(addr
, gp
))
7812 if (!(*q
= lock_user_string(addr
)))
7814 total_size
+= strlen(*q
) + 1;
7818 if (!(p
= lock_user_string(arg1
)))
7820 /* Although execve() is not an interruptible syscall it is
7821 * a special case where we must use the safe_syscall wrapper:
7822 * if we allow a signal to happen before we make the host
7823 * syscall then we will 'lose' it, because at the point of
7824 * execve the process leaves QEMU's control. So we use the
7825 * safe syscall wrapper to ensure that we either take the
7826 * signal as a guest signal, or else it does not happen
7827 * before the execve completes and makes it the other
7828 * program's problem.
7830 ret
= get_errno(safe_execve(p
, argp
, envp
));
7831 unlock_user(p
, arg1
, 0);
7836 ret
= -TARGET_EFAULT
;
7839 for (gp
= guest_argp
, q
= argp
; *q
;
7840 gp
+= sizeof(abi_ulong
), q
++) {
7841 if (get_user_ual(addr
, gp
)
7844 unlock_user(*q
, addr
, 0);
7846 for (gp
= guest_envp
, q
= envp
; *q
;
7847 gp
+= sizeof(abi_ulong
), q
++) {
7848 if (get_user_ual(addr
, gp
)
7851 unlock_user(*q
, addr
, 0);
7855 case TARGET_NR_chdir
:
7856 if (!(p
= lock_user_string(arg1
)))
7858 ret
= get_errno(chdir(p
));
7859 unlock_user(p
, arg1
, 0);
7861 #ifdef TARGET_NR_time
7862 case TARGET_NR_time
:
7865 ret
= get_errno(time(&host_time
));
7868 && put_user_sal(host_time
, arg1
))
7873 #ifdef TARGET_NR_mknod
7874 case TARGET_NR_mknod
:
7875 if (!(p
= lock_user_string(arg1
)))
7877 ret
= get_errno(mknod(p
, arg2
, arg3
));
7878 unlock_user(p
, arg1
, 0);
7881 #if defined(TARGET_NR_mknodat)
7882 case TARGET_NR_mknodat
:
7883 if (!(p
= lock_user_string(arg2
)))
7885 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7886 unlock_user(p
, arg2
, 0);
7889 #ifdef TARGET_NR_chmod
7890 case TARGET_NR_chmod
:
7891 if (!(p
= lock_user_string(arg1
)))
7893 ret
= get_errno(chmod(p
, arg2
));
7894 unlock_user(p
, arg1
, 0);
7897 #ifdef TARGET_NR_break
7898 case TARGET_NR_break
:
7901 #ifdef TARGET_NR_oldstat
7902 case TARGET_NR_oldstat
:
7905 case TARGET_NR_lseek
:
7906 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7908 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7909 /* Alpha specific */
7910 case TARGET_NR_getxpid
:
7911 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7912 ret
= get_errno(getpid());
7915 #ifdef TARGET_NR_getpid
7916 case TARGET_NR_getpid
:
7917 ret
= get_errno(getpid());
7920 case TARGET_NR_mount
:
7922 /* need to look at the data field */
7926 p
= lock_user_string(arg1
);
7934 p2
= lock_user_string(arg2
);
7937 unlock_user(p
, arg1
, 0);
7943 p3
= lock_user_string(arg3
);
7946 unlock_user(p
, arg1
, 0);
7948 unlock_user(p2
, arg2
, 0);
7955 /* FIXME - arg5 should be locked, but it isn't clear how to
7956 * do that since it's not guaranteed to be a NULL-terminated
7960 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7962 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7964 ret
= get_errno(ret
);
7967 unlock_user(p
, arg1
, 0);
7969 unlock_user(p2
, arg2
, 0);
7971 unlock_user(p3
, arg3
, 0);
7975 #ifdef TARGET_NR_umount
7976 case TARGET_NR_umount
:
7977 if (!(p
= lock_user_string(arg1
)))
7979 ret
= get_errno(umount(p
));
7980 unlock_user(p
, arg1
, 0);
7983 #ifdef TARGET_NR_stime /* not on alpha */
7984 case TARGET_NR_stime
:
7987 if (get_user_sal(host_time
, arg1
))
7989 ret
= get_errno(stime(&host_time
));
7993 case TARGET_NR_ptrace
:
7995 #ifdef TARGET_NR_alarm /* not on alpha */
7996 case TARGET_NR_alarm
:
8000 #ifdef TARGET_NR_oldfstat
8001 case TARGET_NR_oldfstat
:
8004 #ifdef TARGET_NR_pause /* not on alpha */
8005 case TARGET_NR_pause
:
8006 if (!block_signals()) {
8007 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8009 ret
= -TARGET_EINTR
;
8012 #ifdef TARGET_NR_utime
8013 case TARGET_NR_utime
:
8015 struct utimbuf tbuf
, *host_tbuf
;
8016 struct target_utimbuf
*target_tbuf
;
8018 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8020 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8021 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8022 unlock_user_struct(target_tbuf
, arg2
, 0);
8027 if (!(p
= lock_user_string(arg1
)))
8029 ret
= get_errno(utime(p
, host_tbuf
));
8030 unlock_user(p
, arg1
, 0);
8034 #ifdef TARGET_NR_utimes
8035 case TARGET_NR_utimes
:
8037 struct timeval
*tvp
, tv
[2];
8039 if (copy_from_user_timeval(&tv
[0], arg2
)
8040 || copy_from_user_timeval(&tv
[1],
8041 arg2
+ sizeof(struct target_timeval
)))
8047 if (!(p
= lock_user_string(arg1
)))
8049 ret
= get_errno(utimes(p
, tvp
));
8050 unlock_user(p
, arg1
, 0);
8054 #if defined(TARGET_NR_futimesat)
8055 case TARGET_NR_futimesat
:
8057 struct timeval
*tvp
, tv
[2];
8059 if (copy_from_user_timeval(&tv
[0], arg3
)
8060 || copy_from_user_timeval(&tv
[1],
8061 arg3
+ sizeof(struct target_timeval
)))
8067 if (!(p
= lock_user_string(arg2
)))
8069 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8070 unlock_user(p
, arg2
, 0);
8074 #ifdef TARGET_NR_stty
8075 case TARGET_NR_stty
:
8078 #ifdef TARGET_NR_gtty
8079 case TARGET_NR_gtty
:
8082 #ifdef TARGET_NR_access
8083 case TARGET_NR_access
:
8084 if (!(p
= lock_user_string(arg1
)))
8086 ret
= get_errno(access(path(p
), arg2
));
8087 unlock_user(p
, arg1
, 0);
8090 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8091 case TARGET_NR_faccessat
:
8092 if (!(p
= lock_user_string(arg2
)))
8094 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8095 unlock_user(p
, arg2
, 0);
8098 #ifdef TARGET_NR_nice /* not on alpha */
8099 case TARGET_NR_nice
:
8100 ret
= get_errno(nice(arg1
));
8103 #ifdef TARGET_NR_ftime
8104 case TARGET_NR_ftime
:
8107 case TARGET_NR_sync
:
8111 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8112 case TARGET_NR_syncfs
:
8113 ret
= get_errno(syncfs(arg1
));
8116 case TARGET_NR_kill
:
8117 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8119 #ifdef TARGET_NR_rename
8120 case TARGET_NR_rename
:
8123 p
= lock_user_string(arg1
);
8124 p2
= lock_user_string(arg2
);
8126 ret
= -TARGET_EFAULT
;
8128 ret
= get_errno(rename(p
, p2
));
8129 unlock_user(p2
, arg2
, 0);
8130 unlock_user(p
, arg1
, 0);
8134 #if defined(TARGET_NR_renameat)
8135 case TARGET_NR_renameat
:
8138 p
= lock_user_string(arg2
);
8139 p2
= lock_user_string(arg4
);
8141 ret
= -TARGET_EFAULT
;
8143 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8144 unlock_user(p2
, arg4
, 0);
8145 unlock_user(p
, arg2
, 0);
8149 #ifdef TARGET_NR_mkdir
8150 case TARGET_NR_mkdir
:
8151 if (!(p
= lock_user_string(arg1
)))
8153 ret
= get_errno(mkdir(p
, arg2
));
8154 unlock_user(p
, arg1
, 0);
8157 #if defined(TARGET_NR_mkdirat)
8158 case TARGET_NR_mkdirat
:
8159 if (!(p
= lock_user_string(arg2
)))
8161 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8162 unlock_user(p
, arg2
, 0);
8165 #ifdef TARGET_NR_rmdir
8166 case TARGET_NR_rmdir
:
8167 if (!(p
= lock_user_string(arg1
)))
8169 ret
= get_errno(rmdir(p
));
8170 unlock_user(p
, arg1
, 0);
8174 ret
= get_errno(dup(arg1
));
8176 fd_trans_dup(arg1
, ret
);
8179 #ifdef TARGET_NR_pipe
8180 case TARGET_NR_pipe
:
8181 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8184 #ifdef TARGET_NR_pipe2
8185 case TARGET_NR_pipe2
:
8186 ret
= do_pipe(cpu_env
, arg1
,
8187 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8190 case TARGET_NR_times
:
8192 struct target_tms
*tmsp
;
8194 ret
= get_errno(times(&tms
));
8196 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8199 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8200 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8201 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8202 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8205 ret
= host_to_target_clock_t(ret
);
8208 #ifdef TARGET_NR_prof
8209 case TARGET_NR_prof
:
8212 #ifdef TARGET_NR_signal
8213 case TARGET_NR_signal
:
8216 case TARGET_NR_acct
:
8218 ret
= get_errno(acct(NULL
));
8220 if (!(p
= lock_user_string(arg1
)))
8222 ret
= get_errno(acct(path(p
)));
8223 unlock_user(p
, arg1
, 0);
8226 #ifdef TARGET_NR_umount2
8227 case TARGET_NR_umount2
:
8228 if (!(p
= lock_user_string(arg1
)))
8230 ret
= get_errno(umount2(p
, arg2
));
8231 unlock_user(p
, arg1
, 0);
8234 #ifdef TARGET_NR_lock
8235 case TARGET_NR_lock
:
8238 case TARGET_NR_ioctl
:
8239 ret
= do_ioctl(arg1
, arg2
, arg3
);
8241 case TARGET_NR_fcntl
:
8242 ret
= do_fcntl(arg1
, arg2
, arg3
);
8244 #ifdef TARGET_NR_mpx
8248 case TARGET_NR_setpgid
:
8249 ret
= get_errno(setpgid(arg1
, arg2
));
8251 #ifdef TARGET_NR_ulimit
8252 case TARGET_NR_ulimit
:
8255 #ifdef TARGET_NR_oldolduname
8256 case TARGET_NR_oldolduname
:
8259 case TARGET_NR_umask
:
8260 ret
= get_errno(umask(arg1
));
8262 case TARGET_NR_chroot
:
8263 if (!(p
= lock_user_string(arg1
)))
8265 ret
= get_errno(chroot(p
));
8266 unlock_user(p
, arg1
, 0);
8268 #ifdef TARGET_NR_ustat
8269 case TARGET_NR_ustat
:
8272 #ifdef TARGET_NR_dup2
8273 case TARGET_NR_dup2
:
8274 ret
= get_errno(dup2(arg1
, arg2
));
8276 fd_trans_dup(arg1
, arg2
);
8280 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8281 case TARGET_NR_dup3
:
8282 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8284 fd_trans_dup(arg1
, arg2
);
8288 #ifdef TARGET_NR_getppid /* not on alpha */
8289 case TARGET_NR_getppid
:
8290 ret
= get_errno(getppid());
8293 #ifdef TARGET_NR_getpgrp
8294 case TARGET_NR_getpgrp
:
8295 ret
= get_errno(getpgrp());
8298 case TARGET_NR_setsid
:
8299 ret
= get_errno(setsid());
8301 #ifdef TARGET_NR_sigaction
8302 case TARGET_NR_sigaction
:
8304 #if defined(TARGET_ALPHA)
8305 struct target_sigaction act
, oact
, *pact
= 0;
8306 struct target_old_sigaction
*old_act
;
8308 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8310 act
._sa_handler
= old_act
->_sa_handler
;
8311 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8312 act
.sa_flags
= old_act
->sa_flags
;
8313 act
.sa_restorer
= 0;
8314 unlock_user_struct(old_act
, arg2
, 0);
8317 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8318 if (!is_error(ret
) && arg3
) {
8319 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8321 old_act
->_sa_handler
= oact
._sa_handler
;
8322 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8323 old_act
->sa_flags
= oact
.sa_flags
;
8324 unlock_user_struct(old_act
, arg3
, 1);
8326 #elif defined(TARGET_MIPS)
8327 struct target_sigaction act
, oact
, *pact
, *old_act
;
8330 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8332 act
._sa_handler
= old_act
->_sa_handler
;
8333 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8334 act
.sa_flags
= old_act
->sa_flags
;
8335 unlock_user_struct(old_act
, arg2
, 0);
8341 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8343 if (!is_error(ret
) && arg3
) {
8344 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8346 old_act
->_sa_handler
= oact
._sa_handler
;
8347 old_act
->sa_flags
= oact
.sa_flags
;
8348 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8349 old_act
->sa_mask
.sig
[1] = 0;
8350 old_act
->sa_mask
.sig
[2] = 0;
8351 old_act
->sa_mask
.sig
[3] = 0;
8352 unlock_user_struct(old_act
, arg3
, 1);
8355 struct target_old_sigaction
*old_act
;
8356 struct target_sigaction act
, oact
, *pact
;
8358 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8360 act
._sa_handler
= old_act
->_sa_handler
;
8361 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8362 act
.sa_flags
= old_act
->sa_flags
;
8363 act
.sa_restorer
= old_act
->sa_restorer
;
8364 unlock_user_struct(old_act
, arg2
, 0);
8369 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8370 if (!is_error(ret
) && arg3
) {
8371 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8373 old_act
->_sa_handler
= oact
._sa_handler
;
8374 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8375 old_act
->sa_flags
= oact
.sa_flags
;
8376 old_act
->sa_restorer
= oact
.sa_restorer
;
8377 unlock_user_struct(old_act
, arg3
, 1);
8383 case TARGET_NR_rt_sigaction
:
8385 #if defined(TARGET_ALPHA)
8386 struct target_sigaction act
, oact
, *pact
= 0;
8387 struct target_rt_sigaction
*rt_act
;
8389 if (arg4
!= sizeof(target_sigset_t
)) {
8390 ret
= -TARGET_EINVAL
;
8394 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8396 act
._sa_handler
= rt_act
->_sa_handler
;
8397 act
.sa_mask
= rt_act
->sa_mask
;
8398 act
.sa_flags
= rt_act
->sa_flags
;
8399 act
.sa_restorer
= arg5
;
8400 unlock_user_struct(rt_act
, arg2
, 0);
8403 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8404 if (!is_error(ret
) && arg3
) {
8405 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8407 rt_act
->_sa_handler
= oact
._sa_handler
;
8408 rt_act
->sa_mask
= oact
.sa_mask
;
8409 rt_act
->sa_flags
= oact
.sa_flags
;
8410 unlock_user_struct(rt_act
, arg3
, 1);
8413 struct target_sigaction
*act
;
8414 struct target_sigaction
*oact
;
8416 if (arg4
!= sizeof(target_sigset_t
)) {
8417 ret
= -TARGET_EINVAL
;
8421 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8426 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8427 ret
= -TARGET_EFAULT
;
8428 goto rt_sigaction_fail
;
8432 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8435 unlock_user_struct(act
, arg2
, 0);
8437 unlock_user_struct(oact
, arg3
, 1);
8441 #ifdef TARGET_NR_sgetmask /* not on alpha */
8442 case TARGET_NR_sgetmask
:
8445 abi_ulong target_set
;
8446 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8448 host_to_target_old_sigset(&target_set
, &cur_set
);
8454 #ifdef TARGET_NR_ssetmask /* not on alpha */
8455 case TARGET_NR_ssetmask
:
8457 sigset_t set
, oset
, cur_set
;
8458 abi_ulong target_set
= arg1
;
8459 /* We only have one word of the new mask so we must read
8460 * the rest of it with do_sigprocmask() and OR in this word.
8461 * We are guaranteed that a do_sigprocmask() that only queries
8462 * the signal mask will not fail.
8464 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8466 target_to_host_old_sigset(&set
, &target_set
);
8467 sigorset(&set
, &set
, &cur_set
);
8468 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8470 host_to_target_old_sigset(&target_set
, &oset
);
8476 #ifdef TARGET_NR_sigprocmask
8477 case TARGET_NR_sigprocmask
:
8479 #if defined(TARGET_ALPHA)
8480 sigset_t set
, oldset
;
8485 case TARGET_SIG_BLOCK
:
8488 case TARGET_SIG_UNBLOCK
:
8491 case TARGET_SIG_SETMASK
:
8495 ret
= -TARGET_EINVAL
;
8499 target_to_host_old_sigset(&set
, &mask
);
8501 ret
= do_sigprocmask(how
, &set
, &oldset
);
8502 if (!is_error(ret
)) {
8503 host_to_target_old_sigset(&mask
, &oldset
);
8505 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8508 sigset_t set
, oldset
, *set_ptr
;
8513 case TARGET_SIG_BLOCK
:
8516 case TARGET_SIG_UNBLOCK
:
8519 case TARGET_SIG_SETMASK
:
8523 ret
= -TARGET_EINVAL
;
8526 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8528 target_to_host_old_sigset(&set
, p
);
8529 unlock_user(p
, arg2
, 0);
8535 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8536 if (!is_error(ret
) && arg3
) {
8537 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8539 host_to_target_old_sigset(p
, &oldset
);
8540 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8546 case TARGET_NR_rt_sigprocmask
:
8549 sigset_t set
, oldset
, *set_ptr
;
8551 if (arg4
!= sizeof(target_sigset_t
)) {
8552 ret
= -TARGET_EINVAL
;
8558 case TARGET_SIG_BLOCK
:
8561 case TARGET_SIG_UNBLOCK
:
8564 case TARGET_SIG_SETMASK
:
8568 ret
= -TARGET_EINVAL
;
8571 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8573 target_to_host_sigset(&set
, p
);
8574 unlock_user(p
, arg2
, 0);
8580 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8581 if (!is_error(ret
) && arg3
) {
8582 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8584 host_to_target_sigset(p
, &oldset
);
8585 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8589 #ifdef TARGET_NR_sigpending
8590 case TARGET_NR_sigpending
:
8593 ret
= get_errno(sigpending(&set
));
8594 if (!is_error(ret
)) {
8595 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8597 host_to_target_old_sigset(p
, &set
);
8598 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8603 case TARGET_NR_rt_sigpending
:
8607 /* Yes, this check is >, not != like most. We follow the kernel's
8608 * logic and it does it like this because it implements
8609 * NR_sigpending through the same code path, and in that case
8610 * the old_sigset_t is smaller in size.
8612 if (arg2
> sizeof(target_sigset_t
)) {
8613 ret
= -TARGET_EINVAL
;
8617 ret
= get_errno(sigpending(&set
));
8618 if (!is_error(ret
)) {
8619 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8621 host_to_target_sigset(p
, &set
);
8622 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8626 #ifdef TARGET_NR_sigsuspend
8627 case TARGET_NR_sigsuspend
:
8629 TaskState
*ts
= cpu
->opaque
;
8630 #if defined(TARGET_ALPHA)
8631 abi_ulong mask
= arg1
;
8632 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8634 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8636 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8637 unlock_user(p
, arg1
, 0);
8639 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8641 if (ret
!= -TARGET_ERESTARTSYS
) {
8642 ts
->in_sigsuspend
= 1;
8647 case TARGET_NR_rt_sigsuspend
:
8649 TaskState
*ts
= cpu
->opaque
;
8651 if (arg2
!= sizeof(target_sigset_t
)) {
8652 ret
= -TARGET_EINVAL
;
8655 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8657 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8658 unlock_user(p
, arg1
, 0);
8659 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8661 if (ret
!= -TARGET_ERESTARTSYS
) {
8662 ts
->in_sigsuspend
= 1;
8666 case TARGET_NR_rt_sigtimedwait
:
8669 struct timespec uts
, *puts
;
8672 if (arg4
!= sizeof(target_sigset_t
)) {
8673 ret
= -TARGET_EINVAL
;
8677 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8679 target_to_host_sigset(&set
, p
);
8680 unlock_user(p
, arg1
, 0);
8683 target_to_host_timespec(puts
, arg3
);
8687 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8689 if (!is_error(ret
)) {
8691 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8696 host_to_target_siginfo(p
, &uinfo
);
8697 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8699 ret
= host_to_target_signal(ret
);
8703 case TARGET_NR_rt_sigqueueinfo
:
8707 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8711 target_to_host_siginfo(&uinfo
, p
);
8712 unlock_user(p
, arg1
, 0);
8713 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8716 #ifdef TARGET_NR_sigreturn
8717 case TARGET_NR_sigreturn
:
8718 if (block_signals()) {
8719 ret
= -TARGET_ERESTARTSYS
;
8721 ret
= do_sigreturn(cpu_env
);
8725 case TARGET_NR_rt_sigreturn
:
8726 if (block_signals()) {
8727 ret
= -TARGET_ERESTARTSYS
;
8729 ret
= do_rt_sigreturn(cpu_env
);
8732 case TARGET_NR_sethostname
:
8733 if (!(p
= lock_user_string(arg1
)))
8735 ret
= get_errno(sethostname(p
, arg2
));
8736 unlock_user(p
, arg1
, 0);
8738 case TARGET_NR_setrlimit
:
8740 int resource
= target_to_host_resource(arg1
);
8741 struct target_rlimit
*target_rlim
;
8743 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8745 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8746 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8747 unlock_user_struct(target_rlim
, arg2
, 0);
8748 ret
= get_errno(setrlimit(resource
, &rlim
));
8751 case TARGET_NR_getrlimit
:
8753 int resource
= target_to_host_resource(arg1
);
8754 struct target_rlimit
*target_rlim
;
8757 ret
= get_errno(getrlimit(resource
, &rlim
));
8758 if (!is_error(ret
)) {
8759 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8761 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8762 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8763 unlock_user_struct(target_rlim
, arg2
, 1);
8767 case TARGET_NR_getrusage
:
8769 struct rusage rusage
;
8770 ret
= get_errno(getrusage(arg1
, &rusage
));
8771 if (!is_error(ret
)) {
8772 ret
= host_to_target_rusage(arg2
, &rusage
);
8776 case TARGET_NR_gettimeofday
:
8779 ret
= get_errno(gettimeofday(&tv
, NULL
));
8780 if (!is_error(ret
)) {
8781 if (copy_to_user_timeval(arg1
, &tv
))
8786 case TARGET_NR_settimeofday
:
8788 struct timeval tv
, *ptv
= NULL
;
8789 struct timezone tz
, *ptz
= NULL
;
8792 if (copy_from_user_timeval(&tv
, arg1
)) {
8799 if (copy_from_user_timezone(&tz
, arg2
)) {
8805 ret
= get_errno(settimeofday(ptv
, ptz
));
8808 #if defined(TARGET_NR_select)
8809 case TARGET_NR_select
:
8810 #if defined(TARGET_WANT_NI_OLD_SELECT)
8811 /* some architectures used to have old_select here
8812 * but now ENOSYS it.
8814 ret
= -TARGET_ENOSYS
;
8815 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8816 ret
= do_old_select(arg1
);
8818 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8822 #ifdef TARGET_NR_pselect6
8823 case TARGET_NR_pselect6
:
8825 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8826 fd_set rfds
, wfds
, efds
;
8827 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8828 struct timespec ts
, *ts_ptr
;
8831 * The 6th arg is actually two args smashed together,
8832 * so we cannot use the C library.
8840 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8841 target_sigset_t
*target_sigset
;
8849 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8853 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8857 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8863 * This takes a timespec, and not a timeval, so we cannot
8864 * use the do_select() helper ...
8867 if (target_to_host_timespec(&ts
, ts_addr
)) {
8875 /* Extract the two packed args for the sigset */
8878 sig
.size
= SIGSET_T_SIZE
;
8880 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8884 arg_sigset
= tswapal(arg7
[0]);
8885 arg_sigsize
= tswapal(arg7
[1]);
8886 unlock_user(arg7
, arg6
, 0);
8890 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8891 /* Like the kernel, we enforce correct size sigsets */
8892 ret
= -TARGET_EINVAL
;
8895 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8896 sizeof(*target_sigset
), 1);
8897 if (!target_sigset
) {
8900 target_to_host_sigset(&set
, target_sigset
);
8901 unlock_user(target_sigset
, arg_sigset
, 0);
8909 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8912 if (!is_error(ret
)) {
8913 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8915 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8917 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8920 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8926 #ifdef TARGET_NR_symlink
8927 case TARGET_NR_symlink
:
8930 p
= lock_user_string(arg1
);
8931 p2
= lock_user_string(arg2
);
8933 ret
= -TARGET_EFAULT
;
8935 ret
= get_errno(symlink(p
, p2
));
8936 unlock_user(p2
, arg2
, 0);
8937 unlock_user(p
, arg1
, 0);
8941 #if defined(TARGET_NR_symlinkat)
8942 case TARGET_NR_symlinkat
:
8945 p
= lock_user_string(arg1
);
8946 p2
= lock_user_string(arg3
);
8948 ret
= -TARGET_EFAULT
;
8950 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8951 unlock_user(p2
, arg3
, 0);
8952 unlock_user(p
, arg1
, 0);
8956 #ifdef TARGET_NR_oldlstat
8957 case TARGET_NR_oldlstat
:
8960 #ifdef TARGET_NR_readlink
8961 case TARGET_NR_readlink
:
8964 p
= lock_user_string(arg1
);
8965 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8967 ret
= -TARGET_EFAULT
;
8969 /* Short circuit this for the magic exe check. */
8970 ret
= -TARGET_EINVAL
;
8971 } else if (is_proc_myself((const char *)p
, "exe")) {
8972 char real
[PATH_MAX
], *temp
;
8973 temp
= realpath(exec_path
, real
);
8974 /* Return value is # of bytes that we wrote to the buffer. */
8976 ret
= get_errno(-1);
8978 /* Don't worry about sign mismatch as earlier mapping
8979 * logic would have thrown a bad address error. */
8980 ret
= MIN(strlen(real
), arg3
);
8981 /* We cannot NUL terminate the string. */
8982 memcpy(p2
, real
, ret
);
8985 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8987 unlock_user(p2
, arg2
, ret
);
8988 unlock_user(p
, arg1
, 0);
8992 #if defined(TARGET_NR_readlinkat)
8993 case TARGET_NR_readlinkat
:
8996 p
= lock_user_string(arg2
);
8997 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8999 ret
= -TARGET_EFAULT
;
9000 } else if (is_proc_myself((const char *)p
, "exe")) {
9001 char real
[PATH_MAX
], *temp
;
9002 temp
= realpath(exec_path
, real
);
9003 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9004 snprintf((char *)p2
, arg4
, "%s", real
);
9006 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9008 unlock_user(p2
, arg3
, ret
);
9009 unlock_user(p
, arg2
, 0);
9013 #ifdef TARGET_NR_uselib
9014 case TARGET_NR_uselib
:
9017 #ifdef TARGET_NR_swapon
9018 case TARGET_NR_swapon
:
9019 if (!(p
= lock_user_string(arg1
)))
9021 ret
= get_errno(swapon(p
, arg2
));
9022 unlock_user(p
, arg1
, 0);
9025 case TARGET_NR_reboot
:
9026 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9027 /* arg4 must be ignored in all other cases */
9028 p
= lock_user_string(arg4
);
9032 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9033 unlock_user(p
, arg4
, 0);
9035 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9038 #ifdef TARGET_NR_readdir
9039 case TARGET_NR_readdir
:
9042 #ifdef TARGET_NR_mmap
9043 case TARGET_NR_mmap
:
9044 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9045 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9046 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9047 || defined(TARGET_S390X)
9050 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9051 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9059 unlock_user(v
, arg1
, 0);
9060 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9061 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9065 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9066 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9072 #ifdef TARGET_NR_mmap2
9073 case TARGET_NR_mmap2
:
9075 #define MMAP_SHIFT 12
9077 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9078 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9080 arg6
<< MMAP_SHIFT
));
9083 case TARGET_NR_munmap
:
9084 ret
= get_errno(target_munmap(arg1
, arg2
));
9086 case TARGET_NR_mprotect
:
9088 TaskState
*ts
= cpu
->opaque
;
9089 /* Special hack to detect libc making the stack executable. */
9090 if ((arg3
& PROT_GROWSDOWN
)
9091 && arg1
>= ts
->info
->stack_limit
9092 && arg1
<= ts
->info
->start_stack
) {
9093 arg3
&= ~PROT_GROWSDOWN
;
9094 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9095 arg1
= ts
->info
->stack_limit
;
9098 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9100 #ifdef TARGET_NR_mremap
9101 case TARGET_NR_mremap
:
9102 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9105 /* ??? msync/mlock/munlock are broken for softmmu. */
9106 #ifdef TARGET_NR_msync
9107 case TARGET_NR_msync
:
9108 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9111 #ifdef TARGET_NR_mlock
9112 case TARGET_NR_mlock
:
9113 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9116 #ifdef TARGET_NR_munlock
9117 case TARGET_NR_munlock
:
9118 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9121 #ifdef TARGET_NR_mlockall
9122 case TARGET_NR_mlockall
:
9123 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9126 #ifdef TARGET_NR_munlockall
9127 case TARGET_NR_munlockall
:
9128 ret
= get_errno(munlockall());
9131 case TARGET_NR_truncate
:
9132 if (!(p
= lock_user_string(arg1
)))
9134 ret
= get_errno(truncate(p
, arg2
));
9135 unlock_user(p
, arg1
, 0);
9137 case TARGET_NR_ftruncate
:
9138 ret
= get_errno(ftruncate(arg1
, arg2
));
9140 case TARGET_NR_fchmod
:
9141 ret
= get_errno(fchmod(arg1
, arg2
));
9143 #if defined(TARGET_NR_fchmodat)
9144 case TARGET_NR_fchmodat
:
9145 if (!(p
= lock_user_string(arg2
)))
9147 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9148 unlock_user(p
, arg2
, 0);
9151 case TARGET_NR_getpriority
:
9152 /* Note that negative values are valid for getpriority, so we must
9153 differentiate based on errno settings. */
9155 ret
= getpriority(arg1
, arg2
);
9156 if (ret
== -1 && errno
!= 0) {
9157 ret
= -host_to_target_errno(errno
);
9161 /* Return value is the unbiased priority. Signal no error. */
9162 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9164 /* Return value is a biased priority to avoid negative numbers. */
9168 case TARGET_NR_setpriority
:
9169 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9171 #ifdef TARGET_NR_profil
9172 case TARGET_NR_profil
:
9175 case TARGET_NR_statfs
:
9176 if (!(p
= lock_user_string(arg1
)))
9178 ret
= get_errno(statfs(path(p
), &stfs
));
9179 unlock_user(p
, arg1
, 0);
9181 if (!is_error(ret
)) {
9182 struct target_statfs
*target_stfs
;
9184 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9186 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9187 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9188 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9189 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9190 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9191 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9192 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9193 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9194 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9195 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9196 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9197 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9198 unlock_user_struct(target_stfs
, arg2
, 1);
9201 case TARGET_NR_fstatfs
:
9202 ret
= get_errno(fstatfs(arg1
, &stfs
));
9203 goto convert_statfs
;
9204 #ifdef TARGET_NR_statfs64
9205 case TARGET_NR_statfs64
:
9206 if (!(p
= lock_user_string(arg1
)))
9208 ret
= get_errno(statfs(path(p
), &stfs
));
9209 unlock_user(p
, arg1
, 0);
9211 if (!is_error(ret
)) {
9212 struct target_statfs64
*target_stfs
;
9214 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9216 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9217 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9218 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9219 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9220 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9221 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9222 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9223 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9224 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9225 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9226 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9227 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9228 unlock_user_struct(target_stfs
, arg3
, 1);
9231 case TARGET_NR_fstatfs64
:
9232 ret
= get_errno(fstatfs(arg1
, &stfs
));
9233 goto convert_statfs64
;
9235 #ifdef TARGET_NR_ioperm
9236 case TARGET_NR_ioperm
:
9239 #ifdef TARGET_NR_socketcall
9240 case TARGET_NR_socketcall
:
9241 ret
= do_socketcall(arg1
, arg2
);
9244 #ifdef TARGET_NR_accept
9245 case TARGET_NR_accept
:
9246 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9249 #ifdef TARGET_NR_accept4
9250 case TARGET_NR_accept4
:
9251 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9254 #ifdef TARGET_NR_bind
9255 case TARGET_NR_bind
:
9256 ret
= do_bind(arg1
, arg2
, arg3
);
9259 #ifdef TARGET_NR_connect
9260 case TARGET_NR_connect
:
9261 ret
= do_connect(arg1
, arg2
, arg3
);
9264 #ifdef TARGET_NR_getpeername
9265 case TARGET_NR_getpeername
:
9266 ret
= do_getpeername(arg1
, arg2
, arg3
);
9269 #ifdef TARGET_NR_getsockname
9270 case TARGET_NR_getsockname
:
9271 ret
= do_getsockname(arg1
, arg2
, arg3
);
9274 #ifdef TARGET_NR_getsockopt
9275 case TARGET_NR_getsockopt
:
9276 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9279 #ifdef TARGET_NR_listen
9280 case TARGET_NR_listen
:
9281 ret
= get_errno(listen(arg1
, arg2
));
9284 #ifdef TARGET_NR_recv
9285 case TARGET_NR_recv
:
9286 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9289 #ifdef TARGET_NR_recvfrom
9290 case TARGET_NR_recvfrom
:
9291 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9294 #ifdef TARGET_NR_recvmsg
9295 case TARGET_NR_recvmsg
:
9296 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9299 #ifdef TARGET_NR_send
9300 case TARGET_NR_send
:
9301 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9304 #ifdef TARGET_NR_sendmsg
9305 case TARGET_NR_sendmsg
:
9306 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9309 #ifdef TARGET_NR_sendmmsg
9310 case TARGET_NR_sendmmsg
:
9311 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9313 case TARGET_NR_recvmmsg
:
9314 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9317 #ifdef TARGET_NR_sendto
9318 case TARGET_NR_sendto
:
9319 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9322 #ifdef TARGET_NR_shutdown
9323 case TARGET_NR_shutdown
:
9324 ret
= get_errno(shutdown(arg1
, arg2
));
9327 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9328 case TARGET_NR_getrandom
:
9329 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9333 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9334 unlock_user(p
, arg1
, ret
);
9337 #ifdef TARGET_NR_socket
9338 case TARGET_NR_socket
:
9339 ret
= do_socket(arg1
, arg2
, arg3
);
9340 fd_trans_unregister(ret
);
9343 #ifdef TARGET_NR_socketpair
9344 case TARGET_NR_socketpair
:
9345 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9348 #ifdef TARGET_NR_setsockopt
9349 case TARGET_NR_setsockopt
:
9350 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9353 #if defined(TARGET_NR_syslog)
9354 case TARGET_NR_syslog
:
9359 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9360 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9361 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9362 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9363 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9364 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9365 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9366 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9368 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9371 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9372 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9373 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9375 ret
= -TARGET_EINVAL
;
9383 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9385 ret
= -TARGET_EFAULT
;
9388 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9389 unlock_user(p
, arg2
, arg3
);
9399 case TARGET_NR_setitimer
:
9401 struct itimerval value
, ovalue
, *pvalue
;
9405 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9406 || copy_from_user_timeval(&pvalue
->it_value
,
9407 arg2
+ sizeof(struct target_timeval
)))
9412 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9413 if (!is_error(ret
) && arg3
) {
9414 if (copy_to_user_timeval(arg3
,
9415 &ovalue
.it_interval
)
9416 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9422 case TARGET_NR_getitimer
:
9424 struct itimerval value
;
9426 ret
= get_errno(getitimer(arg1
, &value
));
9427 if (!is_error(ret
) && arg2
) {
9428 if (copy_to_user_timeval(arg2
,
9430 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9436 #ifdef TARGET_NR_stat
9437 case TARGET_NR_stat
:
9438 if (!(p
= lock_user_string(arg1
)))
9440 ret
= get_errno(stat(path(p
), &st
));
9441 unlock_user(p
, arg1
, 0);
9444 #ifdef TARGET_NR_lstat
9445 case TARGET_NR_lstat
:
9446 if (!(p
= lock_user_string(arg1
)))
9448 ret
= get_errno(lstat(path(p
), &st
));
9449 unlock_user(p
, arg1
, 0);
9452 case TARGET_NR_fstat
:
9454 ret
= get_errno(fstat(arg1
, &st
));
9455 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9458 if (!is_error(ret
)) {
9459 struct target_stat
*target_st
;
9461 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9463 memset(target_st
, 0, sizeof(*target_st
));
9464 __put_user(st
.st_dev
, &target_st
->st_dev
);
9465 __put_user(st
.st_ino
, &target_st
->st_ino
);
9466 __put_user(st
.st_mode
, &target_st
->st_mode
);
9467 __put_user(st
.st_uid
, &target_st
->st_uid
);
9468 __put_user(st
.st_gid
, &target_st
->st_gid
);
9469 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9470 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9471 __put_user(st
.st_size
, &target_st
->st_size
);
9472 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9473 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9474 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9475 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9476 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9477 unlock_user_struct(target_st
, arg2
, 1);
9481 #ifdef TARGET_NR_olduname
9482 case TARGET_NR_olduname
:
9485 #ifdef TARGET_NR_iopl
9486 case TARGET_NR_iopl
:
9489 case TARGET_NR_vhangup
:
9490 ret
= get_errno(vhangup());
9492 #ifdef TARGET_NR_idle
9493 case TARGET_NR_idle
:
9496 #ifdef TARGET_NR_syscall
9497 case TARGET_NR_syscall
:
9498 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9499 arg6
, arg7
, arg8
, 0);
9502 case TARGET_NR_wait4
:
9505 abi_long status_ptr
= arg2
;
9506 struct rusage rusage
, *rusage_ptr
;
9507 abi_ulong target_rusage
= arg4
;
9508 abi_long rusage_err
;
9510 rusage_ptr
= &rusage
;
9513 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9514 if (!is_error(ret
)) {
9515 if (status_ptr
&& ret
) {
9516 status
= host_to_target_waitstatus(status
);
9517 if (put_user_s32(status
, status_ptr
))
9520 if (target_rusage
) {
9521 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9529 #ifdef TARGET_NR_swapoff
9530 case TARGET_NR_swapoff
:
9531 if (!(p
= lock_user_string(arg1
)))
9533 ret
= get_errno(swapoff(p
));
9534 unlock_user(p
, arg1
, 0);
9537 case TARGET_NR_sysinfo
:
9539 struct target_sysinfo
*target_value
;
9540 struct sysinfo value
;
9541 ret
= get_errno(sysinfo(&value
));
9542 if (!is_error(ret
) && arg1
)
9544 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9546 __put_user(value
.uptime
, &target_value
->uptime
);
9547 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9548 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9549 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9550 __put_user(value
.totalram
, &target_value
->totalram
);
9551 __put_user(value
.freeram
, &target_value
->freeram
);
9552 __put_user(value
.sharedram
, &target_value
->sharedram
);
9553 __put_user(value
.bufferram
, &target_value
->bufferram
);
9554 __put_user(value
.totalswap
, &target_value
->totalswap
);
9555 __put_user(value
.freeswap
, &target_value
->freeswap
);
9556 __put_user(value
.procs
, &target_value
->procs
);
9557 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9558 __put_user(value
.freehigh
, &target_value
->freehigh
);
9559 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9560 unlock_user_struct(target_value
, arg1
, 1);
9564 #ifdef TARGET_NR_ipc
9566 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9569 #ifdef TARGET_NR_semget
9570 case TARGET_NR_semget
:
9571 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9574 #ifdef TARGET_NR_semop
9575 case TARGET_NR_semop
:
9576 ret
= do_semop(arg1
, arg2
, arg3
);
9579 #ifdef TARGET_NR_semctl
9580 case TARGET_NR_semctl
:
9581 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9584 #ifdef TARGET_NR_msgctl
9585 case TARGET_NR_msgctl
:
9586 ret
= do_msgctl(arg1
, arg2
, arg3
);
9589 #ifdef TARGET_NR_msgget
9590 case TARGET_NR_msgget
:
9591 ret
= get_errno(msgget(arg1
, arg2
));
9594 #ifdef TARGET_NR_msgrcv
9595 case TARGET_NR_msgrcv
:
9596 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9599 #ifdef TARGET_NR_msgsnd
9600 case TARGET_NR_msgsnd
:
9601 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9604 #ifdef TARGET_NR_shmget
9605 case TARGET_NR_shmget
:
9606 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9609 #ifdef TARGET_NR_shmctl
9610 case TARGET_NR_shmctl
:
9611 ret
= do_shmctl(arg1
, arg2
, arg3
);
9614 #ifdef TARGET_NR_shmat
9615 case TARGET_NR_shmat
:
9616 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9619 #ifdef TARGET_NR_shmdt
9620 case TARGET_NR_shmdt
:
9621 ret
= do_shmdt(arg1
);
9624 case TARGET_NR_fsync
:
9625 ret
= get_errno(fsync(arg1
));
9627 case TARGET_NR_clone
:
9628 /* Linux manages to have three different orderings for its
9629 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9630 * match the kernel's CONFIG_CLONE_* settings.
9631 * Microblaze is further special in that it uses a sixth
9632 * implicit argument to clone for the TLS pointer.
9634 #if defined(TARGET_MICROBLAZE)
9635 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9636 #elif defined(TARGET_CLONE_BACKWARDS)
9637 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9638 #elif defined(TARGET_CLONE_BACKWARDS2)
9639 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9641 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9644 #ifdef __NR_exit_group
9645 /* new thread calls */
9646 case TARGET_NR_exit_group
:
9650 gdb_exit(cpu_env
, arg1
);
9651 ret
= get_errno(exit_group(arg1
));
9654 case TARGET_NR_setdomainname
:
9655 if (!(p
= lock_user_string(arg1
)))
9657 ret
= get_errno(setdomainname(p
, arg2
));
9658 unlock_user(p
, arg1
, 0);
9660 case TARGET_NR_uname
:
9661 /* no need to transcode because we use the linux syscall */
9663 struct new_utsname
* buf
;
9665 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9667 ret
= get_errno(sys_uname(buf
));
9668 if (!is_error(ret
)) {
9669 /* Overwrite the native machine name with whatever is being
9671 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9672 /* Allow the user to override the reported release. */
9673 if (qemu_uname_release
&& *qemu_uname_release
) {
9674 g_strlcpy(buf
->release
, qemu_uname_release
,
9675 sizeof(buf
->release
));
9678 unlock_user_struct(buf
, arg1
, 1);
9682 case TARGET_NR_modify_ldt
:
9683 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9685 #if !defined(TARGET_X86_64)
9686 case TARGET_NR_vm86old
:
9688 case TARGET_NR_vm86
:
9689 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9693 case TARGET_NR_adjtimex
:
9695 struct timex host_buf
;
9697 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9700 ret
= get_errno(adjtimex(&host_buf
));
9701 if (!is_error(ret
)) {
9702 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9708 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9709 case TARGET_NR_clock_adjtime
:
9711 struct timex htx
, *phtx
= &htx
;
9713 if (target_to_host_timex(phtx
, arg2
) != 0) {
9716 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9717 if (!is_error(ret
) && phtx
) {
9718 if (host_to_target_timex(arg2
, phtx
) != 0) {
9725 #ifdef TARGET_NR_create_module
9726 case TARGET_NR_create_module
:
9728 case TARGET_NR_init_module
:
9729 case TARGET_NR_delete_module
:
9730 #ifdef TARGET_NR_get_kernel_syms
9731 case TARGET_NR_get_kernel_syms
:
9734 case TARGET_NR_quotactl
:
9736 case TARGET_NR_getpgid
:
9737 ret
= get_errno(getpgid(arg1
));
9739 case TARGET_NR_fchdir
:
9740 ret
= get_errno(fchdir(arg1
));
9742 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9743 case TARGET_NR_bdflush
:
9746 #ifdef TARGET_NR_sysfs
9747 case TARGET_NR_sysfs
:
9750 case TARGET_NR_personality
:
9751 ret
= get_errno(personality(arg1
));
9753 #ifdef TARGET_NR_afs_syscall
9754 case TARGET_NR_afs_syscall
:
9757 #ifdef TARGET_NR__llseek /* Not on alpha */
9758 case TARGET_NR__llseek
:
9761 #if !defined(__NR_llseek)
9762 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9764 ret
= get_errno(res
);
9769 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9771 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9777 #ifdef TARGET_NR_getdents
9778 case TARGET_NR_getdents
:
9779 #ifdef __NR_getdents
9780 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9782 struct target_dirent
*target_dirp
;
9783 struct linux_dirent
*dirp
;
9784 abi_long count
= arg3
;
9786 dirp
= g_try_malloc(count
);
9788 ret
= -TARGET_ENOMEM
;
9792 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9793 if (!is_error(ret
)) {
9794 struct linux_dirent
*de
;
9795 struct target_dirent
*tde
;
9797 int reclen
, treclen
;
9798 int count1
, tnamelen
;
9802 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9806 reclen
= de
->d_reclen
;
9807 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9808 assert(tnamelen
>= 0);
9809 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9810 assert(count1
+ treclen
<= count
);
9811 tde
->d_reclen
= tswap16(treclen
);
9812 tde
->d_ino
= tswapal(de
->d_ino
);
9813 tde
->d_off
= tswapal(de
->d_off
);
9814 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9815 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9817 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9821 unlock_user(target_dirp
, arg2
, ret
);
9827 struct linux_dirent
*dirp
;
9828 abi_long count
= arg3
;
9830 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9832 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9833 if (!is_error(ret
)) {
9834 struct linux_dirent
*de
;
9839 reclen
= de
->d_reclen
;
9842 de
->d_reclen
= tswap16(reclen
);
9843 tswapls(&de
->d_ino
);
9844 tswapls(&de
->d_off
);
9845 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9849 unlock_user(dirp
, arg2
, ret
);
9853 /* Implement getdents in terms of getdents64 */
9855 struct linux_dirent64
*dirp
;
9856 abi_long count
= arg3
;
9858 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9862 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9863 if (!is_error(ret
)) {
9864 /* Convert the dirent64 structs to target dirent. We do this
9865 * in-place, since we can guarantee that a target_dirent is no
9866 * larger than a dirent64; however this means we have to be
9867 * careful to read everything before writing in the new format.
9869 struct linux_dirent64
*de
;
9870 struct target_dirent
*tde
;
9875 tde
= (struct target_dirent
*)dirp
;
9877 int namelen
, treclen
;
9878 int reclen
= de
->d_reclen
;
9879 uint64_t ino
= de
->d_ino
;
9880 int64_t off
= de
->d_off
;
9881 uint8_t type
= de
->d_type
;
9883 namelen
= strlen(de
->d_name
);
9884 treclen
= offsetof(struct target_dirent
, d_name
)
9886 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9888 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9889 tde
->d_ino
= tswapal(ino
);
9890 tde
->d_off
= tswapal(off
);
9891 tde
->d_reclen
= tswap16(treclen
);
9892 /* The target_dirent type is in what was formerly a padding
9893 * byte at the end of the structure:
9895 *(((char *)tde
) + treclen
- 1) = type
;
9897 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9898 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9904 unlock_user(dirp
, arg2
, ret
);
9908 #endif /* TARGET_NR_getdents */
9909 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9910 case TARGET_NR_getdents64
:
9912 struct linux_dirent64
*dirp
;
9913 abi_long count
= arg3
;
9914 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9916 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9917 if (!is_error(ret
)) {
9918 struct linux_dirent64
*de
;
9923 reclen
= de
->d_reclen
;
9926 de
->d_reclen
= tswap16(reclen
);
9927 tswap64s((uint64_t *)&de
->d_ino
);
9928 tswap64s((uint64_t *)&de
->d_off
);
9929 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9933 unlock_user(dirp
, arg2
, ret
);
9936 #endif /* TARGET_NR_getdents64 */
9937 #if defined(TARGET_NR__newselect)
9938 case TARGET_NR__newselect
:
9939 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9942 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9943 # ifdef TARGET_NR_poll
9944 case TARGET_NR_poll
:
9946 # ifdef TARGET_NR_ppoll
9947 case TARGET_NR_ppoll
:
9950 struct target_pollfd
*target_pfd
;
9951 unsigned int nfds
= arg2
;
9958 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9959 ret
= -TARGET_EINVAL
;
9963 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9964 sizeof(struct target_pollfd
) * nfds
, 1);
9969 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9970 for (i
= 0; i
< nfds
; i
++) {
9971 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9972 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9977 # ifdef TARGET_NR_ppoll
9978 case TARGET_NR_ppoll
:
9980 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9981 target_sigset_t
*target_set
;
9982 sigset_t _set
, *set
= &_set
;
9985 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9986 unlock_user(target_pfd
, arg1
, 0);
9994 if (arg5
!= sizeof(target_sigset_t
)) {
9995 unlock_user(target_pfd
, arg1
, 0);
9996 ret
= -TARGET_EINVAL
;
10000 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10002 unlock_user(target_pfd
, arg1
, 0);
10005 target_to_host_sigset(set
, target_set
);
10010 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10011 set
, SIGSET_T_SIZE
));
10013 if (!is_error(ret
) && arg3
) {
10014 host_to_target_timespec(arg3
, timeout_ts
);
10017 unlock_user(target_set
, arg4
, 0);
10022 # ifdef TARGET_NR_poll
10023 case TARGET_NR_poll
:
10025 struct timespec ts
, *pts
;
10028 /* Convert ms to secs, ns */
10029 ts
.tv_sec
= arg3
/ 1000;
10030 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10033 /* -ve poll() timeout means "infinite" */
10036 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10041 g_assert_not_reached();
10044 if (!is_error(ret
)) {
10045 for(i
= 0; i
< nfds
; i
++) {
10046 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10049 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10053 case TARGET_NR_flock
:
10054 /* NOTE: the flock constant seems to be the same for every
10056 ret
= get_errno(safe_flock(arg1
, arg2
));
10058 case TARGET_NR_readv
:
10060 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10062 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10063 unlock_iovec(vec
, arg2
, arg3
, 1);
10065 ret
= -host_to_target_errno(errno
);
10069 case TARGET_NR_writev
:
10071 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10073 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10074 unlock_iovec(vec
, arg2
, arg3
, 0);
10076 ret
= -host_to_target_errno(errno
);
10080 #if defined(TARGET_NR_preadv)
10081 case TARGET_NR_preadv
:
10083 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10085 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10086 unlock_iovec(vec
, arg2
, arg3
, 1);
10088 ret
= -host_to_target_errno(errno
);
10093 #if defined(TARGET_NR_pwritev)
10094 case TARGET_NR_pwritev
:
10096 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10098 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10099 unlock_iovec(vec
, arg2
, arg3
, 0);
10101 ret
= -host_to_target_errno(errno
);
10106 case TARGET_NR_getsid
:
10107 ret
= get_errno(getsid(arg1
));
10109 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10110 case TARGET_NR_fdatasync
:
10111 ret
= get_errno(fdatasync(arg1
));
10114 #ifdef TARGET_NR__sysctl
10115 case TARGET_NR__sysctl
:
10116 /* We don't implement this, but ENOTDIR is always a safe
10118 ret
= -TARGET_ENOTDIR
;
10121 case TARGET_NR_sched_getaffinity
:
10123 unsigned int mask_size
;
10124 unsigned long *mask
;
10127 * sched_getaffinity needs multiples of ulong, so need to take
10128 * care of mismatches between target ulong and host ulong sizes.
10130 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10131 ret
= -TARGET_EINVAL
;
10134 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10136 mask
= alloca(mask_size
);
10137 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10139 if (!is_error(ret
)) {
10141 /* More data returned than the caller's buffer will fit.
10142 * This only happens if sizeof(abi_long) < sizeof(long)
10143 * and the caller passed us a buffer holding an odd number
10144 * of abi_longs. If the host kernel is actually using the
10145 * extra 4 bytes then fail EINVAL; otherwise we can just
10146 * ignore them and only copy the interesting part.
10148 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10149 if (numcpus
> arg2
* 8) {
10150 ret
= -TARGET_EINVAL
;
10156 if (copy_to_user(arg3
, mask
, ret
)) {
10162 case TARGET_NR_sched_setaffinity
:
10164 unsigned int mask_size
;
10165 unsigned long *mask
;
10168 * sched_setaffinity needs multiples of ulong, so need to take
10169 * care of mismatches between target ulong and host ulong sizes.
10171 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10172 ret
= -TARGET_EINVAL
;
10175 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10177 mask
= alloca(mask_size
);
10178 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10181 memcpy(mask
, p
, arg2
);
10182 unlock_user_struct(p
, arg2
, 0);
10184 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10187 case TARGET_NR_sched_setparam
:
10189 struct sched_param
*target_schp
;
10190 struct sched_param schp
;
10193 return -TARGET_EINVAL
;
10195 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10197 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10198 unlock_user_struct(target_schp
, arg2
, 0);
10199 ret
= get_errno(sched_setparam(arg1
, &schp
));
10202 case TARGET_NR_sched_getparam
:
10204 struct sched_param
*target_schp
;
10205 struct sched_param schp
;
10208 return -TARGET_EINVAL
;
10210 ret
= get_errno(sched_getparam(arg1
, &schp
));
10211 if (!is_error(ret
)) {
10212 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10214 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10215 unlock_user_struct(target_schp
, arg2
, 1);
10219 case TARGET_NR_sched_setscheduler
:
10221 struct sched_param
*target_schp
;
10222 struct sched_param schp
;
10224 return -TARGET_EINVAL
;
10226 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10228 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10229 unlock_user_struct(target_schp
, arg3
, 0);
10230 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10233 case TARGET_NR_sched_getscheduler
:
10234 ret
= get_errno(sched_getscheduler(arg1
));
10236 case TARGET_NR_sched_yield
:
10237 ret
= get_errno(sched_yield());
10239 case TARGET_NR_sched_get_priority_max
:
10240 ret
= get_errno(sched_get_priority_max(arg1
));
10242 case TARGET_NR_sched_get_priority_min
:
10243 ret
= get_errno(sched_get_priority_min(arg1
));
10245 case TARGET_NR_sched_rr_get_interval
:
10247 struct timespec ts
;
10248 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10249 if (!is_error(ret
)) {
10250 ret
= host_to_target_timespec(arg2
, &ts
);
10254 case TARGET_NR_nanosleep
:
10256 struct timespec req
, rem
;
10257 target_to_host_timespec(&req
, arg1
);
10258 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10259 if (is_error(ret
) && arg2
) {
10260 host_to_target_timespec(arg2
, &rem
);
10264 #ifdef TARGET_NR_query_module
10265 case TARGET_NR_query_module
:
10266 goto unimplemented
;
10268 #ifdef TARGET_NR_nfsservctl
10269 case TARGET_NR_nfsservctl
:
10270 goto unimplemented
;
10272 case TARGET_NR_prctl
:
10274 case PR_GET_PDEATHSIG
:
10277 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10278 if (!is_error(ret
) && arg2
10279 && put_user_ual(deathsig
, arg2
)) {
10287 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10291 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10292 arg3
, arg4
, arg5
));
10293 unlock_user(name
, arg2
, 16);
10298 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10302 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10303 arg3
, arg4
, arg5
));
10304 unlock_user(name
, arg2
, 0);
10309 /* Most prctl options have no pointer arguments */
10310 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10314 #ifdef TARGET_NR_arch_prctl
10315 case TARGET_NR_arch_prctl
:
10316 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10317 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10320 goto unimplemented
;
10323 #ifdef TARGET_NR_pread64
10324 case TARGET_NR_pread64
:
10325 if (regpairs_aligned(cpu_env
)) {
10329 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10331 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10332 unlock_user(p
, arg2
, ret
);
10334 case TARGET_NR_pwrite64
:
10335 if (regpairs_aligned(cpu_env
)) {
10339 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10341 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10342 unlock_user(p
, arg2
, 0);
10345 case TARGET_NR_getcwd
:
10346 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10348 ret
= get_errno(sys_getcwd1(p
, arg2
));
10349 unlock_user(p
, arg1
, ret
);
10351 case TARGET_NR_capget
:
10352 case TARGET_NR_capset
:
10354 struct target_user_cap_header
*target_header
;
10355 struct target_user_cap_data
*target_data
= NULL
;
10356 struct __user_cap_header_struct header
;
10357 struct __user_cap_data_struct data
[2];
10358 struct __user_cap_data_struct
*dataptr
= NULL
;
10359 int i
, target_datalen
;
10360 int data_items
= 1;
10362 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10365 header
.version
= tswap32(target_header
->version
);
10366 header
.pid
= tswap32(target_header
->pid
);
10368 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10369 /* Version 2 and up takes pointer to two user_data structs */
10373 target_datalen
= sizeof(*target_data
) * data_items
;
10376 if (num
== TARGET_NR_capget
) {
10377 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10379 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10381 if (!target_data
) {
10382 unlock_user_struct(target_header
, arg1
, 0);
10386 if (num
== TARGET_NR_capset
) {
10387 for (i
= 0; i
< data_items
; i
++) {
10388 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10389 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10390 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10397 if (num
== TARGET_NR_capget
) {
10398 ret
= get_errno(capget(&header
, dataptr
));
10400 ret
= get_errno(capset(&header
, dataptr
));
10403 /* The kernel always updates version for both capget and capset */
10404 target_header
->version
= tswap32(header
.version
);
10405 unlock_user_struct(target_header
, arg1
, 1);
10408 if (num
== TARGET_NR_capget
) {
10409 for (i
= 0; i
< data_items
; i
++) {
10410 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10411 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10412 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10414 unlock_user(target_data
, arg2
, target_datalen
);
10416 unlock_user(target_data
, arg2
, 0);
10421 case TARGET_NR_sigaltstack
:
10422 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10425 #ifdef CONFIG_SENDFILE
10426 case TARGET_NR_sendfile
:
10428 off_t
*offp
= NULL
;
10431 ret
= get_user_sal(off
, arg3
);
10432 if (is_error(ret
)) {
10437 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10438 if (!is_error(ret
) && arg3
) {
10439 abi_long ret2
= put_user_sal(off
, arg3
);
10440 if (is_error(ret2
)) {
10446 #ifdef TARGET_NR_sendfile64
10447 case TARGET_NR_sendfile64
:
10449 off_t
*offp
= NULL
;
10452 ret
= get_user_s64(off
, arg3
);
10453 if (is_error(ret
)) {
10458 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10459 if (!is_error(ret
) && arg3
) {
10460 abi_long ret2
= put_user_s64(off
, arg3
);
10461 if (is_error(ret2
)) {
10469 case TARGET_NR_sendfile
:
10470 #ifdef TARGET_NR_sendfile64
10471 case TARGET_NR_sendfile64
:
10473 goto unimplemented
;
10476 #ifdef TARGET_NR_getpmsg
10477 case TARGET_NR_getpmsg
:
10478 goto unimplemented
;
10480 #ifdef TARGET_NR_putpmsg
10481 case TARGET_NR_putpmsg
:
10482 goto unimplemented
;
10484 #ifdef TARGET_NR_vfork
10485 case TARGET_NR_vfork
:
10486 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10490 #ifdef TARGET_NR_ugetrlimit
10491 case TARGET_NR_ugetrlimit
:
10493 struct rlimit rlim
;
10494 int resource
= target_to_host_resource(arg1
);
10495 ret
= get_errno(getrlimit(resource
, &rlim
));
10496 if (!is_error(ret
)) {
10497 struct target_rlimit
*target_rlim
;
10498 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10500 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10501 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10502 unlock_user_struct(target_rlim
, arg2
, 1);
10507 #ifdef TARGET_NR_truncate64
10508 case TARGET_NR_truncate64
:
10509 if (!(p
= lock_user_string(arg1
)))
10511 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10512 unlock_user(p
, arg1
, 0);
10515 #ifdef TARGET_NR_ftruncate64
10516 case TARGET_NR_ftruncate64
:
10517 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10520 #ifdef TARGET_NR_stat64
10521 case TARGET_NR_stat64
:
10522 if (!(p
= lock_user_string(arg1
)))
10524 ret
= get_errno(stat(path(p
), &st
));
10525 unlock_user(p
, arg1
, 0);
10526 if (!is_error(ret
))
10527 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10530 #ifdef TARGET_NR_lstat64
10531 case TARGET_NR_lstat64
:
10532 if (!(p
= lock_user_string(arg1
)))
10534 ret
= get_errno(lstat(path(p
), &st
));
10535 unlock_user(p
, arg1
, 0);
10536 if (!is_error(ret
))
10537 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10540 #ifdef TARGET_NR_fstat64
10541 case TARGET_NR_fstat64
:
10542 ret
= get_errno(fstat(arg1
, &st
));
10543 if (!is_error(ret
))
10544 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10547 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10548 #ifdef TARGET_NR_fstatat64
10549 case TARGET_NR_fstatat64
:
10551 #ifdef TARGET_NR_newfstatat
10552 case TARGET_NR_newfstatat
:
10554 if (!(p
= lock_user_string(arg2
)))
10556 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10557 if (!is_error(ret
))
10558 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10561 #ifdef TARGET_NR_lchown
10562 case TARGET_NR_lchown
:
10563 if (!(p
= lock_user_string(arg1
)))
10565 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10566 unlock_user(p
, arg1
, 0);
10569 #ifdef TARGET_NR_getuid
10570 case TARGET_NR_getuid
:
10571 ret
= get_errno(high2lowuid(getuid()));
10574 #ifdef TARGET_NR_getgid
10575 case TARGET_NR_getgid
:
10576 ret
= get_errno(high2lowgid(getgid()));
10579 #ifdef TARGET_NR_geteuid
10580 case TARGET_NR_geteuid
:
10581 ret
= get_errno(high2lowuid(geteuid()));
10584 #ifdef TARGET_NR_getegid
10585 case TARGET_NR_getegid
:
10586 ret
= get_errno(high2lowgid(getegid()));
10589 case TARGET_NR_setreuid
:
10590 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10592 case TARGET_NR_setregid
:
10593 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10595 case TARGET_NR_getgroups
:
10597 int gidsetsize
= arg1
;
10598 target_id
*target_grouplist
;
10602 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10603 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10604 if (gidsetsize
== 0)
10606 if (!is_error(ret
)) {
10607 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10608 if (!target_grouplist
)
10610 for(i
= 0;i
< ret
; i
++)
10611 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10612 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10616 case TARGET_NR_setgroups
:
10618 int gidsetsize
= arg1
;
10619 target_id
*target_grouplist
;
10620 gid_t
*grouplist
= NULL
;
10623 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10624 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10625 if (!target_grouplist
) {
10626 ret
= -TARGET_EFAULT
;
10629 for (i
= 0; i
< gidsetsize
; i
++) {
10630 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10632 unlock_user(target_grouplist
, arg2
, 0);
10634 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10637 case TARGET_NR_fchown
:
10638 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10640 #if defined(TARGET_NR_fchownat)
10641 case TARGET_NR_fchownat
:
10642 if (!(p
= lock_user_string(arg2
)))
10644 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10645 low2highgid(arg4
), arg5
));
10646 unlock_user(p
, arg2
, 0);
10649 #ifdef TARGET_NR_setresuid
10650 case TARGET_NR_setresuid
:
10651 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10653 low2highuid(arg3
)));
10656 #ifdef TARGET_NR_getresuid
10657 case TARGET_NR_getresuid
:
10659 uid_t ruid
, euid
, suid
;
10660 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10661 if (!is_error(ret
)) {
10662 if (put_user_id(high2lowuid(ruid
), arg1
)
10663 || put_user_id(high2lowuid(euid
), arg2
)
10664 || put_user_id(high2lowuid(suid
), arg3
))
10670 #ifdef TARGET_NR_getresgid
10671 case TARGET_NR_setresgid
:
10672 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10674 low2highgid(arg3
)));
10677 #ifdef TARGET_NR_getresgid
10678 case TARGET_NR_getresgid
:
10680 gid_t rgid
, egid
, sgid
;
10681 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10682 if (!is_error(ret
)) {
10683 if (put_user_id(high2lowgid(rgid
), arg1
)
10684 || put_user_id(high2lowgid(egid
), arg2
)
10685 || put_user_id(high2lowgid(sgid
), arg3
))
10691 #ifdef TARGET_NR_chown
10692 case TARGET_NR_chown
:
10693 if (!(p
= lock_user_string(arg1
)))
10695 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10696 unlock_user(p
, arg1
, 0);
10699 case TARGET_NR_setuid
:
10700 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10702 case TARGET_NR_setgid
:
10703 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10705 case TARGET_NR_setfsuid
:
10706 ret
= get_errno(setfsuid(arg1
));
10708 case TARGET_NR_setfsgid
:
10709 ret
= get_errno(setfsgid(arg1
));
10712 #ifdef TARGET_NR_lchown32
10713 case TARGET_NR_lchown32
:
10714 if (!(p
= lock_user_string(arg1
)))
10716 ret
= get_errno(lchown(p
, arg2
, arg3
));
10717 unlock_user(p
, arg1
, 0);
10720 #ifdef TARGET_NR_getuid32
10721 case TARGET_NR_getuid32
:
10722 ret
= get_errno(getuid());
10726 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10727 /* Alpha specific */
10728 case TARGET_NR_getxuid
:
10732 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10734 ret
= get_errno(getuid());
10737 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10738 /* Alpha specific */
10739 case TARGET_NR_getxgid
:
10743 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10745 ret
= get_errno(getgid());
10748 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10749 /* Alpha specific */
10750 case TARGET_NR_osf_getsysinfo
:
10751 ret
= -TARGET_EOPNOTSUPP
;
10753 case TARGET_GSI_IEEE_FP_CONTROL
:
10755 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10757 /* Copied from linux ieee_fpcr_to_swcr. */
10758 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10759 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10760 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10761 | SWCR_TRAP_ENABLE_DZE
10762 | SWCR_TRAP_ENABLE_OVF
);
10763 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10764 | SWCR_TRAP_ENABLE_INE
);
10765 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10766 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10768 if (put_user_u64 (swcr
, arg2
))
10774 /* case GSI_IEEE_STATE_AT_SIGNAL:
10775 -- Not implemented in linux kernel.
10777 -- Retrieves current unaligned access state; not much used.
10778 case GSI_PROC_TYPE:
10779 -- Retrieves implver information; surely not used.
10780 case GSI_GET_HWRPB:
10781 -- Grabs a copy of the HWRPB; surely not used.
10786 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10787 /* Alpha specific */
10788 case TARGET_NR_osf_setsysinfo
:
10789 ret
= -TARGET_EOPNOTSUPP
;
10791 case TARGET_SSI_IEEE_FP_CONTROL
:
10793 uint64_t swcr
, fpcr
, orig_fpcr
;
10795 if (get_user_u64 (swcr
, arg2
)) {
10798 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10799 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10801 /* Copied from linux ieee_swcr_to_fpcr. */
10802 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10803 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10804 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10805 | SWCR_TRAP_ENABLE_DZE
10806 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10807 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10808 | SWCR_TRAP_ENABLE_INE
)) << 57;
10809 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10810 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10812 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10817 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10819 uint64_t exc
, fpcr
, orig_fpcr
;
10822 if (get_user_u64(exc
, arg2
)) {
10826 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10828 /* We only add to the exception status here. */
10829 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10831 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10834 /* Old exceptions are not signaled. */
10835 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10837 /* If any exceptions set by this call,
10838 and are unmasked, send a signal. */
10840 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10841 si_code
= TARGET_FPE_FLTRES
;
10843 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10844 si_code
= TARGET_FPE_FLTUND
;
10846 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10847 si_code
= TARGET_FPE_FLTOVF
;
10849 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10850 si_code
= TARGET_FPE_FLTDIV
;
10852 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10853 si_code
= TARGET_FPE_FLTINV
;
10855 if (si_code
!= 0) {
10856 target_siginfo_t info
;
10857 info
.si_signo
= SIGFPE
;
10859 info
.si_code
= si_code
;
10860 info
._sifields
._sigfault
._addr
10861 = ((CPUArchState
*)cpu_env
)->pc
;
10862 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10863 QEMU_SI_FAULT
, &info
);
10868 /* case SSI_NVPAIRS:
10869 -- Used with SSIN_UACPROC to enable unaligned accesses.
10870 case SSI_IEEE_STATE_AT_SIGNAL:
10871 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10872 -- Not implemented in linux kernel
10877 #ifdef TARGET_NR_osf_sigprocmask
10878 /* Alpha specific. */
10879 case TARGET_NR_osf_sigprocmask
:
10883 sigset_t set
, oldset
;
10886 case TARGET_SIG_BLOCK
:
10889 case TARGET_SIG_UNBLOCK
:
10892 case TARGET_SIG_SETMASK
:
10896 ret
= -TARGET_EINVAL
;
10900 target_to_host_old_sigset(&set
, &mask
);
10901 ret
= do_sigprocmask(how
, &set
, &oldset
);
10903 host_to_target_old_sigset(&mask
, &oldset
);
10910 #ifdef TARGET_NR_getgid32
10911 case TARGET_NR_getgid32
:
10912 ret
= get_errno(getgid());
10915 #ifdef TARGET_NR_geteuid32
10916 case TARGET_NR_geteuid32
:
10917 ret
= get_errno(geteuid());
10920 #ifdef TARGET_NR_getegid32
10921 case TARGET_NR_getegid32
:
10922 ret
= get_errno(getegid());
10925 #ifdef TARGET_NR_setreuid32
10926 case TARGET_NR_setreuid32
:
10927 ret
= get_errno(setreuid(arg1
, arg2
));
10930 #ifdef TARGET_NR_setregid32
10931 case TARGET_NR_setregid32
:
10932 ret
= get_errno(setregid(arg1
, arg2
));
10935 #ifdef TARGET_NR_getgroups32
10936 case TARGET_NR_getgroups32
:
10938 int gidsetsize
= arg1
;
10939 uint32_t *target_grouplist
;
10943 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10944 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10945 if (gidsetsize
== 0)
10947 if (!is_error(ret
)) {
10948 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10949 if (!target_grouplist
) {
10950 ret
= -TARGET_EFAULT
;
10953 for(i
= 0;i
< ret
; i
++)
10954 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10955 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10960 #ifdef TARGET_NR_setgroups32
10961 case TARGET_NR_setgroups32
:
10963 int gidsetsize
= arg1
;
10964 uint32_t *target_grouplist
;
10968 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10969 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10970 if (!target_grouplist
) {
10971 ret
= -TARGET_EFAULT
;
10974 for(i
= 0;i
< gidsetsize
; i
++)
10975 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10976 unlock_user(target_grouplist
, arg2
, 0);
10977 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10981 #ifdef TARGET_NR_fchown32
10982 case TARGET_NR_fchown32
:
10983 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10986 #ifdef TARGET_NR_setresuid32
10987 case TARGET_NR_setresuid32
:
10988 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10991 #ifdef TARGET_NR_getresuid32
10992 case TARGET_NR_getresuid32
:
10994 uid_t ruid
, euid
, suid
;
10995 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10996 if (!is_error(ret
)) {
10997 if (put_user_u32(ruid
, arg1
)
10998 || put_user_u32(euid
, arg2
)
10999 || put_user_u32(suid
, arg3
))
11005 #ifdef TARGET_NR_setresgid32
11006 case TARGET_NR_setresgid32
:
11007 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11010 #ifdef TARGET_NR_getresgid32
11011 case TARGET_NR_getresgid32
:
11013 gid_t rgid
, egid
, sgid
;
11014 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11015 if (!is_error(ret
)) {
11016 if (put_user_u32(rgid
, arg1
)
11017 || put_user_u32(egid
, arg2
)
11018 || put_user_u32(sgid
, arg3
))
11024 #ifdef TARGET_NR_chown32
11025 case TARGET_NR_chown32
:
11026 if (!(p
= lock_user_string(arg1
)))
11028 ret
= get_errno(chown(p
, arg2
, arg3
));
11029 unlock_user(p
, arg1
, 0);
11032 #ifdef TARGET_NR_setuid32
11033 case TARGET_NR_setuid32
:
11034 ret
= get_errno(sys_setuid(arg1
));
11037 #ifdef TARGET_NR_setgid32
11038 case TARGET_NR_setgid32
:
11039 ret
= get_errno(sys_setgid(arg1
));
11042 #ifdef TARGET_NR_setfsuid32
11043 case TARGET_NR_setfsuid32
:
11044 ret
= get_errno(setfsuid(arg1
));
11047 #ifdef TARGET_NR_setfsgid32
11048 case TARGET_NR_setfsgid32
:
11049 ret
= get_errno(setfsgid(arg1
));
11053 case TARGET_NR_pivot_root
:
11054 goto unimplemented
;
11055 #ifdef TARGET_NR_mincore
11056 case TARGET_NR_mincore
:
11059 ret
= -TARGET_EFAULT
;
11060 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
11062 if (!(p
= lock_user_string(arg3
)))
11064 ret
= get_errno(mincore(a
, arg2
, p
));
11065 unlock_user(p
, arg3
, ret
);
11067 unlock_user(a
, arg1
, 0);
11071 #ifdef TARGET_NR_arm_fadvise64_64
11072 case TARGET_NR_arm_fadvise64_64
:
11073 /* arm_fadvise64_64 looks like fadvise64_64 but
11074 * with different argument order: fd, advice, offset, len
11075 * rather than the usual fd, offset, len, advice.
11076 * Note that offset and len are both 64-bit so appear as
11077 * pairs of 32-bit registers.
11079 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11080 target_offset64(arg5
, arg6
), arg2
);
11081 ret
= -host_to_target_errno(ret
);
11085 #if TARGET_ABI_BITS == 32
11087 #ifdef TARGET_NR_fadvise64_64
11088 case TARGET_NR_fadvise64_64
:
11089 /* 6 args: fd, offset (high, low), len (high, low), advice */
11090 if (regpairs_aligned(cpu_env
)) {
11091 /* offset is in (3,4), len in (5,6) and advice in 7 */
11098 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11099 target_offset64(arg2
, arg3
),
11100 target_offset64(arg4
, arg5
),
11105 #ifdef TARGET_NR_fadvise64
11106 case TARGET_NR_fadvise64
:
11107 /* 5 args: fd, offset (high, low), len, advice */
11108 if (regpairs_aligned(cpu_env
)) {
11109 /* offset is in (3,4), len in 5 and advice in 6 */
11115 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11116 target_offset64(arg2
, arg3
),
11121 #else /* not a 32-bit ABI */
11122 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11123 #ifdef TARGET_NR_fadvise64_64
11124 case TARGET_NR_fadvise64_64
:
11126 #ifdef TARGET_NR_fadvise64
11127 case TARGET_NR_fadvise64
:
11129 #ifdef TARGET_S390X
11131 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11132 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11133 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11134 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11138 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11141 #endif /* end of 64-bit ABI fadvise handling */
11143 #ifdef TARGET_NR_madvise
11144 case TARGET_NR_madvise
:
11145 /* A straight passthrough may not be safe because qemu sometimes
11146 turns private file-backed mappings into anonymous mappings.
11147 This will break MADV_DONTNEED.
11148 This is a hint, so ignoring and returning success is ok. */
11149 ret
= get_errno(0);
11152 #if TARGET_ABI_BITS == 32
11153 case TARGET_NR_fcntl64
:
11157 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11158 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11161 if (((CPUARMState
*)cpu_env
)->eabi
) {
11162 copyfrom
= copy_from_user_eabi_flock64
;
11163 copyto
= copy_to_user_eabi_flock64
;
11167 cmd
= target_to_host_fcntl_cmd(arg2
);
11168 if (cmd
== -TARGET_EINVAL
) {
11174 case TARGET_F_GETLK64
:
11175 ret
= copyfrom(&fl
, arg3
);
11179 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11181 ret
= copyto(arg3
, &fl
);
11185 case TARGET_F_SETLK64
:
11186 case TARGET_F_SETLKW64
:
11187 ret
= copyfrom(&fl
, arg3
);
11191 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11194 ret
= do_fcntl(arg1
, arg2
, arg3
);
11200 #ifdef TARGET_NR_cacheflush
11201 case TARGET_NR_cacheflush
:
11202 /* self-modifying code is handled automatically, so nothing needed */
11206 #ifdef TARGET_NR_security
11207 case TARGET_NR_security
:
11208 goto unimplemented
;
11210 #ifdef TARGET_NR_getpagesize
11211 case TARGET_NR_getpagesize
:
11212 ret
= TARGET_PAGE_SIZE
;
11215 case TARGET_NR_gettid
:
11216 ret
= get_errno(gettid());
11218 #ifdef TARGET_NR_readahead
11219 case TARGET_NR_readahead
:
11220 #if TARGET_ABI_BITS == 32
11221 if (regpairs_aligned(cpu_env
)) {
11226 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11228 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11233 #ifdef TARGET_NR_setxattr
11234 case TARGET_NR_listxattr
:
11235 case TARGET_NR_llistxattr
:
11239 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11241 ret
= -TARGET_EFAULT
;
11245 p
= lock_user_string(arg1
);
11247 if (num
== TARGET_NR_listxattr
) {
11248 ret
= get_errno(listxattr(p
, b
, arg3
));
11250 ret
= get_errno(llistxattr(p
, b
, arg3
));
11253 ret
= -TARGET_EFAULT
;
11255 unlock_user(p
, arg1
, 0);
11256 unlock_user(b
, arg2
, arg3
);
11259 case TARGET_NR_flistxattr
:
11263 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11265 ret
= -TARGET_EFAULT
;
11269 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11270 unlock_user(b
, arg2
, arg3
);
11273 case TARGET_NR_setxattr
:
11274 case TARGET_NR_lsetxattr
:
11276 void *p
, *n
, *v
= 0;
11278 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11280 ret
= -TARGET_EFAULT
;
11284 p
= lock_user_string(arg1
);
11285 n
= lock_user_string(arg2
);
11287 if (num
== TARGET_NR_setxattr
) {
11288 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11290 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11293 ret
= -TARGET_EFAULT
;
11295 unlock_user(p
, arg1
, 0);
11296 unlock_user(n
, arg2
, 0);
11297 unlock_user(v
, arg3
, 0);
11300 case TARGET_NR_fsetxattr
:
11304 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11306 ret
= -TARGET_EFAULT
;
11310 n
= lock_user_string(arg2
);
11312 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11314 ret
= -TARGET_EFAULT
;
11316 unlock_user(n
, arg2
, 0);
11317 unlock_user(v
, arg3
, 0);
11320 case TARGET_NR_getxattr
:
11321 case TARGET_NR_lgetxattr
:
11323 void *p
, *n
, *v
= 0;
11325 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11327 ret
= -TARGET_EFAULT
;
11331 p
= lock_user_string(arg1
);
11332 n
= lock_user_string(arg2
);
11334 if (num
== TARGET_NR_getxattr
) {
11335 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11337 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11340 ret
= -TARGET_EFAULT
;
11342 unlock_user(p
, arg1
, 0);
11343 unlock_user(n
, arg2
, 0);
11344 unlock_user(v
, arg3
, arg4
);
11347 case TARGET_NR_fgetxattr
:
11351 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11353 ret
= -TARGET_EFAULT
;
11357 n
= lock_user_string(arg2
);
11359 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11361 ret
= -TARGET_EFAULT
;
11363 unlock_user(n
, arg2
, 0);
11364 unlock_user(v
, arg3
, arg4
);
11367 case TARGET_NR_removexattr
:
11368 case TARGET_NR_lremovexattr
:
11371 p
= lock_user_string(arg1
);
11372 n
= lock_user_string(arg2
);
11374 if (num
== TARGET_NR_removexattr
) {
11375 ret
= get_errno(removexattr(p
, n
));
11377 ret
= get_errno(lremovexattr(p
, n
));
11380 ret
= -TARGET_EFAULT
;
11382 unlock_user(p
, arg1
, 0);
11383 unlock_user(n
, arg2
, 0);
11386 case TARGET_NR_fremovexattr
:
11389 n
= lock_user_string(arg2
);
11391 ret
= get_errno(fremovexattr(arg1
, n
));
11393 ret
= -TARGET_EFAULT
;
11395 unlock_user(n
, arg2
, 0);
11399 #endif /* CONFIG_ATTR */
11400 #ifdef TARGET_NR_set_thread_area
11401 case TARGET_NR_set_thread_area
:
11402 #if defined(TARGET_MIPS)
11403 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11406 #elif defined(TARGET_CRIS)
11408 ret
= -TARGET_EINVAL
;
11410 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11414 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11415 ret
= do_set_thread_area(cpu_env
, arg1
);
11417 #elif defined(TARGET_M68K)
11419 TaskState
*ts
= cpu
->opaque
;
11420 ts
->tp_value
= arg1
;
11425 goto unimplemented_nowarn
;
11428 #ifdef TARGET_NR_get_thread_area
11429 case TARGET_NR_get_thread_area
:
11430 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11431 ret
= do_get_thread_area(cpu_env
, arg1
);
11433 #elif defined(TARGET_M68K)
11435 TaskState
*ts
= cpu
->opaque
;
11436 ret
= ts
->tp_value
;
11440 goto unimplemented_nowarn
;
11443 #ifdef TARGET_NR_getdomainname
11444 case TARGET_NR_getdomainname
:
11445 goto unimplemented_nowarn
;
11448 #ifdef TARGET_NR_clock_gettime
11449 case TARGET_NR_clock_gettime
:
11451 struct timespec ts
;
11452 ret
= get_errno(clock_gettime(arg1
, &ts
));
11453 if (!is_error(ret
)) {
11454 host_to_target_timespec(arg2
, &ts
);
11459 #ifdef TARGET_NR_clock_getres
11460 case TARGET_NR_clock_getres
:
11462 struct timespec ts
;
11463 ret
= get_errno(clock_getres(arg1
, &ts
));
11464 if (!is_error(ret
)) {
11465 host_to_target_timespec(arg2
, &ts
);
11470 #ifdef TARGET_NR_clock_nanosleep
11471 case TARGET_NR_clock_nanosleep
:
11473 struct timespec ts
;
11474 target_to_host_timespec(&ts
, arg3
);
11475 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11476 &ts
, arg4
? &ts
: NULL
));
11478 host_to_target_timespec(arg4
, &ts
);
11480 #if defined(TARGET_PPC)
11481 /* clock_nanosleep is odd in that it returns positive errno values.
11482 * On PPC, CR0 bit 3 should be set in such a situation. */
11483 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11484 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11491 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11492 case TARGET_NR_set_tid_address
:
11493 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11497 case TARGET_NR_tkill
:
11498 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11501 case TARGET_NR_tgkill
:
11502 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11503 target_to_host_signal(arg3
)));
11506 #ifdef TARGET_NR_set_robust_list
11507 case TARGET_NR_set_robust_list
:
11508 case TARGET_NR_get_robust_list
:
11509 /* The ABI for supporting robust futexes has userspace pass
11510 * the kernel a pointer to a linked list which is updated by
11511 * userspace after the syscall; the list is walked by the kernel
11512 * when the thread exits. Since the linked list in QEMU guest
11513 * memory isn't a valid linked list for the host and we have
11514 * no way to reliably intercept the thread-death event, we can't
11515 * support these. Silently return ENOSYS so that guest userspace
11516 * falls back to a non-robust futex implementation (which should
11517 * be OK except in the corner case of the guest crashing while
11518 * holding a mutex that is shared with another process via
11521 goto unimplemented_nowarn
;
11524 #if defined(TARGET_NR_utimensat)
11525 case TARGET_NR_utimensat
:
11527 struct timespec
*tsp
, ts
[2];
11531 target_to_host_timespec(ts
, arg3
);
11532 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11536 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11538 if (!(p
= lock_user_string(arg2
))) {
11539 ret
= -TARGET_EFAULT
;
11542 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11543 unlock_user(p
, arg2
, 0);
11548 case TARGET_NR_futex
:
11549 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11551 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11552 case TARGET_NR_inotify_init
:
11553 ret
= get_errno(sys_inotify_init());
11556 #ifdef CONFIG_INOTIFY1
11557 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11558 case TARGET_NR_inotify_init1
:
11559 ret
= get_errno(sys_inotify_init1(arg1
));
11563 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11564 case TARGET_NR_inotify_add_watch
:
11565 p
= lock_user_string(arg2
);
11566 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11567 unlock_user(p
, arg2
, 0);
11570 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11571 case TARGET_NR_inotify_rm_watch
:
11572 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11576 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11577 case TARGET_NR_mq_open
:
11579 struct mq_attr posix_mq_attr
;
11582 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11583 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11586 p
= lock_user_string(arg1
- 1);
11590 ret
= get_errno(mq_open(p
, host_flags
, arg3
, &posix_mq_attr
));
11591 unlock_user (p
, arg1
, 0);
11595 case TARGET_NR_mq_unlink
:
11596 p
= lock_user_string(arg1
- 1);
11598 ret
= -TARGET_EFAULT
;
11601 ret
= get_errno(mq_unlink(p
));
11602 unlock_user (p
, arg1
, 0);
11605 case TARGET_NR_mq_timedsend
:
11607 struct timespec ts
;
11609 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11611 target_to_host_timespec(&ts
, arg5
);
11612 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11613 host_to_target_timespec(arg5
, &ts
);
11615 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11617 unlock_user (p
, arg2
, arg3
);
11621 case TARGET_NR_mq_timedreceive
:
11623 struct timespec ts
;
11626 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11628 target_to_host_timespec(&ts
, arg5
);
11629 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11631 host_to_target_timespec(arg5
, &ts
);
11633 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11636 unlock_user (p
, arg2
, arg3
);
11638 put_user_u32(prio
, arg4
);
11642 /* Not implemented for now... */
11643 /* case TARGET_NR_mq_notify: */
11646 case TARGET_NR_mq_getsetattr
:
11648 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11651 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11652 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11655 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11656 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11663 #ifdef CONFIG_SPLICE
11664 #ifdef TARGET_NR_tee
11665 case TARGET_NR_tee
:
11667 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11671 #ifdef TARGET_NR_splice
11672 case TARGET_NR_splice
:
11674 loff_t loff_in
, loff_out
;
11675 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11677 if (get_user_u64(loff_in
, arg2
)) {
11680 ploff_in
= &loff_in
;
11683 if (get_user_u64(loff_out
, arg4
)) {
11686 ploff_out
= &loff_out
;
11688 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11690 if (put_user_u64(loff_in
, arg2
)) {
11695 if (put_user_u64(loff_out
, arg4
)) {
11702 #ifdef TARGET_NR_vmsplice
11703 case TARGET_NR_vmsplice
:
11705 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11707 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11708 unlock_iovec(vec
, arg2
, arg3
, 0);
11710 ret
= -host_to_target_errno(errno
);
11715 #endif /* CONFIG_SPLICE */
11716 #ifdef CONFIG_EVENTFD
11717 #if defined(TARGET_NR_eventfd)
11718 case TARGET_NR_eventfd
:
11719 ret
= get_errno(eventfd(arg1
, 0));
11720 fd_trans_unregister(ret
);
11723 #if defined(TARGET_NR_eventfd2)
11724 case TARGET_NR_eventfd2
:
11726 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11727 if (arg2
& TARGET_O_NONBLOCK
) {
11728 host_flags
|= O_NONBLOCK
;
11730 if (arg2
& TARGET_O_CLOEXEC
) {
11731 host_flags
|= O_CLOEXEC
;
11733 ret
= get_errno(eventfd(arg1
, host_flags
));
11734 fd_trans_unregister(ret
);
11738 #endif /* CONFIG_EVENTFD */
11739 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11740 case TARGET_NR_fallocate
:
11741 #if TARGET_ABI_BITS == 32
11742 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11743 target_offset64(arg5
, arg6
)));
11745 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11749 #if defined(CONFIG_SYNC_FILE_RANGE)
11750 #if defined(TARGET_NR_sync_file_range)
11751 case TARGET_NR_sync_file_range
:
11752 #if TARGET_ABI_BITS == 32
11753 #if defined(TARGET_MIPS)
11754 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11755 target_offset64(arg5
, arg6
), arg7
));
11757 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11758 target_offset64(arg4
, arg5
), arg6
));
11759 #endif /* !TARGET_MIPS */
11761 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11765 #if defined(TARGET_NR_sync_file_range2)
11766 case TARGET_NR_sync_file_range2
:
11767 /* This is like sync_file_range but the arguments are reordered */
11768 #if TARGET_ABI_BITS == 32
11769 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11770 target_offset64(arg5
, arg6
), arg2
));
11772 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11777 #if defined(TARGET_NR_signalfd4)
11778 case TARGET_NR_signalfd4
:
11779 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11782 #if defined(TARGET_NR_signalfd)
11783 case TARGET_NR_signalfd
:
11784 ret
= do_signalfd4(arg1
, arg2
, 0);
11787 #if defined(CONFIG_EPOLL)
11788 #if defined(TARGET_NR_epoll_create)
11789 case TARGET_NR_epoll_create
:
11790 ret
= get_errno(epoll_create(arg1
));
11793 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11794 case TARGET_NR_epoll_create1
:
11795 ret
= get_errno(epoll_create1(arg1
));
11798 #if defined(TARGET_NR_epoll_ctl)
11799 case TARGET_NR_epoll_ctl
:
11801 struct epoll_event ep
;
11802 struct epoll_event
*epp
= 0;
11804 struct target_epoll_event
*target_ep
;
11805 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11808 ep
.events
= tswap32(target_ep
->events
);
11809 /* The epoll_data_t union is just opaque data to the kernel,
11810 * so we transfer all 64 bits across and need not worry what
11811 * actual data type it is.
11813 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11814 unlock_user_struct(target_ep
, arg4
, 0);
11817 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11822 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11823 #if defined(TARGET_NR_epoll_wait)
11824 case TARGET_NR_epoll_wait
:
11826 #if defined(TARGET_NR_epoll_pwait)
11827 case TARGET_NR_epoll_pwait
:
11830 struct target_epoll_event
*target_ep
;
11831 struct epoll_event
*ep
;
11833 int maxevents
= arg3
;
11834 int timeout
= arg4
;
11836 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11837 ret
= -TARGET_EINVAL
;
11841 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11842 maxevents
* sizeof(struct target_epoll_event
), 1);
11847 ep
= g_try_new(struct epoll_event
, maxevents
);
11849 unlock_user(target_ep
, arg2
, 0);
11850 ret
= -TARGET_ENOMEM
;
11855 #if defined(TARGET_NR_epoll_pwait)
11856 case TARGET_NR_epoll_pwait
:
11858 target_sigset_t
*target_set
;
11859 sigset_t _set
, *set
= &_set
;
11862 if (arg6
!= sizeof(target_sigset_t
)) {
11863 ret
= -TARGET_EINVAL
;
11867 target_set
= lock_user(VERIFY_READ
, arg5
,
11868 sizeof(target_sigset_t
), 1);
11870 ret
= -TARGET_EFAULT
;
11873 target_to_host_sigset(set
, target_set
);
11874 unlock_user(target_set
, arg5
, 0);
11879 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11880 set
, SIGSET_T_SIZE
));
11884 #if defined(TARGET_NR_epoll_wait)
11885 case TARGET_NR_epoll_wait
:
11886 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11891 ret
= -TARGET_ENOSYS
;
11893 if (!is_error(ret
)) {
11895 for (i
= 0; i
< ret
; i
++) {
11896 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11897 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11899 unlock_user(target_ep
, arg2
,
11900 ret
* sizeof(struct target_epoll_event
));
11902 unlock_user(target_ep
, arg2
, 0);
11909 #ifdef TARGET_NR_prlimit64
11910 case TARGET_NR_prlimit64
:
11912 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11913 struct target_rlimit64
*target_rnew
, *target_rold
;
11914 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11915 int resource
= target_to_host_resource(arg2
);
11917 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11920 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11921 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11922 unlock_user_struct(target_rnew
, arg3
, 0);
11926 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11927 if (!is_error(ret
) && arg4
) {
11928 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11931 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11932 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11933 unlock_user_struct(target_rold
, arg4
, 1);
11938 #ifdef TARGET_NR_gethostname
11939 case TARGET_NR_gethostname
:
11941 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11943 ret
= get_errno(gethostname(name
, arg2
));
11944 unlock_user(name
, arg1
, arg2
);
11946 ret
= -TARGET_EFAULT
;
11951 #ifdef TARGET_NR_atomic_cmpxchg_32
11952 case TARGET_NR_atomic_cmpxchg_32
:
11954 /* should use start_exclusive from main.c */
11955 abi_ulong mem_value
;
11956 if (get_user_u32(mem_value
, arg6
)) {
11957 target_siginfo_t info
;
11958 info
.si_signo
= SIGSEGV
;
11960 info
.si_code
= TARGET_SEGV_MAPERR
;
11961 info
._sifields
._sigfault
._addr
= arg6
;
11962 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11963 QEMU_SI_FAULT
, &info
);
11967 if (mem_value
== arg2
)
11968 put_user_u32(arg1
, arg6
);
11973 #ifdef TARGET_NR_atomic_barrier
11974 case TARGET_NR_atomic_barrier
:
11976 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11982 #ifdef TARGET_NR_timer_create
11983 case TARGET_NR_timer_create
:
11985 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11987 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11990 int timer_index
= next_free_host_timer();
11992 if (timer_index
< 0) {
11993 ret
= -TARGET_EAGAIN
;
11995 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11998 phost_sevp
= &host_sevp
;
11999 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12005 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12009 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12018 #ifdef TARGET_NR_timer_settime
12019 case TARGET_NR_timer_settime
:
12021 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12022 * struct itimerspec * old_value */
12023 target_timer_t timerid
= get_timer_id(arg1
);
12027 } else if (arg3
== 0) {
12028 ret
= -TARGET_EINVAL
;
12030 timer_t htimer
= g_posix_timers
[timerid
];
12031 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12033 target_to_host_itimerspec(&hspec_new
, arg3
);
12035 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12036 host_to_target_itimerspec(arg2
, &hspec_old
);
12042 #ifdef TARGET_NR_timer_gettime
12043 case TARGET_NR_timer_gettime
:
12045 /* args: timer_t timerid, struct itimerspec *curr_value */
12046 target_timer_t timerid
= get_timer_id(arg1
);
12050 } else if (!arg2
) {
12051 ret
= -TARGET_EFAULT
;
12053 timer_t htimer
= g_posix_timers
[timerid
];
12054 struct itimerspec hspec
;
12055 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12057 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12058 ret
= -TARGET_EFAULT
;
12065 #ifdef TARGET_NR_timer_getoverrun
12066 case TARGET_NR_timer_getoverrun
:
12068 /* args: timer_t timerid */
12069 target_timer_t timerid
= get_timer_id(arg1
);
12074 timer_t htimer
= g_posix_timers
[timerid
];
12075 ret
= get_errno(timer_getoverrun(htimer
));
12077 fd_trans_unregister(ret
);
12082 #ifdef TARGET_NR_timer_delete
12083 case TARGET_NR_timer_delete
:
12085 /* args: timer_t timerid */
12086 target_timer_t timerid
= get_timer_id(arg1
);
12091 timer_t htimer
= g_posix_timers
[timerid
];
12092 ret
= get_errno(timer_delete(htimer
));
12093 g_posix_timers
[timerid
] = 0;
12099 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12100 case TARGET_NR_timerfd_create
:
12101 ret
= get_errno(timerfd_create(arg1
,
12102 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12106 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12107 case TARGET_NR_timerfd_gettime
:
12109 struct itimerspec its_curr
;
12111 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12113 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12120 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12121 case TARGET_NR_timerfd_settime
:
12123 struct itimerspec its_new
, its_old
, *p_new
;
12126 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12134 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12136 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12143 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12144 case TARGET_NR_ioprio_get
:
12145 ret
= get_errno(ioprio_get(arg1
, arg2
));
12149 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12150 case TARGET_NR_ioprio_set
:
12151 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12155 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12156 case TARGET_NR_setns
:
12157 ret
= get_errno(setns(arg1
, arg2
));
12160 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12161 case TARGET_NR_unshare
:
12162 ret
= get_errno(unshare(arg1
));
12165 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12166 case TARGET_NR_kcmp
:
12167 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12173 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12174 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12175 unimplemented_nowarn
:
12177 ret
= -TARGET_ENOSYS
;
12182 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12185 print_syscall_ret(num
, ret
);
12186 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12189 ret
= -TARGET_EFAULT
;