4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <linux/netlink.h>
104 #ifdef CONFIG_RTNETLINK
105 #include <linux/rtnetlink.h>
107 #include <linux/audit.h>
108 #include "linux_loop.h"
113 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
114 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
118 * once. This exercises the codepaths for restart.
120 //#define DEBUG_ERESTARTSYS
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
134 #define _syscall0(type,name) \
135 static type name (void) \
137 return syscall(__NR_##name); \
140 #define _syscall1(type,name,type1,arg1) \
141 static type name (type1 arg1) \
143 return syscall(__NR_##name, arg1); \
146 #define _syscall2(type,name,type1,arg1,type2,arg2) \
147 static type name (type1 arg1,type2 arg2) \
149 return syscall(__NR_##name, arg1, arg2); \
152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
153 static type name (type1 arg1,type2 arg2,type3 arg3) \
155 return syscall(__NR_##name, arg1, arg2, arg3); \
158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
173 type5,arg5,type6,arg6) \
174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
195 #define __NR__llseek __NR_lseek
198 /* Newer kernel ports have llseek() instead of _llseek() */
199 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
200 #define TARGET_NR__llseek TARGET_NR_llseek
204 _syscall0(int, gettid
)
206 /* This is a replacement for the host gettid() and must return a host
208 static int gettid(void) {
212 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
213 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
221 loff_t
*, res
, uint
, wh
);
223 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
224 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
225 #ifdef __NR_exit_group
226 _syscall1(int,exit_group
,int,error_code
)
228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
229 _syscall1(int,set_tid_address
,int *,tidptr
)
231 #if defined(TARGET_NR_futex) && defined(__NR_futex)
232 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
233 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
235 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
236 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
237 unsigned long *, user_mask_ptr
);
238 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
239 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
240 unsigned long *, user_mask_ptr
);
241 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
243 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
244 struct __user_cap_data_struct
*, data
);
245 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
246 struct __user_cap_data_struct
*, data
);
247 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
248 _syscall2(int, ioprio_get
, int, which
, int, who
)
250 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
251 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
253 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
254 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
257 static bitmask_transtbl fcntl_flags_tbl
[] = {
258 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
259 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
260 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
261 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
262 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
263 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
264 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
265 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
266 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
267 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
268 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
269 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
270 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
271 #if defined(O_DIRECT)
272 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
274 #if defined(O_NOATIME)
275 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
277 #if defined(O_CLOEXEC)
278 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
281 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
283 /* Don't terminate the list prematurely on 64-bit host+guest. */
284 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
285 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
290 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
291 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
292 typedef struct TargetFdTrans
{
293 TargetFdDataFunc host_to_target_data
;
294 TargetFdDataFunc target_to_host_data
;
295 TargetFdAddrFunc target_to_host_addr
;
298 static TargetFdTrans
**target_fd_trans
;
300 static unsigned int target_fd_max
;
302 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
304 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
305 return target_fd_trans
[fd
]->target_to_host_data
;
310 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
312 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
313 return target_fd_trans
[fd
]->host_to_target_data
;
318 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
320 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
321 return target_fd_trans
[fd
]->target_to_host_addr
;
326 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
330 if (fd
>= target_fd_max
) {
331 oldmax
= target_fd_max
;
332 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
333 target_fd_trans
= g_renew(TargetFdTrans
*,
334 target_fd_trans
, target_fd_max
);
335 memset((void *)(target_fd_trans
+ oldmax
), 0,
336 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
338 target_fd_trans
[fd
] = trans
;
341 static void fd_trans_unregister(int fd
)
343 if (fd
>= 0 && fd
< target_fd_max
) {
344 target_fd_trans
[fd
] = NULL
;
348 static void fd_trans_dup(int oldfd
, int newfd
)
350 fd_trans_unregister(newfd
);
351 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
352 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd(2)-like wrapper: fills buf and returns the string length
 * including the NUL terminator, or -1 on error (errno set by getcwd).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
365 #ifdef TARGET_NR_utimensat
366 #ifdef CONFIG_UTIMENSAT
367 static int sys_utimensat(int dirfd
, const char *pathname
,
368 const struct timespec times
[2], int flags
)
370 if (pathname
== NULL
)
371 return futimens(dirfd
, times
);
373 return utimensat(dirfd
, pathname
, times
, flags
);
375 #elif defined(__NR_utimensat)
376 #define __NR_sys_utimensat __NR_utimensat
377 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
378 const struct timespec
*,tsp
,int,flags
)
380 static int sys_utimensat(int dirfd
, const char *pathname
,
381 const struct timespec times
[2], int flags
)
387 #endif /* TARGET_NR_utimensat */
389 #ifdef CONFIG_INOTIFY
390 #include <sys/inotify.h>
392 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
393 static int sys_inotify_init(void)
395 return (inotify_init());
398 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
399 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
401 return (inotify_add_watch(fd
, pathname
, mask
));
404 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
405 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
407 return (inotify_rm_watch(fd
, wd
));
410 #ifdef CONFIG_INOTIFY1
411 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
412 static int sys_inotify_init1(int flags
)
414 return (inotify_init1(flags
));
419 /* Userspace can usually survive runtime without inotify */
420 #undef TARGET_NR_inotify_init
421 #undef TARGET_NR_inotify_init1
422 #undef TARGET_NR_inotify_add_watch
423 #undef TARGET_NR_inotify_rm_watch
424 #endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
442 #if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
444 static timer_t g_posix_timers
[32] = { 0, } ;
446 static inline int next_free_host_timer(void)
449 /* FIXME: Does finding the next free slot require a lock? */
450 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
451 if (g_posix_timers
[k
] == 0) {
452 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#if defined(TARGET_ARM)
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
476 #define ERRNO_TABLE_SIZE 1200
478 /* target_to_host_errno_table[] is initialized from
479 * host_to_target_errno_table[] in syscall_init(). */
480 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
484 * This list is the union of errno values overridden in asm-<arch>/errno.h
485 * minus the errnos that are not actually generic to all archs.
487 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
488 [EAGAIN
] = TARGET_EAGAIN
,
489 [EIDRM
] = TARGET_EIDRM
,
490 [ECHRNG
] = TARGET_ECHRNG
,
491 [EL2NSYNC
] = TARGET_EL2NSYNC
,
492 [EL3HLT
] = TARGET_EL3HLT
,
493 [EL3RST
] = TARGET_EL3RST
,
494 [ELNRNG
] = TARGET_ELNRNG
,
495 [EUNATCH
] = TARGET_EUNATCH
,
496 [ENOCSI
] = TARGET_ENOCSI
,
497 [EL2HLT
] = TARGET_EL2HLT
,
498 [EDEADLK
] = TARGET_EDEADLK
,
499 [ENOLCK
] = TARGET_ENOLCK
,
500 [EBADE
] = TARGET_EBADE
,
501 [EBADR
] = TARGET_EBADR
,
502 [EXFULL
] = TARGET_EXFULL
,
503 [ENOANO
] = TARGET_ENOANO
,
504 [EBADRQC
] = TARGET_EBADRQC
,
505 [EBADSLT
] = TARGET_EBADSLT
,
506 [EBFONT
] = TARGET_EBFONT
,
507 [ENOSTR
] = TARGET_ENOSTR
,
508 [ENODATA
] = TARGET_ENODATA
,
509 [ETIME
] = TARGET_ETIME
,
510 [ENOSR
] = TARGET_ENOSR
,
511 [ENONET
] = TARGET_ENONET
,
512 [ENOPKG
] = TARGET_ENOPKG
,
513 [EREMOTE
] = TARGET_EREMOTE
,
514 [ENOLINK
] = TARGET_ENOLINK
,
515 [EADV
] = TARGET_EADV
,
516 [ESRMNT
] = TARGET_ESRMNT
,
517 [ECOMM
] = TARGET_ECOMM
,
518 [EPROTO
] = TARGET_EPROTO
,
519 [EDOTDOT
] = TARGET_EDOTDOT
,
520 [EMULTIHOP
] = TARGET_EMULTIHOP
,
521 [EBADMSG
] = TARGET_EBADMSG
,
522 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
523 [EOVERFLOW
] = TARGET_EOVERFLOW
,
524 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
525 [EBADFD
] = TARGET_EBADFD
,
526 [EREMCHG
] = TARGET_EREMCHG
,
527 [ELIBACC
] = TARGET_ELIBACC
,
528 [ELIBBAD
] = TARGET_ELIBBAD
,
529 [ELIBSCN
] = TARGET_ELIBSCN
,
530 [ELIBMAX
] = TARGET_ELIBMAX
,
531 [ELIBEXEC
] = TARGET_ELIBEXEC
,
532 [EILSEQ
] = TARGET_EILSEQ
,
533 [ENOSYS
] = TARGET_ENOSYS
,
534 [ELOOP
] = TARGET_ELOOP
,
535 [ERESTART
] = TARGET_ERESTART
,
536 [ESTRPIPE
] = TARGET_ESTRPIPE
,
537 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
538 [EUSERS
] = TARGET_EUSERS
,
539 [ENOTSOCK
] = TARGET_ENOTSOCK
,
540 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
541 [EMSGSIZE
] = TARGET_EMSGSIZE
,
542 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
543 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
544 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
545 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
546 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
547 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
548 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
549 [EADDRINUSE
] = TARGET_EADDRINUSE
,
550 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
551 [ENETDOWN
] = TARGET_ENETDOWN
,
552 [ENETUNREACH
] = TARGET_ENETUNREACH
,
553 [ENETRESET
] = TARGET_ENETRESET
,
554 [ECONNABORTED
] = TARGET_ECONNABORTED
,
555 [ECONNRESET
] = TARGET_ECONNRESET
,
556 [ENOBUFS
] = TARGET_ENOBUFS
,
557 [EISCONN
] = TARGET_EISCONN
,
558 [ENOTCONN
] = TARGET_ENOTCONN
,
559 [EUCLEAN
] = TARGET_EUCLEAN
,
560 [ENOTNAM
] = TARGET_ENOTNAM
,
561 [ENAVAIL
] = TARGET_ENAVAIL
,
562 [EISNAM
] = TARGET_EISNAM
,
563 [EREMOTEIO
] = TARGET_EREMOTEIO
,
564 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
565 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
566 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
567 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
568 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
569 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
570 [EALREADY
] = TARGET_EALREADY
,
571 [EINPROGRESS
] = TARGET_EINPROGRESS
,
572 [ESTALE
] = TARGET_ESTALE
,
573 [ECANCELED
] = TARGET_ECANCELED
,
574 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
575 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
577 [ENOKEY
] = TARGET_ENOKEY
,
580 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
583 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
586 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
589 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
591 #ifdef ENOTRECOVERABLE
592 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
596 static inline int host_to_target_errno(int err
)
598 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
599 host_to_target_errno_table
[err
]) {
600 return host_to_target_errno_table
[err
];
605 static inline int target_to_host_errno(int err
)
607 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
608 target_to_host_errno_table
[err
]) {
609 return target_to_host_errno_table
[err
];
614 static inline abi_long
get_errno(abi_long ret
)
617 return -host_to_target_errno(errno
);
622 static inline int is_error(abi_long ret
)
624 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
627 const char *target_strerror(int err
)
629 if (err
== TARGET_ERESTARTSYS
) {
630 return "To be restarted";
632 if (err
== TARGET_QEMU_ESIGRETURN
) {
633 return "Successful exit from sigreturn";
636 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
639 return strerror(target_to_host_errno(err
));
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
645 return safe_syscall(__NR_##name); \
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
651 return safe_syscall(__NR_##name, arg1); \
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
657 return safe_syscall(__NR_##name, arg1, arg2); \
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
689 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
690 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
691 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
692 int, flags
, mode_t
, mode
)
693 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
694 struct rusage
*, rusage
)
695 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
696 int, options
, struct rusage
*, rusage
)
697 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
698 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
699 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
700 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
701 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
703 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
704 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
706 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
707 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
708 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
709 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
710 safe_syscall2(int, tkill
, int, tid
, int, sig
)
711 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
712 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
713 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
714 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
716 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
717 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
718 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
719 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
720 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
721 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
722 safe_syscall2(int, flock
, int, fd
, int, operation
)
723 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
724 const struct timespec
*, uts
, size_t, sigsetsize
)
725 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
727 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
728 struct timespec
*, rem
)
729 #ifdef TARGET_NR_clock_nanosleep
730 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
731 const struct timespec
*, req
, struct timespec
*, rem
)
734 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
736 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
737 long, msgtype
, int, flags
)
738 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
739 unsigned, nsops
, const struct timespec
*, timeout
)
741 /* This host kernel architecture uses a single ipc syscall; fake up
742 * wrappers for the sub-operations to hide this implementation detail.
743 * Annoyingly we can't include linux/ipc.h to get the constant definitions
744 * for the call parameter because some structs in there conflict with the
745 * sys/ipc.h ones. So we just define them here, and rely on them being
746 * the same for all host architectures.
/* ipc(2) multiplexer sub-operation numbers (see the block comment above). */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12

#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
753 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
754 void *, ptr
, long, fifth
)
755 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
757 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
759 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
761 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
763 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
764 const struct timespec
*timeout
)
766 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
770 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
771 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
772 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
773 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
774 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
776 /* We do ioctl like this rather than via safe_syscall3 to preserve the
777 * "third argument might be integer or pointer or not present" behaviour of
780 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
781 /* Similarly for fcntl. Note that callers must always:
782 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
783 * use the flock64 struct rather than unsuffixed flock
784 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
787 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
792 static inline int host_to_target_sock_type(int host_type
)
796 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
798 target_type
= TARGET_SOCK_DGRAM
;
801 target_type
= TARGET_SOCK_STREAM
;
804 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
808 #if defined(SOCK_CLOEXEC)
809 if (host_type
& SOCK_CLOEXEC
) {
810 target_type
|= TARGET_SOCK_CLOEXEC
;
814 #if defined(SOCK_NONBLOCK)
815 if (host_type
& SOCK_NONBLOCK
) {
816 target_type
|= TARGET_SOCK_NONBLOCK
;
823 static abi_ulong target_brk
;
824 static abi_ulong target_original_brk
;
825 static abi_ulong brk_page
;
827 void target_set_brk(abi_ulong new_brk
)
829 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
830 brk_page
= HOST_PAGE_ALIGN(target_brk
);
833 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
834 #define DEBUGF_BRK(message, args...)
836 /* do_brk() must return target values and target errnos. */
837 abi_long
do_brk(abi_ulong new_brk
)
839 abi_long mapped_addr
;
842 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
845 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
848 if (new_brk
< target_original_brk
) {
849 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
854 /* If the new brk is less than the highest page reserved to the
855 * target heap allocation, set it and we're almost done... */
856 if (new_brk
<= brk_page
) {
857 /* Heap contents are initialized to zero, as for anonymous
859 if (new_brk
> target_brk
) {
860 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
862 target_brk
= new_brk
;
863 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
867 /* We need to allocate more memory after the brk... Note that
868 * we don't use MAP_FIXED because that will map over the top of
869 * any existing mapping (like the one with the host libc or qemu
870 * itself); instead we treat "mapped but at wrong address" as
871 * a failure and unmap again.
873 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
874 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
875 PROT_READ
|PROT_WRITE
,
876 MAP_ANON
|MAP_PRIVATE
, 0, 0));
878 if (mapped_addr
== brk_page
) {
879 /* Heap contents are initialized to zero, as for anonymous
880 * mapped pages. Technically the new pages are already
881 * initialized to zero since they *are* anonymous mapped
882 * pages, however we have to take care with the contents that
883 * come from the remaining part of the previous page: it may
884 * contains garbage data due to a previous heap usage (grown
886 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
888 target_brk
= new_brk
;
889 brk_page
= HOST_PAGE_ALIGN(target_brk
);
890 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
893 } else if (mapped_addr
!= -1) {
894 /* Mapped but at wrong address, meaning there wasn't actually
895 * enough space for this brk.
897 target_munmap(mapped_addr
, new_alloc_size
);
899 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
902 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
905 #if defined(TARGET_ALPHA)
906 /* We (partially) emulate OSF/1 on Alpha, which requires we
907 return a proper errno, not an unchanged brk value. */
908 return -TARGET_ENOMEM
;
910 /* For everything else, return the previous break. */
914 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
915 abi_ulong target_fds_addr
,
919 abi_ulong b
, *target_fds
;
921 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
922 if (!(target_fds
= lock_user(VERIFY_READ
,
924 sizeof(abi_ulong
) * nw
,
926 return -TARGET_EFAULT
;
930 for (i
= 0; i
< nw
; i
++) {
931 /* grab the abi_ulong */
932 __get_user(b
, &target_fds
[i
]);
933 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
934 /* check the bit inside the abi_ulong */
941 unlock_user(target_fds
, target_fds_addr
, 0);
946 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
947 abi_ulong target_fds_addr
,
950 if (target_fds_addr
) {
951 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
952 return -TARGET_EFAULT
;
960 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
966 abi_ulong
*target_fds
;
968 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
969 if (!(target_fds
= lock_user(VERIFY_WRITE
,
971 sizeof(abi_ulong
) * nw
,
973 return -TARGET_EFAULT
;
976 for (i
= 0; i
< nw
; i
++) {
978 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
979 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
982 __put_user(v
, &target_fds
[i
]);
985 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
990 #if defined(__alpha__)
996 static inline abi_long
host_to_target_clock_t(long ticks
)
998 #if HOST_HZ == TARGET_HZ
1001 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1005 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1006 const struct rusage
*rusage
)
1008 struct target_rusage
*target_rusage
;
1010 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1011 return -TARGET_EFAULT
;
1012 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1013 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1014 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1015 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1016 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1017 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1018 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1019 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1020 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1021 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1022 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1023 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1024 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1025 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1026 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1027 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1028 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1029 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1030 unlock_user_struct(target_rusage
, target_addr
, 1);
1035 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1037 abi_ulong target_rlim_swap
;
1040 target_rlim_swap
= tswapal(target_rlim
);
1041 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1042 return RLIM_INFINITY
;
1044 result
= target_rlim_swap
;
1045 if (target_rlim_swap
!= (rlim_t
)result
)
1046 return RLIM_INFINITY
;
1051 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1053 abi_ulong target_rlim_swap
;
1056 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1057 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1059 target_rlim_swap
= rlim
;
1060 result
= tswapal(target_rlim_swap
);
1065 static inline int target_to_host_resource(int code
)
1068 case TARGET_RLIMIT_AS
:
1070 case TARGET_RLIMIT_CORE
:
1072 case TARGET_RLIMIT_CPU
:
1074 case TARGET_RLIMIT_DATA
:
1076 case TARGET_RLIMIT_FSIZE
:
1077 return RLIMIT_FSIZE
;
1078 case TARGET_RLIMIT_LOCKS
:
1079 return RLIMIT_LOCKS
;
1080 case TARGET_RLIMIT_MEMLOCK
:
1081 return RLIMIT_MEMLOCK
;
1082 case TARGET_RLIMIT_MSGQUEUE
:
1083 return RLIMIT_MSGQUEUE
;
1084 case TARGET_RLIMIT_NICE
:
1086 case TARGET_RLIMIT_NOFILE
:
1087 return RLIMIT_NOFILE
;
1088 case TARGET_RLIMIT_NPROC
:
1089 return RLIMIT_NPROC
;
1090 case TARGET_RLIMIT_RSS
:
1092 case TARGET_RLIMIT_RTPRIO
:
1093 return RLIMIT_RTPRIO
;
1094 case TARGET_RLIMIT_SIGPENDING
:
1095 return RLIMIT_SIGPENDING
;
1096 case TARGET_RLIMIT_STACK
:
1097 return RLIMIT_STACK
;
1103 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1104 abi_ulong target_tv_addr
)
1106 struct target_timeval
*target_tv
;
1108 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1109 return -TARGET_EFAULT
;
1111 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1112 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1114 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1119 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1120 const struct timeval
*tv
)
1122 struct target_timeval
*target_tv
;
1124 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1125 return -TARGET_EFAULT
;
1127 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1128 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1130 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1135 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1136 abi_ulong target_tz_addr
)
1138 struct target_timezone
*target_tz
;
1140 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1141 return -TARGET_EFAULT
;
1144 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1145 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1147 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1152 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1155 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1156 abi_ulong target_mq_attr_addr
)
1158 struct target_mq_attr
*target_mq_attr
;
1160 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1161 target_mq_attr_addr
, 1))
1162 return -TARGET_EFAULT
;
1164 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1165 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1166 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1167 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1169 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1174 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1175 const struct mq_attr
*attr
)
1177 struct target_mq_attr
*target_mq_attr
;
1179 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1180 target_mq_attr_addr
, 0))
1181 return -TARGET_EFAULT
;
1183 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1184 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1185 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1186 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1188 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Each fdset pointer is NULL if the guest passed a NULL address. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr)) {
            return -TARGET_EFAULT;
        }
        /* The host side uses pselect6, which takes a timespec. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }

        /* Linux select() updates the timeout with the time remaining. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
1253 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1256 return pipe2(host_pipe
, flags
);
1262 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1263 int flags
, int is_pipe2
)
1267 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1270 return get_errno(ret
);
1272 /* Several targets have special calling conventions for the original
1273 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1275 #if defined(TARGET_ALPHA)
1276 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1277 return host_pipe
[0];
1278 #elif defined(TARGET_MIPS)
1279 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1280 return host_pipe
[0];
1281 #elif defined(TARGET_SH4)
1282 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1283 return host_pipe
[0];
1284 #elif defined(TARGET_SPARC)
1285 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1286 return host_pipe
[0];
1290 if (put_user_s32(host_pipe
[0], pipedes
)
1291 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1292 return -TARGET_EFAULT
;
1293 return get_errno(ret
);
1296 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1297 abi_ulong target_addr
,
1300 struct target_ip_mreqn
*target_smreqn
;
1302 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1304 return -TARGET_EFAULT
;
1305 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1306 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1307 if (len
== sizeof(struct target_ip_mreqn
))
1308 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1309 unlock_user(target_smreqn
, target_addr
, 0);
1314 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1315 abi_ulong target_addr
,
1318 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1319 sa_family_t sa_family
;
1320 struct target_sockaddr
*target_saddr
;
1322 if (fd_trans_target_to_host_addr(fd
)) {
1323 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1326 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1328 return -TARGET_EFAULT
;
1330 sa_family
= tswap16(target_saddr
->sa_family
);
1332 /* Oops. The caller might send a incomplete sun_path; sun_path
1333 * must be terminated by \0 (see the manual page), but
1334 * unfortunately it is quite common to specify sockaddr_un
1335 * length as "strlen(x->sun_path)" while it should be
1336 * "strlen(...) + 1". We'll fix that here if needed.
1337 * Linux kernel has a similar feature.
1340 if (sa_family
== AF_UNIX
) {
1341 if (len
< unix_maxlen
&& len
> 0) {
1342 char *cp
= (char*)target_saddr
;
1344 if ( cp
[len
-1] && !cp
[len
] )
1347 if (len
> unix_maxlen
)
1351 memcpy(addr
, target_saddr
, len
);
1352 addr
->sa_family
= sa_family
;
1353 if (sa_family
== AF_NETLINK
) {
1354 struct sockaddr_nl
*nladdr
;
1356 nladdr
= (struct sockaddr_nl
*)addr
;
1357 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1358 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1359 } else if (sa_family
== AF_PACKET
) {
1360 struct target_sockaddr_ll
*lladdr
;
1362 lladdr
= (struct target_sockaddr_ll
*)addr
;
1363 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1364 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1366 unlock_user(target_saddr
, target_addr
, 0);
1371 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1372 struct sockaddr
*addr
,
1375 struct target_sockaddr
*target_saddr
;
1377 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1379 return -TARGET_EFAULT
;
1380 memcpy(target_saddr
, addr
, len
);
1381 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1382 if (addr
->sa_family
== AF_NETLINK
) {
1383 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1384 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1385 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1387 unlock_user(target_saddr
, target_addr
, len
);
1392 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1393 struct target_msghdr
*target_msgh
)
1395 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1396 abi_long msg_controllen
;
1397 abi_ulong target_cmsg_addr
;
1398 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1399 socklen_t space
= 0;
1401 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1402 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1404 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1405 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1406 target_cmsg_start
= target_cmsg
;
1408 return -TARGET_EFAULT
;
1410 while (cmsg
&& target_cmsg
) {
1411 void *data
= CMSG_DATA(cmsg
);
1412 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1414 int len
= tswapal(target_cmsg
->cmsg_len
)
1415 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1417 space
+= CMSG_SPACE(len
);
1418 if (space
> msgh
->msg_controllen
) {
1419 space
-= CMSG_SPACE(len
);
1420 /* This is a QEMU bug, since we allocated the payload
1421 * area ourselves (unlike overflow in host-to-target
1422 * conversion, which is just the guest giving us a buffer
1423 * that's too small). It can't happen for the payload types
1424 * we currently support; if it becomes an issue in future
1425 * we would need to improve our allocation strategy to
1426 * something more intelligent than "twice the size of the
1427 * target buffer we're reading from".
1429 gemu_log("Host cmsg overflow\n");
1433 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1434 cmsg
->cmsg_level
= SOL_SOCKET
;
1436 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1438 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1439 cmsg
->cmsg_len
= CMSG_LEN(len
);
1441 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1442 int *fd
= (int *)data
;
1443 int *target_fd
= (int *)target_data
;
1444 int i
, numfds
= len
/ sizeof(int);
1446 for (i
= 0; i
< numfds
; i
++) {
1447 __get_user(fd
[i
], target_fd
+ i
);
1449 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1450 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1451 struct ucred
*cred
= (struct ucred
*)data
;
1452 struct target_ucred
*target_cred
=
1453 (struct target_ucred
*)target_data
;
1455 __get_user(cred
->pid
, &target_cred
->pid
);
1456 __get_user(cred
->uid
, &target_cred
->uid
);
1457 __get_user(cred
->gid
, &target_cred
->gid
);
1459 gemu_log("Unsupported ancillary data: %d/%d\n",
1460 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1461 memcpy(data
, target_data
, len
);
1464 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1465 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1468 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1470 msgh
->msg_controllen
= space
;
1474 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1475 struct msghdr
*msgh
)
1477 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1478 abi_long msg_controllen
;
1479 abi_ulong target_cmsg_addr
;
1480 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1481 socklen_t space
= 0;
1483 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1484 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1486 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1487 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1488 target_cmsg_start
= target_cmsg
;
1490 return -TARGET_EFAULT
;
1492 while (cmsg
&& target_cmsg
) {
1493 void *data
= CMSG_DATA(cmsg
);
1494 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1496 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1497 int tgt_len
, tgt_space
;
1499 /* We never copy a half-header but may copy half-data;
1500 * this is Linux's behaviour in put_cmsg(). Note that
1501 * truncation here is a guest problem (which we report
1502 * to the guest via the CTRUNC bit), unlike truncation
1503 * in target_to_host_cmsg, which is a QEMU bug.
1505 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1506 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1510 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1511 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1513 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1515 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1517 tgt_len
= TARGET_CMSG_LEN(len
);
1519 /* Payload types which need a different size of payload on
1520 * the target must adjust tgt_len here.
1522 switch (cmsg
->cmsg_level
) {
1524 switch (cmsg
->cmsg_type
) {
1526 tgt_len
= sizeof(struct target_timeval
);
1535 if (msg_controllen
< tgt_len
) {
1536 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1537 tgt_len
= msg_controllen
;
1540 /* We must now copy-and-convert len bytes of payload
1541 * into tgt_len bytes of destination space. Bear in mind
1542 * that in both source and destination we may be dealing
1543 * with a truncated value!
1545 switch (cmsg
->cmsg_level
) {
1547 switch (cmsg
->cmsg_type
) {
1550 int *fd
= (int *)data
;
1551 int *target_fd
= (int *)target_data
;
1552 int i
, numfds
= tgt_len
/ sizeof(int);
1554 for (i
= 0; i
< numfds
; i
++) {
1555 __put_user(fd
[i
], target_fd
+ i
);
1561 struct timeval
*tv
= (struct timeval
*)data
;
1562 struct target_timeval
*target_tv
=
1563 (struct target_timeval
*)target_data
;
1565 if (len
!= sizeof(struct timeval
) ||
1566 tgt_len
!= sizeof(struct target_timeval
)) {
1570 /* copy struct timeval to target */
1571 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1572 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1575 case SCM_CREDENTIALS
:
1577 struct ucred
*cred
= (struct ucred
*)data
;
1578 struct target_ucred
*target_cred
=
1579 (struct target_ucred
*)target_data
;
1581 __put_user(cred
->pid
, &target_cred
->pid
);
1582 __put_user(cred
->uid
, &target_cred
->uid
);
1583 __put_user(cred
->gid
, &target_cred
->gid
);
1593 gemu_log("Unsupported ancillary data: %d/%d\n",
1594 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1595 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1596 if (tgt_len
> len
) {
1597 memset(target_data
+ len
, 0, tgt_len
- len
);
1601 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1602 tgt_space
= TARGET_CMSG_SPACE(len
);
1603 if (msg_controllen
< tgt_space
) {
1604 tgt_space
= msg_controllen
;
1606 msg_controllen
-= tgt_space
;
1608 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1609 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1612 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1614 target_msgh
->msg_controllen
= tswapal(space
);
1618 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1620 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1621 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1622 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1623 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1624 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1627 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1629 abi_long (*host_to_target_nlmsg
)
1630 (struct nlmsghdr
*))
1635 while (len
> sizeof(struct nlmsghdr
)) {
1637 nlmsg_len
= nlh
->nlmsg_len
;
1638 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1643 switch (nlh
->nlmsg_type
) {
1645 tswap_nlmsghdr(nlh
);
1651 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1652 e
->error
= tswap32(e
->error
);
1653 tswap_nlmsghdr(&e
->msg
);
1654 tswap_nlmsghdr(nlh
);
1658 ret
= host_to_target_nlmsg(nlh
);
1660 tswap_nlmsghdr(nlh
);
1665 tswap_nlmsghdr(nlh
);
1666 len
-= NLMSG_ALIGN(nlmsg_len
);
1667 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1672 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1674 abi_long (*target_to_host_nlmsg
)
1675 (struct nlmsghdr
*))
1679 while (len
> sizeof(struct nlmsghdr
)) {
1680 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1681 tswap32(nlh
->nlmsg_len
) > len
) {
1684 tswap_nlmsghdr(nlh
);
1685 switch (nlh
->nlmsg_type
) {
1692 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1693 e
->error
= tswap32(e
->error
);
1694 tswap_nlmsghdr(&e
->msg
);
1697 ret
= target_to_host_nlmsg(nlh
);
1702 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1703 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1708 #ifdef CONFIG_RTNETLINK
1709 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1711 abi_long (*host_to_target_rtattr
)
1714 unsigned short rta_len
;
1717 while (len
> sizeof(struct rtattr
)) {
1718 rta_len
= rtattr
->rta_len
;
1719 if (rta_len
< sizeof(struct rtattr
) ||
1723 ret
= host_to_target_rtattr(rtattr
);
1724 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1725 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1729 len
-= RTA_ALIGN(rta_len
);
1730 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1735 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
1738 struct rtnl_link_stats
*st
;
1739 struct rtnl_link_stats64
*st64
;
1740 struct rtnl_link_ifmap
*map
;
1742 switch (rtattr
->rta_type
) {
1745 case IFLA_BROADCAST
:
1751 case IFLA_OPERSTATE
:
1754 case IFLA_PROTO_DOWN
:
1761 case IFLA_CARRIER_CHANGES
:
1762 case IFLA_NUM_RX_QUEUES
:
1763 case IFLA_NUM_TX_QUEUES
:
1764 case IFLA_PROMISCUITY
:
1766 case IFLA_LINK_NETNSID
:
1770 u32
= RTA_DATA(rtattr
);
1771 *u32
= tswap32(*u32
);
1773 /* struct rtnl_link_stats */
1775 st
= RTA_DATA(rtattr
);
1776 st
->rx_packets
= tswap32(st
->rx_packets
);
1777 st
->tx_packets
= tswap32(st
->tx_packets
);
1778 st
->rx_bytes
= tswap32(st
->rx_bytes
);
1779 st
->tx_bytes
= tswap32(st
->tx_bytes
);
1780 st
->rx_errors
= tswap32(st
->rx_errors
);
1781 st
->tx_errors
= tswap32(st
->tx_errors
);
1782 st
->rx_dropped
= tswap32(st
->rx_dropped
);
1783 st
->tx_dropped
= tswap32(st
->tx_dropped
);
1784 st
->multicast
= tswap32(st
->multicast
);
1785 st
->collisions
= tswap32(st
->collisions
);
1787 /* detailed rx_errors: */
1788 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
1789 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
1790 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
1791 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
1792 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
1793 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
1795 /* detailed tx_errors */
1796 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
1797 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
1798 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
1799 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
1800 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
1803 st
->rx_compressed
= tswap32(st
->rx_compressed
);
1804 st
->tx_compressed
= tswap32(st
->tx_compressed
);
1806 /* struct rtnl_link_stats64 */
1808 st64
= RTA_DATA(rtattr
);
1809 st64
->rx_packets
= tswap64(st64
->rx_packets
);
1810 st64
->tx_packets
= tswap64(st64
->tx_packets
);
1811 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
1812 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
1813 st64
->rx_errors
= tswap64(st64
->rx_errors
);
1814 st64
->tx_errors
= tswap64(st64
->tx_errors
);
1815 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
1816 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
1817 st64
->multicast
= tswap64(st64
->multicast
);
1818 st64
->collisions
= tswap64(st64
->collisions
);
1820 /* detailed rx_errors: */
1821 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
1822 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
1823 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
1824 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
1825 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
1826 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
1828 /* detailed tx_errors */
1829 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
1830 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
1831 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
1832 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
1833 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
1836 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
1837 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
1839 /* struct rtnl_link_ifmap */
1841 map
= RTA_DATA(rtattr
);
1842 map
->mem_start
= tswap64(map
->mem_start
);
1843 map
->mem_end
= tswap64(map
->mem_end
);
1844 map
->base_addr
= tswap64(map
->base_addr
);
1845 map
->irq
= tswap16(map
->irq
);
1850 /* FIXME: implement nested type */
1851 gemu_log("Unimplemented nested type %d\n", rtattr
->rta_type
);
1854 gemu_log("Unknown host IFLA type: %d\n", rtattr
->rta_type
);
1860 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
1863 struct ifa_cacheinfo
*ci
;
1865 switch (rtattr
->rta_type
) {
1866 /* binary: depends on family type */
1876 u32
= RTA_DATA(rtattr
);
1877 *u32
= tswap32(*u32
);
1879 /* struct ifa_cacheinfo */
1881 ci
= RTA_DATA(rtattr
);
1882 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
1883 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
1884 ci
->cstamp
= tswap32(ci
->cstamp
);
1885 ci
->tstamp
= tswap32(ci
->tstamp
);
1888 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
1894 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
1897 switch (rtattr
->rta_type
) {
1898 /* binary: depends on family type */
1907 u32
= RTA_DATA(rtattr
);
1908 *u32
= tswap32(*u32
);
1911 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
1917 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
1918 uint32_t rtattr_len
)
1920 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1921 host_to_target_data_link_rtattr
);
1924 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
1925 uint32_t rtattr_len
)
1927 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1928 host_to_target_data_addr_rtattr
);
1931 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
1932 uint32_t rtattr_len
)
1934 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
1935 host_to_target_data_route_rtattr
);
1938 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
1941 struct ifinfomsg
*ifi
;
1942 struct ifaddrmsg
*ifa
;
1945 nlmsg_len
= nlh
->nlmsg_len
;
1946 switch (nlh
->nlmsg_type
) {
1950 ifi
= NLMSG_DATA(nlh
);
1951 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
1952 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
1953 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
1954 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
1955 host_to_target_link_rtattr(IFLA_RTA(ifi
),
1956 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
1961 ifa
= NLMSG_DATA(nlh
);
1962 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
1963 host_to_target_addr_rtattr(IFA_RTA(ifa
),
1964 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
1969 rtm
= NLMSG_DATA(nlh
);
1970 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
1971 host_to_target_route_rtattr(RTM_RTA(rtm
),
1972 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
1975 return -TARGET_EINVAL
;
1980 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
1983 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
1986 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
1988 abi_long (*target_to_host_rtattr
)
1993 while (len
>= sizeof(struct rtattr
)) {
1994 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
1995 tswap16(rtattr
->rta_len
) > len
) {
1998 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1999 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2000 ret
= target_to_host_rtattr(rtattr
);
2004 len
-= RTA_ALIGN(rtattr
->rta_len
);
2005 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2006 RTA_ALIGN(rtattr
->rta_len
));
2011 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2013 switch (rtattr
->rta_type
) {
2015 gemu_log("Unknown target IFLA type: %d\n", rtattr
->rta_type
);
2021 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2023 switch (rtattr
->rta_type
) {
2024 /* binary: depends on family type */
2029 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2035 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2038 switch (rtattr
->rta_type
) {
2039 /* binary: depends on family type */
2046 u32
= RTA_DATA(rtattr
);
2047 *u32
= tswap32(*u32
);
2050 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2056 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2057 uint32_t rtattr_len
)
2059 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2060 target_to_host_data_link_rtattr
);
2063 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2064 uint32_t rtattr_len
)
2066 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2067 target_to_host_data_addr_rtattr
);
2070 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2071 uint32_t rtattr_len
)
2073 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2074 target_to_host_data_route_rtattr
);
2077 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2079 struct ifinfomsg
*ifi
;
2080 struct ifaddrmsg
*ifa
;
2083 switch (nlh
->nlmsg_type
) {
2088 ifi
= NLMSG_DATA(nlh
);
2089 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2090 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2091 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2092 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2093 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2094 NLMSG_LENGTH(sizeof(*ifi
)));
2099 ifa
= NLMSG_DATA(nlh
);
2100 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2101 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2102 NLMSG_LENGTH(sizeof(*ifa
)));
2108 rtm
= NLMSG_DATA(nlh
);
2109 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2110 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2111 NLMSG_LENGTH(sizeof(*rtm
)));
2114 return -TARGET_EOPNOTSUPP
;
2119 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2121 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2123 #endif /* CONFIG_RTNETLINK */
2125 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2127 switch (nlh
->nlmsg_type
) {
2129 gemu_log("Unknown host audit message type %d\n",
2131 return -TARGET_EINVAL
;
2136 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2139 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2142 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2144 switch (nlh
->nlmsg_type
) {
2146 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2147 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2150 gemu_log("Unknown target audit message type %d\n",
2152 return -TARGET_EINVAL
;
2158 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2160 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2163 /* do_setsockopt() Must return target values and target errnos. */
2164 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2165 abi_ulong optval_addr
, socklen_t optlen
)
2169 struct ip_mreqn
*ip_mreq
;
2170 struct ip_mreq_source
*ip_mreq_source
;
2174 /* TCP options all take an 'int' value. */
2175 if (optlen
< sizeof(uint32_t))
2176 return -TARGET_EINVAL
;
2178 if (get_user_u32(val
, optval_addr
))
2179 return -TARGET_EFAULT
;
2180 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2187 case IP_ROUTER_ALERT
:
2191 case IP_MTU_DISCOVER
:
2197 case IP_MULTICAST_TTL
:
2198 case IP_MULTICAST_LOOP
:
2200 if (optlen
>= sizeof(uint32_t)) {
2201 if (get_user_u32(val
, optval_addr
))
2202 return -TARGET_EFAULT
;
2203 } else if (optlen
>= 1) {
2204 if (get_user_u8(val
, optval_addr
))
2205 return -TARGET_EFAULT
;
2207 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2209 case IP_ADD_MEMBERSHIP
:
2210 case IP_DROP_MEMBERSHIP
:
2211 if (optlen
< sizeof (struct target_ip_mreq
) ||
2212 optlen
> sizeof (struct target_ip_mreqn
))
2213 return -TARGET_EINVAL
;
2215 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2216 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2217 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2220 case IP_BLOCK_SOURCE
:
2221 case IP_UNBLOCK_SOURCE
:
2222 case IP_ADD_SOURCE_MEMBERSHIP
:
2223 case IP_DROP_SOURCE_MEMBERSHIP
:
2224 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2225 return -TARGET_EINVAL
;
2227 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2228 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2229 unlock_user (ip_mreq_source
, optval_addr
, 0);
2238 case IPV6_MTU_DISCOVER
:
2241 case IPV6_RECVPKTINFO
:
2243 if (optlen
< sizeof(uint32_t)) {
2244 return -TARGET_EINVAL
;
2246 if (get_user_u32(val
, optval_addr
)) {
2247 return -TARGET_EFAULT
;
2249 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2250 &val
, sizeof(val
)));
2259 /* struct icmp_filter takes an u32 value */
2260 if (optlen
< sizeof(uint32_t)) {
2261 return -TARGET_EINVAL
;
2264 if (get_user_u32(val
, optval_addr
)) {
2265 return -TARGET_EFAULT
;
2267 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2268 &val
, sizeof(val
)));
2275 case TARGET_SOL_SOCKET
:
2277 case TARGET_SO_RCVTIMEO
:
2281 optname
= SO_RCVTIMEO
;
2284 if (optlen
!= sizeof(struct target_timeval
)) {
2285 return -TARGET_EINVAL
;
2288 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2289 return -TARGET_EFAULT
;
2292 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2296 case TARGET_SO_SNDTIMEO
:
2297 optname
= SO_SNDTIMEO
;
2299 case TARGET_SO_ATTACH_FILTER
:
2301 struct target_sock_fprog
*tfprog
;
2302 struct target_sock_filter
*tfilter
;
2303 struct sock_fprog fprog
;
2304 struct sock_filter
*filter
;
2307 if (optlen
!= sizeof(*tfprog
)) {
2308 return -TARGET_EINVAL
;
2310 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2311 return -TARGET_EFAULT
;
2313 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2314 tswapal(tfprog
->filter
), 0)) {
2315 unlock_user_struct(tfprog
, optval_addr
, 1);
2316 return -TARGET_EFAULT
;
2319 fprog
.len
= tswap16(tfprog
->len
);
2320 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2321 if (filter
== NULL
) {
2322 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2323 unlock_user_struct(tfprog
, optval_addr
, 1);
2324 return -TARGET_ENOMEM
;
2326 for (i
= 0; i
< fprog
.len
; i
++) {
2327 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2328 filter
[i
].jt
= tfilter
[i
].jt
;
2329 filter
[i
].jf
= tfilter
[i
].jf
;
2330 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2332 fprog
.filter
= filter
;
2334 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2335 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2338 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2339 unlock_user_struct(tfprog
, optval_addr
, 1);
2342 case TARGET_SO_BINDTODEVICE
:
2344 char *dev_ifname
, *addr_ifname
;
2346 if (optlen
> IFNAMSIZ
- 1) {
2347 optlen
= IFNAMSIZ
- 1;
2349 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2351 return -TARGET_EFAULT
;
2353 optname
= SO_BINDTODEVICE
;
2354 addr_ifname
= alloca(IFNAMSIZ
);
2355 memcpy(addr_ifname
, dev_ifname
, optlen
);
2356 addr_ifname
[optlen
] = 0;
2357 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2358 addr_ifname
, optlen
));
2359 unlock_user (dev_ifname
, optval_addr
, 0);
2362 /* Options with 'int' argument. */
2363 case TARGET_SO_DEBUG
:
2366 case TARGET_SO_REUSEADDR
:
2367 optname
= SO_REUSEADDR
;
2369 case TARGET_SO_TYPE
:
2372 case TARGET_SO_ERROR
:
2375 case TARGET_SO_DONTROUTE
:
2376 optname
= SO_DONTROUTE
;
2378 case TARGET_SO_BROADCAST
:
2379 optname
= SO_BROADCAST
;
2381 case TARGET_SO_SNDBUF
:
2382 optname
= SO_SNDBUF
;
2384 case TARGET_SO_SNDBUFFORCE
:
2385 optname
= SO_SNDBUFFORCE
;
2387 case TARGET_SO_RCVBUF
:
2388 optname
= SO_RCVBUF
;
2390 case TARGET_SO_RCVBUFFORCE
:
2391 optname
= SO_RCVBUFFORCE
;
2393 case TARGET_SO_KEEPALIVE
:
2394 optname
= SO_KEEPALIVE
;
2396 case TARGET_SO_OOBINLINE
:
2397 optname
= SO_OOBINLINE
;
2399 case TARGET_SO_NO_CHECK
:
2400 optname
= SO_NO_CHECK
;
2402 case TARGET_SO_PRIORITY
:
2403 optname
= SO_PRIORITY
;
2406 case TARGET_SO_BSDCOMPAT
:
2407 optname
= SO_BSDCOMPAT
;
2410 case TARGET_SO_PASSCRED
:
2411 optname
= SO_PASSCRED
;
2413 case TARGET_SO_PASSSEC
:
2414 optname
= SO_PASSSEC
;
2416 case TARGET_SO_TIMESTAMP
:
2417 optname
= SO_TIMESTAMP
;
2419 case TARGET_SO_RCVLOWAT
:
2420 optname
= SO_RCVLOWAT
;
2426 if (optlen
< sizeof(uint32_t))
2427 return -TARGET_EINVAL
;
2429 if (get_user_u32(val
, optval_addr
))
2430 return -TARGET_EFAULT
;
2431 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2435 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2436 ret
= -TARGET_ENOPROTOOPT
;
2441 /* do_getsockopt() Must return target values and target errnos. */
2442 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2443 abi_ulong optval_addr
, abi_ulong optlen
)
2450 case TARGET_SOL_SOCKET
:
2453 /* These don't just return a single integer */
2454 case TARGET_SO_LINGER
:
2455 case TARGET_SO_RCVTIMEO
:
2456 case TARGET_SO_SNDTIMEO
:
2457 case TARGET_SO_PEERNAME
:
2459 case TARGET_SO_PEERCRED
: {
2462 struct target_ucred
*tcr
;
2464 if (get_user_u32(len
, optlen
)) {
2465 return -TARGET_EFAULT
;
2468 return -TARGET_EINVAL
;
2472 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2480 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2481 return -TARGET_EFAULT
;
2483 __put_user(cr
.pid
, &tcr
->pid
);
2484 __put_user(cr
.uid
, &tcr
->uid
);
2485 __put_user(cr
.gid
, &tcr
->gid
);
2486 unlock_user_struct(tcr
, optval_addr
, 1);
2487 if (put_user_u32(len
, optlen
)) {
2488 return -TARGET_EFAULT
;
2492 /* Options with 'int' argument. */
2493 case TARGET_SO_DEBUG
:
2496 case TARGET_SO_REUSEADDR
:
2497 optname
= SO_REUSEADDR
;
2499 case TARGET_SO_TYPE
:
2502 case TARGET_SO_ERROR
:
2505 case TARGET_SO_DONTROUTE
:
2506 optname
= SO_DONTROUTE
;
2508 case TARGET_SO_BROADCAST
:
2509 optname
= SO_BROADCAST
;
2511 case TARGET_SO_SNDBUF
:
2512 optname
= SO_SNDBUF
;
2514 case TARGET_SO_RCVBUF
:
2515 optname
= SO_RCVBUF
;
2517 case TARGET_SO_KEEPALIVE
:
2518 optname
= SO_KEEPALIVE
;
2520 case TARGET_SO_OOBINLINE
:
2521 optname
= SO_OOBINLINE
;
2523 case TARGET_SO_NO_CHECK
:
2524 optname
= SO_NO_CHECK
;
2526 case TARGET_SO_PRIORITY
:
2527 optname
= SO_PRIORITY
;
2530 case TARGET_SO_BSDCOMPAT
:
2531 optname
= SO_BSDCOMPAT
;
2534 case TARGET_SO_PASSCRED
:
2535 optname
= SO_PASSCRED
;
2537 case TARGET_SO_TIMESTAMP
:
2538 optname
= SO_TIMESTAMP
;
2540 case TARGET_SO_RCVLOWAT
:
2541 optname
= SO_RCVLOWAT
;
2543 case TARGET_SO_ACCEPTCONN
:
2544 optname
= SO_ACCEPTCONN
;
2551 /* TCP options all take an 'int' value. */
2553 if (get_user_u32(len
, optlen
))
2554 return -TARGET_EFAULT
;
2556 return -TARGET_EINVAL
;
2558 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2561 if (optname
== SO_TYPE
) {
2562 val
= host_to_target_sock_type(val
);
2567 if (put_user_u32(val
, optval_addr
))
2568 return -TARGET_EFAULT
;
2570 if (put_user_u8(val
, optval_addr
))
2571 return -TARGET_EFAULT
;
2573 if (put_user_u32(len
, optlen
))
2574 return -TARGET_EFAULT
;
2581 case IP_ROUTER_ALERT
:
2585 case IP_MTU_DISCOVER
:
2591 case IP_MULTICAST_TTL
:
2592 case IP_MULTICAST_LOOP
:
2593 if (get_user_u32(len
, optlen
))
2594 return -TARGET_EFAULT
;
2596 return -TARGET_EINVAL
;
2598 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2601 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2603 if (put_user_u32(len
, optlen
)
2604 || put_user_u8(val
, optval_addr
))
2605 return -TARGET_EFAULT
;
2607 if (len
> sizeof(int))
2609 if (put_user_u32(len
, optlen
)
2610 || put_user_u32(val
, optval_addr
))
2611 return -TARGET_EFAULT
;
2615 ret
= -TARGET_ENOPROTOOPT
;
2621 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2623 ret
= -TARGET_EOPNOTSUPP
;
2629 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2630 int count
, int copy
)
2632 struct target_iovec
*target_vec
;
2634 abi_ulong total_len
, max_len
;
2637 bool bad_address
= false;
2643 if (count
< 0 || count
> IOV_MAX
) {
2648 vec
= g_try_new0(struct iovec
, count
);
2654 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2655 count
* sizeof(struct target_iovec
), 1);
2656 if (target_vec
== NULL
) {
2661 /* ??? If host page size > target page size, this will result in a
2662 value larger than what we can actually support. */
2663 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2666 for (i
= 0; i
< count
; i
++) {
2667 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2668 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2673 } else if (len
== 0) {
2674 /* Zero length pointer is ignored. */
2675 vec
[i
].iov_base
= 0;
2677 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2678 /* If the first buffer pointer is bad, this is a fault. But
2679 * subsequent bad buffers will result in a partial write; this
2680 * is realized by filling the vector with null pointers and
2682 if (!vec
[i
].iov_base
) {
2693 if (len
> max_len
- total_len
) {
2694 len
= max_len
- total_len
;
2697 vec
[i
].iov_len
= len
;
2701 unlock_user(target_vec
, target_addr
, 0);
2706 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2707 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2710 unlock_user(target_vec
, target_addr
, 0);
/*
 * unlock_iovec: release the host iovec built by lock_iovec(), unlocking
 * each guest buffer (writing data back when 'copy' is set, i.e. for the
 * read/recv direction) and then the guest target_iovec array itself.
 *
 * NOTE(review): this chunk is an elided extraction -- original lines
 * 2719, 2721-2722, 2725, 2729-2731, 2733 and 2735+ (incl. the check of
 * lock_user()'s result and the final brace/free) are not visible here;
 * confirm against the full file before relying on these comments.
 */
2717 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2718 int count
, int copy
)
2720 struct target_iovec
*target_vec
;
/* Re-lock the guest-side iovec array read-only to recover each
 * element's guest base address for unlock_user(). */
2723 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2724 count
* sizeof(struct target_iovec
), 1);
2726 for (i
= 0; i
< count
; i
++) {
2727 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2728 abi_long len
= tswapal(target_vec
[i
].iov_len
);
/* copy != 0 => flush vec[i].iov_len bytes back to the guest;
 * copy == 0 => discard (length 0 means "no writeback"). */
2732 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
/* The target_iovec array itself was only read, so no writeback. */
2734 unlock_user(target_vec
, target_addr
, 0);
2740 static inline int target_to_host_sock_type(int *type
)
2743 int target_type
= *type
;
2745 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2746 case TARGET_SOCK_DGRAM
:
2747 host_type
= SOCK_DGRAM
;
2749 case TARGET_SOCK_STREAM
:
2750 host_type
= SOCK_STREAM
;
2753 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2756 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2757 #if defined(SOCK_CLOEXEC)
2758 host_type
|= SOCK_CLOEXEC
;
2760 return -TARGET_EINVAL
;
2763 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2764 #if defined(SOCK_NONBLOCK)
2765 host_type
|= SOCK_NONBLOCK
;
2766 #elif !defined(O_NONBLOCK)
2767 return -TARGET_EINVAL
;
2774 /* Try to emulate socket type flags after socket creation. */
/*
 * On hosts without SOCK_NONBLOCK, approximate the guest's
 * TARGET_SOCK_NONBLOCK request by setting O_NONBLOCK with fcntl()
 * after the socket already exists. Returns -TARGET_EINVAL if the
 * F_SETFL fails; otherwise (per the elided tail) presumably returns fd.
 *
 * NOTE(review): the F_GETFL result is OR'ed into F_SETFL without
 * checking for a -1 error return -- confirm against the full file
 * whether that is handled in the elided lines (2776, 2781, 2783+
 * are missing from this extraction).
 */
2775 static int sock_flags_fixup(int fd
, int target_type
)
2777 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2778 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2779 int flags
= fcntl(fd
, F_GETFL
);
2780 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2782 return -TARGET_EINVAL
;
2789 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2790 abi_ulong target_addr
,
2793 struct sockaddr
*addr
= host_addr
;
2794 struct target_sockaddr
*target_saddr
;
2796 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2797 if (!target_saddr
) {
2798 return -TARGET_EFAULT
;
2801 memcpy(addr
, target_saddr
, len
);
2802 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2803 /* spkt_protocol is big-endian */
2805 unlock_user(target_saddr
, target_addr
, 0);
2809 static TargetFdTrans target_packet_trans
= {
2810 .target_to_host_addr
= packet_target_to_host_sockaddr
,
/*
 * Per-fd data translators for netlink sockets: thin adapters that give
 * the nlmsg route/audit byteswap helpers the TargetFdTrans callback
 * signature, plus the two translator tables registered by do_socket()
 * for NETLINK_ROUTE and NETLINK_AUDIT fds.
 *
 * NOTE(review): the closing braces/semicolons of these functions and
 * initializers are elided in this extraction (original lines 2815,
 * 2817-2818, 2820, 2822-2823, 2827, 2829, 2831, 2833-2834, 2836,
 * 2838-2839, 2843-2844 are missing).
 */
2813 #ifdef CONFIG_RTNETLINK
2814 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
2816 return target_to_host_nlmsg_route(buf
, len
);
2819 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
2821 return host_to_target_nlmsg_route(buf
, len
);
2824 static TargetFdTrans target_netlink_route_trans
= {
2825 .target_to_host_data
= netlink_route_target_to_host
,
2826 .host_to_target_data
= netlink_route_host_to_target
,
2828 #endif /* CONFIG_RTNETLINK */
2830 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
2832 return target_to_host_nlmsg_audit(buf
, len
);
2835 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
2837 return host_to_target_nlmsg_audit(buf
, len
);
2840 static TargetFdTrans target_netlink_audit_trans
= {
2841 .target_to_host_data
= netlink_audit_target_to_host
,
2842 .host_to_target_data
= netlink_audit_host_to_target
,
2845 /* do_socket() Must return target values and target errnos. */
2846 static abi_long
do_socket(int domain
, int type
, int protocol
)
2848 int target_type
= type
;
2851 ret
= target_to_host_sock_type(&type
);
2856 if (domain
== PF_NETLINK
&& !(
2857 #ifdef CONFIG_RTNETLINK
2858 protocol
== NETLINK_ROUTE
||
2860 protocol
== NETLINK_KOBJECT_UEVENT
||
2861 protocol
== NETLINK_AUDIT
)) {
2862 return -EPFNOSUPPORT
;
2865 if (domain
== AF_PACKET
||
2866 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2867 protocol
= tswap16(protocol
);
2870 ret
= get_errno(socket(domain
, type
, protocol
));
2872 ret
= sock_flags_fixup(ret
, target_type
);
2873 if (type
== SOCK_PACKET
) {
2874 /* Manage an obsolete case :
2875 * if socket type is SOCK_PACKET, bind by name
2877 fd_trans_register(ret
, &target_packet_trans
);
2878 } else if (domain
== PF_NETLINK
) {
2880 #ifdef CONFIG_RTNETLINK
2882 fd_trans_register(ret
, &target_netlink_route_trans
);
2885 case NETLINK_KOBJECT_UEVENT
:
2886 /* nothing to do: messages are strings */
2889 fd_trans_register(ret
, &target_netlink_audit_trans
);
2892 g_assert_not_reached();
2899 /* do_bind() Must return target values and target errnos. */
/*
 * Translate a guest bind(2): reject a negative address length, copy and
 * byteswap the guest sockaddr into a host stack buffer (alloca of
 * addrlen+1 -- the +1 presumably covers the AF_UNIX pathname NUL case,
 * TODO confirm), then issue the host bind().
 *
 * NOTE(review): elided extraction -- the declarations of addr/ret
 * (orig. 2901-2905) and the error-return check after
 * target_to_host_sockaddr (orig. 2913-2915) are missing from this view.
 */
2900 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2906 if ((int)addrlen
< 0) {
2907 return -TARGET_EINVAL
;
2910 addr
= alloca(addrlen
+1);
2912 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2916 return get_errno(bind(sockfd
, addr
, addrlen
));
2919 /* do_connect() Must return target values and target errnos. */
/*
 * Translate a guest connect(2); mirrors do_bind() above but calls the
 * signal-safe safe_connect() wrapper so the (potentially blocking) host
 * syscall can be restarted/interrupted correctly for the guest.
 *
 * NOTE(review): elided extraction -- local declarations (orig.
 * 2921-2925) and the check of target_to_host_sockaddr's return (orig.
 * 2933-2935) are missing from this view.
 */
2920 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2926 if ((int)addrlen
< 0) {
2927 return -TARGET_EINVAL
;
2930 addr
= alloca(addrlen
+1);
2932 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2936 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2939 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2940 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2941 int flags
, int send
)
2947 abi_ulong target_vec
;
2949 if (msgp
->msg_name
) {
2950 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2951 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2952 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2953 tswapal(msgp
->msg_name
),
2959 msg
.msg_name
= NULL
;
2960 msg
.msg_namelen
= 0;
2962 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2963 msg
.msg_control
= alloca(msg
.msg_controllen
);
2964 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2966 count
= tswapal(msgp
->msg_iovlen
);
2967 target_vec
= tswapal(msgp
->msg_iov
);
2968 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2969 target_vec
, count
, send
);
2971 ret
= -host_to_target_errno(errno
);
2974 msg
.msg_iovlen
= count
;
2978 if (fd_trans_target_to_host_data(fd
)) {
2979 ret
= fd_trans_target_to_host_data(fd
)(msg
.msg_iov
->iov_base
,
2980 msg
.msg_iov
->iov_len
);
2982 ret
= target_to_host_cmsg(&msg
, msgp
);
2985 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2988 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2989 if (!is_error(ret
)) {
2991 if (fd_trans_host_to_target_data(fd
)) {
2992 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2993 msg
.msg_iov
->iov_len
);
2995 ret
= host_to_target_cmsg(msgp
, &msg
);
2997 if (!is_error(ret
)) {
2998 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2999 if (msg
.msg_name
!= NULL
) {
3000 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3001 msg
.msg_name
, msg
.msg_namelen
);
3013 unlock_iovec(vec
, target_vec
, count
, !send
);
3018 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3019 int flags
, int send
)
3022 struct target_msghdr
*msgp
;
3024 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3028 return -TARGET_EFAULT
;
3030 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3031 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3035 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3036 * so it might not have this *mmsg-specific flag either.
3038 #ifndef MSG_WAITFORONE
3039 #define MSG_WAITFORONE 0x10000
3042 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3043 unsigned int vlen
, unsigned int flags
,
3046 struct target_mmsghdr
*mmsgp
;
3050 if (vlen
> UIO_MAXIOV
) {
3054 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3056 return -TARGET_EFAULT
;
3059 for (i
= 0; i
< vlen
; i
++) {
3060 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3061 if (is_error(ret
)) {
3064 mmsgp
[i
].msg_len
= tswap32(ret
);
3065 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3066 if (flags
& MSG_WAITFORONE
) {
3067 flags
|= MSG_DONTWAIT
;
3071 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3073 /* Return number of datagrams sent if we sent any at all;
3074 * otherwise return the error.
3082 /* do_accept4() Must return target values and target errnos. */
3083 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3084 abi_ulong target_addrlen_addr
, int flags
)
3091 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3093 if (target_addr
== 0) {
3094 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3097 /* linux returns EINVAL if addrlen pointer is invalid */
3098 if (get_user_u32(addrlen
, target_addrlen_addr
))
3099 return -TARGET_EINVAL
;
3101 if ((int)addrlen
< 0) {
3102 return -TARGET_EINVAL
;
3105 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3106 return -TARGET_EINVAL
;
3108 addr
= alloca(addrlen
);
3110 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3111 if (!is_error(ret
)) {
3112 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3113 if (put_user_u32(addrlen
, target_addrlen_addr
))
3114 ret
= -TARGET_EFAULT
;
3119 /* do_getpeername() Must return target values and target errnos. */
3120 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3121 abi_ulong target_addrlen_addr
)
3127 if (get_user_u32(addrlen
, target_addrlen_addr
))
3128 return -TARGET_EFAULT
;
3130 if ((int)addrlen
< 0) {
3131 return -TARGET_EINVAL
;
3134 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3135 return -TARGET_EFAULT
;
3137 addr
= alloca(addrlen
);
3139 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3140 if (!is_error(ret
)) {
3141 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3142 if (put_user_u32(addrlen
, target_addrlen_addr
))
3143 ret
= -TARGET_EFAULT
;
3148 /* do_getsockname() Must return target values and target errnos. */
3149 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3150 abi_ulong target_addrlen_addr
)
3156 if (get_user_u32(addrlen
, target_addrlen_addr
))
3157 return -TARGET_EFAULT
;
3159 if ((int)addrlen
< 0) {
3160 return -TARGET_EINVAL
;
3163 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3164 return -TARGET_EFAULT
;
3166 addr
= alloca(addrlen
);
3168 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3169 if (!is_error(ret
)) {
3170 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3171 if (put_user_u32(addrlen
, target_addrlen_addr
))
3172 ret
= -TARGET_EFAULT
;
3177 /* do_socketpair() Must return target values and target errnos. */
/*
 * Translate a guest socketpair(2): convert the guest socket type flags
 * to host values, create the pair, and on success write both fds back
 * to the guest array at target_tab_addr.
 *
 * NOTE(review): unlike do_socket() (which checks
 * target_to_host_sock_type()'s return and bails on -TARGET_EINVAL),
 * the return value is discarded here, so an invalid
 * TARGET_SOCK_CLOEXEC/NONBLOCK combination is silently passed through
 * -- confirm whether this inconsistency is intentional.
 * Elided extraction: locals (orig. 3180-3183) and closing lines are
 * missing from this view.
 */
3178 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3179 abi_ulong target_tab_addr
)
3184 target_to_host_sock_type(&type
);
3186 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3187 if (!is_error(ret
)) {
/* Write the two new fds into the guest's int[2] array. */
3188 if (put_user_s32(tab
[0], target_tab_addr
)
3189 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3190 ret
= -TARGET_EFAULT
;
3195 /* do_sendto() Must return target values and target errnos. */
3196 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3197 abi_ulong target_addr
, socklen_t addrlen
)
3203 if ((int)addrlen
< 0) {
3204 return -TARGET_EINVAL
;
3207 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3209 return -TARGET_EFAULT
;
3210 if (fd_trans_target_to_host_data(fd
)) {
3211 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3213 unlock_user(host_msg
, msg
, 0);
3218 addr
= alloca(addrlen
+1);
3219 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3221 unlock_user(host_msg
, msg
, 0);
3224 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3226 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3228 unlock_user(host_msg
, msg
, 0);
3232 /* do_recvfrom() Must return target values and target errnos. */
3233 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3234 abi_ulong target_addr
,
3235 abi_ulong target_addrlen
)
3242 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3244 return -TARGET_EFAULT
;
3246 if (get_user_u32(addrlen
, target_addrlen
)) {
3247 ret
= -TARGET_EFAULT
;
3250 if ((int)addrlen
< 0) {
3251 ret
= -TARGET_EINVAL
;
3254 addr
= alloca(addrlen
);
3255 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3258 addr
= NULL
; /* To keep compiler quiet. */
3259 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3261 if (!is_error(ret
)) {
3263 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3264 if (put_user_u32(addrlen
, target_addrlen
)) {
3265 ret
= -TARGET_EFAULT
;
3269 unlock_user(host_msg
, msg
, len
);
3272 unlock_user(host_msg
, msg
, 0);
3277 #ifdef TARGET_NR_socketcall
3278 /* do_socketcall() Must return target values and target errnos. */
3279 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3281 static const unsigned ac
[] = { /* number of arguments per call */
3282 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3283 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3284 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3285 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3286 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3287 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3288 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3289 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3290 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3291 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3292 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3293 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3294 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3295 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3296 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3297 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3298 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3299 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3300 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3301 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3303 abi_long a
[6]; /* max 6 args */
3305 /* first, collect the arguments in a[] according to ac[] */
3306 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3308 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3309 for (i
= 0; i
< ac
[num
]; ++i
) {
3310 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3311 return -TARGET_EFAULT
;
3316 /* now when we have the args, actually handle the call */
3318 case SOCKOP_socket
: /* domain, type, protocol */
3319 return do_socket(a
[0], a
[1], a
[2]);
3320 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3321 return do_bind(a
[0], a
[1], a
[2]);
3322 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3323 return do_connect(a
[0], a
[1], a
[2]);
3324 case SOCKOP_listen
: /* sockfd, backlog */
3325 return get_errno(listen(a
[0], a
[1]));
3326 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3327 return do_accept4(a
[0], a
[1], a
[2], 0);
3328 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3329 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3330 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3331 return do_getsockname(a
[0], a
[1], a
[2]);
3332 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3333 return do_getpeername(a
[0], a
[1], a
[2]);
3334 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3335 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3336 case SOCKOP_send
: /* sockfd, msg, len, flags */
3337 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3338 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3339 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3340 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3341 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3342 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3343 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3344 case SOCKOP_shutdown
: /* sockfd, how */
3345 return get_errno(shutdown(a
[0], a
[1]));
3346 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3347 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3348 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3349 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3350 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3351 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3352 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3353 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3354 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3355 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3356 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3357 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3359 gemu_log("Unsupported socketcall: %d\n", num
);
3360 return -TARGET_ENOSYS
;
3365 #define N_SHM_REGIONS 32
3367 static struct shm_region
{
3371 } shm_regions
[N_SHM_REGIONS
];
3373 struct target_semid_ds
3375 struct target_ipc_perm sem_perm
;
3376 abi_ulong sem_otime
;
3377 #if !defined(TARGET_PPC64)
3378 abi_ulong __unused1
;
3380 abi_ulong sem_ctime
;
3381 #if !defined(TARGET_PPC64)
3382 abi_ulong __unused2
;
3384 abi_ulong sem_nsems
;
3385 abi_ulong __unused3
;
3386 abi_ulong __unused4
;
3389 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3390 abi_ulong target_addr
)
3392 struct target_ipc_perm
*target_ip
;
3393 struct target_semid_ds
*target_sd
;
3395 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3396 return -TARGET_EFAULT
;
3397 target_ip
= &(target_sd
->sem_perm
);
3398 host_ip
->__key
= tswap32(target_ip
->__key
);
3399 host_ip
->uid
= tswap32(target_ip
->uid
);
3400 host_ip
->gid
= tswap32(target_ip
->gid
);
3401 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3402 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3403 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3404 host_ip
->mode
= tswap32(target_ip
->mode
);
3406 host_ip
->mode
= tswap16(target_ip
->mode
);
3408 #if defined(TARGET_PPC)
3409 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3411 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3413 unlock_user_struct(target_sd
, target_addr
, 0);
3417 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3418 struct ipc_perm
*host_ip
)
3420 struct target_ipc_perm
*target_ip
;
3421 struct target_semid_ds
*target_sd
;
3423 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3424 return -TARGET_EFAULT
;
3425 target_ip
= &(target_sd
->sem_perm
);
3426 target_ip
->__key
= tswap32(host_ip
->__key
);
3427 target_ip
->uid
= tswap32(host_ip
->uid
);
3428 target_ip
->gid
= tswap32(host_ip
->gid
);
3429 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3430 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3431 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3432 target_ip
->mode
= tswap32(host_ip
->mode
);
3434 target_ip
->mode
= tswap16(host_ip
->mode
);
3436 #if defined(TARGET_PPC)
3437 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3439 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3441 unlock_user_struct(target_sd
, target_addr
, 1);
3445 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3446 abi_ulong target_addr
)
3448 struct target_semid_ds
*target_sd
;
3450 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3451 return -TARGET_EFAULT
;
3452 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3453 return -TARGET_EFAULT
;
3454 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3455 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3456 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3457 unlock_user_struct(target_sd
, target_addr
, 0);
3461 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3462 struct semid_ds
*host_sd
)
3464 struct target_semid_ds
*target_sd
;
3466 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3467 return -TARGET_EFAULT
;
3468 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3469 return -TARGET_EFAULT
;
3470 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3471 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3472 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3473 unlock_user_struct(target_sd
, target_addr
, 1);
3477 struct target_seminfo
{
3490 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3491 struct seminfo
*host_seminfo
)
3493 struct target_seminfo
*target_seminfo
;
3494 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3495 return -TARGET_EFAULT
;
3496 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3497 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3498 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3499 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3500 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3501 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3502 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3503 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3504 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3505 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3506 unlock_user_struct(target_seminfo
, target_addr
, 1);
3512 struct semid_ds
*buf
;
3513 unsigned short *array
;
3514 struct seminfo
*__buf
;
3517 union target_semun
{
3524 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3525 abi_ulong target_addr
)
3528 unsigned short *array
;
3530 struct semid_ds semid_ds
;
3533 semun
.buf
= &semid_ds
;
3535 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3537 return get_errno(ret
);
3539 nsems
= semid_ds
.sem_nsems
;
3541 *host_array
= g_try_new(unsigned short, nsems
);
3543 return -TARGET_ENOMEM
;
3545 array
= lock_user(VERIFY_READ
, target_addr
,
3546 nsems
*sizeof(unsigned short), 1);
3548 g_free(*host_array
);
3549 return -TARGET_EFAULT
;
3552 for(i
=0; i
<nsems
; i
++) {
3553 __get_user((*host_array
)[i
], &array
[i
]);
3555 unlock_user(array
, target_addr
, 0);
3560 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3561 unsigned short **host_array
)
3564 unsigned short *array
;
3566 struct semid_ds semid_ds
;
3569 semun
.buf
= &semid_ds
;
3571 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3573 return get_errno(ret
);
3575 nsems
= semid_ds
.sem_nsems
;
3577 array
= lock_user(VERIFY_WRITE
, target_addr
,
3578 nsems
*sizeof(unsigned short), 0);
3580 return -TARGET_EFAULT
;
3582 for(i
=0; i
<nsems
; i
++) {
3583 __put_user((*host_array
)[i
], &array
[i
]);
3585 g_free(*host_array
);
3586 unlock_user(array
, target_addr
, 1);
3591 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3592 abi_ulong target_arg
)
3594 union target_semun target_su
= { .buf
= target_arg
};
3596 struct semid_ds dsarg
;
3597 unsigned short *array
= NULL
;
3598 struct seminfo seminfo
;
3599 abi_long ret
= -TARGET_EINVAL
;
3606 /* In 64 bit cross-endian situations, we will erroneously pick up
3607 * the wrong half of the union for the "val" element. To rectify
3608 * this, the entire 8-byte structure is byteswapped, followed by
3609 * a swap of the 4 byte val field. In other cases, the data is
3610 * already in proper host byte order. */
3611 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3612 target_su
.buf
= tswapal(target_su
.buf
);
3613 arg
.val
= tswap32(target_su
.val
);
3615 arg
.val
= target_su
.val
;
3617 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3621 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3625 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3626 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3633 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3637 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3638 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3644 arg
.__buf
= &seminfo
;
3645 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3646 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3654 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
3661 struct target_sembuf
{
3662 unsigned short sem_num
;
3667 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3668 abi_ulong target_addr
,
3671 struct target_sembuf
*target_sembuf
;
3674 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3675 nsops
*sizeof(struct target_sembuf
), 1);
3677 return -TARGET_EFAULT
;
3679 for(i
=0; i
<nsops
; i
++) {
3680 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3681 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3682 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3685 unlock_user(target_sembuf
, target_addr
, 0);
/*
 * Translate a guest semop(2): copy nsops guest sembuf entries into a
 * host array, then perform them via the signal-safe semtimedop wrapper
 * with no timeout (NULL == plain semop semantics).
 *
 * NOTE(review): 'sops' is a VLA sized by the guest-controlled 'nsops'
 * with no visible upper bound -- a hostile guest value could overflow
 * the host stack; confirm whether the elided lines (orig. 3691, 3693,
 * 3696) add a limit (the kernel itself caps this at SEMOPM).
 */
3690 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3692 struct sembuf sops
[nsops
];
3694 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3695 return -TARGET_EFAULT
;
3697 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3700 struct target_msqid_ds
3702 struct target_ipc_perm msg_perm
;
3703 abi_ulong msg_stime
;
3704 #if TARGET_ABI_BITS == 32
3705 abi_ulong __unused1
;
3707 abi_ulong msg_rtime
;
3708 #if TARGET_ABI_BITS == 32
3709 abi_ulong __unused2
;
3711 abi_ulong msg_ctime
;
3712 #if TARGET_ABI_BITS == 32
3713 abi_ulong __unused3
;
3715 abi_ulong __msg_cbytes
;
3717 abi_ulong msg_qbytes
;
3718 abi_ulong msg_lspid
;
3719 abi_ulong msg_lrpid
;
3720 abi_ulong __unused4
;
3721 abi_ulong __unused5
;
3724 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3725 abi_ulong target_addr
)
3727 struct target_msqid_ds
*target_md
;
3729 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3730 return -TARGET_EFAULT
;
3731 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3732 return -TARGET_EFAULT
;
3733 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3734 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3735 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3736 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3737 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3738 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3739 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3740 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3741 unlock_user_struct(target_md
, target_addr
, 0);
/* Inverse of target_to_host_msqid_ds: write *host_md back to the guest
 * struct msqid_ds at target_addr with byte-swapped fields.
 * Returns 0 on success, -TARGET_EFAULT on lock/conversion failure.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO).
 * Plain ints on all ABIs except the final unsigned short msgseg,
 * mirroring the kernel's struct msginfo.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Write a host struct msginfo to the guest struct at target_addr,
 * swapping each field via __put_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2): translate the command's argument struct to/from the
 * guest as required by the command.  Returns the syscall result or a
 * negative target errno; unknown commands yield -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other version bits the host libc handles itself. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Bidirectional: convert in, call, convert result back out. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        /* No argument struct. */
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel writes a struct msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI message buffer: an abi_long type tag followed by the payload.
 * mtext is declared [1] in the classic style; actual messages extend past
 * the end of the struct (see do_msgsnd/do_msgrcv).
 */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest message (type + payload) into a
 * host-side msgbuf and send it.  Returns 0 or a negative target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* Reject negative sizes before they reach the allocation below. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host buffer, then copy the type and
 * as many payload bytes as were received back into the guest msgbuf.
 * Returns the received byte count or a negative target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Payload starts right after the guest mtype word.
         * NOTE(review): offset uses sizeof(abi_ulong) while mtype is
         * abi_long — same size here, but confirm for all ABIs. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Copy a guest struct shmid_ds at target_addr into *host_sd, swapping
 * each field via __get_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Write *host_sd back to the guest struct shmid_ds at target_addr,
 * swapping each field via __put_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
/* Write a host struct shminfo to the guest struct at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;              /* segments currently in use */
    abi_ulong shm_tot;         /* total allocated shm (pages) */
    abi_ulong shm_rss;         /* resident shm (pages) */
    abi_ulong shm_swp;         /* swapped shm (pages) */
    abi_ulong swap_attempts;   /* historical, unused by modern kernels */
    abi_ulong swap_successes;  /* historical, unused by modern kernels */
};
/* Write a host struct shm_info to the guest struct at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2): translate the command's argument struct to/from the
 * guest depending on the command.  Unknown commands return -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 etc.; the host libc re-adds what it needs. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel writes a struct shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument struct. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2).  If the guest supplied no address, pick a free guest
 * VMA and attach there (SHM_REMAP because mmap_find_vma may have reserved
 * the range).  On success the attached range is registered in shm_regions[]
 * so do_shmdt can later clear its page flags.
 * Returns the guest address of the mapping or a negative target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid/readable (writable unless SHM_RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the region for do_shmdt.  NOTE(review): shm_regions[] is
     * scanned without a lock here — confirm callers serialize this. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}
/* Emulate shmdt(2): drop the region's entry from shm_regions[] (clearing
 * the guest page flags do_shmat set) and detach the host mapping.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
/* Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the operation, the high 16 bits carry the interface version
 * (version 0 of msgrcv passes its args through a kludge struct).
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: msgp/msgtyp are passed indirectly. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4204 /* kernel structure types definitions */
4206 #define STRUCT(name, ...) STRUCT_ ## name,
4207 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4209 #include "syscall_types.h"
4213 #undef STRUCT_SPECIAL
4215 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4216 #define STRUCT_SPECIAL(name)
4217 #include "syscall_types.h"
4219 #undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Handler signature for ioctls that need custom argument translation
 * (buf_temp is the caller's scratch buffer of MAX_STRUCT_SIZE bytes). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table (see ioctl_entries[]). */
struct IOCTLEntry {
    int target_cmd;             /* guest ioctl number (0 terminates table) */
    unsigned int host_cmd;      /* host ioctl number */
    const char *name;           /* for logging */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* custom handler, or NULL for generic path */
    const argtype arg_type[5];  /* thunk description of the argument */
};
4235 #define IOC_R 0x0001
4236 #define IOC_W 0x0002
4237 #define IOC_RW (IOC_R | IOC_W)
4239 #define MAX_STRUCT_SIZE 4096
4241 #ifdef CONFIG_FIEMAP
4242 /* So fiemap access checks don't overflow on 32 bit systems.
4243 * This is very slightly smaller than the limit imposed by
4244 * the underlying kernel.
4246 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4247 / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert just the fixed struct fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the size computation below can't overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4330 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4331 int fd
, int cmd
, abi_long arg
)
4333 const argtype
*arg_type
= ie
->arg_type
;
4337 struct ifconf
*host_ifconf
;
4339 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4340 int target_ifreq_size
;
4345 abi_long target_ifc_buf
;
4349 assert(arg_type
[0] == TYPE_PTR
);
4350 assert(ie
->access
== IOC_RW
);
4353 target_size
= thunk_type_size(arg_type
, 0);
4355 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4357 return -TARGET_EFAULT
;
4358 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4359 unlock_user(argptr
, arg
, 0);
4361 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4362 target_ifc_len
= host_ifconf
->ifc_len
;
4363 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4365 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4366 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4367 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4369 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4370 if (outbufsz
> MAX_STRUCT_SIZE
) {
4371 /* We can't fit all the extents into the fixed size buffer.
4372 * Allocate one that is large enough and use it instead.
4374 host_ifconf
= malloc(outbufsz
);
4376 return -TARGET_ENOMEM
;
4378 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4381 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4383 host_ifconf
->ifc_len
= host_ifc_len
;
4384 host_ifconf
->ifc_buf
= host_ifc_buf
;
4386 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4387 if (!is_error(ret
)) {
4388 /* convert host ifc_len to target ifc_len */
4390 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4391 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4392 host_ifconf
->ifc_len
= target_ifc_len
;
4394 /* restore target ifc_buf */
4396 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4398 /* copy struct ifconf to target user */
4400 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4402 return -TARGET_EFAULT
;
4403 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4404 unlock_user(argptr
, arg
, target_size
);
4406 /* copy ifreq[] to target user */
4408 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4409 for (i
= 0; i
< nb_ifreq
; i
++) {
4410 thunk_convert(argptr
+ i
* target_ifreq_size
,
4411 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4412 ifreq_arg_type
, THUNK_TARGET
);
4414 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
/* Translate device-mapper ioctls.  The argument is a struct dm_ioctl
 * header followed by a variable-length data area at dm->data_start; the
 * layout of that area depends on the command, so each direction needs a
 * per-command conversion.  A double-sized scratch buffer (big_buf) holds
 * the host copy because buf_temp is too small for the data area.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* NOTE(review): this lock_user result is not checked before use —
     * confirm a bad guest pointer can't reach the memcpys below. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* First 8 bytes are the sector number, which must be swapped. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* A chain of dm_target_spec structs, each followed by a
         * parameter string; 'next' links to the following spec. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Walk the dm_name_list chain, converting each node and its
             * trailing name; stop when the guest buffer is exhausted. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec + status string per target. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A count followed (after 8 bytes) by an array of u64 devs. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions + name strings. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly flag-updated) header back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/* Translate BLKPG: the struct blkpg_ioctl_arg carries an embedded guest
 * pointer (->data) to a struct blkpg_partition, which must be fetched,
 * converted, and swizzled to a host-local copy before calling the host.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
/* Translate routing-table ioctls (SIOCADDRT/SIOCDELRT).  struct rtentry
 * is converted field-by-field so the rt_dev member — a pointer to a
 * device-name string in guest memory — can be locked as a host string
 * instead of being converted as data.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Lock the guest device-name string into host memory. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
/* Translate KDSIGACCEPT: the argument is a signal number, which must be
 * mapped from the guest's numbering to the host's before the ioctl.
 */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
/* The ioctl translation table, generated from ioctls.h via the IOCTL /
 * IOCTL_SPECIAL macros; a zero target_cmd entry terminates the table.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look the command up in ioctl_entries[],
 * delegate to a custom handler if one is registered, otherwise convert
 * the argument according to its thunk type and access direction.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Pass-through scalar / opaque pointer argument. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel fills buf_temp; convert out to guest on success. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Convert guest data in, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both directions: convert in, call, convert result out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag: guest <-> host bit translation (mask, bits pairs;
 * zero row terminates the table). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* termios c_oflag translation; the *DLY entries map multi-bit delay
 * fields value-by-value under their mask. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios c_cflag translation: baud-rate values under CBAUD, character
 * size under CSIZE, plus the single-bit control flags. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios c_lflag (local mode) bit translation. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
4968 static void target_to_host_termios (void *dst
, const void *src
)
4970 struct host_termios
*host
= dst
;
4971 const struct target_termios
*target
= src
;
4974 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4976 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4978 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4980 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4981 host
->c_line
= target
->c_line
;
4983 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4984 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4985 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4986 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4987 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4988 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4989 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4990 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4991 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4992 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4993 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4994 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4995 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4996 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4997 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4998 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4999 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5000 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5003 static void host_to_target_termios (void *dst
, const void *src
)
5005 struct target_termios
*target
= dst
;
5006 const struct host_termios
*host
= src
;
5009 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5011 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5013 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5015 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5016 target
->c_line
= host
->c_line
;
5018 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5019 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5020 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5021 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5022 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5023 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5024 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5025 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5026 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5027 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5028 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5029 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5030 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5031 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5032 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5033 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5034 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5035 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5038 static const StructEntry struct_termios_def
= {
5039 .convert
= { host_to_target_termios
, target_to_host_termios
},
5040 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5041 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5044 static bitmask_transtbl mmap_flags_tbl
[] = {
5045 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5046 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5047 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5048 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5049 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5050 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5051 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5052 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5053 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5058 #if defined(TARGET_I386)
5060 /* NOTE: there is really one LDT for all the threads */
5061 static uint8_t *ldt_table
;
5063 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5070 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5071 if (size
> bytecount
)
5073 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5075 return -TARGET_EFAULT
;
5076 /* ??? Should this by byteswapped? */
5077 memcpy(p
, ldt_table
, size
);
5078 unlock_user(p
, ptr
, size
);
5082 /* XXX: add locking support */
5083 static abi_long
write_ldt(CPUX86State
*env
,
5084 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5086 struct target_modify_ldt_ldt_s ldt_info
;
5087 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5088 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5089 int seg_not_present
, useable
, lm
;
5090 uint32_t *lp
, entry_1
, entry_2
;
5092 if (bytecount
!= sizeof(ldt_info
))
5093 return -TARGET_EINVAL
;
5094 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5095 return -TARGET_EFAULT
;
5096 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5097 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5098 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5099 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5100 unlock_user_struct(target_ldt_info
, ptr
, 0);
5102 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5103 return -TARGET_EINVAL
;
5104 seg_32bit
= ldt_info
.flags
& 1;
5105 contents
= (ldt_info
.flags
>> 1) & 3;
5106 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5107 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5108 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5109 useable
= (ldt_info
.flags
>> 6) & 1;
5113 lm
= (ldt_info
.flags
>> 7) & 1;
5115 if (contents
== 3) {
5117 return -TARGET_EINVAL
;
5118 if (seg_not_present
== 0)
5119 return -TARGET_EINVAL
;
5121 /* allocate the LDT */
5123 env
->ldt
.base
= target_mmap(0,
5124 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5125 PROT_READ
|PROT_WRITE
,
5126 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5127 if (env
->ldt
.base
== -1)
5128 return -TARGET_ENOMEM
;
5129 memset(g2h(env
->ldt
.base
), 0,
5130 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5131 env
->ldt
.limit
= 0xffff;
5132 ldt_table
= g2h(env
->ldt
.base
);
5135 /* NOTE: same code as Linux kernel */
5136 /* Allow LDTs to be cleared by the user. */
5137 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5140 read_exec_only
== 1 &&
5142 limit_in_pages
== 0 &&
5143 seg_not_present
== 1 &&
5151 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5152 (ldt_info
.limit
& 0x0ffff);
5153 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5154 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5155 (ldt_info
.limit
& 0xf0000) |
5156 ((read_exec_only
^ 1) << 9) |
5158 ((seg_not_present
^ 1) << 15) |
5160 (limit_in_pages
<< 23) |
5164 entry_2
|= (useable
<< 20);
5166 /* Install the new entry ... */
5168 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5169 lp
[0] = tswap32(entry_1
);
5170 lp
[1] = tswap32(entry_2
);
5174 /* specific and weird i386 syscalls */
5175 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5176 unsigned long bytecount
)
5182 ret
= read_ldt(ptr
, bytecount
);
5185 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5188 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5191 ret
= -TARGET_ENOSYS
;
5197 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5198 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5200 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5201 struct target_modify_ldt_ldt_s ldt_info
;
5202 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5203 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5204 int seg_not_present
, useable
, lm
;
5205 uint32_t *lp
, entry_1
, entry_2
;
5208 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5209 if (!target_ldt_info
)
5210 return -TARGET_EFAULT
;
5211 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5212 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5213 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5214 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5215 if (ldt_info
.entry_number
== -1) {
5216 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5217 if (gdt_table
[i
] == 0) {
5218 ldt_info
.entry_number
= i
;
5219 target_ldt_info
->entry_number
= tswap32(i
);
5224 unlock_user_struct(target_ldt_info
, ptr
, 1);
5226 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5227 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5228 return -TARGET_EINVAL
;
5229 seg_32bit
= ldt_info
.flags
& 1;
5230 contents
= (ldt_info
.flags
>> 1) & 3;
5231 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5232 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5233 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5234 useable
= (ldt_info
.flags
>> 6) & 1;
5238 lm
= (ldt_info
.flags
>> 7) & 1;
5241 if (contents
== 3) {
5242 if (seg_not_present
== 0)
5243 return -TARGET_EINVAL
;
5246 /* NOTE: same code as Linux kernel */
5247 /* Allow LDTs to be cleared by the user. */
5248 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5249 if ((contents
== 0 &&
5250 read_exec_only
== 1 &&
5252 limit_in_pages
== 0 &&
5253 seg_not_present
== 1 &&
5261 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5262 (ldt_info
.limit
& 0x0ffff);
5263 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5264 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5265 (ldt_info
.limit
& 0xf0000) |
5266 ((read_exec_only
^ 1) << 9) |
5268 ((seg_not_present
^ 1) << 15) |
5270 (limit_in_pages
<< 23) |
5275 /* Install the new entry ... */
5277 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5278 lp
[0] = tswap32(entry_1
);
5279 lp
[1] = tswap32(entry_2
);
5283 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5285 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5286 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5287 uint32_t base_addr
, limit
, flags
;
5288 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5289 int seg_not_present
, useable
, lm
;
5290 uint32_t *lp
, entry_1
, entry_2
;
5292 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5293 if (!target_ldt_info
)
5294 return -TARGET_EFAULT
;
5295 idx
= tswap32(target_ldt_info
->entry_number
);
5296 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5297 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5298 unlock_user_struct(target_ldt_info
, ptr
, 1);
5299 return -TARGET_EINVAL
;
5301 lp
= (uint32_t *)(gdt_table
+ idx
);
5302 entry_1
= tswap32(lp
[0]);
5303 entry_2
= tswap32(lp
[1]);
5305 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5306 contents
= (entry_2
>> 10) & 3;
5307 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5308 seg_32bit
= (entry_2
>> 22) & 1;
5309 limit_in_pages
= (entry_2
>> 23) & 1;
5310 useable
= (entry_2
>> 20) & 1;
5314 lm
= (entry_2
>> 21) & 1;
5316 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5317 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5318 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5319 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5320 base_addr
= (entry_1
>> 16) |
5321 (entry_2
& 0xff000000) |
5322 ((entry_2
& 0xff) << 16);
5323 target_ldt_info
->base_addr
= tswapal(base_addr
);
5324 target_ldt_info
->limit
= tswap32(limit
);
5325 target_ldt_info
->flags
= tswap32(flags
);
5326 unlock_user_struct(target_ldt_info
, ptr
, 1);
5329 #endif /* TARGET_I386 && TARGET_ABI32 */
5331 #ifndef TARGET_ABI32
5332 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5339 case TARGET_ARCH_SET_GS
:
5340 case TARGET_ARCH_SET_FS
:
5341 if (code
== TARGET_ARCH_SET_GS
)
5345 cpu_x86_load_seg(env
, idx
, 0);
5346 env
->segs
[idx
].base
= addr
;
5348 case TARGET_ARCH_GET_GS
:
5349 case TARGET_ARCH_GET_FS
:
5350 if (code
== TARGET_ARCH_GET_GS
)
5354 val
= env
->segs
[idx
].base
;
5355 if (put_user(val
, addr
, abi_ulong
))
5356 ret
= -TARGET_EFAULT
;
5359 ret
= -TARGET_EINVAL
;
5366 #endif /* defined(TARGET_I386) */
5368 #define NEW_STACK_SIZE 0x40000
5371 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5374 pthread_mutex_t mutex
;
5375 pthread_cond_t cond
;
5378 abi_ulong child_tidptr
;
5379 abi_ulong parent_tidptr
;
5383 static void *clone_func(void *arg
)
5385 new_thread_info
*info
= arg
;
5390 rcu_register_thread();
5392 cpu
= ENV_GET_CPU(env
);
5394 ts
= (TaskState
*)cpu
->opaque
;
5395 info
->tid
= gettid();
5396 cpu
->host_tid
= info
->tid
;
5398 if (info
->child_tidptr
)
5399 put_user_u32(info
->tid
, info
->child_tidptr
);
5400 if (info
->parent_tidptr
)
5401 put_user_u32(info
->tid
, info
->parent_tidptr
);
5402 /* Enable signals. */
5403 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5404 /* Signal to the parent that we're ready. */
5405 pthread_mutex_lock(&info
->mutex
);
5406 pthread_cond_broadcast(&info
->cond
);
5407 pthread_mutex_unlock(&info
->mutex
);
5408 /* Wait until the parent has finshed initializing the tls state. */
5409 pthread_mutex_lock(&clone_lock
);
5410 pthread_mutex_unlock(&clone_lock
);
5416 /* do_fork() Must return host values and target errnos (unlike most
5417 do_*() functions). */
5418 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5419 abi_ulong parent_tidptr
, target_ulong newtls
,
5420 abi_ulong child_tidptr
)
5422 CPUState
*cpu
= ENV_GET_CPU(env
);
5426 CPUArchState
*new_env
;
5427 unsigned int nptl_flags
;
5430 /* Emulate vfork() with fork() */
5431 if (flags
& CLONE_VFORK
)
5432 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5434 if (flags
& CLONE_VM
) {
5435 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5436 new_thread_info info
;
5437 pthread_attr_t attr
;
5439 ts
= g_new0(TaskState
, 1);
5440 init_task_state(ts
);
5441 /* we create a new CPU instance. */
5442 new_env
= cpu_copy(env
);
5443 /* Init regs that differ from the parent. */
5444 cpu_clone_regs(new_env
, newsp
);
5445 new_cpu
= ENV_GET_CPU(new_env
);
5446 new_cpu
->opaque
= ts
;
5447 ts
->bprm
= parent_ts
->bprm
;
5448 ts
->info
= parent_ts
->info
;
5449 ts
->signal_mask
= parent_ts
->signal_mask
;
5451 flags
&= ~CLONE_NPTL_FLAGS2
;
5453 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5454 ts
->child_tidptr
= child_tidptr
;
5457 if (nptl_flags
& CLONE_SETTLS
)
5458 cpu_set_tls (new_env
, newtls
);
5460 /* Grab a mutex so that thread setup appears atomic. */
5461 pthread_mutex_lock(&clone_lock
);
5463 memset(&info
, 0, sizeof(info
));
5464 pthread_mutex_init(&info
.mutex
, NULL
);
5465 pthread_mutex_lock(&info
.mutex
);
5466 pthread_cond_init(&info
.cond
, NULL
);
5468 if (nptl_flags
& CLONE_CHILD_SETTID
)
5469 info
.child_tidptr
= child_tidptr
;
5470 if (nptl_flags
& CLONE_PARENT_SETTID
)
5471 info
.parent_tidptr
= parent_tidptr
;
5473 ret
= pthread_attr_init(&attr
);
5474 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5475 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5476 /* It is not safe to deliver signals until the child has finished
5477 initializing, so temporarily block all signals. */
5478 sigfillset(&sigmask
);
5479 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5481 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5482 /* TODO: Free new CPU state if thread creation failed. */
5484 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5485 pthread_attr_destroy(&attr
);
5487 /* Wait for the child to initialize. */
5488 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5490 if (flags
& CLONE_PARENT_SETTID
)
5491 put_user_u32(ret
, parent_tidptr
);
5495 pthread_mutex_unlock(&info
.mutex
);
5496 pthread_cond_destroy(&info
.cond
);
5497 pthread_mutex_destroy(&info
.mutex
);
5498 pthread_mutex_unlock(&clone_lock
);
5500 /* if no CLONE_VM, we consider it is a fork */
5501 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
5502 return -TARGET_EINVAL
;
5505 if (block_signals()) {
5506 return -TARGET_ERESTARTSYS
;
5512 /* Child Process. */
5514 cpu_clone_regs(env
, newsp
);
5516 /* There is a race condition here. The parent process could
5517 theoretically read the TID in the child process before the child
5518 tid is set. This would require using either ptrace
5519 (not implemented) or having *_tidptr to point at a shared memory
5520 mapping. We can't repeat the spinlock hack used above because
5521 the child process gets its own copy of the lock. */
5522 if (flags
& CLONE_CHILD_SETTID
)
5523 put_user_u32(gettid(), child_tidptr
);
5524 if (flags
& CLONE_PARENT_SETTID
)
5525 put_user_u32(gettid(), parent_tidptr
);
5526 ts
= (TaskState
*)cpu
->opaque
;
5527 if (flags
& CLONE_SETTLS
)
5528 cpu_set_tls (env
, newtls
);
5529 if (flags
& CLONE_CHILD_CLEARTID
)
5530 ts
->child_tidptr
= child_tidptr
;
5538 /* warning : doesn't handle linux specific flags... */
5539 static int target_to_host_fcntl_cmd(int cmd
)
5542 case TARGET_F_DUPFD
:
5543 case TARGET_F_GETFD
:
5544 case TARGET_F_SETFD
:
5545 case TARGET_F_GETFL
:
5546 case TARGET_F_SETFL
:
5548 case TARGET_F_GETLK
:
5550 case TARGET_F_SETLK
:
5552 case TARGET_F_SETLKW
:
5554 case TARGET_F_GETOWN
:
5556 case TARGET_F_SETOWN
:
5558 case TARGET_F_GETSIG
:
5560 case TARGET_F_SETSIG
:
5562 #if TARGET_ABI_BITS == 32
5563 case TARGET_F_GETLK64
:
5565 case TARGET_F_SETLK64
:
5567 case TARGET_F_SETLKW64
:
5570 case TARGET_F_SETLEASE
:
5572 case TARGET_F_GETLEASE
:
5574 #ifdef F_DUPFD_CLOEXEC
5575 case TARGET_F_DUPFD_CLOEXEC
:
5576 return F_DUPFD_CLOEXEC
;
5578 case TARGET_F_NOTIFY
:
5581 case TARGET_F_GETOWN_EX
:
5585 case TARGET_F_SETOWN_EX
:
5589 return -TARGET_EINVAL
;
5591 return -TARGET_EINVAL
;
5594 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5595 static const bitmask_transtbl flock_tbl
[] = {
5596 TRANSTBL_CONVERT(F_RDLCK
),
5597 TRANSTBL_CONVERT(F_WRLCK
),
5598 TRANSTBL_CONVERT(F_UNLCK
),
5599 TRANSTBL_CONVERT(F_EXLCK
),
5600 TRANSTBL_CONVERT(F_SHLCK
),
5604 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5605 abi_ulong target_flock_addr
)
5607 struct target_flock
*target_fl
;
5610 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5611 return -TARGET_EFAULT
;
5614 __get_user(l_type
, &target_fl
->l_type
);
5615 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
5616 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5617 __get_user(fl
->l_start
, &target_fl
->l_start
);
5618 __get_user(fl
->l_len
, &target_fl
->l_len
);
5619 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5620 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5624 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5625 const struct flock64
*fl
)
5627 struct target_flock
*target_fl
;
5630 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5631 return -TARGET_EFAULT
;
5634 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
5635 __put_user(l_type
, &target_fl
->l_type
);
5636 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5637 __put_user(fl
->l_start
, &target_fl
->l_start
);
5638 __put_user(fl
->l_len
, &target_fl
->l_len
);
5639 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5640 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5644 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5645 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
5647 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5648 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
5649 abi_ulong target_flock_addr
)
5651 struct target_eabi_flock64
*target_fl
;
5654 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5655 return -TARGET_EFAULT
;
5658 __get_user(l_type
, &target_fl
->l_type
);
5659 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
5660 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5661 __get_user(fl
->l_start
, &target_fl
->l_start
);
5662 __get_user(fl
->l_len
, &target_fl
->l_len
);
5663 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5664 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5668 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
5669 const struct flock64
*fl
)
5671 struct target_eabi_flock64
*target_fl
;
5674 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5675 return -TARGET_EFAULT
;
5678 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
5679 __put_user(l_type
, &target_fl
->l_type
);
5680 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5681 __put_user(fl
->l_start
, &target_fl
->l_start
);
5682 __put_user(fl
->l_len
, &target_fl
->l_len
);
5683 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5684 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5689 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5690 abi_ulong target_flock_addr
)
5692 struct target_flock64
*target_fl
;
5695 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5696 return -TARGET_EFAULT
;
5699 __get_user(l_type
, &target_fl
->l_type
);
5700 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
5701 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5702 __get_user(fl
->l_start
, &target_fl
->l_start
);
5703 __get_user(fl
->l_len
, &target_fl
->l_len
);
5704 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5705 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5709 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5710 const struct flock64
*fl
)
5712 struct target_flock64
*target_fl
;
5715 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5716 return -TARGET_EFAULT
;
5719 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
5720 __put_user(l_type
, &target_fl
->l_type
);
5721 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5722 __put_user(fl
->l_start
, &target_fl
->l_start
);
5723 __put_user(fl
->l_len
, &target_fl
->l_len
);
5724 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5725 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5729 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5731 struct flock64 fl64
;
5733 struct f_owner_ex fox
;
5734 struct target_f_owner_ex
*target_fox
;
5737 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5739 if (host_cmd
== -TARGET_EINVAL
)
5743 case TARGET_F_GETLK
:
5744 ret
= copy_from_user_flock(&fl64
, arg
);
5748 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5750 ret
= copy_to_user_flock(arg
, &fl64
);
5754 case TARGET_F_SETLK
:
5755 case TARGET_F_SETLKW
:
5756 ret
= copy_from_user_flock(&fl64
, arg
);
5760 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5763 case TARGET_F_GETLK64
:
5764 ret
= copy_from_user_flock64(&fl64
, arg
);
5768 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5770 ret
= copy_to_user_flock64(arg
, &fl64
);
5773 case TARGET_F_SETLK64
:
5774 case TARGET_F_SETLKW64
:
5775 ret
= copy_from_user_flock64(&fl64
, arg
);
5779 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5782 case TARGET_F_GETFL
:
5783 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5785 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5789 case TARGET_F_SETFL
:
5790 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
5791 target_to_host_bitmask(arg
,
5796 case TARGET_F_GETOWN_EX
:
5797 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5799 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5800 return -TARGET_EFAULT
;
5801 target_fox
->type
= tswap32(fox
.type
);
5802 target_fox
->pid
= tswap32(fox
.pid
);
5803 unlock_user_struct(target_fox
, arg
, 1);
5809 case TARGET_F_SETOWN_EX
:
5810 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5811 return -TARGET_EFAULT
;
5812 fox
.type
= tswap32(target_fox
->type
);
5813 fox
.pid
= tswap32(target_fox
->pid
);
5814 unlock_user_struct(target_fox
, arg
, 0);
5815 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5819 case TARGET_F_SETOWN
:
5820 case TARGET_F_GETOWN
:
5821 case TARGET_F_SETSIG
:
5822 case TARGET_F_GETSIG
:
5823 case TARGET_F_SETLEASE
:
5824 case TARGET_F_GETLEASE
:
5825 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5829 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
5837 static inline int high2lowuid(int uid
)
5845 static inline int high2lowgid(int gid
)
5853 static inline int low2highuid(int uid
)
5855 if ((int16_t)uid
== -1)
5861 static inline int low2highgid(int gid
)
5863 if ((int16_t)gid
== -1)
5868 static inline int tswapid(int id
)
5873 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5875 #else /* !USE_UID16 */
5876 static inline int high2lowuid(int uid
)
5880 static inline int high2lowgid(int gid
)
5884 static inline int low2highuid(int uid
)
5888 static inline int low2highgid(int gid
)
5892 static inline int tswapid(int id
)
5897 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5899 #endif /* USE_UID16 */
5901 /* We must do direct syscalls for setting UID/GID, because we want to
5902 * implement the Linux system call semantics of "change only for this thread",
5903 * not the libc/POSIX semantics of "change for all threads in process".
5904 * (See http://ewontfix.com/17/ for more details.)
5905 * We use the 32-bit version of the syscalls if present; if it is not
5906 * then either the host architecture supports 32-bit UIDs natively with
5907 * the standard syscall, or the 16-bit UID is the best we can do.
5909 #ifdef __NR_setuid32
5910 #define __NR_sys_setuid __NR_setuid32
5912 #define __NR_sys_setuid __NR_setuid
5914 #ifdef __NR_setgid32
5915 #define __NR_sys_setgid __NR_setgid32
5917 #define __NR_sys_setgid __NR_setgid
5919 #ifdef __NR_setresuid32
5920 #define __NR_sys_setresuid __NR_setresuid32
5922 #define __NR_sys_setresuid __NR_setresuid
5924 #ifdef __NR_setresgid32
5925 #define __NR_sys_setresgid __NR_setresgid32
5927 #define __NR_sys_setresgid __NR_setresgid
5930 _syscall1(int, sys_setuid
, uid_t
, uid
)
5931 _syscall1(int, sys_setgid
, gid_t
, gid
)
5932 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
5933 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
5935 void syscall_init(void)
5938 const argtype
*arg_type
;
5942 thunk_init(STRUCT_MAX
);
5944 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5945 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5946 #include "syscall_types.h"
5948 #undef STRUCT_SPECIAL
5950 /* Build target_to_host_errno_table[] table from
5951 * host_to_target_errno_table[]. */
5952 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5953 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5956 /* we patch the ioctl size if necessary. We rely on the fact that
5957 no ioctl has all the bits at '1' in the size field */
5959 while (ie
->target_cmd
!= 0) {
5960 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5961 TARGET_IOC_SIZEMASK
) {
5962 arg_type
= ie
->arg_type
;
5963 if (arg_type
[0] != TYPE_PTR
) {
5964 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5969 size
= thunk_type_size(arg_type
, 0);
5970 ie
->target_cmd
= (ie
->target_cmd
&
5971 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5972 (size
<< TARGET_IOC_SIZESHIFT
);
5975 /* automatic consistency check if same arch */
5976 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5977 (defined(__x86_64__) && defined(TARGET_X86_64))
5978 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5979 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5980 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
5987 #if TARGET_ABI_BITS == 32
5988 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
5990 #ifdef TARGET_WORDS_BIGENDIAN
5991 return ((uint64_t)word0
<< 32) | word1
;
5993 return ((uint64_t)word1
<< 32) | word0
;
5996 #else /* TARGET_ABI_BITS == 32 */
5997 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6001 #endif /* TARGET_ABI_BITS != 32 */
6003 #ifdef TARGET_NR_truncate64
6004 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6009 if (regpairs_aligned(cpu_env
)) {
6013 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6017 #ifdef TARGET_NR_ftruncate64
6018 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6023 if (regpairs_aligned(cpu_env
)) {
6027 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6031 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6032 abi_ulong target_addr
)
6034 struct target_timespec
*target_ts
;
6036 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6037 return -TARGET_EFAULT
;
6038 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6039 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6040 unlock_user_struct(target_ts
, target_addr
, 0);
6044 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6045 struct timespec
*host_ts
)
6047 struct target_timespec
*target_ts
;
6049 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6050 return -TARGET_EFAULT
;
6051 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6052 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6053 unlock_user_struct(target_ts
, target_addr
, 1);
6057 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6058 abi_ulong target_addr
)
6060 struct target_itimerspec
*target_itspec
;
6062 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6063 return -TARGET_EFAULT
;
6066 host_itspec
->it_interval
.tv_sec
=
6067 tswapal(target_itspec
->it_interval
.tv_sec
);
6068 host_itspec
->it_interval
.tv_nsec
=
6069 tswapal(target_itspec
->it_interval
.tv_nsec
);
6070 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6071 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6073 unlock_user_struct(target_itspec
, target_addr
, 1);
6077 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6078 struct itimerspec
*host_its
)
6080 struct target_itimerspec
*target_itspec
;
6082 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6083 return -TARGET_EFAULT
;
6086 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6087 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6089 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6090 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6092 unlock_user_struct(target_itspec
, target_addr
, 0);
6096 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6097 abi_ulong target_addr
)
6099 struct target_sigevent
*target_sevp
;
6101 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6102 return -TARGET_EFAULT
;
6105 /* This union is awkward on 64 bit systems because it has a 32 bit
6106 * integer and a pointer in it; we follow the conversion approach
6107 * used for handling sigval types in signal.c so the guest should get
6108 * the correct value back even if we did a 64 bit byteswap and it's
6109 * using the 32 bit integer.
6111 host_sevp
->sigev_value
.sival_ptr
=
6112 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6113 host_sevp
->sigev_signo
=
6114 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6115 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6116 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6118 unlock_user_struct(target_sevp
, target_addr
, 1);
6122 #if defined(TARGET_NR_mlockall)
6123 static inline int target_to_host_mlockall_arg(int arg
)
6127 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6128 result
|= MCL_CURRENT
;
6130 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6131 result
|= MCL_FUTURE
;
6137 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6138 abi_ulong target_addr
,
6139 struct stat
*host_st
)
6141 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6142 if (((CPUARMState
*)cpu_env
)->eabi
) {
6143 struct target_eabi_stat64
*target_st
;
6145 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6146 return -TARGET_EFAULT
;
6147 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6148 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6149 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6150 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6151 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6153 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6154 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6155 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6156 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6157 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6158 __put_user(host_st
->st_size
, &target_st
->st_size
);
6159 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6160 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6161 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6162 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6163 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6164 unlock_user_struct(target_st
, target_addr
, 1);
6168 #if defined(TARGET_HAS_STRUCT_STAT64)
6169 struct target_stat64
*target_st
;
6171 struct target_stat
*target_st
;
6174 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6175 return -TARGET_EFAULT
;
6176 memset(target_st
, 0, sizeof(*target_st
));
6177 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6178 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6179 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6180 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6182 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6183 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6184 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6185 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6186 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6187 /* XXX: better use of kernel struct */
6188 __put_user(host_st
->st_size
, &target_st
->st_size
);
6189 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6190 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6191 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6192 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6193 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6194 unlock_user_struct(target_st
, target_addr
, 1);
6200 /* ??? Using host futex calls even when target atomic operations
6201 are not really atomic probably breaks things. However implementing
6202 futexes locally would make futexes shared between multiple processes
6203 tricky. However they're probably useless because guest atomic
6204 operations won't work either. */
6205 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6206 target_ulong uaddr2
, int val3
)
6208 struct timespec ts
, *pts
;
6211 /* ??? We assume FUTEX_* constants are the same on both host
6213 #ifdef FUTEX_CMD_MASK
6214 base_op
= op
& FUTEX_CMD_MASK
;
6220 case FUTEX_WAIT_BITSET
:
6223 target_to_host_timespec(pts
, timeout
);
6227 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6230 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6232 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6234 case FUTEX_CMP_REQUEUE
:
6236 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6237 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6238 But the prototype takes a `struct timespec *'; insert casts
6239 to satisfy the compiler. We do not need to tswap TIMEOUT
6240 since it's not compared to guest memory. */
6241 pts
= (struct timespec
*)(uintptr_t) timeout
;
6242 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6244 (base_op
== FUTEX_CMP_REQUEUE
6248 return -TARGET_ENOSYS
;
6251 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6252 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6253 abi_long handle
, abi_long mount_id
,
6256 struct file_handle
*target_fh
;
6257 struct file_handle
*fh
;
6261 unsigned int size
, total_size
;
6263 if (get_user_s32(size
, handle
)) {
6264 return -TARGET_EFAULT
;
6267 name
= lock_user_string(pathname
);
6269 return -TARGET_EFAULT
;
6272 total_size
= sizeof(struct file_handle
) + size
;
6273 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6275 unlock_user(name
, pathname
, 0);
6276 return -TARGET_EFAULT
;
6279 fh
= g_malloc0(total_size
);
6280 fh
->handle_bytes
= size
;
6282 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6283 unlock_user(name
, pathname
, 0);
6285 /* man name_to_handle_at(2):
6286 * Other than the use of the handle_bytes field, the caller should treat
6287 * the file_handle structure as an opaque data type
6290 memcpy(target_fh
, fh
, total_size
);
6291 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6292 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6294 unlock_user(target_fh
, handle
, total_size
);
6296 if (put_user_s32(mid
, mount_id
)) {
6297 return -TARGET_EFAULT
;
6305 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6306 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6309 struct file_handle
*target_fh
;
6310 struct file_handle
*fh
;
6311 unsigned int size
, total_size
;
6314 if (get_user_s32(size
, handle
)) {
6315 return -TARGET_EFAULT
;
6318 total_size
= sizeof(struct file_handle
) + size
;
6319 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6321 return -TARGET_EFAULT
;
6324 fh
= g_memdup(target_fh
, total_size
);
6325 fh
->handle_bytes
= size
;
6326 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6328 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6329 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6333 unlock_user(target_fh
, handle
, total_size
);
6339 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6341 /* signalfd siginfo conversion */
/*
 * Convert one signalfd_siginfo record from host to target byte order.
 * The only in-file caller passes tinfo == info (in-place conversion),
 * which the field ordering below relies on for correctness.
 */
static void host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                            const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* FIX: read the not-yet-swapped source fields via 'info'; the old
     * code peeked at 'tinfo', which only worked because the sole caller
     * aliases the two pointers.
     */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* FIX: was tswap32(tinfo->ssi_errno) — read the source, not the
     * destination (identical when tinfo == info, as in the caller).
     */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6381 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6385 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6386 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
/* fd translator registered on signalfd descriptors: data read from the
 * fd is converted for the guest via host_to_target_data_signalfd().
 * NOTE(review): text below was garbled by extraction (identifiers split
 * across lines, original line numbers fused in); tokens kept verbatim.
 */
6392 static TargetFdTrans target_signalfd_trans
= {
6393 .host_to_target_data
= host_to_target_data_signalfd
,
6396 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6399 target_sigset_t
*target_mask
;
6403 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6404 return -TARGET_EINVAL
;
6406 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6407 return -TARGET_EFAULT
;
6410 target_to_host_sigset(&host_mask
, target_mask
);
6412 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6414 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6416 fd_trans_register(ret
, &target_signalfd_trans
);
6419 unlock_user_struct(target_mask
, mask
, 0);
6425 /* Map host to target signal numbers for the wait family of syscalls.
6426 Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Replace the host termination signal; keep the other bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; the low byte is preserved. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    return status;
}
/*
 * Back /proc/self/cmdline for the guest: stream the host file into @fd,
 * dropping the first NUL-terminated word (the path of the qemu binary)
 * so the guest sees its own argv[0] first.
 *
 * Returns 0 on success, -1 on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            fd_orig = close(fd_orig);
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* BUG FIX: search only the nb_read bytes actually read; the
             * old bound of sizeof(buf) could match stale bytes past the
             * end of a short read and corrupt the skip arithmetic.
             */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                close(fd_orig);
                return -1;
            }
        }
    }

    return close(fd_orig);
}
/* open_self_maps(): synthesize /proc/self/maps for the guest by parsing
 * the host's maps file and rewriting each host range through h2g(),
 * emitting only ranges valid in the guest address space and relabelling
 * the guest stack region.
 * NOTE(review): this block was garbled by text extraction (identifiers
 * split across lines, original file line numbers fused into the text,
 * some lines dropped); tokens below are preserved verbatim, comments
 * only added.
 */
6489 static int open_self_maps(void *cpu_env
, int fd
)
6491 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6492 TaskState
*ts
= cpu
->opaque
;
6498 fp
= fopen("/proc/self/maps", "r");
/* One host maps line per iteration; fields re-parsed via sscanf below. */
6503 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6504 int fields
, dev_maj
, dev_min
, inode
;
6505 uint64_t min
, max
, offset
;
6506 char flag_r
, flag_w
, flag_x
, flag_p
;
6507 char path
[512] = "";
6508 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6509 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6510 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
/* 10 fields = anonymous mapping, 11 = file-backed; anything else is skipped. */
6512 if ((fields
< 10) || (fields
> 11)) {
/* Only report ranges that fall inside the guest address space. */
6515 if (h2g_valid(min
)) {
6516 int flags
= page_get_flags(h2g(min
));
6517 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
6518 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6521 if (h2g(min
) == ts
->info
->stack_limit
) {
6522 pstrcpy(path
, sizeof(path
), " [stack]");
6524 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
6525 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6526 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6527 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6528 path
[0] ? " " : "", path
);
/* open_self_stat(): synthesize /proc/self/stat for the guest.  Of the 44
 * space-separated fields, only pid (0), comm (1) and start_stack (27)
 * carry real values; everything else is emitted as a placeholder "0".
 * NOTE(review): block garbled by text extraction; tokens preserved
 * verbatim, comments only added.
 */
6538 static int open_self_stat(void *cpu_env
, int fd
)
6540 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6541 TaskState
*ts
= cpu
->opaque
;
6542 abi_ulong start_stack
= ts
->info
->start_stack
;
/* Emit the 44 fields one at a time. */
6545 for (i
= 0; i
< 44; i
++) {
6553 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6554 } else if (i
== 1) {
/* Field 1 is the command name, parenthesized per procfs convention. */
6556 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6557 } else if (i
== 27) {
6560 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6562 /* for the rest, there is MasterCard */
6563 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6567 if (write(fd
, buf
, len
) != len
) {
/* open_self_auxv(): back /proc/self/auxv for the guest by copying the
 * auxiliary vector saved on the guest stack at load time out to @fd,
 * then rewinding the fd.
 * NOTE(review): block garbled by text extraction (write loop lines
 * dropped); tokens preserved verbatim, comments only added.
 */
6575 static int open_self_auxv(void *cpu_env
, int fd
)
6577 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6578 TaskState
*ts
= cpu
->opaque
;
6579 abi_ulong auxv
= ts
->info
->saved_auxv
;
6580 abi_ulong len
= ts
->info
->auxv_len
;
6584 * Auxiliary vector is stored in target process stack.
6585 * read in whole auxv vector and copy it to file
6587 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6591 r
= write(fd
, ptr
, len
);
/* Rewind so the guest's first read starts at offset 0. */
6598 lseek(fd
, 0, SEEK_SET
);
6599 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when @filename names this process's own /proc entry @entry —
 * either "/proc/self/<entry>" or "/proc/<our pid>/<entry>" — else 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        char pid_prefix[80];

        /* Numeric form: must match our own pid exactly, slash included. */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
6629 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path match used for faked /proc entries with absolute names. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/* open_net_route(): rewrite /proc/net/route for the guest when host and
 * guest byte order differ — the address/mask columns are raw 32-bit
 * values and must be byte-swapped; the header line is copied as is.
 * NOTE(review): block garbled by text extraction; tokens preserved
 * verbatim, comments only added.
 */
6635 static int open_net_route(void *cpu_env
, int fd
)
6642 fp
= fopen("/proc/net/route", "r");
/* First line is the column header: pass it through unchanged. */
6649 read
= getline(&line
, &len
, fp
);
6650 dprintf(fd
, "%s", line
);
/* Remaining lines: parse, swap dest/gw/mask, re-emit. */
6654 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6656 uint32_t dest
, gw
, mask
;
6657 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6658 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6659 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6660 &mask
, &mtu
, &window
, &irtt
);
6661 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6662 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6663 metric
, tswap32(mask
), mtu
, window
, irtt
);
/* do_openat(): openat(2) front end that intercepts /proc paths needing
 * guest-specific contents.  "/proc/self/exe" is redirected to the real
 * executable; entries in the fakes[] table are materialized into an
 * unlinked temp file filled by the matching fill() callback; everything
 * else falls through to the host openat().
 * NOTE(review): block garbled by text extraction (several lines dropped);
 * tokens preserved verbatim, comments only added.
 */
6673 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6676 const char *filename
;
6677 int (*fill
)(void *cpu_env
, int fd
);
6678 int (*cmp
)(const char *s1
, const char *s2
);
6680 const struct fake_open
*fake_open
;
/* Table of /proc entries whose contents must be synthesized. */
6681 static const struct fake_open fakes
[] = {
6682 { "maps", open_self_maps
, is_proc_myself
},
6683 { "stat", open_self_stat
, is_proc_myself
},
6684 { "auxv", open_self_auxv
, is_proc_myself
},
6685 { "cmdline", open_self_cmdline
, is_proc_myself
},
6686 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6687 { "/proc/net/route", open_net_route
, is_proc
},
6689 { NULL
, NULL
, NULL
}
/* /proc/self/exe: reopen the guest binary itself, not qemu. */
6692 if (is_proc_myself(pathname
, "exe")) {
6693 int execfd
= qemu_getauxval(AT_EXECFD
);
6694 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6697 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6698 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6703 if (fake_open
->filename
) {
6705 char filename
[PATH_MAX
];
6708 /* create temporary file to map stat to */
6709 tmpdir
= getenv("TMPDIR");
6712 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6713 fd
= mkstemp(filename
);
6719 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
/* Rewind so the guest reads the synthesized content from the start. */
6725 lseek(fd
, 0, SEEK_SET
);
/* No fake matched: plain host openat() on the (redirected) path. */
6730 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6733 #define TIMER_MAGIC 0x0caf0000
6734 #define TIMER_MAGIC_MASK 0xffff0000
6736 /* Convert QEMU provided timer ID back to internal 16bit index format */
6737 static target_timer_t
get_timer_id(abi_long arg
)
6739 target_timer_t timerid
= arg
;
6741 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6742 return -TARGET_EINVAL
;
6747 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6748 return -TARGET_EINVAL
;
6754 /* do_syscall() should always have a single exit point at the end so
6755 that actions, such as logging of syscall results, can be performed.
6756 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6757 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
6758 abi_long arg2
, abi_long arg3
, abi_long arg4
,
6759 abi_long arg5
, abi_long arg6
, abi_long arg7
,
6762 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
6768 #if defined(DEBUG_ERESTARTSYS)
6769 /* Debug-only code for exercising the syscall-restart code paths
6770 * in the per-architecture cpu main loops: restart every syscall
6771 * the guest makes once before letting it through.
6778 return -TARGET_ERESTARTSYS
;
6784 gemu_log("syscall %d", num
);
6787 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6790 case TARGET_NR_exit
:
6791 /* In old applications this may be used to implement _exit(2).
6792 However in threaded applictions it is used for thread termination,
6793 and _exit_group is used for application termination.
6794 Do thread termination if we have more then one thread. */
6796 if (block_signals()) {
6797 ret
= -TARGET_ERESTARTSYS
;
6801 if (CPU_NEXT(first_cpu
)) {
6805 /* Remove the CPU from the list. */
6806 QTAILQ_REMOVE(&cpus
, cpu
, node
);
6809 if (ts
->child_tidptr
) {
6810 put_user_u32(0, ts
->child_tidptr
);
6811 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
6815 object_unref(OBJECT(cpu
));
6817 rcu_unregister_thread();
6823 gdb_exit(cpu_env
, arg1
);
6825 ret
= 0; /* avoid warning */
6827 case TARGET_NR_read
:
6831 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6833 ret
= get_errno(safe_read(arg1
, p
, arg3
));
6835 fd_trans_host_to_target_data(arg1
)) {
6836 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
6838 unlock_user(p
, arg2
, ret
);
6841 case TARGET_NR_write
:
6842 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6844 ret
= get_errno(safe_write(arg1
, p
, arg3
));
6845 unlock_user(p
, arg2
, 0);
6847 #ifdef TARGET_NR_open
6848 case TARGET_NR_open
:
6849 if (!(p
= lock_user_string(arg1
)))
6851 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
6852 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
6854 fd_trans_unregister(ret
);
6855 unlock_user(p
, arg1
, 0);
6858 case TARGET_NR_openat
:
6859 if (!(p
= lock_user_string(arg2
)))
6861 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
6862 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
6864 fd_trans_unregister(ret
);
6865 unlock_user(p
, arg2
, 0);
6867 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6868 case TARGET_NR_name_to_handle_at
:
6869 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
6872 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6873 case TARGET_NR_open_by_handle_at
:
6874 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
6875 fd_trans_unregister(ret
);
6878 case TARGET_NR_close
:
6879 fd_trans_unregister(arg1
);
6880 ret
= get_errno(close(arg1
));
6885 #ifdef TARGET_NR_fork
6886 case TARGET_NR_fork
:
6887 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
6890 #ifdef TARGET_NR_waitpid
6891 case TARGET_NR_waitpid
:
6894 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
6895 if (!is_error(ret
) && arg2
&& ret
6896 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
6901 #ifdef TARGET_NR_waitid
6902 case TARGET_NR_waitid
:
6906 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
6907 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
6908 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
6910 host_to_target_siginfo(p
, &info
);
6911 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
6916 #ifdef TARGET_NR_creat /* not on alpha */
6917 case TARGET_NR_creat
:
6918 if (!(p
= lock_user_string(arg1
)))
6920 ret
= get_errno(creat(p
, arg2
));
6921 fd_trans_unregister(ret
);
6922 unlock_user(p
, arg1
, 0);
6925 #ifdef TARGET_NR_link
6926 case TARGET_NR_link
:
6929 p
= lock_user_string(arg1
);
6930 p2
= lock_user_string(arg2
);
6932 ret
= -TARGET_EFAULT
;
6934 ret
= get_errno(link(p
, p2
));
6935 unlock_user(p2
, arg2
, 0);
6936 unlock_user(p
, arg1
, 0);
6940 #if defined(TARGET_NR_linkat)
6941 case TARGET_NR_linkat
:
6946 p
= lock_user_string(arg2
);
6947 p2
= lock_user_string(arg4
);
6949 ret
= -TARGET_EFAULT
;
6951 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6952 unlock_user(p
, arg2
, 0);
6953 unlock_user(p2
, arg4
, 0);
6957 #ifdef TARGET_NR_unlink
6958 case TARGET_NR_unlink
:
6959 if (!(p
= lock_user_string(arg1
)))
6961 ret
= get_errno(unlink(p
));
6962 unlock_user(p
, arg1
, 0);
6965 #if defined(TARGET_NR_unlinkat)
6966 case TARGET_NR_unlinkat
:
6967 if (!(p
= lock_user_string(arg2
)))
6969 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6970 unlock_user(p
, arg2
, 0);
6973 case TARGET_NR_execve
:
6975 char **argp
, **envp
;
6978 abi_ulong guest_argp
;
6979 abi_ulong guest_envp
;
6986 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6987 if (get_user_ual(addr
, gp
))
6995 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6996 if (get_user_ual(addr
, gp
))
7003 argp
= alloca((argc
+ 1) * sizeof(void *));
7004 envp
= alloca((envc
+ 1) * sizeof(void *));
7006 for (gp
= guest_argp
, q
= argp
; gp
;
7007 gp
+= sizeof(abi_ulong
), q
++) {
7008 if (get_user_ual(addr
, gp
))
7012 if (!(*q
= lock_user_string(addr
)))
7014 total_size
+= strlen(*q
) + 1;
7018 for (gp
= guest_envp
, q
= envp
; gp
;
7019 gp
+= sizeof(abi_ulong
), q
++) {
7020 if (get_user_ual(addr
, gp
))
7024 if (!(*q
= lock_user_string(addr
)))
7026 total_size
+= strlen(*q
) + 1;
7030 if (!(p
= lock_user_string(arg1
)))
7032 /* Although execve() is not an interruptible syscall it is
7033 * a special case where we must use the safe_syscall wrapper:
7034 * if we allow a signal to happen before we make the host
7035 * syscall then we will 'lose' it, because at the point of
7036 * execve the process leaves QEMU's control. So we use the
7037 * safe syscall wrapper to ensure that we either take the
7038 * signal as a guest signal, or else it does not happen
7039 * before the execve completes and makes it the other
7040 * program's problem.
7042 ret
= get_errno(safe_execve(p
, argp
, envp
));
7043 unlock_user(p
, arg1
, 0);
7048 ret
= -TARGET_EFAULT
;
7051 for (gp
= guest_argp
, q
= argp
; *q
;
7052 gp
+= sizeof(abi_ulong
), q
++) {
7053 if (get_user_ual(addr
, gp
)
7056 unlock_user(*q
, addr
, 0);
7058 for (gp
= guest_envp
, q
= envp
; *q
;
7059 gp
+= sizeof(abi_ulong
), q
++) {
7060 if (get_user_ual(addr
, gp
)
7063 unlock_user(*q
, addr
, 0);
7067 case TARGET_NR_chdir
:
7068 if (!(p
= lock_user_string(arg1
)))
7070 ret
= get_errno(chdir(p
));
7071 unlock_user(p
, arg1
, 0);
7073 #ifdef TARGET_NR_time
7074 case TARGET_NR_time
:
7077 ret
= get_errno(time(&host_time
));
7080 && put_user_sal(host_time
, arg1
))
7085 #ifdef TARGET_NR_mknod
7086 case TARGET_NR_mknod
:
7087 if (!(p
= lock_user_string(arg1
)))
7089 ret
= get_errno(mknod(p
, arg2
, arg3
));
7090 unlock_user(p
, arg1
, 0);
7093 #if defined(TARGET_NR_mknodat)
7094 case TARGET_NR_mknodat
:
7095 if (!(p
= lock_user_string(arg2
)))
7097 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7098 unlock_user(p
, arg2
, 0);
7101 #ifdef TARGET_NR_chmod
7102 case TARGET_NR_chmod
:
7103 if (!(p
= lock_user_string(arg1
)))
7105 ret
= get_errno(chmod(p
, arg2
));
7106 unlock_user(p
, arg1
, 0);
7109 #ifdef TARGET_NR_break
7110 case TARGET_NR_break
:
7113 #ifdef TARGET_NR_oldstat
7114 case TARGET_NR_oldstat
:
7117 case TARGET_NR_lseek
:
7118 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7120 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7121 /* Alpha specific */
7122 case TARGET_NR_getxpid
:
7123 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7124 ret
= get_errno(getpid());
7127 #ifdef TARGET_NR_getpid
7128 case TARGET_NR_getpid
:
7129 ret
= get_errno(getpid());
7132 case TARGET_NR_mount
:
7134 /* need to look at the data field */
7138 p
= lock_user_string(arg1
);
7146 p2
= lock_user_string(arg2
);
7149 unlock_user(p
, arg1
, 0);
7155 p3
= lock_user_string(arg3
);
7158 unlock_user(p
, arg1
, 0);
7160 unlock_user(p2
, arg2
, 0);
7167 /* FIXME - arg5 should be locked, but it isn't clear how to
7168 * do that since it's not guaranteed to be a NULL-terminated
7172 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7174 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7176 ret
= get_errno(ret
);
7179 unlock_user(p
, arg1
, 0);
7181 unlock_user(p2
, arg2
, 0);
7183 unlock_user(p3
, arg3
, 0);
7187 #ifdef TARGET_NR_umount
7188 case TARGET_NR_umount
:
7189 if (!(p
= lock_user_string(arg1
)))
7191 ret
= get_errno(umount(p
));
7192 unlock_user(p
, arg1
, 0);
7195 #ifdef TARGET_NR_stime /* not on alpha */
7196 case TARGET_NR_stime
:
7199 if (get_user_sal(host_time
, arg1
))
7201 ret
= get_errno(stime(&host_time
));
7205 case TARGET_NR_ptrace
:
7207 #ifdef TARGET_NR_alarm /* not on alpha */
7208 case TARGET_NR_alarm
:
7212 #ifdef TARGET_NR_oldfstat
7213 case TARGET_NR_oldfstat
:
7216 #ifdef TARGET_NR_pause /* not on alpha */
7217 case TARGET_NR_pause
:
7218 if (!block_signals()) {
7219 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7221 ret
= -TARGET_EINTR
;
7224 #ifdef TARGET_NR_utime
7225 case TARGET_NR_utime
:
7227 struct utimbuf tbuf
, *host_tbuf
;
7228 struct target_utimbuf
*target_tbuf
;
7230 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7232 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7233 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7234 unlock_user_struct(target_tbuf
, arg2
, 0);
7239 if (!(p
= lock_user_string(arg1
)))
7241 ret
= get_errno(utime(p
, host_tbuf
));
7242 unlock_user(p
, arg1
, 0);
7246 #ifdef TARGET_NR_utimes
7247 case TARGET_NR_utimes
:
7249 struct timeval
*tvp
, tv
[2];
7251 if (copy_from_user_timeval(&tv
[0], arg2
)
7252 || copy_from_user_timeval(&tv
[1],
7253 arg2
+ sizeof(struct target_timeval
)))
7259 if (!(p
= lock_user_string(arg1
)))
7261 ret
= get_errno(utimes(p
, tvp
));
7262 unlock_user(p
, arg1
, 0);
7266 #if defined(TARGET_NR_futimesat)
7267 case TARGET_NR_futimesat
:
7269 struct timeval
*tvp
, tv
[2];
7271 if (copy_from_user_timeval(&tv
[0], arg3
)
7272 || copy_from_user_timeval(&tv
[1],
7273 arg3
+ sizeof(struct target_timeval
)))
7279 if (!(p
= lock_user_string(arg2
)))
7281 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7282 unlock_user(p
, arg2
, 0);
7286 #ifdef TARGET_NR_stty
7287 case TARGET_NR_stty
:
7290 #ifdef TARGET_NR_gtty
7291 case TARGET_NR_gtty
:
7294 #ifdef TARGET_NR_access
7295 case TARGET_NR_access
:
7296 if (!(p
= lock_user_string(arg1
)))
7298 ret
= get_errno(access(path(p
), arg2
));
7299 unlock_user(p
, arg1
, 0);
7302 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7303 case TARGET_NR_faccessat
:
7304 if (!(p
= lock_user_string(arg2
)))
7306 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7307 unlock_user(p
, arg2
, 0);
7310 #ifdef TARGET_NR_nice /* not on alpha */
7311 case TARGET_NR_nice
:
7312 ret
= get_errno(nice(arg1
));
7315 #ifdef TARGET_NR_ftime
7316 case TARGET_NR_ftime
:
7319 case TARGET_NR_sync
:
7323 case TARGET_NR_kill
:
7324 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7326 #ifdef TARGET_NR_rename
7327 case TARGET_NR_rename
:
7330 p
= lock_user_string(arg1
);
7331 p2
= lock_user_string(arg2
);
7333 ret
= -TARGET_EFAULT
;
7335 ret
= get_errno(rename(p
, p2
));
7336 unlock_user(p2
, arg2
, 0);
7337 unlock_user(p
, arg1
, 0);
7341 #if defined(TARGET_NR_renameat)
7342 case TARGET_NR_renameat
:
7345 p
= lock_user_string(arg2
);
7346 p2
= lock_user_string(arg4
);
7348 ret
= -TARGET_EFAULT
;
7350 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7351 unlock_user(p2
, arg4
, 0);
7352 unlock_user(p
, arg2
, 0);
7356 #ifdef TARGET_NR_mkdir
7357 case TARGET_NR_mkdir
:
7358 if (!(p
= lock_user_string(arg1
)))
7360 ret
= get_errno(mkdir(p
, arg2
));
7361 unlock_user(p
, arg1
, 0);
7364 #if defined(TARGET_NR_mkdirat)
7365 case TARGET_NR_mkdirat
:
7366 if (!(p
= lock_user_string(arg2
)))
7368 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7369 unlock_user(p
, arg2
, 0);
7372 #ifdef TARGET_NR_rmdir
7373 case TARGET_NR_rmdir
:
7374 if (!(p
= lock_user_string(arg1
)))
7376 ret
= get_errno(rmdir(p
));
7377 unlock_user(p
, arg1
, 0);
7381 ret
= get_errno(dup(arg1
));
7383 fd_trans_dup(arg1
, ret
);
7386 #ifdef TARGET_NR_pipe
7387 case TARGET_NR_pipe
:
7388 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7391 #ifdef TARGET_NR_pipe2
7392 case TARGET_NR_pipe2
:
7393 ret
= do_pipe(cpu_env
, arg1
,
7394 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7397 case TARGET_NR_times
:
7399 struct target_tms
*tmsp
;
7401 ret
= get_errno(times(&tms
));
7403 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7406 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7407 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7408 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7409 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7412 ret
= host_to_target_clock_t(ret
);
7415 #ifdef TARGET_NR_prof
7416 case TARGET_NR_prof
:
7419 #ifdef TARGET_NR_signal
7420 case TARGET_NR_signal
:
7423 case TARGET_NR_acct
:
7425 ret
= get_errno(acct(NULL
));
7427 if (!(p
= lock_user_string(arg1
)))
7429 ret
= get_errno(acct(path(p
)));
7430 unlock_user(p
, arg1
, 0);
7433 #ifdef TARGET_NR_umount2
7434 case TARGET_NR_umount2
:
7435 if (!(p
= lock_user_string(arg1
)))
7437 ret
= get_errno(umount2(p
, arg2
));
7438 unlock_user(p
, arg1
, 0);
7441 #ifdef TARGET_NR_lock
7442 case TARGET_NR_lock
:
7445 case TARGET_NR_ioctl
:
7446 ret
= do_ioctl(arg1
, arg2
, arg3
);
7448 case TARGET_NR_fcntl
:
7449 ret
= do_fcntl(arg1
, arg2
, arg3
);
7451 #ifdef TARGET_NR_mpx
7455 case TARGET_NR_setpgid
:
7456 ret
= get_errno(setpgid(arg1
, arg2
));
7458 #ifdef TARGET_NR_ulimit
7459 case TARGET_NR_ulimit
:
7462 #ifdef TARGET_NR_oldolduname
7463 case TARGET_NR_oldolduname
:
7466 case TARGET_NR_umask
:
7467 ret
= get_errno(umask(arg1
));
7469 case TARGET_NR_chroot
:
7470 if (!(p
= lock_user_string(arg1
)))
7472 ret
= get_errno(chroot(p
));
7473 unlock_user(p
, arg1
, 0);
7475 #ifdef TARGET_NR_ustat
7476 case TARGET_NR_ustat
:
7479 #ifdef TARGET_NR_dup2
7480 case TARGET_NR_dup2
:
7481 ret
= get_errno(dup2(arg1
, arg2
));
7483 fd_trans_dup(arg1
, arg2
);
7487 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7488 case TARGET_NR_dup3
:
7489 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
7491 fd_trans_dup(arg1
, arg2
);
7495 #ifdef TARGET_NR_getppid /* not on alpha */
7496 case TARGET_NR_getppid
:
7497 ret
= get_errno(getppid());
7500 #ifdef TARGET_NR_getpgrp
7501 case TARGET_NR_getpgrp
:
7502 ret
= get_errno(getpgrp());
7505 case TARGET_NR_setsid
:
7506 ret
= get_errno(setsid());
7508 #ifdef TARGET_NR_sigaction
7509 case TARGET_NR_sigaction
:
7511 #if defined(TARGET_ALPHA)
7512 struct target_sigaction act
, oact
, *pact
= 0;
7513 struct target_old_sigaction
*old_act
;
7515 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7517 act
._sa_handler
= old_act
->_sa_handler
;
7518 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7519 act
.sa_flags
= old_act
->sa_flags
;
7520 act
.sa_restorer
= 0;
7521 unlock_user_struct(old_act
, arg2
, 0);
7524 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7525 if (!is_error(ret
) && arg3
) {
7526 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7528 old_act
->_sa_handler
= oact
._sa_handler
;
7529 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7530 old_act
->sa_flags
= oact
.sa_flags
;
7531 unlock_user_struct(old_act
, arg3
, 1);
7533 #elif defined(TARGET_MIPS)
7534 struct target_sigaction act
, oact
, *pact
, *old_act
;
7537 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7539 act
._sa_handler
= old_act
->_sa_handler
;
7540 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7541 act
.sa_flags
= old_act
->sa_flags
;
7542 unlock_user_struct(old_act
, arg2
, 0);
7548 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7550 if (!is_error(ret
) && arg3
) {
7551 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7553 old_act
->_sa_handler
= oact
._sa_handler
;
7554 old_act
->sa_flags
= oact
.sa_flags
;
7555 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7556 old_act
->sa_mask
.sig
[1] = 0;
7557 old_act
->sa_mask
.sig
[2] = 0;
7558 old_act
->sa_mask
.sig
[3] = 0;
7559 unlock_user_struct(old_act
, arg3
, 1);
7562 struct target_old_sigaction
*old_act
;
7563 struct target_sigaction act
, oact
, *pact
;
7565 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7567 act
._sa_handler
= old_act
->_sa_handler
;
7568 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7569 act
.sa_flags
= old_act
->sa_flags
;
7570 act
.sa_restorer
= old_act
->sa_restorer
;
7571 unlock_user_struct(old_act
, arg2
, 0);
7576 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7577 if (!is_error(ret
) && arg3
) {
7578 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7580 old_act
->_sa_handler
= oact
._sa_handler
;
7581 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7582 old_act
->sa_flags
= oact
.sa_flags
;
7583 old_act
->sa_restorer
= oact
.sa_restorer
;
7584 unlock_user_struct(old_act
, arg3
, 1);
7590 case TARGET_NR_rt_sigaction
:
7592 #if defined(TARGET_ALPHA)
7593 struct target_sigaction act
, oact
, *pact
= 0;
7594 struct target_rt_sigaction
*rt_act
;
7595 /* ??? arg4 == sizeof(sigset_t). */
7597 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7599 act
._sa_handler
= rt_act
->_sa_handler
;
7600 act
.sa_mask
= rt_act
->sa_mask
;
7601 act
.sa_flags
= rt_act
->sa_flags
;
7602 act
.sa_restorer
= arg5
;
7603 unlock_user_struct(rt_act
, arg2
, 0);
7606 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7607 if (!is_error(ret
) && arg3
) {
7608 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7610 rt_act
->_sa_handler
= oact
._sa_handler
;
7611 rt_act
->sa_mask
= oact
.sa_mask
;
7612 rt_act
->sa_flags
= oact
.sa_flags
;
7613 unlock_user_struct(rt_act
, arg3
, 1);
7616 struct target_sigaction
*act
;
7617 struct target_sigaction
*oact
;
7620 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
7625 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7626 ret
= -TARGET_EFAULT
;
7627 goto rt_sigaction_fail
;
7631 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7634 unlock_user_struct(act
, arg2
, 0);
7636 unlock_user_struct(oact
, arg3
, 1);
7640 #ifdef TARGET_NR_sgetmask /* not on alpha */
7641 case TARGET_NR_sgetmask
:
7644 abi_ulong target_set
;
7645 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7647 host_to_target_old_sigset(&target_set
, &cur_set
);
7653 #ifdef TARGET_NR_ssetmask /* not on alpha */
7654 case TARGET_NR_ssetmask
:
7656 sigset_t set
, oset
, cur_set
;
7657 abi_ulong target_set
= arg1
;
7658 /* We only have one word of the new mask so we must read
7659 * the rest of it with do_sigprocmask() and OR in this word.
7660 * We are guaranteed that a do_sigprocmask() that only queries
7661 * the signal mask will not fail.
7663 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7665 target_to_host_old_sigset(&set
, &target_set
);
7666 sigorset(&set
, &set
, &cur_set
);
7667 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7669 host_to_target_old_sigset(&target_set
, &oset
);
7675 #ifdef TARGET_NR_sigprocmask
7676 case TARGET_NR_sigprocmask
:
7678 #if defined(TARGET_ALPHA)
7679 sigset_t set
, oldset
;
7684 case TARGET_SIG_BLOCK
:
7687 case TARGET_SIG_UNBLOCK
:
7690 case TARGET_SIG_SETMASK
:
7694 ret
= -TARGET_EINVAL
;
7698 target_to_host_old_sigset(&set
, &mask
);
7700 ret
= do_sigprocmask(how
, &set
, &oldset
);
7701 if (!is_error(ret
)) {
7702 host_to_target_old_sigset(&mask
, &oldset
);
7704 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7707 sigset_t set
, oldset
, *set_ptr
;
7712 case TARGET_SIG_BLOCK
:
7715 case TARGET_SIG_UNBLOCK
:
7718 case TARGET_SIG_SETMASK
:
7722 ret
= -TARGET_EINVAL
;
7725 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7727 target_to_host_old_sigset(&set
, p
);
7728 unlock_user(p
, arg2
, 0);
7734 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7735 if (!is_error(ret
) && arg3
) {
7736 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7738 host_to_target_old_sigset(p
, &oldset
);
7739 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7745 case TARGET_NR_rt_sigprocmask
:
7748 sigset_t set
, oldset
, *set_ptr
;
7752 case TARGET_SIG_BLOCK
:
7755 case TARGET_SIG_UNBLOCK
:
7758 case TARGET_SIG_SETMASK
:
7762 ret
= -TARGET_EINVAL
;
7765 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7767 target_to_host_sigset(&set
, p
);
7768 unlock_user(p
, arg2
, 0);
7774 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
7775 if (!is_error(ret
) && arg3
) {
7776 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
7778 host_to_target_sigset(p
, &oldset
);
7779 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
7783 #ifdef TARGET_NR_sigpending
7784 case TARGET_NR_sigpending
:
7787 ret
= get_errno(sigpending(&set
));
7788 if (!is_error(ret
)) {
7789 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7791 host_to_target_old_sigset(p
, &set
);
7792 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7797 case TARGET_NR_rt_sigpending
:
7800 ret
= get_errno(sigpending(&set
));
7801 if (!is_error(ret
)) {
7802 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
7804 host_to_target_sigset(p
, &set
);
7805 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
7809 #ifdef TARGET_NR_sigsuspend
7810 case TARGET_NR_sigsuspend
:
7812 TaskState
*ts
= cpu
->opaque
;
7813 #if defined(TARGET_ALPHA)
7814 abi_ulong mask
= arg1
;
7815 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
7817 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7819 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
7820 unlock_user(p
, arg1
, 0);
7822 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7824 if (ret
!= -TARGET_ERESTARTSYS
) {
7825 ts
->in_sigsuspend
= 1;
7830 case TARGET_NR_rt_sigsuspend
:
7832 TaskState
*ts
= cpu
->opaque
;
7833 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7835 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
7836 unlock_user(p
, arg1
, 0);
7837 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
7839 if (ret
!= -TARGET_ERESTARTSYS
) {
7840 ts
->in_sigsuspend
= 1;
7844 case TARGET_NR_rt_sigtimedwait
:
7847 struct timespec uts
, *puts
;
7850 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
7852 target_to_host_sigset(&set
, p
);
7853 unlock_user(p
, arg1
, 0);
7856 target_to_host_timespec(puts
, arg3
);
7860 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
7862 if (!is_error(ret
)) {
7864 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
7869 host_to_target_siginfo(p
, &uinfo
);
7870 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
7872 ret
= host_to_target_signal(ret
);
7876 case TARGET_NR_rt_sigqueueinfo
:
7880 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
7884 target_to_host_siginfo(&uinfo
, p
);
7885 unlock_user(p
, arg1
, 0);
7886 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
7889 #ifdef TARGET_NR_sigreturn
7890 case TARGET_NR_sigreturn
:
7891 if (block_signals()) {
7892 ret
= -TARGET_ERESTARTSYS
;
7894 ret
= do_sigreturn(cpu_env
);
7898 case TARGET_NR_rt_sigreturn
:
7899 if (block_signals()) {
7900 ret
= -TARGET_ERESTARTSYS
;
7902 ret
= do_rt_sigreturn(cpu_env
);
7905 case TARGET_NR_sethostname
:
7906 if (!(p
= lock_user_string(arg1
)))
7908 ret
= get_errno(sethostname(p
, arg2
));
7909 unlock_user(p
, arg1
, 0);
7911 case TARGET_NR_setrlimit
:
7913 int resource
= target_to_host_resource(arg1
);
7914 struct target_rlimit
*target_rlim
;
7916 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
7918 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
7919 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
7920 unlock_user_struct(target_rlim
, arg2
, 0);
7921 ret
= get_errno(setrlimit(resource
, &rlim
));
7924 case TARGET_NR_getrlimit
:
7926 int resource
= target_to_host_resource(arg1
);
7927 struct target_rlimit
*target_rlim
;
7930 ret
= get_errno(getrlimit(resource
, &rlim
));
7931 if (!is_error(ret
)) {
7932 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7934 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7935 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7936 unlock_user_struct(target_rlim
, arg2
, 1);
7940 case TARGET_NR_getrusage
:
7942 struct rusage rusage
;
7943 ret
= get_errno(getrusage(arg1
, &rusage
));
7944 if (!is_error(ret
)) {
7945 ret
= host_to_target_rusage(arg2
, &rusage
);
7949 case TARGET_NR_gettimeofday
:
7952 ret
= get_errno(gettimeofday(&tv
, NULL
));
7953 if (!is_error(ret
)) {
7954 if (copy_to_user_timeval(arg1
, &tv
))
7959 case TARGET_NR_settimeofday
:
7961 struct timeval tv
, *ptv
= NULL
;
7962 struct timezone tz
, *ptz
= NULL
;
7965 if (copy_from_user_timeval(&tv
, arg1
)) {
7972 if (copy_from_user_timezone(&tz
, arg2
)) {
7978 ret
= get_errno(settimeofday(ptv
, ptz
));
7981 #if defined(TARGET_NR_select)
7982 case TARGET_NR_select
:
7983 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7984 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7987 struct target_sel_arg_struct
*sel
;
7988 abi_ulong inp
, outp
, exp
, tvp
;
7991 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7993 nsel
= tswapal(sel
->n
);
7994 inp
= tswapal(sel
->inp
);
7995 outp
= tswapal(sel
->outp
);
7996 exp
= tswapal(sel
->exp
);
7997 tvp
= tswapal(sel
->tvp
);
7998 unlock_user_struct(sel
, arg1
, 0);
7999 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8004 #ifdef TARGET_NR_pselect6
8005 case TARGET_NR_pselect6
:
8007 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8008 fd_set rfds
, wfds
, efds
;
8009 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8010 struct timespec ts
, *ts_ptr
;
8013 * The 6th arg is actually two args smashed together,
8014 * so we cannot use the C library.
8022 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8023 target_sigset_t
*target_sigset
;
8031 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8035 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8039 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8045 * This takes a timespec, and not a timeval, so we cannot
8046 * use the do_select() helper ...
8049 if (target_to_host_timespec(&ts
, ts_addr
)) {
8057 /* Extract the two packed args for the sigset */
8060 sig
.size
= SIGSET_T_SIZE
;
8062 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8066 arg_sigset
= tswapal(arg7
[0]);
8067 arg_sigsize
= tswapal(arg7
[1]);
8068 unlock_user(arg7
, arg6
, 0);
8072 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8073 /* Like the kernel, we enforce correct size sigsets */
8074 ret
= -TARGET_EINVAL
;
8077 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8078 sizeof(*target_sigset
), 1);
8079 if (!target_sigset
) {
8082 target_to_host_sigset(&set
, target_sigset
);
8083 unlock_user(target_sigset
, arg_sigset
, 0);
8091 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8094 if (!is_error(ret
)) {
8095 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8097 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8099 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8102 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8108 #ifdef TARGET_NR_symlink
8109 case TARGET_NR_symlink
:
8112 p
= lock_user_string(arg1
);
8113 p2
= lock_user_string(arg2
);
8115 ret
= -TARGET_EFAULT
;
8117 ret
= get_errno(symlink(p
, p2
));
8118 unlock_user(p2
, arg2
, 0);
8119 unlock_user(p
, arg1
, 0);
8123 #if defined(TARGET_NR_symlinkat)
8124 case TARGET_NR_symlinkat
:
8127 p
= lock_user_string(arg1
);
8128 p2
= lock_user_string(arg3
);
8130 ret
= -TARGET_EFAULT
;
8132 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8133 unlock_user(p2
, arg3
, 0);
8134 unlock_user(p
, arg1
, 0);
8138 #ifdef TARGET_NR_oldlstat
8139 case TARGET_NR_oldlstat
:
8142 #ifdef TARGET_NR_readlink
8143 case TARGET_NR_readlink
:
8146 p
= lock_user_string(arg1
);
8147 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8149 ret
= -TARGET_EFAULT
;
8151 /* Short circuit this for the magic exe check. */
8152 ret
= -TARGET_EINVAL
;
8153 } else if (is_proc_myself((const char *)p
, "exe")) {
8154 char real
[PATH_MAX
], *temp
;
8155 temp
= realpath(exec_path
, real
);
8156 /* Return value is # of bytes that we wrote to the buffer. */
8158 ret
= get_errno(-1);
8160 /* Don't worry about sign mismatch as earlier mapping
8161 * logic would have thrown a bad address error. */
8162 ret
= MIN(strlen(real
), arg3
);
8163 /* We cannot NUL terminate the string. */
8164 memcpy(p2
, real
, ret
);
8167 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8169 unlock_user(p2
, arg2
, ret
);
8170 unlock_user(p
, arg1
, 0);
8174 #if defined(TARGET_NR_readlinkat)
8175 case TARGET_NR_readlinkat
:
8178 p
= lock_user_string(arg2
);
8179 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8181 ret
= -TARGET_EFAULT
;
8182 } else if (is_proc_myself((const char *)p
, "exe")) {
8183 char real
[PATH_MAX
], *temp
;
8184 temp
= realpath(exec_path
, real
);
8185 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8186 snprintf((char *)p2
, arg4
, "%s", real
);
8188 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8190 unlock_user(p2
, arg3
, ret
);
8191 unlock_user(p
, arg2
, 0);
8195 #ifdef TARGET_NR_uselib
8196 case TARGET_NR_uselib
:
8199 #ifdef TARGET_NR_swapon
8200 case TARGET_NR_swapon
:
8201 if (!(p
= lock_user_string(arg1
)))
8203 ret
= get_errno(swapon(p
, arg2
));
8204 unlock_user(p
, arg1
, 0);
8207 case TARGET_NR_reboot
:
8208 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8209 /* arg4 must be ignored in all other cases */
8210 p
= lock_user_string(arg4
);
8214 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8215 unlock_user(p
, arg4
, 0);
8217 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8220 #ifdef TARGET_NR_readdir
8221 case TARGET_NR_readdir
:
8224 #ifdef TARGET_NR_mmap
8225 case TARGET_NR_mmap
:
8226 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8227 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8228 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8229 || defined(TARGET_S390X)
8232 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8233 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8241 unlock_user(v
, arg1
, 0);
8242 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8243 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8247 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8248 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8254 #ifdef TARGET_NR_mmap2
8255 case TARGET_NR_mmap2
:
8257 #define MMAP_SHIFT 12
8259 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8260 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8262 arg6
<< MMAP_SHIFT
));
8265 case TARGET_NR_munmap
:
8266 ret
= get_errno(target_munmap(arg1
, arg2
));
8268 case TARGET_NR_mprotect
:
8270 TaskState
*ts
= cpu
->opaque
;
8271 /* Special hack to detect libc making the stack executable. */
8272 if ((arg3
& PROT_GROWSDOWN
)
8273 && arg1
>= ts
->info
->stack_limit
8274 && arg1
<= ts
->info
->start_stack
) {
8275 arg3
&= ~PROT_GROWSDOWN
;
8276 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8277 arg1
= ts
->info
->stack_limit
;
8280 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8282 #ifdef TARGET_NR_mremap
8283 case TARGET_NR_mremap
:
8284 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8287 /* ??? msync/mlock/munlock are broken for softmmu. */
8288 #ifdef TARGET_NR_msync
8289 case TARGET_NR_msync
:
8290 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8293 #ifdef TARGET_NR_mlock
8294 case TARGET_NR_mlock
:
8295 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8298 #ifdef TARGET_NR_munlock
8299 case TARGET_NR_munlock
:
8300 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8303 #ifdef TARGET_NR_mlockall
8304 case TARGET_NR_mlockall
:
8305 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8308 #ifdef TARGET_NR_munlockall
8309 case TARGET_NR_munlockall
:
8310 ret
= get_errno(munlockall());
8313 case TARGET_NR_truncate
:
8314 if (!(p
= lock_user_string(arg1
)))
8316 ret
= get_errno(truncate(p
, arg2
));
8317 unlock_user(p
, arg1
, 0);
8319 case TARGET_NR_ftruncate
:
8320 ret
= get_errno(ftruncate(arg1
, arg2
));
8322 case TARGET_NR_fchmod
:
8323 ret
= get_errno(fchmod(arg1
, arg2
));
8325 #if defined(TARGET_NR_fchmodat)
8326 case TARGET_NR_fchmodat
:
8327 if (!(p
= lock_user_string(arg2
)))
8329 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8330 unlock_user(p
, arg2
, 0);
8333 case TARGET_NR_getpriority
:
8334 /* Note that negative values are valid for getpriority, so we must
8335 differentiate based on errno settings. */
8337 ret
= getpriority(arg1
, arg2
);
8338 if (ret
== -1 && errno
!= 0) {
8339 ret
= -host_to_target_errno(errno
);
8343 /* Return value is the unbiased priority. Signal no error. */
8344 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8346 /* Return value is a biased priority to avoid negative numbers. */
8350 case TARGET_NR_setpriority
:
8351 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8353 #ifdef TARGET_NR_profil
8354 case TARGET_NR_profil
:
8357 case TARGET_NR_statfs
:
8358 if (!(p
= lock_user_string(arg1
)))
8360 ret
= get_errno(statfs(path(p
), &stfs
));
8361 unlock_user(p
, arg1
, 0);
8363 if (!is_error(ret
)) {
8364 struct target_statfs
*target_stfs
;
8366 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8368 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8369 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8370 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8371 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8372 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8373 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8374 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8375 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8376 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8377 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8378 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8379 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8380 unlock_user_struct(target_stfs
, arg2
, 1);
8383 case TARGET_NR_fstatfs
:
8384 ret
= get_errno(fstatfs(arg1
, &stfs
));
8385 goto convert_statfs
;
8386 #ifdef TARGET_NR_statfs64
8387 case TARGET_NR_statfs64
:
8388 if (!(p
= lock_user_string(arg1
)))
8390 ret
= get_errno(statfs(path(p
), &stfs
));
8391 unlock_user(p
, arg1
, 0);
8393 if (!is_error(ret
)) {
8394 struct target_statfs64
*target_stfs
;
8396 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8398 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8399 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8400 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8401 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8402 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8403 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8404 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8405 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8406 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8407 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8408 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8409 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8410 unlock_user_struct(target_stfs
, arg3
, 1);
8413 case TARGET_NR_fstatfs64
:
8414 ret
= get_errno(fstatfs(arg1
, &stfs
));
8415 goto convert_statfs64
;
8417 #ifdef TARGET_NR_ioperm
8418 case TARGET_NR_ioperm
:
8421 #ifdef TARGET_NR_socketcall
8422 case TARGET_NR_socketcall
:
8423 ret
= do_socketcall(arg1
, arg2
);
8426 #ifdef TARGET_NR_accept
8427 case TARGET_NR_accept
:
8428 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
8431 #ifdef TARGET_NR_accept4
8432 case TARGET_NR_accept4
:
8433 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
8436 #ifdef TARGET_NR_bind
8437 case TARGET_NR_bind
:
8438 ret
= do_bind(arg1
, arg2
, arg3
);
8441 #ifdef TARGET_NR_connect
8442 case TARGET_NR_connect
:
8443 ret
= do_connect(arg1
, arg2
, arg3
);
8446 #ifdef TARGET_NR_getpeername
8447 case TARGET_NR_getpeername
:
8448 ret
= do_getpeername(arg1
, arg2
, arg3
);
8451 #ifdef TARGET_NR_getsockname
8452 case TARGET_NR_getsockname
:
8453 ret
= do_getsockname(arg1
, arg2
, arg3
);
8456 #ifdef TARGET_NR_getsockopt
8457 case TARGET_NR_getsockopt
:
8458 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8461 #ifdef TARGET_NR_listen
8462 case TARGET_NR_listen
:
8463 ret
= get_errno(listen(arg1
, arg2
));
8466 #ifdef TARGET_NR_recv
8467 case TARGET_NR_recv
:
8468 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8471 #ifdef TARGET_NR_recvfrom
8472 case TARGET_NR_recvfrom
:
8473 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8476 #ifdef TARGET_NR_recvmsg
8477 case TARGET_NR_recvmsg
:
8478 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8481 #ifdef TARGET_NR_send
8482 case TARGET_NR_send
:
8483 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8486 #ifdef TARGET_NR_sendmsg
8487 case TARGET_NR_sendmsg
:
8488 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8491 #ifdef TARGET_NR_sendmmsg
8492 case TARGET_NR_sendmmsg
:
8493 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8495 case TARGET_NR_recvmmsg
:
8496 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8499 #ifdef TARGET_NR_sendto
8500 case TARGET_NR_sendto
:
8501 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8504 #ifdef TARGET_NR_shutdown
8505 case TARGET_NR_shutdown
:
8506 ret
= get_errno(shutdown(arg1
, arg2
));
8509 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8510 case TARGET_NR_getrandom
:
8511 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8515 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8516 unlock_user(p
, arg1
, ret
);
8519 #ifdef TARGET_NR_socket
8520 case TARGET_NR_socket
:
8521 ret
= do_socket(arg1
, arg2
, arg3
);
8522 fd_trans_unregister(ret
);
8525 #ifdef TARGET_NR_socketpair
8526 case TARGET_NR_socketpair
:
8527 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
8530 #ifdef TARGET_NR_setsockopt
8531 case TARGET_NR_setsockopt
:
8532 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8536 case TARGET_NR_syslog
:
8537 if (!(p
= lock_user_string(arg2
)))
8539 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8540 unlock_user(p
, arg2
, 0);
8543 case TARGET_NR_setitimer
:
8545 struct itimerval value
, ovalue
, *pvalue
;
8549 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8550 || copy_from_user_timeval(&pvalue
->it_value
,
8551 arg2
+ sizeof(struct target_timeval
)))
8556 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8557 if (!is_error(ret
) && arg3
) {
8558 if (copy_to_user_timeval(arg3
,
8559 &ovalue
.it_interval
)
8560 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8566 case TARGET_NR_getitimer
:
8568 struct itimerval value
;
8570 ret
= get_errno(getitimer(arg1
, &value
));
8571 if (!is_error(ret
) && arg2
) {
8572 if (copy_to_user_timeval(arg2
,
8574 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8580 #ifdef TARGET_NR_stat
8581 case TARGET_NR_stat
:
8582 if (!(p
= lock_user_string(arg1
)))
8584 ret
= get_errno(stat(path(p
), &st
));
8585 unlock_user(p
, arg1
, 0);
8588 #ifdef TARGET_NR_lstat
8589 case TARGET_NR_lstat
:
8590 if (!(p
= lock_user_string(arg1
)))
8592 ret
= get_errno(lstat(path(p
), &st
));
8593 unlock_user(p
, arg1
, 0);
8596 case TARGET_NR_fstat
:
8598 ret
= get_errno(fstat(arg1
, &st
));
8599 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8602 if (!is_error(ret
)) {
8603 struct target_stat
*target_st
;
8605 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8607 memset(target_st
, 0, sizeof(*target_st
));
8608 __put_user(st
.st_dev
, &target_st
->st_dev
);
8609 __put_user(st
.st_ino
, &target_st
->st_ino
);
8610 __put_user(st
.st_mode
, &target_st
->st_mode
);
8611 __put_user(st
.st_uid
, &target_st
->st_uid
);
8612 __put_user(st
.st_gid
, &target_st
->st_gid
);
8613 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8614 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8615 __put_user(st
.st_size
, &target_st
->st_size
);
8616 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8617 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8618 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8619 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8620 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8621 unlock_user_struct(target_st
, arg2
, 1);
8625 #ifdef TARGET_NR_olduname
8626 case TARGET_NR_olduname
:
8629 #ifdef TARGET_NR_iopl
8630 case TARGET_NR_iopl
:
8633 case TARGET_NR_vhangup
:
8634 ret
= get_errno(vhangup());
8636 #ifdef TARGET_NR_idle
8637 case TARGET_NR_idle
:
8640 #ifdef TARGET_NR_syscall
8641 case TARGET_NR_syscall
:
8642 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8643 arg6
, arg7
, arg8
, 0);
8646 case TARGET_NR_wait4
:
8649 abi_long status_ptr
= arg2
;
8650 struct rusage rusage
, *rusage_ptr
;
8651 abi_ulong target_rusage
= arg4
;
8652 abi_long rusage_err
;
8654 rusage_ptr
= &rusage
;
8657 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8658 if (!is_error(ret
)) {
8659 if (status_ptr
&& ret
) {
8660 status
= host_to_target_waitstatus(status
);
8661 if (put_user_s32(status
, status_ptr
))
8664 if (target_rusage
) {
8665 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8673 #ifdef TARGET_NR_swapoff
8674 case TARGET_NR_swapoff
:
8675 if (!(p
= lock_user_string(arg1
)))
8677 ret
= get_errno(swapoff(p
));
8678 unlock_user(p
, arg1
, 0);
8681 case TARGET_NR_sysinfo
:
8683 struct target_sysinfo
*target_value
;
8684 struct sysinfo value
;
8685 ret
= get_errno(sysinfo(&value
));
8686 if (!is_error(ret
) && arg1
)
8688 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8690 __put_user(value
.uptime
, &target_value
->uptime
);
8691 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8692 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8693 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8694 __put_user(value
.totalram
, &target_value
->totalram
);
8695 __put_user(value
.freeram
, &target_value
->freeram
);
8696 __put_user(value
.sharedram
, &target_value
->sharedram
);
8697 __put_user(value
.bufferram
, &target_value
->bufferram
);
8698 __put_user(value
.totalswap
, &target_value
->totalswap
);
8699 __put_user(value
.freeswap
, &target_value
->freeswap
);
8700 __put_user(value
.procs
, &target_value
->procs
);
8701 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8702 __put_user(value
.freehigh
, &target_value
->freehigh
);
8703 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8704 unlock_user_struct(target_value
, arg1
, 1);
8708 #ifdef TARGET_NR_ipc
8710 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8713 #ifdef TARGET_NR_semget
8714 case TARGET_NR_semget
:
8715 ret
= get_errno(semget(arg1
, arg2
, arg3
));
8718 #ifdef TARGET_NR_semop
8719 case TARGET_NR_semop
:
8720 ret
= do_semop(arg1
, arg2
, arg3
);
8723 #ifdef TARGET_NR_semctl
8724 case TARGET_NR_semctl
:
8725 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
8728 #ifdef TARGET_NR_msgctl
8729 case TARGET_NR_msgctl
:
8730 ret
= do_msgctl(arg1
, arg2
, arg3
);
8733 #ifdef TARGET_NR_msgget
8734 case TARGET_NR_msgget
:
8735 ret
= get_errno(msgget(arg1
, arg2
));
8738 #ifdef TARGET_NR_msgrcv
8739 case TARGET_NR_msgrcv
:
8740 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
8743 #ifdef TARGET_NR_msgsnd
8744 case TARGET_NR_msgsnd
:
8745 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
8748 #ifdef TARGET_NR_shmget
8749 case TARGET_NR_shmget
:
8750 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
8753 #ifdef TARGET_NR_shmctl
8754 case TARGET_NR_shmctl
:
8755 ret
= do_shmctl(arg1
, arg2
, arg3
);
8758 #ifdef TARGET_NR_shmat
8759 case TARGET_NR_shmat
:
8760 ret
= do_shmat(arg1
, arg2
, arg3
);
8763 #ifdef TARGET_NR_shmdt
8764 case TARGET_NR_shmdt
:
8765 ret
= do_shmdt(arg1
);
8768 case TARGET_NR_fsync
:
8769 ret
= get_errno(fsync(arg1
));
8771 case TARGET_NR_clone
:
8772 /* Linux manages to have three different orderings for its
8773 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8774 * match the kernel's CONFIG_CLONE_* settings.
8775 * Microblaze is further special in that it uses a sixth
8776 * implicit argument to clone for the TLS pointer.
8778 #if defined(TARGET_MICROBLAZE)
8779 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
8780 #elif defined(TARGET_CLONE_BACKWARDS)
8781 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
8782 #elif defined(TARGET_CLONE_BACKWARDS2)
8783 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
8785 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
8788 #ifdef __NR_exit_group
8789 /* new thread calls */
8790 case TARGET_NR_exit_group
:
8794 gdb_exit(cpu_env
, arg1
);
8795 ret
= get_errno(exit_group(arg1
));
8798 case TARGET_NR_setdomainname
:
8799 if (!(p
= lock_user_string(arg1
)))
8801 ret
= get_errno(setdomainname(p
, arg2
));
8802 unlock_user(p
, arg1
, 0);
8804 case TARGET_NR_uname
:
8805 /* no need to transcode because we use the linux syscall */
8807 struct new_utsname
* buf
;
8809 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
8811 ret
= get_errno(sys_uname(buf
));
8812 if (!is_error(ret
)) {
8813 /* Overrite the native machine name with whatever is being
8815 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
8816 /* Allow the user to override the reported release. */
8817 if (qemu_uname_release
&& *qemu_uname_release
)
8818 strcpy (buf
->release
, qemu_uname_release
);
8820 unlock_user_struct(buf
, arg1
, 1);
8824 case TARGET_NR_modify_ldt
:
8825 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
8827 #if !defined(TARGET_X86_64)
8828 case TARGET_NR_vm86old
:
8830 case TARGET_NR_vm86
:
8831 ret
= do_vm86(cpu_env
, arg1
, arg2
);
8835 case TARGET_NR_adjtimex
:
8837 #ifdef TARGET_NR_create_module
8838 case TARGET_NR_create_module
:
8840 case TARGET_NR_init_module
:
8841 case TARGET_NR_delete_module
:
8842 #ifdef TARGET_NR_get_kernel_syms
8843 case TARGET_NR_get_kernel_syms
:
8846 case TARGET_NR_quotactl
:
8848 case TARGET_NR_getpgid
:
8849 ret
= get_errno(getpgid(arg1
));
8851 case TARGET_NR_fchdir
:
8852 ret
= get_errno(fchdir(arg1
));
8854 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8855 case TARGET_NR_bdflush
:
8858 #ifdef TARGET_NR_sysfs
8859 case TARGET_NR_sysfs
:
8862 case TARGET_NR_personality
:
8863 ret
= get_errno(personality(arg1
));
8865 #ifdef TARGET_NR_afs_syscall
8866 case TARGET_NR_afs_syscall
:
8869 #ifdef TARGET_NR__llseek /* Not on alpha */
8870 case TARGET_NR__llseek
:
8873 #if !defined(__NR_llseek)
8874 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
8876 ret
= get_errno(res
);
8881 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
8883 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
8889 #ifdef TARGET_NR_getdents
8890 case TARGET_NR_getdents
:
8891 #ifdef __NR_getdents
8892 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8894 struct target_dirent
*target_dirp
;
8895 struct linux_dirent
*dirp
;
8896 abi_long count
= arg3
;
8898 dirp
= g_try_malloc(count
);
8900 ret
= -TARGET_ENOMEM
;
8904 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8905 if (!is_error(ret
)) {
8906 struct linux_dirent
*de
;
8907 struct target_dirent
*tde
;
8909 int reclen
, treclen
;
8910 int count1
, tnamelen
;
8914 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8918 reclen
= de
->d_reclen
;
8919 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
8920 assert(tnamelen
>= 0);
8921 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
8922 assert(count1
+ treclen
<= count
);
8923 tde
->d_reclen
= tswap16(treclen
);
8924 tde
->d_ino
= tswapal(de
->d_ino
);
8925 tde
->d_off
= tswapal(de
->d_off
);
8926 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
8927 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8929 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8933 unlock_user(target_dirp
, arg2
, ret
);
8939 struct linux_dirent
*dirp
;
8940 abi_long count
= arg3
;
8942 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8944 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
8945 if (!is_error(ret
)) {
8946 struct linux_dirent
*de
;
8951 reclen
= de
->d_reclen
;
8954 de
->d_reclen
= tswap16(reclen
);
8955 tswapls(&de
->d_ino
);
8956 tswapls(&de
->d_off
);
8957 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8961 unlock_user(dirp
, arg2
, ret
);
8965 /* Implement getdents in terms of getdents64 */
8967 struct linux_dirent64
*dirp
;
8968 abi_long count
= arg3
;
8970 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8974 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8975 if (!is_error(ret
)) {
8976 /* Convert the dirent64 structs to target dirent. We do this
8977 * in-place, since we can guarantee that a target_dirent is no
8978 * larger than a dirent64; however this means we have to be
8979 * careful to read everything before writing in the new format.
8981 struct linux_dirent64
*de
;
8982 struct target_dirent
*tde
;
8987 tde
= (struct target_dirent
*)dirp
;
8989 int namelen
, treclen
;
8990 int reclen
= de
->d_reclen
;
8991 uint64_t ino
= de
->d_ino
;
8992 int64_t off
= de
->d_off
;
8993 uint8_t type
= de
->d_type
;
8995 namelen
= strlen(de
->d_name
);
8996 treclen
= offsetof(struct target_dirent
, d_name
)
8998 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9000 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9001 tde
->d_ino
= tswapal(ino
);
9002 tde
->d_off
= tswapal(off
);
9003 tde
->d_reclen
= tswap16(treclen
);
9004 /* The target_dirent type is in what was formerly a padding
9005 * byte at the end of the structure:
9007 *(((char *)tde
) + treclen
- 1) = type
;
9009 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9010 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9016 unlock_user(dirp
, arg2
, ret
);
9020 #endif /* TARGET_NR_getdents */
9021 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9022 case TARGET_NR_getdents64
:
9024 struct linux_dirent64
*dirp
;
9025 abi_long count
= arg3
;
9026 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9028 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9029 if (!is_error(ret
)) {
9030 struct linux_dirent64
*de
;
9035 reclen
= de
->d_reclen
;
9038 de
->d_reclen
= tswap16(reclen
);
9039 tswap64s((uint64_t *)&de
->d_ino
);
9040 tswap64s((uint64_t *)&de
->d_off
);
9041 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9045 unlock_user(dirp
, arg2
, ret
);
9048 #endif /* TARGET_NR_getdents64 */
9049 #if defined(TARGET_NR__newselect)
9050 case TARGET_NR__newselect
:
9051 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9054 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9055 # ifdef TARGET_NR_poll
9056 case TARGET_NR_poll
:
9058 # ifdef TARGET_NR_ppoll
9059 case TARGET_NR_ppoll
:
9062 struct target_pollfd
*target_pfd
;
9063 unsigned int nfds
= arg2
;
9070 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9071 sizeof(struct target_pollfd
) * nfds
, 1);
9076 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9077 for (i
= 0; i
< nfds
; i
++) {
9078 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9079 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9084 # ifdef TARGET_NR_ppoll
9085 case TARGET_NR_ppoll
:
9087 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9088 target_sigset_t
*target_set
;
9089 sigset_t _set
, *set
= &_set
;
9092 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9093 unlock_user(target_pfd
, arg1
, 0);
9101 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9103 unlock_user(target_pfd
, arg1
, 0);
9106 target_to_host_sigset(set
, target_set
);
9111 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9112 set
, SIGSET_T_SIZE
));
9114 if (!is_error(ret
) && arg3
) {
9115 host_to_target_timespec(arg3
, timeout_ts
);
9118 unlock_user(target_set
, arg4
, 0);
9123 # ifdef TARGET_NR_poll
9124 case TARGET_NR_poll
:
9126 struct timespec ts
, *pts
;
9129 /* Convert ms to secs, ns */
9130 ts
.tv_sec
= arg3
/ 1000;
9131 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9134 /* -ve poll() timeout means "infinite" */
9137 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9142 g_assert_not_reached();
9145 if (!is_error(ret
)) {
9146 for(i
= 0; i
< nfds
; i
++) {
9147 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9150 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9154 case TARGET_NR_flock
:
9155 /* NOTE: the flock constant seems to be the same for every
9157 ret
= get_errno(safe_flock(arg1
, arg2
));
9159 case TARGET_NR_readv
:
9161 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9163 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9164 unlock_iovec(vec
, arg2
, arg3
, 1);
9166 ret
= -host_to_target_errno(errno
);
9170 case TARGET_NR_writev
:
9172 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9174 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9175 unlock_iovec(vec
, arg2
, arg3
, 0);
9177 ret
= -host_to_target_errno(errno
);
9181 case TARGET_NR_getsid
:
9182 ret
= get_errno(getsid(arg1
));
9184 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9185 case TARGET_NR_fdatasync
:
9186 ret
= get_errno(fdatasync(arg1
));
9189 #ifdef TARGET_NR__sysctl
9190 case TARGET_NR__sysctl
:
9191 /* We don't implement this, but ENOTDIR is always a safe
9193 ret
= -TARGET_ENOTDIR
;
9196 case TARGET_NR_sched_getaffinity
:
9198 unsigned int mask_size
;
9199 unsigned long *mask
;
9202 * sched_getaffinity needs multiples of ulong, so need to take
9203 * care of mismatches between target ulong and host ulong sizes.
9205 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9206 ret
= -TARGET_EINVAL
;
9209 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9211 mask
= alloca(mask_size
);
9212 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9214 if (!is_error(ret
)) {
9216 /* More data returned than the caller's buffer will fit.
9217 * This only happens if sizeof(abi_long) < sizeof(long)
9218 * and the caller passed us a buffer holding an odd number
9219 * of abi_longs. If the host kernel is actually using the
9220 * extra 4 bytes then fail EINVAL; otherwise we can just
9221 * ignore them and only copy the interesting part.
9223 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9224 if (numcpus
> arg2
* 8) {
9225 ret
= -TARGET_EINVAL
;
9231 if (copy_to_user(arg3
, mask
, ret
)) {
9237 case TARGET_NR_sched_setaffinity
:
9239 unsigned int mask_size
;
9240 unsigned long *mask
;
9243 * sched_setaffinity needs multiples of ulong, so need to take
9244 * care of mismatches between target ulong and host ulong sizes.
9246 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9247 ret
= -TARGET_EINVAL
;
9250 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9252 mask
= alloca(mask_size
);
9253 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9256 memcpy(mask
, p
, arg2
);
9257 unlock_user_struct(p
, arg2
, 0);
9259 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9262 case TARGET_NR_sched_setparam
:
9264 struct sched_param
*target_schp
;
9265 struct sched_param schp
;
9268 return -TARGET_EINVAL
;
9270 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9272 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9273 unlock_user_struct(target_schp
, arg2
, 0);
9274 ret
= get_errno(sched_setparam(arg1
, &schp
));
9277 case TARGET_NR_sched_getparam
:
9279 struct sched_param
*target_schp
;
9280 struct sched_param schp
;
9283 return -TARGET_EINVAL
;
9285 ret
= get_errno(sched_getparam(arg1
, &schp
));
9286 if (!is_error(ret
)) {
9287 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9289 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9290 unlock_user_struct(target_schp
, arg2
, 1);
9294 case TARGET_NR_sched_setscheduler
:
9296 struct sched_param
*target_schp
;
9297 struct sched_param schp
;
9299 return -TARGET_EINVAL
;
9301 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9303 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9304 unlock_user_struct(target_schp
, arg3
, 0);
9305 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9308 case TARGET_NR_sched_getscheduler
:
9309 ret
= get_errno(sched_getscheduler(arg1
));
9311 case TARGET_NR_sched_yield
:
9312 ret
= get_errno(sched_yield());
9314 case TARGET_NR_sched_get_priority_max
:
9315 ret
= get_errno(sched_get_priority_max(arg1
));
9317 case TARGET_NR_sched_get_priority_min
:
9318 ret
= get_errno(sched_get_priority_min(arg1
));
9320 case TARGET_NR_sched_rr_get_interval
:
9323 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9324 if (!is_error(ret
)) {
9325 ret
= host_to_target_timespec(arg2
, &ts
);
9329 case TARGET_NR_nanosleep
:
9331 struct timespec req
, rem
;
9332 target_to_host_timespec(&req
, arg1
);
9333 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9334 if (is_error(ret
) && arg2
) {
9335 host_to_target_timespec(arg2
, &rem
);
9339 #ifdef TARGET_NR_query_module
9340 case TARGET_NR_query_module
:
9343 #ifdef TARGET_NR_nfsservctl
9344 case TARGET_NR_nfsservctl
:
9347 case TARGET_NR_prctl
:
9349 case PR_GET_PDEATHSIG
:
9352 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9353 if (!is_error(ret
) && arg2
9354 && put_user_ual(deathsig
, arg2
)) {
9362 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9366 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9368 unlock_user(name
, arg2
, 16);
9373 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9377 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9379 unlock_user(name
, arg2
, 0);
9384 /* Most prctl options have no pointer arguments */
9385 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9389 #ifdef TARGET_NR_arch_prctl
9390 case TARGET_NR_arch_prctl
:
9391 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9392 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9398 #ifdef TARGET_NR_pread64
9399 case TARGET_NR_pread64
:
9400 if (regpairs_aligned(cpu_env
)) {
9404 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9406 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9407 unlock_user(p
, arg2
, ret
);
9409 case TARGET_NR_pwrite64
:
9410 if (regpairs_aligned(cpu_env
)) {
9414 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9416 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9417 unlock_user(p
, arg2
, 0);
9420 case TARGET_NR_getcwd
:
9421 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9423 ret
= get_errno(sys_getcwd1(p
, arg2
));
9424 unlock_user(p
, arg1
, ret
);
9426 case TARGET_NR_capget
:
9427 case TARGET_NR_capset
:
9429 struct target_user_cap_header
*target_header
;
9430 struct target_user_cap_data
*target_data
= NULL
;
9431 struct __user_cap_header_struct header
;
9432 struct __user_cap_data_struct data
[2];
9433 struct __user_cap_data_struct
*dataptr
= NULL
;
9434 int i
, target_datalen
;
9437 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9440 header
.version
= tswap32(target_header
->version
);
9441 header
.pid
= tswap32(target_header
->pid
);
9443 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9444 /* Version 2 and up takes pointer to two user_data structs */
9448 target_datalen
= sizeof(*target_data
) * data_items
;
9451 if (num
== TARGET_NR_capget
) {
9452 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9454 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9457 unlock_user_struct(target_header
, arg1
, 0);
9461 if (num
== TARGET_NR_capset
) {
9462 for (i
= 0; i
< data_items
; i
++) {
9463 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9464 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9465 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9472 if (num
== TARGET_NR_capget
) {
9473 ret
= get_errno(capget(&header
, dataptr
));
9475 ret
= get_errno(capset(&header
, dataptr
));
9478 /* The kernel always updates version for both capget and capset */
9479 target_header
->version
= tswap32(header
.version
);
9480 unlock_user_struct(target_header
, arg1
, 1);
9483 if (num
== TARGET_NR_capget
) {
9484 for (i
= 0; i
< data_items
; i
++) {
9485 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9486 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9487 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9489 unlock_user(target_data
, arg2
, target_datalen
);
9491 unlock_user(target_data
, arg2
, 0);
9496 case TARGET_NR_sigaltstack
:
9497 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
9500 #ifdef CONFIG_SENDFILE
9501 case TARGET_NR_sendfile
:
9506 ret
= get_user_sal(off
, arg3
);
9507 if (is_error(ret
)) {
9512 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9513 if (!is_error(ret
) && arg3
) {
9514 abi_long ret2
= put_user_sal(off
, arg3
);
9515 if (is_error(ret2
)) {
9521 #ifdef TARGET_NR_sendfile64
9522 case TARGET_NR_sendfile64
:
9527 ret
= get_user_s64(off
, arg3
);
9528 if (is_error(ret
)) {
9533 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
9534 if (!is_error(ret
) && arg3
) {
9535 abi_long ret2
= put_user_s64(off
, arg3
);
9536 if (is_error(ret2
)) {
9544 case TARGET_NR_sendfile
:
9545 #ifdef TARGET_NR_sendfile64
9546 case TARGET_NR_sendfile64
:
9551 #ifdef TARGET_NR_getpmsg
9552 case TARGET_NR_getpmsg
:
9555 #ifdef TARGET_NR_putpmsg
9556 case TARGET_NR_putpmsg
:
9559 #ifdef TARGET_NR_vfork
9560 case TARGET_NR_vfork
:
9561 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
9565 #ifdef TARGET_NR_ugetrlimit
9566 case TARGET_NR_ugetrlimit
:
9569 int resource
= target_to_host_resource(arg1
);
9570 ret
= get_errno(getrlimit(resource
, &rlim
));
9571 if (!is_error(ret
)) {
9572 struct target_rlimit
*target_rlim
;
9573 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9575 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9576 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9577 unlock_user_struct(target_rlim
, arg2
, 1);
9582 #ifdef TARGET_NR_truncate64
9583 case TARGET_NR_truncate64
:
9584 if (!(p
= lock_user_string(arg1
)))
9586 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
9587 unlock_user(p
, arg1
, 0);
9590 #ifdef TARGET_NR_ftruncate64
9591 case TARGET_NR_ftruncate64
:
9592 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
9595 #ifdef TARGET_NR_stat64
9596 case TARGET_NR_stat64
:
9597 if (!(p
= lock_user_string(arg1
)))
9599 ret
= get_errno(stat(path(p
), &st
));
9600 unlock_user(p
, arg1
, 0);
9602 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9605 #ifdef TARGET_NR_lstat64
9606 case TARGET_NR_lstat64
:
9607 if (!(p
= lock_user_string(arg1
)))
9609 ret
= get_errno(lstat(path(p
), &st
));
9610 unlock_user(p
, arg1
, 0);
9612 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9615 #ifdef TARGET_NR_fstat64
9616 case TARGET_NR_fstat64
:
9617 ret
= get_errno(fstat(arg1
, &st
));
9619 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
9622 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9623 #ifdef TARGET_NR_fstatat64
9624 case TARGET_NR_fstatat64
:
9626 #ifdef TARGET_NR_newfstatat
9627 case TARGET_NR_newfstatat
:
9629 if (!(p
= lock_user_string(arg2
)))
9631 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
9633 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
9636 #ifdef TARGET_NR_lchown
9637 case TARGET_NR_lchown
:
9638 if (!(p
= lock_user_string(arg1
)))
9640 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9641 unlock_user(p
, arg1
, 0);
9644 #ifdef TARGET_NR_getuid
9645 case TARGET_NR_getuid
:
9646 ret
= get_errno(high2lowuid(getuid()));
9649 #ifdef TARGET_NR_getgid
9650 case TARGET_NR_getgid
:
9651 ret
= get_errno(high2lowgid(getgid()));
9654 #ifdef TARGET_NR_geteuid
9655 case TARGET_NR_geteuid
:
9656 ret
= get_errno(high2lowuid(geteuid()));
9659 #ifdef TARGET_NR_getegid
9660 case TARGET_NR_getegid
:
9661 ret
= get_errno(high2lowgid(getegid()));
9664 case TARGET_NR_setreuid
:
9665 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
9667 case TARGET_NR_setregid
:
9668 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
9670 case TARGET_NR_getgroups
:
9672 int gidsetsize
= arg1
;
9673 target_id
*target_grouplist
;
9677 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9678 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9679 if (gidsetsize
== 0)
9681 if (!is_error(ret
)) {
9682 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
9683 if (!target_grouplist
)
9685 for(i
= 0;i
< ret
; i
++)
9686 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
9687 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
9691 case TARGET_NR_setgroups
:
9693 int gidsetsize
= arg1
;
9694 target_id
*target_grouplist
;
9695 gid_t
*grouplist
= NULL
;
9698 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9699 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
9700 if (!target_grouplist
) {
9701 ret
= -TARGET_EFAULT
;
9704 for (i
= 0; i
< gidsetsize
; i
++) {
9705 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
9707 unlock_user(target_grouplist
, arg2
, 0);
9709 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9712 case TARGET_NR_fchown
:
9713 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
9715 #if defined(TARGET_NR_fchownat)
9716 case TARGET_NR_fchownat
:
9717 if (!(p
= lock_user_string(arg2
)))
9719 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
9720 low2highgid(arg4
), arg5
));
9721 unlock_user(p
, arg2
, 0);
9724 #ifdef TARGET_NR_setresuid
9725 case TARGET_NR_setresuid
:
9726 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
9728 low2highuid(arg3
)));
9731 #ifdef TARGET_NR_getresuid
9732 case TARGET_NR_getresuid
:
9734 uid_t ruid
, euid
, suid
;
9735 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9736 if (!is_error(ret
)) {
9737 if (put_user_id(high2lowuid(ruid
), arg1
)
9738 || put_user_id(high2lowuid(euid
), arg2
)
9739 || put_user_id(high2lowuid(suid
), arg3
))
9745 #ifdef TARGET_NR_getresgid
9746 case TARGET_NR_setresgid
:
9747 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
9749 low2highgid(arg3
)));
9752 #ifdef TARGET_NR_getresgid
9753 case TARGET_NR_getresgid
:
9755 gid_t rgid
, egid
, sgid
;
9756 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9757 if (!is_error(ret
)) {
9758 if (put_user_id(high2lowgid(rgid
), arg1
)
9759 || put_user_id(high2lowgid(egid
), arg2
)
9760 || put_user_id(high2lowgid(sgid
), arg3
))
9766 #ifdef TARGET_NR_chown
9767 case TARGET_NR_chown
:
9768 if (!(p
= lock_user_string(arg1
)))
9770 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
9771 unlock_user(p
, arg1
, 0);
9774 case TARGET_NR_setuid
:
9775 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
9777 case TARGET_NR_setgid
:
9778 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
9780 case TARGET_NR_setfsuid
:
9781 ret
= get_errno(setfsuid(arg1
));
9783 case TARGET_NR_setfsgid
:
9784 ret
= get_errno(setfsgid(arg1
));
9787 #ifdef TARGET_NR_lchown32
9788 case TARGET_NR_lchown32
:
9789 if (!(p
= lock_user_string(arg1
)))
9791 ret
= get_errno(lchown(p
, arg2
, arg3
));
9792 unlock_user(p
, arg1
, 0);
9795 #ifdef TARGET_NR_getuid32
9796 case TARGET_NR_getuid32
:
9797 ret
= get_errno(getuid());
9801 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9802 /* Alpha specific */
9803 case TARGET_NR_getxuid
:
9807 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
9809 ret
= get_errno(getuid());
9812 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9813 /* Alpha specific */
9814 case TARGET_NR_getxgid
:
9818 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
9820 ret
= get_errno(getgid());
9823 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9824 /* Alpha specific */
9825 case TARGET_NR_osf_getsysinfo
:
9826 ret
= -TARGET_EOPNOTSUPP
;
9828 case TARGET_GSI_IEEE_FP_CONTROL
:
9830 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
9832 /* Copied from linux ieee_fpcr_to_swcr. */
9833 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
9834 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
9835 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
9836 | SWCR_TRAP_ENABLE_DZE
9837 | SWCR_TRAP_ENABLE_OVF
);
9838 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
9839 | SWCR_TRAP_ENABLE_INE
);
9840 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
9841 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
9843 if (put_user_u64 (swcr
, arg2
))
9849 /* case GSI_IEEE_STATE_AT_SIGNAL:
9850 -- Not implemented in linux kernel.
9852 -- Retrieves current unaligned access state; not much used.
9854 -- Retrieves implver information; surely not used.
9856 -- Grabs a copy of the HWRPB; surely not used.
9861 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9862 /* Alpha specific */
9863 case TARGET_NR_osf_setsysinfo
:
9864 ret
= -TARGET_EOPNOTSUPP
;
9866 case TARGET_SSI_IEEE_FP_CONTROL
:
9868 uint64_t swcr
, fpcr
, orig_fpcr
;
9870 if (get_user_u64 (swcr
, arg2
)) {
9873 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9874 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
9876 /* Copied from linux ieee_swcr_to_fpcr. */
9877 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
9878 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
9879 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
9880 | SWCR_TRAP_ENABLE_DZE
9881 | SWCR_TRAP_ENABLE_OVF
)) << 48;
9882 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
9883 | SWCR_TRAP_ENABLE_INE
)) << 57;
9884 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
9885 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
9887 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9892 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
9894 uint64_t exc
, fpcr
, orig_fpcr
;
9897 if (get_user_u64(exc
, arg2
)) {
9901 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
9903 /* We only add to the exception status here. */
9904 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
9906 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
9909 /* Old exceptions are not signaled. */
9910 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
9912 /* If any exceptions set by this call,
9913 and are unmasked, send a signal. */
9915 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
9916 si_code
= TARGET_FPE_FLTRES
;
9918 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
9919 si_code
= TARGET_FPE_FLTUND
;
9921 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
9922 si_code
= TARGET_FPE_FLTOVF
;
9924 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
9925 si_code
= TARGET_FPE_FLTDIV
;
9927 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
9928 si_code
= TARGET_FPE_FLTINV
;
9931 target_siginfo_t info
;
9932 info
.si_signo
= SIGFPE
;
9934 info
.si_code
= si_code
;
9935 info
._sifields
._sigfault
._addr
9936 = ((CPUArchState
*)cpu_env
)->pc
;
9937 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
9942 /* case SSI_NVPAIRS:
9943 -- Used with SSIN_UACPROC to enable unaligned accesses.
9944 case SSI_IEEE_STATE_AT_SIGNAL:
9945 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9946 -- Not implemented in linux kernel
9951 #ifdef TARGET_NR_osf_sigprocmask
9952 /* Alpha specific. */
9953 case TARGET_NR_osf_sigprocmask
:
9957 sigset_t set
, oldset
;
9960 case TARGET_SIG_BLOCK
:
9963 case TARGET_SIG_UNBLOCK
:
9966 case TARGET_SIG_SETMASK
:
9970 ret
= -TARGET_EINVAL
;
9974 target_to_host_old_sigset(&set
, &mask
);
9975 ret
= do_sigprocmask(how
, &set
, &oldset
);
9977 host_to_target_old_sigset(&mask
, &oldset
);
9984 #ifdef TARGET_NR_getgid32
9985 case TARGET_NR_getgid32
:
9986 ret
= get_errno(getgid());
9989 #ifdef TARGET_NR_geteuid32
9990 case TARGET_NR_geteuid32
:
9991 ret
= get_errno(geteuid());
9994 #ifdef TARGET_NR_getegid32
9995 case TARGET_NR_getegid32
:
9996 ret
= get_errno(getegid());
9999 #ifdef TARGET_NR_setreuid32
10000 case TARGET_NR_setreuid32
:
10001 ret
= get_errno(setreuid(arg1
, arg2
));
10004 #ifdef TARGET_NR_setregid32
10005 case TARGET_NR_setregid32
:
10006 ret
= get_errno(setregid(arg1
, arg2
));
10009 #ifdef TARGET_NR_getgroups32
10010 case TARGET_NR_getgroups32
:
10012 int gidsetsize
= arg1
;
10013 uint32_t *target_grouplist
;
10017 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10018 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10019 if (gidsetsize
== 0)
10021 if (!is_error(ret
)) {
10022 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10023 if (!target_grouplist
) {
10024 ret
= -TARGET_EFAULT
;
10027 for(i
= 0;i
< ret
; i
++)
10028 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10029 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10034 #ifdef TARGET_NR_setgroups32
10035 case TARGET_NR_setgroups32
:
10037 int gidsetsize
= arg1
;
10038 uint32_t *target_grouplist
;
10042 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10043 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10044 if (!target_grouplist
) {
10045 ret
= -TARGET_EFAULT
;
10048 for(i
= 0;i
< gidsetsize
; i
++)
10049 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10050 unlock_user(target_grouplist
, arg2
, 0);
10051 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10055 #ifdef TARGET_NR_fchown32
10056 case TARGET_NR_fchown32
:
10057 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10060 #ifdef TARGET_NR_setresuid32
10061 case TARGET_NR_setresuid32
:
10062 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10065 #ifdef TARGET_NR_getresuid32
10066 case TARGET_NR_getresuid32
:
10068 uid_t ruid
, euid
, suid
;
10069 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10070 if (!is_error(ret
)) {
10071 if (put_user_u32(ruid
, arg1
)
10072 || put_user_u32(euid
, arg2
)
10073 || put_user_u32(suid
, arg3
))
10079 #ifdef TARGET_NR_setresgid32
10080 case TARGET_NR_setresgid32
:
10081 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10084 #ifdef TARGET_NR_getresgid32
10085 case TARGET_NR_getresgid32
:
10087 gid_t rgid
, egid
, sgid
;
10088 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10089 if (!is_error(ret
)) {
10090 if (put_user_u32(rgid
, arg1
)
10091 || put_user_u32(egid
, arg2
)
10092 || put_user_u32(sgid
, arg3
))
10098 #ifdef TARGET_NR_chown32
10099 case TARGET_NR_chown32
:
10100 if (!(p
= lock_user_string(arg1
)))
10102 ret
= get_errno(chown(p
, arg2
, arg3
));
10103 unlock_user(p
, arg1
, 0);
10106 #ifdef TARGET_NR_setuid32
10107 case TARGET_NR_setuid32
:
10108 ret
= get_errno(sys_setuid(arg1
));
10111 #ifdef TARGET_NR_setgid32
10112 case TARGET_NR_setgid32
:
10113 ret
= get_errno(sys_setgid(arg1
));
10116 #ifdef TARGET_NR_setfsuid32
10117 case TARGET_NR_setfsuid32
:
10118 ret
= get_errno(setfsuid(arg1
));
10121 #ifdef TARGET_NR_setfsgid32
10122 case TARGET_NR_setfsgid32
:
10123 ret
= get_errno(setfsgid(arg1
));
10127 case TARGET_NR_pivot_root
:
10128 goto unimplemented
;
10129 #ifdef TARGET_NR_mincore
10130 case TARGET_NR_mincore
:
10133 ret
= -TARGET_EFAULT
;
10134 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10136 if (!(p
= lock_user_string(arg3
)))
10138 ret
= get_errno(mincore(a
, arg2
, p
));
10139 unlock_user(p
, arg3
, ret
);
10141 unlock_user(a
, arg1
, 0);
10145 #ifdef TARGET_NR_arm_fadvise64_64
10146 case TARGET_NR_arm_fadvise64_64
:
10147 /* arm_fadvise64_64 looks like fadvise64_64 but
10148 * with different argument order: fd, advice, offset, len
10149 * rather than the usual fd, offset, len, advice.
10150 * Note that offset and len are both 64-bit so appear as
10151 * pairs of 32-bit registers.
10153 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10154 target_offset64(arg5
, arg6
), arg2
);
10155 ret
= -host_to_target_errno(ret
);
10159 #if TARGET_ABI_BITS == 32
10161 #ifdef TARGET_NR_fadvise64_64
10162 case TARGET_NR_fadvise64_64
:
10163 /* 6 args: fd, offset (high, low), len (high, low), advice */
10164 if (regpairs_aligned(cpu_env
)) {
10165 /* offset is in (3,4), len in (5,6) and advice in 7 */
10172 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10173 target_offset64(arg2
, arg3
),
10174 target_offset64(arg4
, arg5
),
10179 #ifdef TARGET_NR_fadvise64
10180 case TARGET_NR_fadvise64
:
10181 /* 5 args: fd, offset (high, low), len, advice */
10182 if (regpairs_aligned(cpu_env
)) {
10183 /* offset is in (3,4), len in 5 and advice in 6 */
10189 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10190 target_offset64(arg2
, arg3
),
10195 #else /* not a 32-bit ABI */
10196 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10197 #ifdef TARGET_NR_fadvise64_64
10198 case TARGET_NR_fadvise64_64
:
10200 #ifdef TARGET_NR_fadvise64
10201 case TARGET_NR_fadvise64
:
10203 #ifdef TARGET_S390X
10205 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10206 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10207 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10208 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10212 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10215 #endif /* end of 64-bit ABI fadvise handling */
10217 #ifdef TARGET_NR_madvise
10218 case TARGET_NR_madvise
:
10219 /* A straight passthrough may not be safe because qemu sometimes
10220 turns private file-backed mappings into anonymous mappings.
10221 This will break MADV_DONTNEED.
10222 This is a hint, so ignoring and returning success is ok. */
10223 ret
= get_errno(0);
10226 #if TARGET_ABI_BITS == 32
10227 case TARGET_NR_fcntl64
:
10231 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10232 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10235 if (((CPUARMState
*)cpu_env
)->eabi
) {
10236 copyfrom
= copy_from_user_eabi_flock64
;
10237 copyto
= copy_to_user_eabi_flock64
;
10241 cmd
= target_to_host_fcntl_cmd(arg2
);
10242 if (cmd
== -TARGET_EINVAL
) {
10248 case TARGET_F_GETLK64
:
10249 ret
= copyfrom(&fl
, arg3
);
10253 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10255 ret
= copyto(arg3
, &fl
);
10259 case TARGET_F_SETLK64
:
10260 case TARGET_F_SETLKW64
:
10261 ret
= copyfrom(&fl
, arg3
);
10265 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10268 ret
= do_fcntl(arg1
, arg2
, arg3
);
10274 #ifdef TARGET_NR_cacheflush
10275 case TARGET_NR_cacheflush
:
10276 /* self-modifying code is handled automatically, so nothing needed */
10280 #ifdef TARGET_NR_security
10281 case TARGET_NR_security
:
10282 goto unimplemented
;
10284 #ifdef TARGET_NR_getpagesize
10285 case TARGET_NR_getpagesize
:
10286 ret
= TARGET_PAGE_SIZE
;
10289 case TARGET_NR_gettid
:
10290 ret
= get_errno(gettid());
10292 #ifdef TARGET_NR_readahead
10293 case TARGET_NR_readahead
:
10294 #if TARGET_ABI_BITS == 32
10295 if (regpairs_aligned(cpu_env
)) {
10300 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10302 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10307 #ifdef TARGET_NR_setxattr
10308 case TARGET_NR_listxattr
:
10309 case TARGET_NR_llistxattr
:
10313 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10315 ret
= -TARGET_EFAULT
;
10319 p
= lock_user_string(arg1
);
10321 if (num
== TARGET_NR_listxattr
) {
10322 ret
= get_errno(listxattr(p
, b
, arg3
));
10324 ret
= get_errno(llistxattr(p
, b
, arg3
));
10327 ret
= -TARGET_EFAULT
;
10329 unlock_user(p
, arg1
, 0);
10330 unlock_user(b
, arg2
, arg3
);
10333 case TARGET_NR_flistxattr
:
10337 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10339 ret
= -TARGET_EFAULT
;
10343 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10344 unlock_user(b
, arg2
, arg3
);
10347 case TARGET_NR_setxattr
:
10348 case TARGET_NR_lsetxattr
:
10350 void *p
, *n
, *v
= 0;
10352 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10354 ret
= -TARGET_EFAULT
;
10358 p
= lock_user_string(arg1
);
10359 n
= lock_user_string(arg2
);
10361 if (num
== TARGET_NR_setxattr
) {
10362 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10364 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10367 ret
= -TARGET_EFAULT
;
10369 unlock_user(p
, arg1
, 0);
10370 unlock_user(n
, arg2
, 0);
10371 unlock_user(v
, arg3
, 0);
10374 case TARGET_NR_fsetxattr
:
10378 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10380 ret
= -TARGET_EFAULT
;
10384 n
= lock_user_string(arg2
);
10386 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10388 ret
= -TARGET_EFAULT
;
10390 unlock_user(n
, arg2
, 0);
10391 unlock_user(v
, arg3
, 0);
10394 case TARGET_NR_getxattr
:
10395 case TARGET_NR_lgetxattr
:
10397 void *p
, *n
, *v
= 0;
10399 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10401 ret
= -TARGET_EFAULT
;
10405 p
= lock_user_string(arg1
);
10406 n
= lock_user_string(arg2
);
10408 if (num
== TARGET_NR_getxattr
) {
10409 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10411 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10414 ret
= -TARGET_EFAULT
;
10416 unlock_user(p
, arg1
, 0);
10417 unlock_user(n
, arg2
, 0);
10418 unlock_user(v
, arg3
, arg4
);
10421 case TARGET_NR_fgetxattr
:
10425 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10427 ret
= -TARGET_EFAULT
;
10431 n
= lock_user_string(arg2
);
10433 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10435 ret
= -TARGET_EFAULT
;
10437 unlock_user(n
, arg2
, 0);
10438 unlock_user(v
, arg3
, arg4
);
10441 case TARGET_NR_removexattr
:
10442 case TARGET_NR_lremovexattr
:
10445 p
= lock_user_string(arg1
);
10446 n
= lock_user_string(arg2
);
10448 if (num
== TARGET_NR_removexattr
) {
10449 ret
= get_errno(removexattr(p
, n
));
10451 ret
= get_errno(lremovexattr(p
, n
));
10454 ret
= -TARGET_EFAULT
;
10456 unlock_user(p
, arg1
, 0);
10457 unlock_user(n
, arg2
, 0);
10460 case TARGET_NR_fremovexattr
:
10463 n
= lock_user_string(arg2
);
10465 ret
= get_errno(fremovexattr(arg1
, n
));
10467 ret
= -TARGET_EFAULT
;
10469 unlock_user(n
, arg2
, 0);
10473 #endif /* CONFIG_ATTR */
10474 #ifdef TARGET_NR_set_thread_area
10475 case TARGET_NR_set_thread_area
:
10476 #if defined(TARGET_MIPS)
10477 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10480 #elif defined(TARGET_CRIS)
10482 ret
= -TARGET_EINVAL
;
10484 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10488 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10489 ret
= do_set_thread_area(cpu_env
, arg1
);
10491 #elif defined(TARGET_M68K)
10493 TaskState
*ts
= cpu
->opaque
;
10494 ts
->tp_value
= arg1
;
10499 goto unimplemented_nowarn
;
10502 #ifdef TARGET_NR_get_thread_area
10503 case TARGET_NR_get_thread_area
:
10504 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10505 ret
= do_get_thread_area(cpu_env
, arg1
);
10507 #elif defined(TARGET_M68K)
10509 TaskState
*ts
= cpu
->opaque
;
10510 ret
= ts
->tp_value
;
10514 goto unimplemented_nowarn
;
10517 #ifdef TARGET_NR_getdomainname
10518 case TARGET_NR_getdomainname
:
10519 goto unimplemented_nowarn
;
10522 #ifdef TARGET_NR_clock_gettime
10523 case TARGET_NR_clock_gettime
:
10525 struct timespec ts
;
10526 ret
= get_errno(clock_gettime(arg1
, &ts
));
10527 if (!is_error(ret
)) {
10528 host_to_target_timespec(arg2
, &ts
);
10533 #ifdef TARGET_NR_clock_getres
10534 case TARGET_NR_clock_getres
:
10536 struct timespec ts
;
10537 ret
= get_errno(clock_getres(arg1
, &ts
));
10538 if (!is_error(ret
)) {
10539 host_to_target_timespec(arg2
, &ts
);
10544 #ifdef TARGET_NR_clock_nanosleep
10545 case TARGET_NR_clock_nanosleep
:
10547 struct timespec ts
;
10548 target_to_host_timespec(&ts
, arg3
);
10549 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10550 &ts
, arg4
? &ts
: NULL
));
10552 host_to_target_timespec(arg4
, &ts
);
10554 #if defined(TARGET_PPC)
10555 /* clock_nanosleep is odd in that it returns positive errno values.
10556 * On PPC, CR0 bit 3 should be set in such a situation. */
10557 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
10558 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
10565 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10566 case TARGET_NR_set_tid_address
:
10567 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
10571 case TARGET_NR_tkill
:
10572 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
10575 case TARGET_NR_tgkill
:
10576 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
10577 target_to_host_signal(arg3
)));
10580 #ifdef TARGET_NR_set_robust_list
10581 case TARGET_NR_set_robust_list
:
10582 case TARGET_NR_get_robust_list
:
10583 /* The ABI for supporting robust futexes has userspace pass
10584 * the kernel a pointer to a linked list which is updated by
10585 * userspace after the syscall; the list is walked by the kernel
10586 * when the thread exits. Since the linked list in QEMU guest
10587 * memory isn't a valid linked list for the host and we have
10588 * no way to reliably intercept the thread-death event, we can't
10589 * support these. Silently return ENOSYS so that guest userspace
10590 * falls back to a non-robust futex implementation (which should
10591 * be OK except in the corner case of the guest crashing while
10592 * holding a mutex that is shared with another process via
10595 goto unimplemented_nowarn
;
10598 #if defined(TARGET_NR_utimensat)
10599 case TARGET_NR_utimensat
:
10601 struct timespec
*tsp
, ts
[2];
10605 target_to_host_timespec(ts
, arg3
);
10606 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
10610 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
10612 if (!(p
= lock_user_string(arg2
))) {
10613 ret
= -TARGET_EFAULT
;
10616 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
10617 unlock_user(p
, arg2
, 0);
10622 case TARGET_NR_futex
:
10623 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10625 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10626 case TARGET_NR_inotify_init
:
10627 ret
= get_errno(sys_inotify_init());
10630 #ifdef CONFIG_INOTIFY1
10631 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10632 case TARGET_NR_inotify_init1
:
10633 ret
= get_errno(sys_inotify_init1(arg1
));
10637 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10638 case TARGET_NR_inotify_add_watch
:
10639 p
= lock_user_string(arg2
);
10640 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
10641 unlock_user(p
, arg2
, 0);
10644 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10645 case TARGET_NR_inotify_rm_watch
:
10646 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
10650 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10651 case TARGET_NR_mq_open
:
10653 struct mq_attr posix_mq_attr
, *attrp
;
10655 p
= lock_user_string(arg1
- 1);
10657 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
10658 attrp
= &posix_mq_attr
;
10662 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
10663 unlock_user (p
, arg1
, 0);
10667 case TARGET_NR_mq_unlink
:
10668 p
= lock_user_string(arg1
- 1);
10669 ret
= get_errno(mq_unlink(p
));
10670 unlock_user (p
, arg1
, 0);
10673 case TARGET_NR_mq_timedsend
:
10675 struct timespec ts
;
10677 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10679 target_to_host_timespec(&ts
, arg5
);
10680 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
10681 host_to_target_timespec(arg5
, &ts
);
10683 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
10685 unlock_user (p
, arg2
, arg3
);
10689 case TARGET_NR_mq_timedreceive
:
10691 struct timespec ts
;
10694 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
10696 target_to_host_timespec(&ts
, arg5
);
10697 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10699 host_to_target_timespec(arg5
, &ts
);
10701 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
10704 unlock_user (p
, arg2
, arg3
);
10706 put_user_u32(prio
, arg4
);
10710 /* Not implemented for now... */
10711 /* case TARGET_NR_mq_notify: */
10714 case TARGET_NR_mq_getsetattr
:
10716 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
10719 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
10720 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
10723 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
10724 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
10731 #ifdef CONFIG_SPLICE
10732 #ifdef TARGET_NR_tee
10733 case TARGET_NR_tee
:
10735 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
10739 #ifdef TARGET_NR_splice
10740 case TARGET_NR_splice
:
10742 loff_t loff_in
, loff_out
;
10743 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
10745 if (get_user_u64(loff_in
, arg2
)) {
10748 ploff_in
= &loff_in
;
10751 if (get_user_u64(loff_out
, arg4
)) {
10754 ploff_out
= &loff_out
;
10756 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
10758 if (put_user_u64(loff_in
, arg2
)) {
10763 if (put_user_u64(loff_out
, arg4
)) {
10770 #ifdef TARGET_NR_vmsplice
10771 case TARGET_NR_vmsplice
:
10773 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10775 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
10776 unlock_iovec(vec
, arg2
, arg3
, 0);
10778 ret
= -host_to_target_errno(errno
);
10783 #endif /* CONFIG_SPLICE */
10784 #ifdef CONFIG_EVENTFD
10785 #if defined(TARGET_NR_eventfd)
10786 case TARGET_NR_eventfd
:
10787 ret
= get_errno(eventfd(arg1
, 0));
10788 fd_trans_unregister(ret
);
10791 #if defined(TARGET_NR_eventfd2)
10792 case TARGET_NR_eventfd2
:
10794 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
10795 if (arg2
& TARGET_O_NONBLOCK
) {
10796 host_flags
|= O_NONBLOCK
;
10798 if (arg2
& TARGET_O_CLOEXEC
) {
10799 host_flags
|= O_CLOEXEC
;
10801 ret
= get_errno(eventfd(arg1
, host_flags
));
10802 fd_trans_unregister(ret
);
10806 #endif /* CONFIG_EVENTFD */
10807 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10808 case TARGET_NR_fallocate
:
10809 #if TARGET_ABI_BITS == 32
10810 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
10811 target_offset64(arg5
, arg6
)));
10813 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
10817 #if defined(CONFIG_SYNC_FILE_RANGE)
10818 #if defined(TARGET_NR_sync_file_range)
10819 case TARGET_NR_sync_file_range
:
10820 #if TARGET_ABI_BITS == 32
10821 #if defined(TARGET_MIPS)
10822 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10823 target_offset64(arg5
, arg6
), arg7
));
10825 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
10826 target_offset64(arg4
, arg5
), arg6
));
10827 #endif /* !TARGET_MIPS */
10829 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
10833 #if defined(TARGET_NR_sync_file_range2)
10834 case TARGET_NR_sync_file_range2
:
10835 /* This is like sync_file_range but the arguments are reordered */
10836 #if TARGET_ABI_BITS == 32
10837 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
10838 target_offset64(arg5
, arg6
), arg2
));
10840 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
10845 #if defined(TARGET_NR_signalfd4)
10846 case TARGET_NR_signalfd4
:
10847 ret
= do_signalfd4(arg1
, arg2
, arg4
);
10850 #if defined(TARGET_NR_signalfd)
10851 case TARGET_NR_signalfd
:
10852 ret
= do_signalfd4(arg1
, arg2
, 0);
10855 #if defined(CONFIG_EPOLL)
10856 #if defined(TARGET_NR_epoll_create)
10857 case TARGET_NR_epoll_create
:
10858 ret
= get_errno(epoll_create(arg1
));
10861 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10862 case TARGET_NR_epoll_create1
:
10863 ret
= get_errno(epoll_create1(arg1
));
10866 #if defined(TARGET_NR_epoll_ctl)
10867 case TARGET_NR_epoll_ctl
:
10869 struct epoll_event ep
;
10870 struct epoll_event
*epp
= 0;
10872 struct target_epoll_event
*target_ep
;
10873 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
10876 ep
.events
= tswap32(target_ep
->events
);
10877 /* The epoll_data_t union is just opaque data to the kernel,
10878 * so we transfer all 64 bits across and need not worry what
10879 * actual data type it is.
10881 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
10882 unlock_user_struct(target_ep
, arg4
, 0);
10885 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
10890 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10891 #if defined(TARGET_NR_epoll_wait)
10892 case TARGET_NR_epoll_wait
:
10894 #if defined(TARGET_NR_epoll_pwait)
10895 case TARGET_NR_epoll_pwait
:
10898 struct target_epoll_event
*target_ep
;
10899 struct epoll_event
*ep
;
10901 int maxevents
= arg3
;
10902 int timeout
= arg4
;
10904 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
10905 maxevents
* sizeof(struct target_epoll_event
), 1);
10910 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
10913 #if defined(TARGET_NR_epoll_pwait)
10914 case TARGET_NR_epoll_pwait
:
10916 target_sigset_t
*target_set
;
10917 sigset_t _set
, *set
= &_set
;
10920 target_set
= lock_user(VERIFY_READ
, arg5
,
10921 sizeof(target_sigset_t
), 1);
10923 unlock_user(target_ep
, arg2
, 0);
10926 target_to_host_sigset(set
, target_set
);
10927 unlock_user(target_set
, arg5
, 0);
10932 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
10933 set
, SIGSET_T_SIZE
));
10937 #if defined(TARGET_NR_epoll_wait)
10938 case TARGET_NR_epoll_wait
:
10939 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
10944 ret
= -TARGET_ENOSYS
;
10946 if (!is_error(ret
)) {
10948 for (i
= 0; i
< ret
; i
++) {
10949 target_ep
[i
].events
= tswap32(ep
[i
].events
);
10950 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
10953 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
10958 #ifdef TARGET_NR_prlimit64
10959 case TARGET_NR_prlimit64
:
10961 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10962 struct target_rlimit64
*target_rnew
, *target_rold
;
10963 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
10964 int resource
= target_to_host_resource(arg2
);
10966 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10969 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10970 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10971 unlock_user_struct(target_rnew
, arg3
, 0);
10975 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10976 if (!is_error(ret
) && arg4
) {
10977 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10980 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10981 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10982 unlock_user_struct(target_rold
, arg4
, 1);
10987 #ifdef TARGET_NR_gethostname
10988 case TARGET_NR_gethostname
:
10990 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10992 ret
= get_errno(gethostname(name
, arg2
));
10993 unlock_user(name
, arg1
, arg2
);
10995 ret
= -TARGET_EFAULT
;
11000 #ifdef TARGET_NR_atomic_cmpxchg_32
11001 case TARGET_NR_atomic_cmpxchg_32
:
11003 /* should use start_exclusive from main.c */
11004 abi_ulong mem_value
;
11005 if (get_user_u32(mem_value
, arg6
)) {
11006 target_siginfo_t info
;
11007 info
.si_signo
= SIGSEGV
;
11009 info
.si_code
= TARGET_SEGV_MAPERR
;
11010 info
._sifields
._sigfault
._addr
= arg6
;
11011 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11015 if (mem_value
== arg2
)
11016 put_user_u32(arg1
, arg6
);
11021 #ifdef TARGET_NR_atomic_barrier
11022 case TARGET_NR_atomic_barrier
:
11024 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11030 #ifdef TARGET_NR_timer_create
11031 case TARGET_NR_timer_create
:
11033 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11035 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11038 int timer_index
= next_free_host_timer();
11040 if (timer_index
< 0) {
11041 ret
= -TARGET_EAGAIN
;
11043 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11046 phost_sevp
= &host_sevp
;
11047 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11053 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11057 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11066 #ifdef TARGET_NR_timer_settime
11067 case TARGET_NR_timer_settime
:
11069 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11070 * struct itimerspec * old_value */
11071 target_timer_t timerid
= get_timer_id(arg1
);
11075 } else if (arg3
== 0) {
11076 ret
= -TARGET_EINVAL
;
11078 timer_t htimer
= g_posix_timers
[timerid
];
11079 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11081 target_to_host_itimerspec(&hspec_new
, arg3
);
11083 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11084 host_to_target_itimerspec(arg2
, &hspec_old
);
11090 #ifdef TARGET_NR_timer_gettime
11091 case TARGET_NR_timer_gettime
:
11093 /* args: timer_t timerid, struct itimerspec *curr_value */
11094 target_timer_t timerid
= get_timer_id(arg1
);
11098 } else if (!arg2
) {
11099 ret
= -TARGET_EFAULT
;
11101 timer_t htimer
= g_posix_timers
[timerid
];
11102 struct itimerspec hspec
;
11103 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11105 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11106 ret
= -TARGET_EFAULT
;
11113 #ifdef TARGET_NR_timer_getoverrun
11114 case TARGET_NR_timer_getoverrun
:
11116 /* args: timer_t timerid */
11117 target_timer_t timerid
= get_timer_id(arg1
);
11122 timer_t htimer
= g_posix_timers
[timerid
];
11123 ret
= get_errno(timer_getoverrun(htimer
));
11125 fd_trans_unregister(ret
);
11130 #ifdef TARGET_NR_timer_delete
11131 case TARGET_NR_timer_delete
:
11133 /* args: timer_t timerid */
11134 target_timer_t timerid
= get_timer_id(arg1
);
11139 timer_t htimer
= g_posix_timers
[timerid
];
11140 ret
= get_errno(timer_delete(htimer
));
11141 g_posix_timers
[timerid
] = 0;
11147 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11148 case TARGET_NR_timerfd_create
:
11149 ret
= get_errno(timerfd_create(arg1
,
11150 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11154 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11155 case TARGET_NR_timerfd_gettime
:
11157 struct itimerspec its_curr
;
11159 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11161 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11168 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11169 case TARGET_NR_timerfd_settime
:
11171 struct itimerspec its_new
, its_old
, *p_new
;
11174 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11182 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11184 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11191 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11192 case TARGET_NR_ioprio_get
:
11193 ret
= get_errno(ioprio_get(arg1
, arg2
));
11197 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11198 case TARGET_NR_ioprio_set
:
11199 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11203 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11204 case TARGET_NR_setns
:
11205 ret
= get_errno(setns(arg1
, arg2
));
11208 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11209 case TARGET_NR_unshare
:
11210 ret
= get_errno(unshare(arg1
));
11216 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11217 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11218 unimplemented_nowarn
:
11220 ret
= -TARGET_ENOSYS
;
11225 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11228 print_syscall_ret(num
, ret
);
11231 ret
= -TARGET_EFAULT
;