4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
37 #include <linux/capability.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
109 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
110 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 #define _syscall0(type,name) \
128 static type name (void) \
130 return syscall(__NR_##name); \
133 #define _syscall1(type,name,type1,arg1) \
134 static type name (type1 arg1) \
136 return syscall(__NR_##name, arg1); \
139 #define _syscall2(type,name,type1,arg1,type2,arg2) \
140 static type name (type1 arg1,type2 arg2) \
142 return syscall(__NR_##name, arg1, arg2); \
145 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
146 static type name (type1 arg1,type2 arg2,type3 arg3) \
148 return syscall(__NR_##name, arg1, arg2, arg3); \
151 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
152 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
157 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
165 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5,type6,arg6) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_getcwd1 __NR_getcwd
176 #define __NR_sys_getdents __NR_getdents
177 #define __NR_sys_getdents64 __NR_getdents64
178 #define __NR_sys_getpriority __NR_getpriority
179 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
180 #define __NR_sys_syslog __NR_syslog
181 #define __NR_sys_tgkill __NR_tgkill
182 #define __NR_sys_tkill __NR_tkill
183 #define __NR_sys_futex __NR_futex
184 #define __NR_sys_inotify_init __NR_inotify_init
185 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
186 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
188 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
190 #define __NR__llseek __NR_lseek
193 /* Newer kernel ports have llseek() instead of _llseek() */
194 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
195 #define TARGET_NR__llseek TARGET_NR_llseek
199 _syscall0(int, gettid
)
201 /* This is a replacement for the host gettid() and must return a host
203 static int gettid(void) {
207 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
208 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
210 #if !defined(__NR_getdents) || \
211 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
212 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
214 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
215 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
216 loff_t
*, res
, uint
, wh
);
218 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
219 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
220 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
221 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
223 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
224 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
226 #ifdef __NR_exit_group
227 _syscall1(int,exit_group
,int,error_code
)
229 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
230 _syscall1(int,set_tid_address
,int *,tidptr
)
232 #if defined(TARGET_NR_futex) && defined(__NR_futex)
233 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
234 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
236 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
237 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
238 unsigned long *, user_mask_ptr
);
239 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
240 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
241 unsigned long *, user_mask_ptr
);
242 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
244 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
245 struct __user_cap_data_struct
*, data
);
246 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
247 struct __user_cap_data_struct
*, data
);
248 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
249 _syscall2(int, ioprio_get
, int, which
, int, who
)
251 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
252 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
254 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
255 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
258 static bitmask_transtbl fcntl_flags_tbl
[] = {
259 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
260 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
261 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
262 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
263 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
264 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
265 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
266 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
267 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
268 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
269 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
270 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
271 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
272 #if defined(O_DIRECT)
273 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
275 #if defined(O_NOATIME)
276 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
278 #if defined(O_CLOEXEC)
279 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
282 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
284 /* Don't terminate the list prematurely on 64-bit host+guest. */
285 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
286 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
291 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
292 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
293 typedef struct TargetFdTrans
{
294 TargetFdDataFunc host_to_target_data
;
295 TargetFdDataFunc target_to_host_data
;
296 TargetFdAddrFunc target_to_host_addr
;
299 static TargetFdTrans
**target_fd_trans
;
301 static unsigned int target_fd_max
;
303 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
305 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
306 return target_fd_trans
[fd
]->host_to_target_data
;
311 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
313 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
314 return target_fd_trans
[fd
]->target_to_host_addr
;
319 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
323 if (fd
>= target_fd_max
) {
324 oldmax
= target_fd_max
;
325 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
326 target_fd_trans
= g_renew(TargetFdTrans
*,
327 target_fd_trans
, target_fd_max
);
328 memset((void *)(target_fd_trans
+ oldmax
), 0,
329 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
331 target_fd_trans
[fd
] = trans
;
334 static void fd_trans_unregister(int fd
)
336 if (fd
>= 0 && fd
< target_fd_max
) {
337 target_fd_trans
[fd
] = NULL
;
341 static void fd_trans_dup(int oldfd
, int newfd
)
343 fd_trans_unregister(newfd
);
344 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
345 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/*
 * Emulate the kernel getcwd syscall on top of the libc wrapper: on
 * success the kernel call returns the path length *including* the
 * trailing NUL, on failure -1 (errno is set by getcwd()).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
358 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
361 * open(2) has extra parameter 'mode' when called with
364 if ((flags
& O_CREAT
) != 0) {
365 return (openat(dirfd
, pathname
, flags
, mode
));
367 return (openat(dirfd
, pathname
, flags
));
370 #ifdef TARGET_NR_utimensat
371 #ifdef CONFIG_UTIMENSAT
372 static int sys_utimensat(int dirfd
, const char *pathname
,
373 const struct timespec times
[2], int flags
)
375 if (pathname
== NULL
)
376 return futimens(dirfd
, times
);
378 return utimensat(dirfd
, pathname
, times
, flags
);
380 #elif defined(__NR_utimensat)
381 #define __NR_sys_utimensat __NR_utimensat
382 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
383 const struct timespec
*,tsp
,int,flags
)
385 static int sys_utimensat(int dirfd
, const char *pathname
,
386 const struct timespec times
[2], int flags
)
392 #endif /* TARGET_NR_utimensat */
394 #ifdef CONFIG_INOTIFY
395 #include <sys/inotify.h>
397 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
398 static int sys_inotify_init(void)
400 return (inotify_init());
403 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
404 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
406 return (inotify_add_watch(fd
, pathname
, mask
));
409 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
410 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
412 return (inotify_rm_watch(fd
, wd
));
415 #ifdef CONFIG_INOTIFY1
416 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
417 static int sys_inotify_init1(int flags
)
419 return (inotify_init1(flags
));
424 /* Userspace can usually survive runtime without inotify */
425 #undef TARGET_NR_inotify_init
426 #undef TARGET_NR_inotify_init1
427 #undef TARGET_NR_inotify_add_watch
428 #undef TARGET_NR_inotify_rm_watch
429 #endif /* CONFIG_INOTIFY */
431 #if defined(TARGET_NR_ppoll)
433 # define __NR_ppoll -1
435 #define __NR_sys_ppoll __NR_ppoll
436 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
437 struct timespec
*, timeout
, const sigset_t
*, sigmask
,
441 #if defined(TARGET_NR_pselect6)
442 #ifndef __NR_pselect6
443 # define __NR_pselect6 -1
445 #define __NR_sys_pselect6 __NR_pselect6
446 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
447 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
450 #if defined(TARGET_NR_prlimit64)
451 #ifndef __NR_prlimit64
452 # define __NR_prlimit64 -1
454 #define __NR_sys_prlimit64 __NR_prlimit64
455 /* The glibc rlimit structure may not be that used by the underlying syscall */
456 struct host_rlimit64
{
460 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
461 const struct host_rlimit64
*, new_limit
,
462 struct host_rlimit64
*, old_limit
)
466 #if defined(TARGET_NR_timer_create)
467 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
468 static timer_t g_posix_timers
[32] = { 0, } ;
470 static inline int next_free_host_timer(void)
473 /* FIXME: Does finding the next free slot require a lock? */
474 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
475 if (g_posix_timers
[k
] == 0) {
476 g_posix_timers
[k
] = (timer_t
) 1;
484 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
486 static inline int regpairs_aligned(void *cpu_env
) {
487 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
489 #elif defined(TARGET_MIPS)
490 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
491 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
492 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
493 * of registers which translates to the same as ARM/MIPS, because we start with
495 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
497 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
500 #define ERRNO_TABLE_SIZE 1200
502 /* target_to_host_errno_table[] is initialized from
503 * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
508 * This list is the union of errno values overridden in asm-<arch>/errno.h
509 * minus the errnos that are not actually generic to all archs.
511 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
512 [EAGAIN
] = TARGET_EAGAIN
,
513 [EIDRM
] = TARGET_EIDRM
,
514 [ECHRNG
] = TARGET_ECHRNG
,
515 [EL2NSYNC
] = TARGET_EL2NSYNC
,
516 [EL3HLT
] = TARGET_EL3HLT
,
517 [EL3RST
] = TARGET_EL3RST
,
518 [ELNRNG
] = TARGET_ELNRNG
,
519 [EUNATCH
] = TARGET_EUNATCH
,
520 [ENOCSI
] = TARGET_ENOCSI
,
521 [EL2HLT
] = TARGET_EL2HLT
,
522 [EDEADLK
] = TARGET_EDEADLK
,
523 [ENOLCK
] = TARGET_ENOLCK
,
524 [EBADE
] = TARGET_EBADE
,
525 [EBADR
] = TARGET_EBADR
,
526 [EXFULL
] = TARGET_EXFULL
,
527 [ENOANO
] = TARGET_ENOANO
,
528 [EBADRQC
] = TARGET_EBADRQC
,
529 [EBADSLT
] = TARGET_EBADSLT
,
530 [EBFONT
] = TARGET_EBFONT
,
531 [ENOSTR
] = TARGET_ENOSTR
,
532 [ENODATA
] = TARGET_ENODATA
,
533 [ETIME
] = TARGET_ETIME
,
534 [ENOSR
] = TARGET_ENOSR
,
535 [ENONET
] = TARGET_ENONET
,
536 [ENOPKG
] = TARGET_ENOPKG
,
537 [EREMOTE
] = TARGET_EREMOTE
,
538 [ENOLINK
] = TARGET_ENOLINK
,
539 [EADV
] = TARGET_EADV
,
540 [ESRMNT
] = TARGET_ESRMNT
,
541 [ECOMM
] = TARGET_ECOMM
,
542 [EPROTO
] = TARGET_EPROTO
,
543 [EDOTDOT
] = TARGET_EDOTDOT
,
544 [EMULTIHOP
] = TARGET_EMULTIHOP
,
545 [EBADMSG
] = TARGET_EBADMSG
,
546 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
547 [EOVERFLOW
] = TARGET_EOVERFLOW
,
548 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
549 [EBADFD
] = TARGET_EBADFD
,
550 [EREMCHG
] = TARGET_EREMCHG
,
551 [ELIBACC
] = TARGET_ELIBACC
,
552 [ELIBBAD
] = TARGET_ELIBBAD
,
553 [ELIBSCN
] = TARGET_ELIBSCN
,
554 [ELIBMAX
] = TARGET_ELIBMAX
,
555 [ELIBEXEC
] = TARGET_ELIBEXEC
,
556 [EILSEQ
] = TARGET_EILSEQ
,
557 [ENOSYS
] = TARGET_ENOSYS
,
558 [ELOOP
] = TARGET_ELOOP
,
559 [ERESTART
] = TARGET_ERESTART
,
560 [ESTRPIPE
] = TARGET_ESTRPIPE
,
561 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
562 [EUSERS
] = TARGET_EUSERS
,
563 [ENOTSOCK
] = TARGET_ENOTSOCK
,
564 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
565 [EMSGSIZE
] = TARGET_EMSGSIZE
,
566 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
567 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
568 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
569 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
570 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
571 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
572 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
573 [EADDRINUSE
] = TARGET_EADDRINUSE
,
574 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
575 [ENETDOWN
] = TARGET_ENETDOWN
,
576 [ENETUNREACH
] = TARGET_ENETUNREACH
,
577 [ENETRESET
] = TARGET_ENETRESET
,
578 [ECONNABORTED
] = TARGET_ECONNABORTED
,
579 [ECONNRESET
] = TARGET_ECONNRESET
,
580 [ENOBUFS
] = TARGET_ENOBUFS
,
581 [EISCONN
] = TARGET_EISCONN
,
582 [ENOTCONN
] = TARGET_ENOTCONN
,
583 [EUCLEAN
] = TARGET_EUCLEAN
,
584 [ENOTNAM
] = TARGET_ENOTNAM
,
585 [ENAVAIL
] = TARGET_ENAVAIL
,
586 [EISNAM
] = TARGET_EISNAM
,
587 [EREMOTEIO
] = TARGET_EREMOTEIO
,
588 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
589 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
590 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
591 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
592 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
593 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
594 [EALREADY
] = TARGET_EALREADY
,
595 [EINPROGRESS
] = TARGET_EINPROGRESS
,
596 [ESTALE
] = TARGET_ESTALE
,
597 [ECANCELED
] = TARGET_ECANCELED
,
598 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
599 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
601 [ENOKEY
] = TARGET_ENOKEY
,
604 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
607 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
610 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
613 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
615 #ifdef ENOTRECOVERABLE
616 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
620 static inline int host_to_target_errno(int err
)
622 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
623 host_to_target_errno_table
[err
]) {
624 return host_to_target_errno_table
[err
];
629 static inline int target_to_host_errno(int err
)
631 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
632 target_to_host_errno_table
[err
]) {
633 return target_to_host_errno_table
[err
];
638 static inline abi_long
get_errno(abi_long ret
)
641 return -host_to_target_errno(errno
);
646 static inline int is_error(abi_long ret
)
648 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
651 char *target_strerror(int err
)
653 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
656 return strerror(target_to_host_errno(err
));
659 static inline int host_to_target_sock_type(int host_type
)
663 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
665 target_type
= TARGET_SOCK_DGRAM
;
668 target_type
= TARGET_SOCK_STREAM
;
671 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
675 #if defined(SOCK_CLOEXEC)
676 if (host_type
& SOCK_CLOEXEC
) {
677 target_type
|= TARGET_SOCK_CLOEXEC
;
681 #if defined(SOCK_NONBLOCK)
682 if (host_type
& SOCK_NONBLOCK
) {
683 target_type
|= TARGET_SOCK_NONBLOCK
;
690 static abi_ulong target_brk
;
691 static abi_ulong target_original_brk
;
692 static abi_ulong brk_page
;
694 void target_set_brk(abi_ulong new_brk
)
696 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
697 brk_page
= HOST_PAGE_ALIGN(target_brk
);
700 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
701 #define DEBUGF_BRK(message, args...)
703 /* do_brk() must return target values and target errnos. */
704 abi_long
do_brk(abi_ulong new_brk
)
706 abi_long mapped_addr
;
709 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
712 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
715 if (new_brk
< target_original_brk
) {
716 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
721 /* If the new brk is less than the highest page reserved to the
722 * target heap allocation, set it and we're almost done... */
723 if (new_brk
<= brk_page
) {
724 /* Heap contents are initialized to zero, as for anonymous
726 if (new_brk
> target_brk
) {
727 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
729 target_brk
= new_brk
;
730 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
734 /* We need to allocate more memory after the brk... Note that
735 * we don't use MAP_FIXED because that will map over the top of
736 * any existing mapping (like the one with the host libc or qemu
737 * itself); instead we treat "mapped but at wrong address" as
738 * a failure and unmap again.
740 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
741 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
742 PROT_READ
|PROT_WRITE
,
743 MAP_ANON
|MAP_PRIVATE
, 0, 0));
745 if (mapped_addr
== brk_page
) {
746 /* Heap contents are initialized to zero, as for anonymous
747 * mapped pages. Technically the new pages are already
748 * initialized to zero since they *are* anonymous mapped
749 * pages, however we have to take care with the contents that
750 * come from the remaining part of the previous page: it may
751 * contains garbage data due to a previous heap usage (grown
753 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
755 target_brk
= new_brk
;
756 brk_page
= HOST_PAGE_ALIGN(target_brk
);
757 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
760 } else if (mapped_addr
!= -1) {
761 /* Mapped but at wrong address, meaning there wasn't actually
762 * enough space for this brk.
764 target_munmap(mapped_addr
, new_alloc_size
);
766 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
769 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
772 #if defined(TARGET_ALPHA)
773 /* We (partially) emulate OSF/1 on Alpha, which requires we
774 return a proper errno, not an unchanged brk value. */
775 return -TARGET_ENOMEM
;
777 /* For everything else, return the previous break. */
781 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
782 abi_ulong target_fds_addr
,
786 abi_ulong b
, *target_fds
;
788 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
789 if (!(target_fds
= lock_user(VERIFY_READ
,
791 sizeof(abi_ulong
) * nw
,
793 return -TARGET_EFAULT
;
797 for (i
= 0; i
< nw
; i
++) {
798 /* grab the abi_ulong */
799 __get_user(b
, &target_fds
[i
]);
800 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
801 /* check the bit inside the abi_ulong */
808 unlock_user(target_fds
, target_fds_addr
, 0);
813 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
814 abi_ulong target_fds_addr
,
817 if (target_fds_addr
) {
818 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
819 return -TARGET_EFAULT
;
827 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
833 abi_ulong
*target_fds
;
835 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
836 if (!(target_fds
= lock_user(VERIFY_WRITE
,
838 sizeof(abi_ulong
) * nw
,
840 return -TARGET_EFAULT
;
843 for (i
= 0; i
< nw
; i
++) {
845 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
846 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
849 __put_user(v
, &target_fds
[i
]);
852 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
857 #if defined(__alpha__)
863 static inline abi_long
host_to_target_clock_t(long ticks
)
865 #if HOST_HZ == TARGET_HZ
868 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
872 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
873 const struct rusage
*rusage
)
875 struct target_rusage
*target_rusage
;
877 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
878 return -TARGET_EFAULT
;
879 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
880 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
881 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
882 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
883 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
884 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
885 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
886 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
887 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
888 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
889 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
890 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
891 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
892 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
893 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
894 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
895 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
896 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
897 unlock_user_struct(target_rusage
, target_addr
, 1);
902 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
904 abi_ulong target_rlim_swap
;
907 target_rlim_swap
= tswapal(target_rlim
);
908 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
909 return RLIM_INFINITY
;
911 result
= target_rlim_swap
;
912 if (target_rlim_swap
!= (rlim_t
)result
)
913 return RLIM_INFINITY
;
918 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
920 abi_ulong target_rlim_swap
;
923 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
924 target_rlim_swap
= TARGET_RLIM_INFINITY
;
926 target_rlim_swap
= rlim
;
927 result
= tswapal(target_rlim_swap
);
932 static inline int target_to_host_resource(int code
)
935 case TARGET_RLIMIT_AS
:
937 case TARGET_RLIMIT_CORE
:
939 case TARGET_RLIMIT_CPU
:
941 case TARGET_RLIMIT_DATA
:
943 case TARGET_RLIMIT_FSIZE
:
945 case TARGET_RLIMIT_LOCKS
:
947 case TARGET_RLIMIT_MEMLOCK
:
948 return RLIMIT_MEMLOCK
;
949 case TARGET_RLIMIT_MSGQUEUE
:
950 return RLIMIT_MSGQUEUE
;
951 case TARGET_RLIMIT_NICE
:
953 case TARGET_RLIMIT_NOFILE
:
954 return RLIMIT_NOFILE
;
955 case TARGET_RLIMIT_NPROC
:
957 case TARGET_RLIMIT_RSS
:
959 case TARGET_RLIMIT_RTPRIO
:
960 return RLIMIT_RTPRIO
;
961 case TARGET_RLIMIT_SIGPENDING
:
962 return RLIMIT_SIGPENDING
;
963 case TARGET_RLIMIT_STACK
:
970 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
971 abi_ulong target_tv_addr
)
973 struct target_timeval
*target_tv
;
975 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
976 return -TARGET_EFAULT
;
978 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
979 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
981 unlock_user_struct(target_tv
, target_tv_addr
, 0);
986 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
987 const struct timeval
*tv
)
989 struct target_timeval
*target_tv
;
991 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
992 return -TARGET_EFAULT
;
994 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
995 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
997 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1002 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1003 abi_ulong target_tz_addr
)
1005 struct target_timezone
*target_tz
;
1007 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1008 return -TARGET_EFAULT
;
1011 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1012 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1014 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1019 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1022 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1023 abi_ulong target_mq_attr_addr
)
1025 struct target_mq_attr
*target_mq_attr
;
1027 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1028 target_mq_attr_addr
, 1))
1029 return -TARGET_EFAULT
;
1031 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1032 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1033 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1034 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1036 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1041 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1042 const struct mq_attr
*attr
)
1044 struct target_mq_attr
*target_mq_attr
;
1046 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1047 target_mq_attr_addr
, 0))
1048 return -TARGET_EFAULT
;
1050 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1051 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1052 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1053 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1055 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1061 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1062 /* do_select() must return target values and target errnos. */
1063 static abi_long
do_select(int n
,
1064 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1065 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1067 fd_set rfds
, wfds
, efds
;
1068 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1069 struct timeval tv
, *tv_ptr
;
1072 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1076 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1080 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1085 if (target_tv_addr
) {
1086 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1087 return -TARGET_EFAULT
;
1093 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1095 if (!is_error(ret
)) {
1096 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1097 return -TARGET_EFAULT
;
1098 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1099 return -TARGET_EFAULT
;
1100 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1101 return -TARGET_EFAULT
;
1103 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1104 return -TARGET_EFAULT
;
1111 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1114 return pipe2(host_pipe
, flags
);
1120 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1121 int flags
, int is_pipe2
)
1125 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1128 return get_errno(ret
);
1130 /* Several targets have special calling conventions for the original
1131 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1133 #if defined(TARGET_ALPHA)
1134 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1135 return host_pipe
[0];
1136 #elif defined(TARGET_MIPS)
1137 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1138 return host_pipe
[0];
1139 #elif defined(TARGET_SH4)
1140 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1141 return host_pipe
[0];
1142 #elif defined(TARGET_SPARC)
1143 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1144 return host_pipe
[0];
1148 if (put_user_s32(host_pipe
[0], pipedes
)
1149 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1150 return -TARGET_EFAULT
;
1151 return get_errno(ret
);
1154 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1155 abi_ulong target_addr
,
1158 struct target_ip_mreqn
*target_smreqn
;
1160 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1162 return -TARGET_EFAULT
;
1163 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1164 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1165 if (len
== sizeof(struct target_ip_mreqn
))
1166 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1167 unlock_user(target_smreqn
, target_addr
, 0);
1172 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1173 abi_ulong target_addr
,
1176 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1177 sa_family_t sa_family
;
1178 struct target_sockaddr
*target_saddr
;
1180 if (fd_trans_target_to_host_addr(fd
)) {
1181 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1184 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1186 return -TARGET_EFAULT
;
1188 sa_family
= tswap16(target_saddr
->sa_family
);
1190 /* Oops. The caller might send a incomplete sun_path; sun_path
1191 * must be terminated by \0 (see the manual page), but
1192 * unfortunately it is quite common to specify sockaddr_un
1193 * length as "strlen(x->sun_path)" while it should be
1194 * "strlen(...) + 1". We'll fix that here if needed.
1195 * Linux kernel has a similar feature.
1198 if (sa_family
== AF_UNIX
) {
1199 if (len
< unix_maxlen
&& len
> 0) {
1200 char *cp
= (char*)target_saddr
;
1202 if ( cp
[len
-1] && !cp
[len
] )
1205 if (len
> unix_maxlen
)
1209 memcpy(addr
, target_saddr
, len
);
1210 addr
->sa_family
= sa_family
;
1211 if (sa_family
== AF_PACKET
) {
1212 struct target_sockaddr_ll
*lladdr
;
1214 lladdr
= (struct target_sockaddr_ll
*)addr
;
1215 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1216 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1218 unlock_user(target_saddr
, target_addr
, 0);
1223 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1224 struct sockaddr
*addr
,
1227 struct target_sockaddr
*target_saddr
;
1229 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1231 return -TARGET_EFAULT
;
1232 memcpy(target_saddr
, addr
, len
);
1233 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1234 unlock_user(target_saddr
, target_addr
, len
);
1239 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1240 struct target_msghdr
*target_msgh
)
1242 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1243 abi_long msg_controllen
;
1244 abi_ulong target_cmsg_addr
;
1245 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1246 socklen_t space
= 0;
1248 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1249 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1251 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1252 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1253 target_cmsg_start
= target_cmsg
;
1255 return -TARGET_EFAULT
;
1257 while (cmsg
&& target_cmsg
) {
1258 void *data
= CMSG_DATA(cmsg
);
1259 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1261 int len
= tswapal(target_cmsg
->cmsg_len
)
1262 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1264 space
+= CMSG_SPACE(len
);
1265 if (space
> msgh
->msg_controllen
) {
1266 space
-= CMSG_SPACE(len
);
1267 /* This is a QEMU bug, since we allocated the payload
1268 * area ourselves (unlike overflow in host-to-target
1269 * conversion, which is just the guest giving us a buffer
1270 * that's too small). It can't happen for the payload types
1271 * we currently support; if it becomes an issue in future
1272 * we would need to improve our allocation strategy to
1273 * something more intelligent than "twice the size of the
1274 * target buffer we're reading from".
1276 gemu_log("Host cmsg overflow\n");
1280 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1281 cmsg
->cmsg_level
= SOL_SOCKET
;
1283 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1285 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1286 cmsg
->cmsg_len
= CMSG_LEN(len
);
1288 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1289 int *fd
= (int *)data
;
1290 int *target_fd
= (int *)target_data
;
1291 int i
, numfds
= len
/ sizeof(int);
1293 for (i
= 0; i
< numfds
; i
++) {
1294 __get_user(fd
[i
], target_fd
+ i
);
1296 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1297 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1298 struct ucred
*cred
= (struct ucred
*)data
;
1299 struct target_ucred
*target_cred
=
1300 (struct target_ucred
*)target_data
;
1302 __get_user(cred
->pid
, &target_cred
->pid
);
1303 __get_user(cred
->uid
, &target_cred
->uid
);
1304 __get_user(cred
->gid
, &target_cred
->gid
);
1306 gemu_log("Unsupported ancillary data: %d/%d\n",
1307 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1308 memcpy(data
, target_data
, len
);
1311 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1312 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1315 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1317 msgh
->msg_controllen
= space
;
1321 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1322 struct msghdr
*msgh
)
1324 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1325 abi_long msg_controllen
;
1326 abi_ulong target_cmsg_addr
;
1327 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1328 socklen_t space
= 0;
1330 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1331 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1333 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1334 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1335 target_cmsg_start
= target_cmsg
;
1337 return -TARGET_EFAULT
;
1339 while (cmsg
&& target_cmsg
) {
1340 void *data
= CMSG_DATA(cmsg
);
1341 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1343 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1344 int tgt_len
, tgt_space
;
1346 /* We never copy a half-header but may copy half-data;
1347 * this is Linux's behaviour in put_cmsg(). Note that
1348 * truncation here is a guest problem (which we report
1349 * to the guest via the CTRUNC bit), unlike truncation
1350 * in target_to_host_cmsg, which is a QEMU bug.
1352 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1353 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1357 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1358 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1360 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1362 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1364 tgt_len
= TARGET_CMSG_LEN(len
);
1366 /* Payload types which need a different size of payload on
1367 * the target must adjust tgt_len here.
1369 switch (cmsg
->cmsg_level
) {
1371 switch (cmsg
->cmsg_type
) {
1373 tgt_len
= sizeof(struct target_timeval
);
1382 if (msg_controllen
< tgt_len
) {
1383 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1384 tgt_len
= msg_controllen
;
1387 /* We must now copy-and-convert len bytes of payload
1388 * into tgt_len bytes of destination space. Bear in mind
1389 * that in both source and destination we may be dealing
1390 * with a truncated value!
1392 switch (cmsg
->cmsg_level
) {
1394 switch (cmsg
->cmsg_type
) {
1397 int *fd
= (int *)data
;
1398 int *target_fd
= (int *)target_data
;
1399 int i
, numfds
= tgt_len
/ sizeof(int);
1401 for (i
= 0; i
< numfds
; i
++) {
1402 __put_user(fd
[i
], target_fd
+ i
);
1408 struct timeval
*tv
= (struct timeval
*)data
;
1409 struct target_timeval
*target_tv
=
1410 (struct target_timeval
*)target_data
;
1412 if (len
!= sizeof(struct timeval
) ||
1413 tgt_len
!= sizeof(struct target_timeval
)) {
1417 /* copy struct timeval to target */
1418 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1419 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1422 case SCM_CREDENTIALS
:
1424 struct ucred
*cred
= (struct ucred
*)data
;
1425 struct target_ucred
*target_cred
=
1426 (struct target_ucred
*)target_data
;
1428 __put_user(cred
->pid
, &target_cred
->pid
);
1429 __put_user(cred
->uid
, &target_cred
->uid
);
1430 __put_user(cred
->gid
, &target_cred
->gid
);
1440 gemu_log("Unsupported ancillary data: %d/%d\n",
1441 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1442 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1443 if (tgt_len
> len
) {
1444 memset(target_data
+ len
, 0, tgt_len
- len
);
1448 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1449 tgt_space
= TARGET_CMSG_SPACE(len
);
1450 if (msg_controllen
< tgt_space
) {
1451 tgt_space
= msg_controllen
;
1453 msg_controllen
-= tgt_space
;
1455 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1456 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1459 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1461 target_msgh
->msg_controllen
= tswapal(space
);
1465 /* do_setsockopt() Must return target values and target errnos. */
1466 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1467 abi_ulong optval_addr
, socklen_t optlen
)
1471 struct ip_mreqn
*ip_mreq
;
1472 struct ip_mreq_source
*ip_mreq_source
;
1476 /* TCP options all take an 'int' value. */
1477 if (optlen
< sizeof(uint32_t))
1478 return -TARGET_EINVAL
;
1480 if (get_user_u32(val
, optval_addr
))
1481 return -TARGET_EFAULT
;
1482 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1489 case IP_ROUTER_ALERT
:
1493 case IP_MTU_DISCOVER
:
1499 case IP_MULTICAST_TTL
:
1500 case IP_MULTICAST_LOOP
:
1502 if (optlen
>= sizeof(uint32_t)) {
1503 if (get_user_u32(val
, optval_addr
))
1504 return -TARGET_EFAULT
;
1505 } else if (optlen
>= 1) {
1506 if (get_user_u8(val
, optval_addr
))
1507 return -TARGET_EFAULT
;
1509 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1511 case IP_ADD_MEMBERSHIP
:
1512 case IP_DROP_MEMBERSHIP
:
1513 if (optlen
< sizeof (struct target_ip_mreq
) ||
1514 optlen
> sizeof (struct target_ip_mreqn
))
1515 return -TARGET_EINVAL
;
1517 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1518 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1519 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1522 case IP_BLOCK_SOURCE
:
1523 case IP_UNBLOCK_SOURCE
:
1524 case IP_ADD_SOURCE_MEMBERSHIP
:
1525 case IP_DROP_SOURCE_MEMBERSHIP
:
1526 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1527 return -TARGET_EINVAL
;
1529 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1530 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1531 unlock_user (ip_mreq_source
, optval_addr
, 0);
1540 case IPV6_MTU_DISCOVER
:
1543 case IPV6_RECVPKTINFO
:
1545 if (optlen
< sizeof(uint32_t)) {
1546 return -TARGET_EINVAL
;
1548 if (get_user_u32(val
, optval_addr
)) {
1549 return -TARGET_EFAULT
;
1551 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1552 &val
, sizeof(val
)));
1561 /* struct icmp_filter takes an u32 value */
1562 if (optlen
< sizeof(uint32_t)) {
1563 return -TARGET_EINVAL
;
1566 if (get_user_u32(val
, optval_addr
)) {
1567 return -TARGET_EFAULT
;
1569 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1570 &val
, sizeof(val
)));
1577 case TARGET_SOL_SOCKET
:
1579 case TARGET_SO_RCVTIMEO
:
1583 optname
= SO_RCVTIMEO
;
1586 if (optlen
!= sizeof(struct target_timeval
)) {
1587 return -TARGET_EINVAL
;
1590 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1591 return -TARGET_EFAULT
;
1594 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1598 case TARGET_SO_SNDTIMEO
:
1599 optname
= SO_SNDTIMEO
;
1601 case TARGET_SO_ATTACH_FILTER
:
1603 struct target_sock_fprog
*tfprog
;
1604 struct target_sock_filter
*tfilter
;
1605 struct sock_fprog fprog
;
1606 struct sock_filter
*filter
;
1609 if (optlen
!= sizeof(*tfprog
)) {
1610 return -TARGET_EINVAL
;
1612 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
1613 return -TARGET_EFAULT
;
1615 if (!lock_user_struct(VERIFY_READ
, tfilter
,
1616 tswapal(tfprog
->filter
), 0)) {
1617 unlock_user_struct(tfprog
, optval_addr
, 1);
1618 return -TARGET_EFAULT
;
1621 fprog
.len
= tswap16(tfprog
->len
);
1622 filter
= g_try_new(struct sock_filter
, fprog
.len
);
1623 if (filter
== NULL
) {
1624 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1625 unlock_user_struct(tfprog
, optval_addr
, 1);
1626 return -TARGET_ENOMEM
;
1628 for (i
= 0; i
< fprog
.len
; i
++) {
1629 filter
[i
].code
= tswap16(tfilter
[i
].code
);
1630 filter
[i
].jt
= tfilter
[i
].jt
;
1631 filter
[i
].jf
= tfilter
[i
].jf
;
1632 filter
[i
].k
= tswap32(tfilter
[i
].k
);
1634 fprog
.filter
= filter
;
1636 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
1637 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
1640 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
1641 unlock_user_struct(tfprog
, optval_addr
, 1);
1644 case TARGET_SO_BINDTODEVICE
:
1646 char *dev_ifname
, *addr_ifname
;
1648 if (optlen
> IFNAMSIZ
- 1) {
1649 optlen
= IFNAMSIZ
- 1;
1651 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1653 return -TARGET_EFAULT
;
1655 optname
= SO_BINDTODEVICE
;
1656 addr_ifname
= alloca(IFNAMSIZ
);
1657 memcpy(addr_ifname
, dev_ifname
, optlen
);
1658 addr_ifname
[optlen
] = 0;
1659 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1660 addr_ifname
, optlen
));
1661 unlock_user (dev_ifname
, optval_addr
, 0);
1664 /* Options with 'int' argument. */
1665 case TARGET_SO_DEBUG
:
1668 case TARGET_SO_REUSEADDR
:
1669 optname
= SO_REUSEADDR
;
1671 case TARGET_SO_TYPE
:
1674 case TARGET_SO_ERROR
:
1677 case TARGET_SO_DONTROUTE
:
1678 optname
= SO_DONTROUTE
;
1680 case TARGET_SO_BROADCAST
:
1681 optname
= SO_BROADCAST
;
1683 case TARGET_SO_SNDBUF
:
1684 optname
= SO_SNDBUF
;
1686 case TARGET_SO_SNDBUFFORCE
:
1687 optname
= SO_SNDBUFFORCE
;
1689 case TARGET_SO_RCVBUF
:
1690 optname
= SO_RCVBUF
;
1692 case TARGET_SO_RCVBUFFORCE
:
1693 optname
= SO_RCVBUFFORCE
;
1695 case TARGET_SO_KEEPALIVE
:
1696 optname
= SO_KEEPALIVE
;
1698 case TARGET_SO_OOBINLINE
:
1699 optname
= SO_OOBINLINE
;
1701 case TARGET_SO_NO_CHECK
:
1702 optname
= SO_NO_CHECK
;
1704 case TARGET_SO_PRIORITY
:
1705 optname
= SO_PRIORITY
;
1708 case TARGET_SO_BSDCOMPAT
:
1709 optname
= SO_BSDCOMPAT
;
1712 case TARGET_SO_PASSCRED
:
1713 optname
= SO_PASSCRED
;
1715 case TARGET_SO_PASSSEC
:
1716 optname
= SO_PASSSEC
;
1718 case TARGET_SO_TIMESTAMP
:
1719 optname
= SO_TIMESTAMP
;
1721 case TARGET_SO_RCVLOWAT
:
1722 optname
= SO_RCVLOWAT
;
1728 if (optlen
< sizeof(uint32_t))
1729 return -TARGET_EINVAL
;
1731 if (get_user_u32(val
, optval_addr
))
1732 return -TARGET_EFAULT
;
1733 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1737 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1738 ret
= -TARGET_ENOPROTOOPT
;
1743 /* do_getsockopt() Must return target values and target errnos. */
1744 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1745 abi_ulong optval_addr
, abi_ulong optlen
)
1752 case TARGET_SOL_SOCKET
:
1755 /* These don't just return a single integer */
1756 case TARGET_SO_LINGER
:
1757 case TARGET_SO_RCVTIMEO
:
1758 case TARGET_SO_SNDTIMEO
:
1759 case TARGET_SO_PEERNAME
:
1761 case TARGET_SO_PEERCRED
: {
1764 struct target_ucred
*tcr
;
1766 if (get_user_u32(len
, optlen
)) {
1767 return -TARGET_EFAULT
;
1770 return -TARGET_EINVAL
;
1774 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1782 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1783 return -TARGET_EFAULT
;
1785 __put_user(cr
.pid
, &tcr
->pid
);
1786 __put_user(cr
.uid
, &tcr
->uid
);
1787 __put_user(cr
.gid
, &tcr
->gid
);
1788 unlock_user_struct(tcr
, optval_addr
, 1);
1789 if (put_user_u32(len
, optlen
)) {
1790 return -TARGET_EFAULT
;
1794 /* Options with 'int' argument. */
1795 case TARGET_SO_DEBUG
:
1798 case TARGET_SO_REUSEADDR
:
1799 optname
= SO_REUSEADDR
;
1801 case TARGET_SO_TYPE
:
1804 case TARGET_SO_ERROR
:
1807 case TARGET_SO_DONTROUTE
:
1808 optname
= SO_DONTROUTE
;
1810 case TARGET_SO_BROADCAST
:
1811 optname
= SO_BROADCAST
;
1813 case TARGET_SO_SNDBUF
:
1814 optname
= SO_SNDBUF
;
1816 case TARGET_SO_RCVBUF
:
1817 optname
= SO_RCVBUF
;
1819 case TARGET_SO_KEEPALIVE
:
1820 optname
= SO_KEEPALIVE
;
1822 case TARGET_SO_OOBINLINE
:
1823 optname
= SO_OOBINLINE
;
1825 case TARGET_SO_NO_CHECK
:
1826 optname
= SO_NO_CHECK
;
1828 case TARGET_SO_PRIORITY
:
1829 optname
= SO_PRIORITY
;
1832 case TARGET_SO_BSDCOMPAT
:
1833 optname
= SO_BSDCOMPAT
;
1836 case TARGET_SO_PASSCRED
:
1837 optname
= SO_PASSCRED
;
1839 case TARGET_SO_TIMESTAMP
:
1840 optname
= SO_TIMESTAMP
;
1842 case TARGET_SO_RCVLOWAT
:
1843 optname
= SO_RCVLOWAT
;
1845 case TARGET_SO_ACCEPTCONN
:
1846 optname
= SO_ACCEPTCONN
;
1853 /* TCP options all take an 'int' value. */
1855 if (get_user_u32(len
, optlen
))
1856 return -TARGET_EFAULT
;
1858 return -TARGET_EINVAL
;
1860 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1863 if (optname
== SO_TYPE
) {
1864 val
= host_to_target_sock_type(val
);
1869 if (put_user_u32(val
, optval_addr
))
1870 return -TARGET_EFAULT
;
1872 if (put_user_u8(val
, optval_addr
))
1873 return -TARGET_EFAULT
;
1875 if (put_user_u32(len
, optlen
))
1876 return -TARGET_EFAULT
;
1883 case IP_ROUTER_ALERT
:
1887 case IP_MTU_DISCOVER
:
1893 case IP_MULTICAST_TTL
:
1894 case IP_MULTICAST_LOOP
:
1895 if (get_user_u32(len
, optlen
))
1896 return -TARGET_EFAULT
;
1898 return -TARGET_EINVAL
;
1900 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1903 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1905 if (put_user_u32(len
, optlen
)
1906 || put_user_u8(val
, optval_addr
))
1907 return -TARGET_EFAULT
;
1909 if (len
> sizeof(int))
1911 if (put_user_u32(len
, optlen
)
1912 || put_user_u32(val
, optval_addr
))
1913 return -TARGET_EFAULT
;
1917 ret
= -TARGET_ENOPROTOOPT
;
1923 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1925 ret
= -TARGET_EOPNOTSUPP
;
1931 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1932 int count
, int copy
)
1934 struct target_iovec
*target_vec
;
1936 abi_ulong total_len
, max_len
;
1939 bool bad_address
= false;
1945 if (count
< 0 || count
> IOV_MAX
) {
1950 vec
= g_try_new0(struct iovec
, count
);
1956 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1957 count
* sizeof(struct target_iovec
), 1);
1958 if (target_vec
== NULL
) {
1963 /* ??? If host page size > target page size, this will result in a
1964 value larger than what we can actually support. */
1965 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1968 for (i
= 0; i
< count
; i
++) {
1969 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1970 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1975 } else if (len
== 0) {
1976 /* Zero length pointer is ignored. */
1977 vec
[i
].iov_base
= 0;
1979 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1980 /* If the first buffer pointer is bad, this is a fault. But
1981 * subsequent bad buffers will result in a partial write; this
1982 * is realized by filling the vector with null pointers and
1984 if (!vec
[i
].iov_base
) {
1995 if (len
> max_len
- total_len
) {
1996 len
= max_len
- total_len
;
1999 vec
[i
].iov_len
= len
;
2003 unlock_user(target_vec
, target_addr
, 0);
2008 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2009 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2012 unlock_user(target_vec
, target_addr
, 0);
2019 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2020 int count
, int copy
)
2022 struct target_iovec
*target_vec
;
2025 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2026 count
* sizeof(struct target_iovec
), 1);
2028 for (i
= 0; i
< count
; i
++) {
2029 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2030 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2034 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2036 unlock_user(target_vec
, target_addr
, 0);
2042 static inline int target_to_host_sock_type(int *type
)
2045 int target_type
= *type
;
2047 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2048 case TARGET_SOCK_DGRAM
:
2049 host_type
= SOCK_DGRAM
;
2051 case TARGET_SOCK_STREAM
:
2052 host_type
= SOCK_STREAM
;
2055 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2058 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2059 #if defined(SOCK_CLOEXEC)
2060 host_type
|= SOCK_CLOEXEC
;
2062 return -TARGET_EINVAL
;
2065 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2066 #if defined(SOCK_NONBLOCK)
2067 host_type
|= SOCK_NONBLOCK
;
2068 #elif !defined(O_NONBLOCK)
2069 return -TARGET_EINVAL
;
2076 /* Try to emulate socket type flags after socket creation. */
2077 static int sock_flags_fixup(int fd
, int target_type
)
2079 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2080 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2081 int flags
= fcntl(fd
, F_GETFL
);
2082 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2084 return -TARGET_EINVAL
;
2091 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
2092 abi_ulong target_addr
,
2095 struct sockaddr
*addr
= host_addr
;
2096 struct target_sockaddr
*target_saddr
;
2098 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
2099 if (!target_saddr
) {
2100 return -TARGET_EFAULT
;
2103 memcpy(addr
, target_saddr
, len
);
2104 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
2105 /* spkt_protocol is big-endian */
2107 unlock_user(target_saddr
, target_addr
, 0);
2111 static TargetFdTrans target_packet_trans
= {
2112 .target_to_host_addr
= packet_target_to_host_sockaddr
,
2115 /* do_socket() Must return target values and target errnos. */
2116 static abi_long
do_socket(int domain
, int type
, int protocol
)
2118 int target_type
= type
;
2121 ret
= target_to_host_sock_type(&type
);
2126 if (domain
== PF_NETLINK
)
2127 return -TARGET_EAFNOSUPPORT
;
2129 if (domain
== AF_PACKET
||
2130 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2131 protocol
= tswap16(protocol
);
2134 ret
= get_errno(socket(domain
, type
, protocol
));
2136 ret
= sock_flags_fixup(ret
, target_type
);
2137 if (type
== SOCK_PACKET
) {
2138 /* Manage an obsolete case :
2139 * if socket type is SOCK_PACKET, bind by name
2141 fd_trans_register(ret
, &target_packet_trans
);
2147 /* do_bind() Must return target values and target errnos. */
2148 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2154 if ((int)addrlen
< 0) {
2155 return -TARGET_EINVAL
;
2158 addr
= alloca(addrlen
+1);
2160 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2164 return get_errno(bind(sockfd
, addr
, addrlen
));
2167 /* do_connect() Must return target values and target errnos. */
2168 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2174 if ((int)addrlen
< 0) {
2175 return -TARGET_EINVAL
;
2178 addr
= alloca(addrlen
+1);
2180 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2184 return get_errno(connect(sockfd
, addr
, addrlen
));
2187 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2188 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2189 int flags
, int send
)
2195 abi_ulong target_vec
;
2197 if (msgp
->msg_name
) {
2198 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2199 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2200 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2201 tswapal(msgp
->msg_name
),
2207 msg
.msg_name
= NULL
;
2208 msg
.msg_namelen
= 0;
2210 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2211 msg
.msg_control
= alloca(msg
.msg_controllen
);
2212 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2214 count
= tswapal(msgp
->msg_iovlen
);
2215 target_vec
= tswapal(msgp
->msg_iov
);
2216 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2217 target_vec
, count
, send
);
2219 ret
= -host_to_target_errno(errno
);
2222 msg
.msg_iovlen
= count
;
2226 ret
= target_to_host_cmsg(&msg
, msgp
);
2228 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
2230 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
2231 if (!is_error(ret
)) {
2233 ret
= host_to_target_cmsg(msgp
, &msg
);
2234 if (!is_error(ret
)) {
2235 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2236 if (msg
.msg_name
!= NULL
) {
2237 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2238 msg
.msg_name
, msg
.msg_namelen
);
2250 unlock_iovec(vec
, target_vec
, count
, !send
);
2255 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2256 int flags
, int send
)
2259 struct target_msghdr
*msgp
;
2261 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2265 return -TARGET_EFAULT
;
2267 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2268 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2272 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2273 * so it might not have this *mmsg-specific flag either.
2275 #ifndef MSG_WAITFORONE
2276 #define MSG_WAITFORONE 0x10000
2279 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2280 unsigned int vlen
, unsigned int flags
,
2283 struct target_mmsghdr
*mmsgp
;
2287 if (vlen
> UIO_MAXIOV
) {
2291 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2293 return -TARGET_EFAULT
;
2296 for (i
= 0; i
< vlen
; i
++) {
2297 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2298 if (is_error(ret
)) {
2301 mmsgp
[i
].msg_len
= tswap32(ret
);
2302 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2303 if (flags
& MSG_WAITFORONE
) {
2304 flags
|= MSG_DONTWAIT
;
2308 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2310 /* Return number of datagrams sent if we sent any at all;
2311 * otherwise return the error.
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
2332 /* do_accept4() Must return target values and target errnos. */
2333 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2334 abi_ulong target_addrlen_addr
, int flags
)
2341 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2343 if (target_addr
== 0) {
2344 return get_errno(accept4(fd
, NULL
, NULL
, host_flags
));
2347 /* linux returns EINVAL if addrlen pointer is invalid */
2348 if (get_user_u32(addrlen
, target_addrlen_addr
))
2349 return -TARGET_EINVAL
;
2351 if ((int)addrlen
< 0) {
2352 return -TARGET_EINVAL
;
2355 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2356 return -TARGET_EINVAL
;
2358 addr
= alloca(addrlen
);
2360 ret
= get_errno(accept4(fd
, addr
, &addrlen
, host_flags
));
2361 if (!is_error(ret
)) {
2362 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2363 if (put_user_u32(addrlen
, target_addrlen_addr
))
2364 ret
= -TARGET_EFAULT
;
2369 /* do_getpeername() Must return target values and target errnos. */
2370 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2371 abi_ulong target_addrlen_addr
)
2377 if (get_user_u32(addrlen
, target_addrlen_addr
))
2378 return -TARGET_EFAULT
;
2380 if ((int)addrlen
< 0) {
2381 return -TARGET_EINVAL
;
2384 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2385 return -TARGET_EFAULT
;
2387 addr
= alloca(addrlen
);
2389 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2390 if (!is_error(ret
)) {
2391 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2392 if (put_user_u32(addrlen
, target_addrlen_addr
))
2393 ret
= -TARGET_EFAULT
;
2398 /* do_getsockname() Must return target values and target errnos. */
2399 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2400 abi_ulong target_addrlen_addr
)
2406 if (get_user_u32(addrlen
, target_addrlen_addr
))
2407 return -TARGET_EFAULT
;
2409 if ((int)addrlen
< 0) {
2410 return -TARGET_EINVAL
;
2413 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2414 return -TARGET_EFAULT
;
2416 addr
= alloca(addrlen
);
2418 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2419 if (!is_error(ret
)) {
2420 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2421 if (put_user_u32(addrlen
, target_addrlen_addr
))
2422 ret
= -TARGET_EFAULT
;
2427 /* do_socketpair() Must return target values and target errnos. */
2428 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2429 abi_ulong target_tab_addr
)
2434 target_to_host_sock_type(&type
);
2436 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2437 if (!is_error(ret
)) {
2438 if (put_user_s32(tab
[0], target_tab_addr
)
2439 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2440 ret
= -TARGET_EFAULT
;
2445 /* do_sendto() Must return target values and target errnos. */
2446 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2447 abi_ulong target_addr
, socklen_t addrlen
)
2453 if ((int)addrlen
< 0) {
2454 return -TARGET_EINVAL
;
2457 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2459 return -TARGET_EFAULT
;
2461 addr
= alloca(addrlen
+1);
2462 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
2464 unlock_user(host_msg
, msg
, 0);
2467 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2469 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2471 unlock_user(host_msg
, msg
, 0);
2475 /* do_recvfrom() Must return target values and target errnos. */
2476 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2477 abi_ulong target_addr
,
2478 abi_ulong target_addrlen
)
2485 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2487 return -TARGET_EFAULT
;
2489 if (get_user_u32(addrlen
, target_addrlen
)) {
2490 ret
= -TARGET_EFAULT
;
2493 if ((int)addrlen
< 0) {
2494 ret
= -TARGET_EINVAL
;
2497 addr
= alloca(addrlen
);
2498 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2500 addr
= NULL
; /* To keep compiler quiet. */
2501 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2503 if (!is_error(ret
)) {
2505 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2506 if (put_user_u32(addrlen
, target_addrlen
)) {
2507 ret
= -TARGET_EFAULT
;
2511 unlock_user(host_msg
, msg
, len
);
2514 unlock_user(host_msg
, msg
, 0);
2519 #ifdef TARGET_NR_socketcall
2520 /* do_socketcall() Must return target values and target errnos. */
2521 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2523 static const unsigned ac
[] = { /* number of arguments per call */
2524 [SOCKOP_socket
] = 3, /* domain, type, protocol */
2525 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
2526 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
2527 [SOCKOP_listen
] = 2, /* sockfd, backlog */
2528 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
2529 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
2530 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
2531 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
2532 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
2533 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
2534 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
2535 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2536 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2537 [SOCKOP_shutdown
] = 2, /* sockfd, how */
2538 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
2539 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
2540 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2541 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
2542 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2543 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
2545 abi_long a
[6]; /* max 6 args */
2547 /* first, collect the arguments in a[] according to ac[] */
2548 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
2550 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
2551 for (i
= 0; i
< ac
[num
]; ++i
) {
2552 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
2553 return -TARGET_EFAULT
;
2558 /* now when we have the args, actually handle the call */
2560 case SOCKOP_socket
: /* domain, type, protocol */
2561 return do_socket(a
[0], a
[1], a
[2]);
2562 case SOCKOP_bind
: /* sockfd, addr, addrlen */
2563 return do_bind(a
[0], a
[1], a
[2]);
2564 case SOCKOP_connect
: /* sockfd, addr, addrlen */
2565 return do_connect(a
[0], a
[1], a
[2]);
2566 case SOCKOP_listen
: /* sockfd, backlog */
2567 return get_errno(listen(a
[0], a
[1]));
2568 case SOCKOP_accept
: /* sockfd, addr, addrlen */
2569 return do_accept4(a
[0], a
[1], a
[2], 0);
2570 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
2571 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
2572 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
2573 return do_getsockname(a
[0], a
[1], a
[2]);
2574 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
2575 return do_getpeername(a
[0], a
[1], a
[2]);
2576 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
2577 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
2578 case SOCKOP_send
: /* sockfd, msg, len, flags */
2579 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
2580 case SOCKOP_recv
: /* sockfd, msg, len, flags */
2581 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
2582 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
2583 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2584 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
2585 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
2586 case SOCKOP_shutdown
: /* sockfd, how */
2587 return get_errno(shutdown(a
[0], a
[1]));
2588 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
2589 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
2590 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
2591 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
2592 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
2593 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
2594 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
2595 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
2596 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
2597 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2598 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
2599 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
2601 gemu_log("Unsupported socketcall: %d\n", num
);
2602 return -TARGET_ENOSYS
;
2607 #define N_SHM_REGIONS 32
2609 static struct shm_region
{
2613 } shm_regions
[N_SHM_REGIONS
];
2615 struct target_semid_ds
2617 struct target_ipc_perm sem_perm
;
2618 abi_ulong sem_otime
;
2619 #if !defined(TARGET_PPC64)
2620 abi_ulong __unused1
;
2622 abi_ulong sem_ctime
;
2623 #if !defined(TARGET_PPC64)
2624 abi_ulong __unused2
;
2626 abi_ulong sem_nsems
;
2627 abi_ulong __unused3
;
2628 abi_ulong __unused4
;
2631 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2632 abi_ulong target_addr
)
2634 struct target_ipc_perm
*target_ip
;
2635 struct target_semid_ds
*target_sd
;
2637 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2638 return -TARGET_EFAULT
;
2639 target_ip
= &(target_sd
->sem_perm
);
2640 host_ip
->__key
= tswap32(target_ip
->__key
);
2641 host_ip
->uid
= tswap32(target_ip
->uid
);
2642 host_ip
->gid
= tswap32(target_ip
->gid
);
2643 host_ip
->cuid
= tswap32(target_ip
->cuid
);
2644 host_ip
->cgid
= tswap32(target_ip
->cgid
);
2645 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2646 host_ip
->mode
= tswap32(target_ip
->mode
);
2648 host_ip
->mode
= tswap16(target_ip
->mode
);
2650 #if defined(TARGET_PPC)
2651 host_ip
->__seq
= tswap32(target_ip
->__seq
);
2653 host_ip
->__seq
= tswap16(target_ip
->__seq
);
2655 unlock_user_struct(target_sd
, target_addr
, 0);
2659 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2660 struct ipc_perm
*host_ip
)
2662 struct target_ipc_perm
*target_ip
;
2663 struct target_semid_ds
*target_sd
;
2665 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2666 return -TARGET_EFAULT
;
2667 target_ip
= &(target_sd
->sem_perm
);
2668 target_ip
->__key
= tswap32(host_ip
->__key
);
2669 target_ip
->uid
= tswap32(host_ip
->uid
);
2670 target_ip
->gid
= tswap32(host_ip
->gid
);
2671 target_ip
->cuid
= tswap32(host_ip
->cuid
);
2672 target_ip
->cgid
= tswap32(host_ip
->cgid
);
2673 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2674 target_ip
->mode
= tswap32(host_ip
->mode
);
2676 target_ip
->mode
= tswap16(host_ip
->mode
);
2678 #if defined(TARGET_PPC)
2679 target_ip
->__seq
= tswap32(host_ip
->__seq
);
2681 target_ip
->__seq
= tswap16(host_ip
->__seq
);
2683 unlock_user_struct(target_sd
, target_addr
, 1);
2687 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2688 abi_ulong target_addr
)
2690 struct target_semid_ds
*target_sd
;
2692 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2693 return -TARGET_EFAULT
;
2694 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2695 return -TARGET_EFAULT
;
2696 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2697 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2698 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2699 unlock_user_struct(target_sd
, target_addr
, 0);
2703 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2704 struct semid_ds
*host_sd
)
2706 struct target_semid_ds
*target_sd
;
2708 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2709 return -TARGET_EFAULT
;
2710 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2711 return -TARGET_EFAULT
;
2712 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2713 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2714 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2715 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-ABI layout of struct seminfo (fields mirror the host struct;
 * see host_to_target_seminfo() below for the one-to-one copy). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2732 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2733 struct seminfo
*host_seminfo
)
2735 struct target_seminfo
*target_seminfo
;
2736 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2737 return -TARGET_EFAULT
;
2738 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2739 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2740 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2741 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2742 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2743 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2744 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2745 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2746 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2747 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2748 unlock_user_struct(target_seminfo
, target_addr
, 1);
2754 struct semid_ds
*buf
;
2755 unsigned short *array
;
2756 struct seminfo
*__buf
;
2759 union target_semun
{
2766 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2767 abi_ulong target_addr
)
2770 unsigned short *array
;
2772 struct semid_ds semid_ds
;
2775 semun
.buf
= &semid_ds
;
2777 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2779 return get_errno(ret
);
2781 nsems
= semid_ds
.sem_nsems
;
2783 *host_array
= g_try_new(unsigned short, nsems
);
2785 return -TARGET_ENOMEM
;
2787 array
= lock_user(VERIFY_READ
, target_addr
,
2788 nsems
*sizeof(unsigned short), 1);
2790 g_free(*host_array
);
2791 return -TARGET_EFAULT
;
2794 for(i
=0; i
<nsems
; i
++) {
2795 __get_user((*host_array
)[i
], &array
[i
]);
2797 unlock_user(array
, target_addr
, 0);
2802 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2803 unsigned short **host_array
)
2806 unsigned short *array
;
2808 struct semid_ds semid_ds
;
2811 semun
.buf
= &semid_ds
;
2813 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2815 return get_errno(ret
);
2817 nsems
= semid_ds
.sem_nsems
;
2819 array
= lock_user(VERIFY_WRITE
, target_addr
,
2820 nsems
*sizeof(unsigned short), 0);
2822 return -TARGET_EFAULT
;
2824 for(i
=0; i
<nsems
; i
++) {
2825 __put_user((*host_array
)[i
], &array
[i
]);
2827 g_free(*host_array
);
2828 unlock_user(array
, target_addr
, 1);
2833 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2834 abi_ulong target_arg
)
2836 union target_semun target_su
= { .buf
= target_arg
};
2838 struct semid_ds dsarg
;
2839 unsigned short *array
= NULL
;
2840 struct seminfo seminfo
;
2841 abi_long ret
= -TARGET_EINVAL
;
2848 /* In 64 bit cross-endian situations, we will erroneously pick up
2849 * the wrong half of the union for the "val" element. To rectify
2850 * this, the entire 8-byte structure is byteswapped, followed by
2851 * a swap of the 4 byte val field. In other cases, the data is
2852 * already in proper host byte order. */
2853 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
2854 target_su
.buf
= tswapal(target_su
.buf
);
2855 arg
.val
= tswap32(target_su
.val
);
2857 arg
.val
= target_su
.val
;
2859 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2863 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2867 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2868 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2875 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2879 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2880 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2886 arg
.__buf
= &seminfo
;
2887 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2888 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2896 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-ABI layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2909 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2910 abi_ulong target_addr
,
2913 struct target_sembuf
*target_sembuf
;
2916 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2917 nsops
*sizeof(struct target_sembuf
), 1);
2919 return -TARGET_EFAULT
;
2921 for(i
=0; i
<nsops
; i
++) {
2922 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2923 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2924 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2927 unlock_user(target_sembuf
, target_addr
, 0);
2932 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2934 struct sembuf sops
[nsops
];
2936 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2937 return -TARGET_EFAULT
;
2939 return get_errno(semop(semid
, sops
, nsops
));
2942 struct target_msqid_ds
2944 struct target_ipc_perm msg_perm
;
2945 abi_ulong msg_stime
;
2946 #if TARGET_ABI_BITS == 32
2947 abi_ulong __unused1
;
2949 abi_ulong msg_rtime
;
2950 #if TARGET_ABI_BITS == 32
2951 abi_ulong __unused2
;
2953 abi_ulong msg_ctime
;
2954 #if TARGET_ABI_BITS == 32
2955 abi_ulong __unused3
;
2957 abi_ulong __msg_cbytes
;
2959 abi_ulong msg_qbytes
;
2960 abi_ulong msg_lspid
;
2961 abi_ulong msg_lrpid
;
2962 abi_ulong __unused4
;
2963 abi_ulong __unused5
;
2966 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2967 abi_ulong target_addr
)
2969 struct target_msqid_ds
*target_md
;
2971 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2972 return -TARGET_EFAULT
;
2973 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2974 return -TARGET_EFAULT
;
2975 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2976 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2977 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2978 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2979 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2980 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2981 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2982 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2983 unlock_user_struct(target_md
, target_addr
, 0);
2987 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2988 struct msqid_ds
*host_md
)
2990 struct target_msqid_ds
*target_md
;
2992 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2993 return -TARGET_EFAULT
;
2994 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2995 return -TARGET_EFAULT
;
2996 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2997 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2998 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2999 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3000 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3001 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3002 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3003 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3004 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-ABI layout of struct msginfo (fields mirror the host struct;
 * see host_to_target_msginfo() below for the one-to-one copy). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3019 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3020 struct msginfo
*host_msginfo
)
3022 struct target_msginfo
*target_msginfo
;
3023 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3024 return -TARGET_EFAULT
;
3025 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3026 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3027 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3028 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3029 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3030 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3031 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3032 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3033 unlock_user_struct(target_msginfo
, target_addr
, 1);
3037 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3039 struct msqid_ds dsarg
;
3040 struct msginfo msginfo
;
3041 abi_long ret
= -TARGET_EINVAL
;
3049 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3050 return -TARGET_EFAULT
;
3051 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3052 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3053 return -TARGET_EFAULT
;
3056 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3060 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3061 if (host_to_target_msginfo(ptr
, &msginfo
))
3062 return -TARGET_EFAULT
;
3069 struct target_msgbuf
{
3074 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3075 ssize_t msgsz
, int msgflg
)
3077 struct target_msgbuf
*target_mb
;
3078 struct msgbuf
*host_mb
;
3082 return -TARGET_EINVAL
;
3085 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3086 return -TARGET_EFAULT
;
3087 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3089 unlock_user_struct(target_mb
, msgp
, 0);
3090 return -TARGET_ENOMEM
;
3092 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3093 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3094 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3096 unlock_user_struct(target_mb
, msgp
, 0);
3101 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3102 unsigned int msgsz
, abi_long msgtyp
,
3105 struct target_msgbuf
*target_mb
;
3107 struct msgbuf
*host_mb
;
3110 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3111 return -TARGET_EFAULT
;
3113 host_mb
= g_malloc(msgsz
+sizeof(long));
3114 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3117 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3118 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3119 if (!target_mtext
) {
3120 ret
= -TARGET_EFAULT
;
3123 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3124 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3127 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3131 unlock_user_struct(target_mb
, msgp
, 1);
3136 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3137 abi_ulong target_addr
)
3139 struct target_shmid_ds
*target_sd
;
3141 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3142 return -TARGET_EFAULT
;
3143 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3144 return -TARGET_EFAULT
;
3145 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3146 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3147 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3148 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3149 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3150 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3151 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3152 unlock_user_struct(target_sd
, target_addr
, 0);
3156 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3157 struct shmid_ds
*host_sd
)
3159 struct target_shmid_ds
*target_sd
;
3161 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3162 return -TARGET_EFAULT
;
3163 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3164 return -TARGET_EFAULT
;
3165 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3166 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3167 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3168 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3169 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3170 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3171 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3172 unlock_user_struct(target_sd
, target_addr
, 1);
3176 struct target_shminfo
{
3184 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3185 struct shminfo
*host_shminfo
)
3187 struct target_shminfo
*target_shminfo
;
3188 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3189 return -TARGET_EFAULT
;
3190 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3191 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3192 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3193 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3194 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3195 unlock_user_struct(target_shminfo
, target_addr
, 1);
3199 struct target_shm_info
{
3204 abi_ulong swap_attempts
;
3205 abi_ulong swap_successes
;
3208 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3209 struct shm_info
*host_shm_info
)
3211 struct target_shm_info
*target_shm_info
;
3212 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3213 return -TARGET_EFAULT
;
3214 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3215 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3216 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3217 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3218 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3219 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3220 unlock_user_struct(target_shm_info
, target_addr
, 1);
3224 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3226 struct shmid_ds dsarg
;
3227 struct shminfo shminfo
;
3228 struct shm_info shm_info
;
3229 abi_long ret
= -TARGET_EINVAL
;
3237 if (target_to_host_shmid_ds(&dsarg
, buf
))
3238 return -TARGET_EFAULT
;
3239 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3240 if (host_to_target_shmid_ds(buf
, &dsarg
))
3241 return -TARGET_EFAULT
;
3244 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3245 if (host_to_target_shminfo(buf
, &shminfo
))
3246 return -TARGET_EFAULT
;
3249 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3250 if (host_to_target_shm_info(buf
, &shm_info
))
3251 return -TARGET_EFAULT
;
3256 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3263 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3267 struct shmid_ds shm_info
;
3270 /* find out the length of the shared memory segment */
3271 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3272 if (is_error(ret
)) {
3273 /* can't get length, bail out */
3280 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3282 abi_ulong mmap_start
;
3284 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3286 if (mmap_start
== -1) {
3288 host_raddr
= (void *)-1;
3290 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3293 if (host_raddr
== (void *)-1) {
3295 return get_errno((long)host_raddr
);
3297 raddr
=h2g((unsigned long)host_raddr
);
3299 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3300 PAGE_VALID
| PAGE_READ
|
3301 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3303 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3304 if (!shm_regions
[i
].in_use
) {
3305 shm_regions
[i
].in_use
= true;
3306 shm_regions
[i
].start
= raddr
;
3307 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3317 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3321 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3322 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3323 shm_regions
[i
].in_use
= false;
3324 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3329 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry the version.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style call packs msgp/msgtyp in a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3438 /* kernel structure types definitions */
3440 #define STRUCT(name, ...) STRUCT_ ## name,
3441 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3443 #include "syscall_types.h"
3447 #undef STRUCT_SPECIAL
3449 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3450 #define STRUCT_SPECIAL(name)
3451 #include "syscall_types.h"
3453 #undef STRUCT_SPECIAL
3455 typedef struct IOCTLEntry IOCTLEntry
;
3457 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3458 int fd
, int cmd
, abi_long arg
);
3462 unsigned int host_cmd
;
3465 do_ioctl_fn
*do_ioctl
;
3466 const argtype arg_type
[5];
3469 #define IOC_R 0x0001
3470 #define IOC_W 0x0002
3471 #define IOC_RW (IOC_R | IOC_W)
3473 #define MAX_STRUCT_SIZE 4096
3475 #ifdef CONFIG_FIEMAP
3476 /* So fiemap access checks don't overflow on 32 bit systems.
3477 * This is very slightly smaller than the limit imposed by
3478 * the underlying kernel.
3480 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3481 / sizeof(struct fiemap_extent))
3483 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3484 int fd
, int cmd
, abi_long arg
)
3486 /* The parameter for this ioctl is a struct fiemap followed
3487 * by an array of struct fiemap_extent whose size is set
3488 * in fiemap->fm_extent_count. The array is filled in by the
3491 int target_size_in
, target_size_out
;
3493 const argtype
*arg_type
= ie
->arg_type
;
3494 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3497 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3501 assert(arg_type
[0] == TYPE_PTR
);
3502 assert(ie
->access
== IOC_RW
);
3504 target_size_in
= thunk_type_size(arg_type
, 0);
3505 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3507 return -TARGET_EFAULT
;
3509 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3510 unlock_user(argptr
, arg
, 0);
3511 fm
= (struct fiemap
*)buf_temp
;
3512 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3513 return -TARGET_EINVAL
;
3516 outbufsz
= sizeof (*fm
) +
3517 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3519 if (outbufsz
> MAX_STRUCT_SIZE
) {
3520 /* We can't fit all the extents into the fixed size buffer.
3521 * Allocate one that is large enough and use it instead.
3523 fm
= g_try_malloc(outbufsz
);
3525 return -TARGET_ENOMEM
;
3527 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3530 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3531 if (!is_error(ret
)) {
3532 target_size_out
= target_size_in
;
3533 /* An extent_count of 0 means we were only counting the extents
3534 * so there are no structs to copy
3536 if (fm
->fm_extent_count
!= 0) {
3537 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3539 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3541 ret
= -TARGET_EFAULT
;
3543 /* Convert the struct fiemap */
3544 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3545 if (fm
->fm_extent_count
!= 0) {
3546 p
= argptr
+ target_size_in
;
3547 /* ...and then all the struct fiemap_extents */
3548 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3549 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3554 unlock_user(argptr
, arg
, target_size_out
);
3564 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3565 int fd
, int cmd
, abi_long arg
)
3567 const argtype
*arg_type
= ie
->arg_type
;
3571 struct ifconf
*host_ifconf
;
3573 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3574 int target_ifreq_size
;
3579 abi_long target_ifc_buf
;
3583 assert(arg_type
[0] == TYPE_PTR
);
3584 assert(ie
->access
== IOC_RW
);
3587 target_size
= thunk_type_size(arg_type
, 0);
3589 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3591 return -TARGET_EFAULT
;
3592 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3593 unlock_user(argptr
, arg
, 0);
3595 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3596 target_ifc_len
= host_ifconf
->ifc_len
;
3597 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3599 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3600 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3601 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3603 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3604 if (outbufsz
> MAX_STRUCT_SIZE
) {
3605 /* We can't fit all the extents into the fixed size buffer.
3606 * Allocate one that is large enough and use it instead.
3608 host_ifconf
= malloc(outbufsz
);
3610 return -TARGET_ENOMEM
;
3612 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3615 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3617 host_ifconf
->ifc_len
= host_ifc_len
;
3618 host_ifconf
->ifc_buf
= host_ifc_buf
;
3620 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3621 if (!is_error(ret
)) {
3622 /* convert host ifc_len to target ifc_len */
3624 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3625 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3626 host_ifconf
->ifc_len
= target_ifc_len
;
3628 /* restore target ifc_buf */
3630 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3632 /* copy struct ifconf to target user */
3634 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3636 return -TARGET_EFAULT
;
3637 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3638 unlock_user(argptr
, arg
, target_size
);
3640 /* copy ifreq[] to target user */
3642 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3643 for (i
= 0; i
< nb_ifreq
; i
++) {
3644 thunk_convert(argptr
+ i
* target_ifreq_size
,
3645 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3646 ifreq_arg_type
, THUNK_TARGET
);
3648 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3658 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3659 int cmd
, abi_long arg
)
3662 struct dm_ioctl
*host_dm
;
3663 abi_long guest_data
;
3664 uint32_t guest_data_size
;
3666 const argtype
*arg_type
= ie
->arg_type
;
3668 void *big_buf
= NULL
;
3672 target_size
= thunk_type_size(arg_type
, 0);
3673 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3675 ret
= -TARGET_EFAULT
;
3678 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3679 unlock_user(argptr
, arg
, 0);
3681 /* buf_temp is too small, so fetch things into a bigger buffer */
3682 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3683 memcpy(big_buf
, buf_temp
, target_size
);
3687 guest_data
= arg
+ host_dm
->data_start
;
3688 if ((guest_data
- arg
) < 0) {
3692 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3693 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3695 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3696 switch (ie
->host_cmd
) {
3698 case DM_LIST_DEVICES
:
3701 case DM_DEV_SUSPEND
:
3704 case DM_TABLE_STATUS
:
3705 case DM_TABLE_CLEAR
:
3707 case DM_LIST_VERSIONS
:
3711 case DM_DEV_SET_GEOMETRY
:
3712 /* data contains only strings */
3713 memcpy(host_data
, argptr
, guest_data_size
);
3716 memcpy(host_data
, argptr
, guest_data_size
);
3717 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3721 void *gspec
= argptr
;
3722 void *cur_data
= host_data
;
3723 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3724 int spec_size
= thunk_type_size(arg_type
, 0);
3727 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3728 struct dm_target_spec
*spec
= cur_data
;
3732 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3733 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3735 spec
->next
= sizeof(*spec
) + slen
;
3736 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3738 cur_data
+= spec
->next
;
3743 ret
= -TARGET_EINVAL
;
3744 unlock_user(argptr
, guest_data
, 0);
3747 unlock_user(argptr
, guest_data
, 0);
3749 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3750 if (!is_error(ret
)) {
3751 guest_data
= arg
+ host_dm
->data_start
;
3752 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3753 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3754 switch (ie
->host_cmd
) {
3759 case DM_DEV_SUSPEND
:
3762 case DM_TABLE_CLEAR
:
3764 case DM_DEV_SET_GEOMETRY
:
3765 /* no return data */
3767 case DM_LIST_DEVICES
:
3769 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3770 uint32_t remaining_data
= guest_data_size
;
3771 void *cur_data
= argptr
;
3772 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3773 int nl_size
= 12; /* can't use thunk_size due to alignment */
3776 uint32_t next
= nl
->next
;
3778 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3780 if (remaining_data
< nl
->next
) {
3781 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3784 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3785 strcpy(cur_data
+ nl_size
, nl
->name
);
3786 cur_data
+= nl
->next
;
3787 remaining_data
-= nl
->next
;
3791 nl
= (void*)nl
+ next
;
3796 case DM_TABLE_STATUS
:
3798 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3799 void *cur_data
= argptr
;
3800 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3801 int spec_size
= thunk_type_size(arg_type
, 0);
3804 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3805 uint32_t next
= spec
->next
;
3806 int slen
= strlen((char*)&spec
[1]) + 1;
3807 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3808 if (guest_data_size
< spec
->next
) {
3809 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3812 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3813 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3814 cur_data
= argptr
+ spec
->next
;
3815 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3821 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3822 int count
= *(uint32_t*)hdata
;
3823 uint64_t *hdev
= hdata
+ 8;
3824 uint64_t *gdev
= argptr
+ 8;
3827 *(uint32_t*)argptr
= tswap32(count
);
3828 for (i
= 0; i
< count
; i
++) {
3829 *gdev
= tswap64(*hdev
);
3835 case DM_LIST_VERSIONS
:
3837 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3838 uint32_t remaining_data
= guest_data_size
;
3839 void *cur_data
= argptr
;
3840 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3841 int vers_size
= thunk_type_size(arg_type
, 0);
3844 uint32_t next
= vers
->next
;
3846 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3848 if (remaining_data
< vers
->next
) {
3849 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3852 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3853 strcpy(cur_data
+ vers_size
, vers
->name
);
3854 cur_data
+= vers
->next
;
3855 remaining_data
-= vers
->next
;
3859 vers
= (void*)vers
+ next
;
3864 unlock_user(argptr
, guest_data
, 0);
3865 ret
= -TARGET_EINVAL
;
3868 unlock_user(argptr
, guest_data
, guest_data_size
);
3870 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3872 ret
= -TARGET_EFAULT
;
3875 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3876 unlock_user(argptr
, arg
, target_size
);
3883 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3884 int cmd
, abi_long arg
)
3888 const argtype
*arg_type
= ie
->arg_type
;
3889 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
3892 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
3893 struct blkpg_partition host_part
;
3895 /* Read and convert blkpg */
3897 target_size
= thunk_type_size(arg_type
, 0);
3898 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3900 ret
= -TARGET_EFAULT
;
3903 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3904 unlock_user(argptr
, arg
, 0);
3906 switch (host_blkpg
->op
) {
3907 case BLKPG_ADD_PARTITION
:
3908 case BLKPG_DEL_PARTITION
:
3909 /* payload is struct blkpg_partition */
3912 /* Unknown opcode */
3913 ret
= -TARGET_EINVAL
;
3917 /* Read and convert blkpg->data */
3918 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
3919 target_size
= thunk_type_size(part_arg_type
, 0);
3920 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3922 ret
= -TARGET_EFAULT
;
3925 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
3926 unlock_user(argptr
, arg
, 0);
3928 /* Swizzle the data pointer to our local copy and call! */
3929 host_blkpg
->data
= &host_part
;
3930 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_blkpg
));
3936 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3937 int fd
, int cmd
, abi_long arg
)
3939 const argtype
*arg_type
= ie
->arg_type
;
3940 const StructEntry
*se
;
3941 const argtype
*field_types
;
3942 const int *dst_offsets
, *src_offsets
;
3945 abi_ulong
*target_rt_dev_ptr
;
3946 unsigned long *host_rt_dev_ptr
;
3950 assert(ie
->access
== IOC_W
);
3951 assert(*arg_type
== TYPE_PTR
);
3953 assert(*arg_type
== TYPE_STRUCT
);
3954 target_size
= thunk_type_size(arg_type
, 0);
3955 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3957 return -TARGET_EFAULT
;
3960 assert(*arg_type
== (int)STRUCT_rtentry
);
3961 se
= struct_entries
+ *arg_type
++;
3962 assert(se
->convert
[0] == NULL
);
3963 /* convert struct here to be able to catch rt_dev string */
3964 field_types
= se
->field_types
;
3965 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3966 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3967 for (i
= 0; i
< se
->nb_fields
; i
++) {
3968 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3969 assert(*field_types
== TYPE_PTRVOID
);
3970 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3971 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3972 if (*target_rt_dev_ptr
!= 0) {
3973 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3974 tswapal(*target_rt_dev_ptr
));
3975 if (!*host_rt_dev_ptr
) {
3976 unlock_user(argptr
, arg
, 0);
3977 return -TARGET_EFAULT
;
3980 *host_rt_dev_ptr
= 0;
3985 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3986 argptr
+ src_offsets
[i
],
3987 field_types
, THUNK_HOST
);
3989 unlock_user(argptr
, arg
, 0);
3991 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3992 if (*host_rt_dev_ptr
!= 0) {
3993 unlock_user((void *)*host_rt_dev_ptr
,
3994 *target_rt_dev_ptr
, 0);
3999 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4000 int fd
, int cmd
, abi_long arg
)
4002 int sig
= target_to_host_signal(arg
);
4003 return get_errno(ioctl(fd
, ie
->host_cmd
, sig
));
4006 static IOCTLEntry ioctl_entries
[] = {
4007 #define IOCTL(cmd, access, ...) \
4008 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4009 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4010 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4015 /* ??? Implement proper locking for ioctls. */
4016 /* do_ioctl() Must return target values and target errnos. */
4017 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4019 const IOCTLEntry
*ie
;
4020 const argtype
*arg_type
;
4022 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4028 if (ie
->target_cmd
== 0) {
4029 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4030 return -TARGET_ENOSYS
;
4032 if (ie
->target_cmd
== cmd
)
4036 arg_type
= ie
->arg_type
;
4038 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
4041 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4044 switch(arg_type
[0]) {
4047 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
4051 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
4055 target_size
= thunk_type_size(arg_type
, 0);
4056 switch(ie
->access
) {
4058 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4059 if (!is_error(ret
)) {
4060 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4062 return -TARGET_EFAULT
;
4063 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4064 unlock_user(argptr
, arg
, target_size
);
4068 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4070 return -TARGET_EFAULT
;
4071 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4072 unlock_user(argptr
, arg
, 0);
4073 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4077 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4079 return -TARGET_EFAULT
;
4080 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4081 unlock_user(argptr
, arg
, 0);
4082 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
4083 if (!is_error(ret
)) {
4084 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4086 return -TARGET_EFAULT
;
4087 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4088 unlock_user(argptr
, arg
, target_size
);
4094 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4095 (long)cmd
, arg_type
[0]);
4096 ret
= -TARGET_ENOSYS
;
4102 static const bitmask_transtbl iflag_tbl
[] = {
4103 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4104 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4105 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4106 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4107 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4108 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4109 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4110 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4111 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4112 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4113 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4114 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4115 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4116 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4120 static const bitmask_transtbl oflag_tbl
[] = {
4121 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4122 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4123 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4124 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4125 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4126 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4127 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4128 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4129 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4130 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4131 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4132 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4133 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4134 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4135 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4136 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4137 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4138 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4139 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4140 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4141 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4142 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4143 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4144 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4148 static const bitmask_transtbl cflag_tbl
[] = {
4149 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
4150 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
4151 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
4152 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
4153 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
4154 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
4155 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
4156 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
4157 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
4158 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
4159 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
4160 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
4161 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
4162 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
4163 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
4164 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
4165 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
4166 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
4167 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
4168 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
4169 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
4170 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
4171 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
4172 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
4173 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
4174 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
4175 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
4176 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
4177 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
4178 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
4179 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
4183 static const bitmask_transtbl lflag_tbl
[] = {
4184 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
4185 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
4186 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
4187 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
4188 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
4189 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
4190 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
4191 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
4192 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
4193 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
4194 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
4195 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
4196 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
4197 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
4198 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
4202 static void target_to_host_termios (void *dst
, const void *src
)
4204 struct host_termios
*host
= dst
;
4205 const struct target_termios
*target
= src
;
4208 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
4210 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
4212 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
4214 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
4215 host
->c_line
= target
->c_line
;
4217 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
4218 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
4219 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
4220 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
4221 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
4222 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
4223 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
4224 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
4225 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
4226 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
4227 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
4228 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
4229 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
4230 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
4231 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
4232 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
4233 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
4234 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
4237 static void host_to_target_termios (void *dst
, const void *src
)
4239 struct target_termios
*target
= dst
;
4240 const struct host_termios
*host
= src
;
4243 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
4245 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
4247 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
4249 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
4250 target
->c_line
= host
->c_line
;
4252 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
4253 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
4254 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
4255 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
4256 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
4257 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
4258 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
4259 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
4260 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
4261 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
4262 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
4263 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
4264 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
4265 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
4266 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
4267 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
4268 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
4269 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
4272 static const StructEntry struct_termios_def
= {
4273 .convert
= { host_to_target_termios
, target_to_host_termios
},
4274 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
4275 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
4278 static bitmask_transtbl mmap_flags_tbl
[] = {
4279 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
4280 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
4281 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
4282 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
4283 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
4284 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
4285 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
4286 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
4287 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
4292 #if defined(TARGET_I386)
4294 /* NOTE: there is really one LDT for all the threads */
4295 static uint8_t *ldt_table
;
4297 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4304 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4305 if (size
> bytecount
)
4307 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4309 return -TARGET_EFAULT
;
4310 /* ??? Should this by byteswapped? */
4311 memcpy(p
, ldt_table
, size
);
4312 unlock_user(p
, ptr
, size
);
4316 /* XXX: add locking support */
4317 static abi_long
write_ldt(CPUX86State
*env
,
4318 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4320 struct target_modify_ldt_ldt_s ldt_info
;
4321 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4322 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4323 int seg_not_present
, useable
, lm
;
4324 uint32_t *lp
, entry_1
, entry_2
;
4326 if (bytecount
!= sizeof(ldt_info
))
4327 return -TARGET_EINVAL
;
4328 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4329 return -TARGET_EFAULT
;
4330 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4331 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4332 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4333 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4334 unlock_user_struct(target_ldt_info
, ptr
, 0);
4336 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4337 return -TARGET_EINVAL
;
4338 seg_32bit
= ldt_info
.flags
& 1;
4339 contents
= (ldt_info
.flags
>> 1) & 3;
4340 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4341 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4342 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4343 useable
= (ldt_info
.flags
>> 6) & 1;
4347 lm
= (ldt_info
.flags
>> 7) & 1;
4349 if (contents
== 3) {
4351 return -TARGET_EINVAL
;
4352 if (seg_not_present
== 0)
4353 return -TARGET_EINVAL
;
4355 /* allocate the LDT */
4357 env
->ldt
.base
= target_mmap(0,
4358 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4359 PROT_READ
|PROT_WRITE
,
4360 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4361 if (env
->ldt
.base
== -1)
4362 return -TARGET_ENOMEM
;
4363 memset(g2h(env
->ldt
.base
), 0,
4364 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4365 env
->ldt
.limit
= 0xffff;
4366 ldt_table
= g2h(env
->ldt
.base
);
4369 /* NOTE: same code as Linux kernel */
4370 /* Allow LDTs to be cleared by the user. */
4371 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4374 read_exec_only
== 1 &&
4376 limit_in_pages
== 0 &&
4377 seg_not_present
== 1 &&
4385 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4386 (ldt_info
.limit
& 0x0ffff);
4387 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4388 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4389 (ldt_info
.limit
& 0xf0000) |
4390 ((read_exec_only
^ 1) << 9) |
4392 ((seg_not_present
^ 1) << 15) |
4394 (limit_in_pages
<< 23) |
4398 entry_2
|= (useable
<< 20);
4400 /* Install the new entry ... */
4402 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4403 lp
[0] = tswap32(entry_1
);
4404 lp
[1] = tswap32(entry_2
);
4408 /* specific and weird i386 syscalls */
4409 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4410 unsigned long bytecount
)
4416 ret
= read_ldt(ptr
, bytecount
);
4419 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4422 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4425 ret
= -TARGET_ENOSYS
;
4431 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4432 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4434 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4435 struct target_modify_ldt_ldt_s ldt_info
;
4436 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4437 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4438 int seg_not_present
, useable
, lm
;
4439 uint32_t *lp
, entry_1
, entry_2
;
4442 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4443 if (!target_ldt_info
)
4444 return -TARGET_EFAULT
;
4445 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4446 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4447 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4448 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4449 if (ldt_info
.entry_number
== -1) {
4450 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4451 if (gdt_table
[i
] == 0) {
4452 ldt_info
.entry_number
= i
;
4453 target_ldt_info
->entry_number
= tswap32(i
);
4458 unlock_user_struct(target_ldt_info
, ptr
, 1);
4460 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4461 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4462 return -TARGET_EINVAL
;
4463 seg_32bit
= ldt_info
.flags
& 1;
4464 contents
= (ldt_info
.flags
>> 1) & 3;
4465 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4466 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4467 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4468 useable
= (ldt_info
.flags
>> 6) & 1;
4472 lm
= (ldt_info
.flags
>> 7) & 1;
4475 if (contents
== 3) {
4476 if (seg_not_present
== 0)
4477 return -TARGET_EINVAL
;
4480 /* NOTE: same code as Linux kernel */
4481 /* Allow LDTs to be cleared by the user. */
4482 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4483 if ((contents
== 0 &&
4484 read_exec_only
== 1 &&
4486 limit_in_pages
== 0 &&
4487 seg_not_present
== 1 &&
4495 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4496 (ldt_info
.limit
& 0x0ffff);
4497 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4498 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4499 (ldt_info
.limit
& 0xf0000) |
4500 ((read_exec_only
^ 1) << 9) |
4502 ((seg_not_present
^ 1) << 15) |
4504 (limit_in_pages
<< 23) |
4509 /* Install the new entry ... */
4511 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4512 lp
[0] = tswap32(entry_1
);
4513 lp
[1] = tswap32(entry_2
);
4517 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4519 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4520 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4521 uint32_t base_addr
, limit
, flags
;
4522 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4523 int seg_not_present
, useable
, lm
;
4524 uint32_t *lp
, entry_1
, entry_2
;
4526 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4527 if (!target_ldt_info
)
4528 return -TARGET_EFAULT
;
4529 idx
= tswap32(target_ldt_info
->entry_number
);
4530 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4531 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4532 unlock_user_struct(target_ldt_info
, ptr
, 1);
4533 return -TARGET_EINVAL
;
4535 lp
= (uint32_t *)(gdt_table
+ idx
);
4536 entry_1
= tswap32(lp
[0]);
4537 entry_2
= tswap32(lp
[1]);
4539 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4540 contents
= (entry_2
>> 10) & 3;
4541 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4542 seg_32bit
= (entry_2
>> 22) & 1;
4543 limit_in_pages
= (entry_2
>> 23) & 1;
4544 useable
= (entry_2
>> 20) & 1;
4548 lm
= (entry_2
>> 21) & 1;
4550 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4551 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4552 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4553 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4554 base_addr
= (entry_1
>> 16) |
4555 (entry_2
& 0xff000000) |
4556 ((entry_2
& 0xff) << 16);
4557 target_ldt_info
->base_addr
= tswapal(base_addr
);
4558 target_ldt_info
->limit
= tswap32(limit
);
4559 target_ldt_info
->flags
= tswap32(flags
);
4560 unlock_user_struct(target_ldt_info
, ptr
, 1);
4563 #endif /* TARGET_I386 && TARGET_ABI32 */
4565 #ifndef TARGET_ABI32
4566 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4573 case TARGET_ARCH_SET_GS
:
4574 case TARGET_ARCH_SET_FS
:
4575 if (code
== TARGET_ARCH_SET_GS
)
4579 cpu_x86_load_seg(env
, idx
, 0);
4580 env
->segs
[idx
].base
= addr
;
4582 case TARGET_ARCH_GET_GS
:
4583 case TARGET_ARCH_GET_FS
:
4584 if (code
== TARGET_ARCH_GET_GS
)
4588 val
= env
->segs
[idx
].base
;
4589 if (put_user(val
, addr
, abi_ulong
))
4590 ret
= -TARGET_EFAULT
;
4593 ret
= -TARGET_EINVAL
;
4600 #endif /* defined(TARGET_I386) */
4602 #define NEW_STACK_SIZE 0x40000
4605 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4608 pthread_mutex_t mutex
;
4609 pthread_cond_t cond
;
4612 abi_ulong child_tidptr
;
4613 abi_ulong parent_tidptr
;
4617 static void *clone_func(void *arg
)
4619 new_thread_info
*info
= arg
;
4624 rcu_register_thread();
4626 cpu
= ENV_GET_CPU(env
);
4628 ts
= (TaskState
*)cpu
->opaque
;
4629 info
->tid
= gettid();
4630 cpu
->host_tid
= info
->tid
;
4632 if (info
->child_tidptr
)
4633 put_user_u32(info
->tid
, info
->child_tidptr
);
4634 if (info
->parent_tidptr
)
4635 put_user_u32(info
->tid
, info
->parent_tidptr
);
4636 /* Enable signals. */
4637 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4638 /* Signal to the parent that we're ready. */
4639 pthread_mutex_lock(&info
->mutex
);
4640 pthread_cond_broadcast(&info
->cond
);
4641 pthread_mutex_unlock(&info
->mutex
);
4642 /* Wait until the parent has finshed initializing the tls state. */
4643 pthread_mutex_lock(&clone_lock
);
4644 pthread_mutex_unlock(&clone_lock
);
4650 /* do_fork() Must return host values and target errnos (unlike most
4651 do_*() functions). */
4652 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4653 abi_ulong parent_tidptr
, target_ulong newtls
,
4654 abi_ulong child_tidptr
)
4656 CPUState
*cpu
= ENV_GET_CPU(env
);
4660 CPUArchState
*new_env
;
4661 unsigned int nptl_flags
;
4664 /* Emulate vfork() with fork() */
4665 if (flags
& CLONE_VFORK
)
4666 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4668 if (flags
& CLONE_VM
) {
4669 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
4670 new_thread_info info
;
4671 pthread_attr_t attr
;
4673 ts
= g_new0(TaskState
, 1);
4674 init_task_state(ts
);
4675 /* we create a new CPU instance. */
4676 new_env
= cpu_copy(env
);
4677 /* Init regs that differ from the parent. */
4678 cpu_clone_regs(new_env
, newsp
);
4679 new_cpu
= ENV_GET_CPU(new_env
);
4680 new_cpu
->opaque
= ts
;
4681 ts
->bprm
= parent_ts
->bprm
;
4682 ts
->info
= parent_ts
->info
;
4684 flags
&= ~CLONE_NPTL_FLAGS2
;
4686 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4687 ts
->child_tidptr
= child_tidptr
;
4690 if (nptl_flags
& CLONE_SETTLS
)
4691 cpu_set_tls (new_env
, newtls
);
4693 /* Grab a mutex so that thread setup appears atomic. */
4694 pthread_mutex_lock(&clone_lock
);
4696 memset(&info
, 0, sizeof(info
));
4697 pthread_mutex_init(&info
.mutex
, NULL
);
4698 pthread_mutex_lock(&info
.mutex
);
4699 pthread_cond_init(&info
.cond
, NULL
);
4701 if (nptl_flags
& CLONE_CHILD_SETTID
)
4702 info
.child_tidptr
= child_tidptr
;
4703 if (nptl_flags
& CLONE_PARENT_SETTID
)
4704 info
.parent_tidptr
= parent_tidptr
;
4706 ret
= pthread_attr_init(&attr
);
4707 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4708 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4709 /* It is not safe to deliver signals until the child has finished
4710 initializing, so temporarily block all signals. */
4711 sigfillset(&sigmask
);
4712 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4714 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4715 /* TODO: Free new CPU state if thread creation failed. */
4717 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4718 pthread_attr_destroy(&attr
);
4720 /* Wait for the child to initialize. */
4721 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4723 if (flags
& CLONE_PARENT_SETTID
)
4724 put_user_u32(ret
, parent_tidptr
);
4728 pthread_mutex_unlock(&info
.mutex
);
4729 pthread_cond_destroy(&info
.cond
);
4730 pthread_mutex_destroy(&info
.mutex
);
4731 pthread_mutex_unlock(&clone_lock
);
4733 /* if no CLONE_VM, we consider it is a fork */
4734 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
4735 return -TARGET_EINVAL
;
4740 /* Child Process. */
4742 cpu_clone_regs(env
, newsp
);
4744 /* There is a race condition here. The parent process could
4745 theoretically read the TID in the child process before the child
4746 tid is set. This would require using either ptrace
4747 (not implemented) or having *_tidptr to point at a shared memory
4748 mapping. We can't repeat the spinlock hack used above because
4749 the child process gets its own copy of the lock. */
4750 if (flags
& CLONE_CHILD_SETTID
)
4751 put_user_u32(gettid(), child_tidptr
);
4752 if (flags
& CLONE_PARENT_SETTID
)
4753 put_user_u32(gettid(), parent_tidptr
);
4754 ts
= (TaskState
*)cpu
->opaque
;
4755 if (flags
& CLONE_SETTLS
)
4756 cpu_set_tls (env
, newtls
);
4757 if (flags
& CLONE_CHILD_CLEARTID
)
4758 ts
->child_tidptr
= child_tidptr
;
4766 /* warning : doesn't handle linux specific flags... */
4767 static int target_to_host_fcntl_cmd(int cmd
)
4770 case TARGET_F_DUPFD
:
4771 case TARGET_F_GETFD
:
4772 case TARGET_F_SETFD
:
4773 case TARGET_F_GETFL
:
4774 case TARGET_F_SETFL
:
4776 case TARGET_F_GETLK
:
4778 case TARGET_F_SETLK
:
4780 case TARGET_F_SETLKW
:
4782 case TARGET_F_GETOWN
:
4784 case TARGET_F_SETOWN
:
4786 case TARGET_F_GETSIG
:
4788 case TARGET_F_SETSIG
:
4790 #if TARGET_ABI_BITS == 32
4791 case TARGET_F_GETLK64
:
4793 case TARGET_F_SETLK64
:
4795 case TARGET_F_SETLKW64
:
4798 case TARGET_F_SETLEASE
:
4800 case TARGET_F_GETLEASE
:
4802 #ifdef F_DUPFD_CLOEXEC
4803 case TARGET_F_DUPFD_CLOEXEC
:
4804 return F_DUPFD_CLOEXEC
;
4806 case TARGET_F_NOTIFY
:
4809 case TARGET_F_GETOWN_EX
:
4813 case TARGET_F_SETOWN_EX
:
4817 return -TARGET_EINVAL
;
4819 return -TARGET_EINVAL
;
4822 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4823 static const bitmask_transtbl flock_tbl
[] = {
4824 TRANSTBL_CONVERT(F_RDLCK
),
4825 TRANSTBL_CONVERT(F_WRLCK
),
4826 TRANSTBL_CONVERT(F_UNLCK
),
4827 TRANSTBL_CONVERT(F_EXLCK
),
4828 TRANSTBL_CONVERT(F_SHLCK
),
4832 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4835 struct target_flock
*target_fl
;
4836 struct flock64 fl64
;
4837 struct target_flock64
*target_fl64
;
4839 struct f_owner_ex fox
;
4840 struct target_f_owner_ex
*target_fox
;
4843 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4845 if (host_cmd
== -TARGET_EINVAL
)
4849 case TARGET_F_GETLK
:
4850 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4851 return -TARGET_EFAULT
;
4853 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4854 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4855 fl
.l_start
= tswapal(target_fl
->l_start
);
4856 fl
.l_len
= tswapal(target_fl
->l_len
);
4857 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4858 unlock_user_struct(target_fl
, arg
, 0);
4859 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4861 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4862 return -TARGET_EFAULT
;
4864 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4865 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4866 target_fl
->l_start
= tswapal(fl
.l_start
);
4867 target_fl
->l_len
= tswapal(fl
.l_len
);
4868 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4869 unlock_user_struct(target_fl
, arg
, 1);
4873 case TARGET_F_SETLK
:
4874 case TARGET_F_SETLKW
:
4875 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4876 return -TARGET_EFAULT
;
4878 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4879 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4880 fl
.l_start
= tswapal(target_fl
->l_start
);
4881 fl
.l_len
= tswapal(target_fl
->l_len
);
4882 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4883 unlock_user_struct(target_fl
, arg
, 0);
4884 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4887 case TARGET_F_GETLK64
:
4888 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4889 return -TARGET_EFAULT
;
4891 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4892 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4893 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4894 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4895 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4896 unlock_user_struct(target_fl64
, arg
, 0);
4897 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4899 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4900 return -TARGET_EFAULT
;
4901 target_fl64
->l_type
=
4902 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4903 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4904 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4905 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4906 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4907 unlock_user_struct(target_fl64
, arg
, 1);
4910 case TARGET_F_SETLK64
:
4911 case TARGET_F_SETLKW64
:
4912 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4913 return -TARGET_EFAULT
;
4915 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4916 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4917 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4918 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4919 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4920 unlock_user_struct(target_fl64
, arg
, 0);
4921 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4924 case TARGET_F_GETFL
:
4925 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4927 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4931 case TARGET_F_SETFL
:
4932 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4936 case TARGET_F_GETOWN_EX
:
4937 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4939 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
4940 return -TARGET_EFAULT
;
4941 target_fox
->type
= tswap32(fox
.type
);
4942 target_fox
->pid
= tswap32(fox
.pid
);
4943 unlock_user_struct(target_fox
, arg
, 1);
4949 case TARGET_F_SETOWN_EX
:
4950 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
4951 return -TARGET_EFAULT
;
4952 fox
.type
= tswap32(target_fox
->type
);
4953 fox
.pid
= tswap32(target_fox
->pid
);
4954 unlock_user_struct(target_fox
, arg
, 0);
4955 ret
= get_errno(fcntl(fd
, host_cmd
, &fox
));
4959 case TARGET_F_SETOWN
:
4960 case TARGET_F_GETOWN
:
4961 case TARGET_F_SETSIG
:
4962 case TARGET_F_GETSIG
:
4963 case TARGET_F_SETLEASE
:
4964 case TARGET_F_GETLEASE
:
4965 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4969 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4977 static inline int high2lowuid(int uid
)
4985 static inline int high2lowgid(int gid
)
4993 static inline int low2highuid(int uid
)
4995 if ((int16_t)uid
== -1)
5001 static inline int low2highgid(int gid
)
5003 if ((int16_t)gid
== -1)
5008 static inline int tswapid(int id
)
5013 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5015 #else /* !USE_UID16 */
5016 static inline int high2lowuid(int uid
)
5020 static inline int high2lowgid(int gid
)
5024 static inline int low2highuid(int uid
)
5028 static inline int low2highgid(int gid
)
5032 static inline int tswapid(int id
)
5037 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5039 #endif /* USE_UID16 */
5041 void syscall_init(void)
5044 const argtype
*arg_type
;
5048 thunk_init(STRUCT_MAX
);
5050 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5051 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5052 #include "syscall_types.h"
5054 #undef STRUCT_SPECIAL
5056 /* Build target_to_host_errno_table[] table from
5057 * host_to_target_errno_table[]. */
5058 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
5059 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
5062 /* we patch the ioctl size if necessary. We rely on the fact that
5063 no ioctl has all the bits at '1' in the size field */
5065 while (ie
->target_cmd
!= 0) {
5066 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
5067 TARGET_IOC_SIZEMASK
) {
5068 arg_type
= ie
->arg_type
;
5069 if (arg_type
[0] != TYPE_PTR
) {
5070 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
5075 size
= thunk_type_size(arg_type
, 0);
5076 ie
->target_cmd
= (ie
->target_cmd
&
5077 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
5078 (size
<< TARGET_IOC_SIZESHIFT
);
5081 /* automatic consistency check if same arch */
5082 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5083 (defined(__x86_64__) && defined(TARGET_X86_64))
5084 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
5085 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5086 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
5093 #if TARGET_ABI_BITS == 32
/*
 * Combine the two 32-bit registers a 32-bit target ABI uses to pass a
 * 64-bit file offset into a single host uint64_t.  Which register holds
 * the high half depends on the target's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
5102 #else /* TARGET_ABI_BITS == 32 */
5103 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
5107 #endif /* TARGET_ABI_BITS != 32 */
5109 #ifdef TARGET_NR_truncate64
5110 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
5115 if (regpairs_aligned(cpu_env
)) {
5119 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
5123 #ifdef TARGET_NR_ftruncate64
5124 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
5129 if (regpairs_aligned(cpu_env
)) {
5133 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
5137 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
5138 abi_ulong target_addr
)
5140 struct target_timespec
*target_ts
;
5142 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
5143 return -TARGET_EFAULT
;
5144 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
5145 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
5146 unlock_user_struct(target_ts
, target_addr
, 0);
5150 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
5151 struct timespec
*host_ts
)
5153 struct target_timespec
*target_ts
;
5155 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
5156 return -TARGET_EFAULT
;
5157 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
5158 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
5159 unlock_user_struct(target_ts
, target_addr
, 1);
5163 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
5164 abi_ulong target_addr
)
5166 struct target_itimerspec
*target_itspec
;
5168 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
5169 return -TARGET_EFAULT
;
5172 host_itspec
->it_interval
.tv_sec
=
5173 tswapal(target_itspec
->it_interval
.tv_sec
);
5174 host_itspec
->it_interval
.tv_nsec
=
5175 tswapal(target_itspec
->it_interval
.tv_nsec
);
5176 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
5177 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
5179 unlock_user_struct(target_itspec
, target_addr
, 1);
5183 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
5184 struct itimerspec
*host_its
)
5186 struct target_itimerspec
*target_itspec
;
5188 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
5189 return -TARGET_EFAULT
;
5192 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
5193 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
5195 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
5196 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
5198 unlock_user_struct(target_itspec
, target_addr
, 0);
5202 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
5203 abi_ulong target_addr
)
5205 struct target_sigevent
*target_sevp
;
5207 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
5208 return -TARGET_EFAULT
;
5211 /* This union is awkward on 64 bit systems because it has a 32 bit
5212 * integer and a pointer in it; we follow the conversion approach
5213 * used for handling sigval types in signal.c so the guest should get
5214 * the correct value back even if we did a 64 bit byteswap and it's
5215 * using the 32 bit integer.
5217 host_sevp
->sigev_value
.sival_ptr
=
5218 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
5219 host_sevp
->sigev_signo
=
5220 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
5221 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
5222 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
5224 unlock_user_struct(target_sevp
, target_addr
, 1);
5228 #if defined(TARGET_NR_mlockall)
5229 static inline int target_to_host_mlockall_arg(int arg
)
5233 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
5234 result
|= MCL_CURRENT
;
5236 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
5237 result
|= MCL_FUTURE
;
5243 static inline abi_long
host_to_target_stat64(void *cpu_env
,
5244 abi_ulong target_addr
,
5245 struct stat
*host_st
)
5247 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5248 if (((CPUARMState
*)cpu_env
)->eabi
) {
5249 struct target_eabi_stat64
*target_st
;
5251 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5252 return -TARGET_EFAULT
;
5253 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
5254 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5255 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5256 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5257 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5259 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5260 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5261 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5262 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5263 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5264 __put_user(host_st
->st_size
, &target_st
->st_size
);
5265 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5266 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5267 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5268 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5269 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5270 unlock_user_struct(target_st
, target_addr
, 1);
5274 #if defined(TARGET_HAS_STRUCT_STAT64)
5275 struct target_stat64
*target_st
;
5277 struct target_stat
*target_st
;
5280 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
5281 return -TARGET_EFAULT
;
5282 memset(target_st
, 0, sizeof(*target_st
));
5283 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
5284 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
5285 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5286 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
5288 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
5289 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
5290 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
5291 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
5292 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
5293 /* XXX: better use of kernel struct */
5294 __put_user(host_st
->st_size
, &target_st
->st_size
);
5295 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
5296 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
5297 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
5298 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
5299 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
5300 unlock_user_struct(target_st
, target_addr
, 1);
5306 /* ??? Using host futex calls even when target atomic operations
5307 are not really atomic probably breaks things. However implementing
5308 futexes locally would make futexes shared between multiple processes
5309 tricky. However they're probably useless because guest atomic
5310 operations won't work either. */
5311 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
5312 target_ulong uaddr2
, int val3
)
5314 struct timespec ts
, *pts
;
5317 /* ??? We assume FUTEX_* constants are the same on both host
5319 #ifdef FUTEX_CMD_MASK
5320 base_op
= op
& FUTEX_CMD_MASK
;
5326 case FUTEX_WAIT_BITSET
:
5329 target_to_host_timespec(pts
, timeout
);
5333 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
5336 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5338 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
5340 case FUTEX_CMP_REQUEUE
:
5342 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5343 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5344 But the prototype takes a `struct timespec *'; insert casts
5345 to satisfy the compiler. We do not need to tswap TIMEOUT
5346 since it's not compared to guest memory. */
5347 pts
= (struct timespec
*)(uintptr_t) timeout
;
5348 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
5350 (base_op
== FUTEX_CMP_REQUEUE
5354 return -TARGET_ENOSYS
;
5357 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5358 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
5359 abi_long handle
, abi_long mount_id
,
5362 struct file_handle
*target_fh
;
5363 struct file_handle
*fh
;
5367 unsigned int size
, total_size
;
5369 if (get_user_s32(size
, handle
)) {
5370 return -TARGET_EFAULT
;
5373 name
= lock_user_string(pathname
);
5375 return -TARGET_EFAULT
;
5378 total_size
= sizeof(struct file_handle
) + size
;
5379 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
5381 unlock_user(name
, pathname
, 0);
5382 return -TARGET_EFAULT
;
5385 fh
= g_malloc0(total_size
);
5386 fh
->handle_bytes
= size
;
5388 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
5389 unlock_user(name
, pathname
, 0);
5391 /* man name_to_handle_at(2):
5392 * Other than the use of the handle_bytes field, the caller should treat
5393 * the file_handle structure as an opaque data type
5396 memcpy(target_fh
, fh
, total_size
);
5397 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
5398 target_fh
->handle_type
= tswap32(fh
->handle_type
);
5400 unlock_user(target_fh
, handle
, total_size
);
5402 if (put_user_s32(mid
, mount_id
)) {
5403 return -TARGET_EFAULT
;
5411 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5412 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
5415 struct file_handle
*target_fh
;
5416 struct file_handle
*fh
;
5417 unsigned int size
, total_size
;
5420 if (get_user_s32(size
, handle
)) {
5421 return -TARGET_EFAULT
;
5424 total_size
= sizeof(struct file_handle
) + size
;
5425 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
5427 return -TARGET_EFAULT
;
5430 fh
= g_memdup(target_fh
, total_size
);
5431 fh
->handle_bytes
= size
;
5432 fh
->handle_type
= tswap32(target_fh
->handle_type
);
5434 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
5435 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
5439 unlock_user(target_fh
, handle
, total_size
);
5445 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5447 /* signalfd siginfo conversion */
/*
 * Convert a host struct signalfd_siginfo into target byte order and
 * signal numbering, writing the result to *TINFO.  The sole caller
 * passes the same buffer for TINFO and INFO (in-place conversion).
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    /* Fix: read the untranslated values through INFO (the source); the
     * original read them through TINFO, which only worked because the
     * caller aliases the two pointers. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: was tswap32(tinfo->ssi_errno) — every other field reads INFO. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
5487 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
5491 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
5492 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
5498 static TargetFdTrans target_signalfd_trans
= {
5499 .host_to_target_data
= host_to_target_data_signalfd
,
5502 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
5505 target_sigset_t
*target_mask
;
5509 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
5510 return -TARGET_EINVAL
;
5512 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
5513 return -TARGET_EFAULT
;
5516 target_to_host_sigset(&host_mask
, target_mask
);
5518 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
5520 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
5522 fd_trans_register(ret
, &target_signalfd_trans
);
5525 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination by signal: translate the low 7 signal bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stopped: the stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
5545 static int open_self_cmdline(void *cpu_env
, int fd
)
5548 bool word_skipped
= false;
5550 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
5560 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
5562 fd_orig
= close(fd_orig
);
5564 } else if (nb_read
== 0) {
5568 if (!word_skipped
) {
5569 /* Skip the first string, which is the path to qemu-*-static
5570 instead of the actual command. */
5571 cp_buf
= memchr(buf
, 0, sizeof(buf
));
5573 /* Null byte found, skip one string */
5575 nb_read
-= cp_buf
- buf
;
5576 word_skipped
= true;
5581 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
5588 return close(fd_orig
);
5591 static int open_self_maps(void *cpu_env
, int fd
)
5593 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5594 TaskState
*ts
= cpu
->opaque
;
5600 fp
= fopen("/proc/self/maps", "r");
5605 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5606 int fields
, dev_maj
, dev_min
, inode
;
5607 uint64_t min
, max
, offset
;
5608 char flag_r
, flag_w
, flag_x
, flag_p
;
5609 char path
[512] = "";
5610 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5611 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5612 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
5614 if ((fields
< 10) || (fields
> 11)) {
5617 if (h2g_valid(min
)) {
5618 int flags
= page_get_flags(h2g(min
));
5619 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
5620 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
5623 if (h2g(min
) == ts
->info
->stack_limit
) {
5624 pstrcpy(path
, sizeof(path
), " [stack]");
5626 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5627 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
5628 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
5629 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5630 path
[0] ? " " : "", path
);
5640 static int open_self_stat(void *cpu_env
, int fd
)
5642 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5643 TaskState
*ts
= cpu
->opaque
;
5644 abi_ulong start_stack
= ts
->info
->start_stack
;
5647 for (i
= 0; i
< 44; i
++) {
5655 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5656 } else if (i
== 1) {
5658 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5659 } else if (i
== 27) {
5662 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5664 /* for the rest, there is MasterCard */
5665 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5669 if (write(fd
, buf
, len
) != len
) {
5677 static int open_self_auxv(void *cpu_env
, int fd
)
5679 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
5680 TaskState
*ts
= cpu
->opaque
;
5681 abi_ulong auxv
= ts
->info
->saved_auxv
;
5682 abi_ulong len
= ts
->info
->auxv_len
;
5686 * Auxiliary vector is stored in target process stack.
5687 * read in whole auxv vector and copy it to file
5689 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5693 r
= write(fd
, ptr
, len
);
5700 lseek(fd
, 0, SEEK_SET
);
5701 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when FILENAME names ENTRY inside this process's own /proc
 * directory, i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY";
 * return 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
5731 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Return nonzero when FILENAME is exactly the string ENTRY. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
5737 static int open_net_route(void *cpu_env
, int fd
)
5744 fp
= fopen("/proc/net/route", "r");
5751 read
= getline(&line
, &len
, fp
);
5752 dprintf(fd
, "%s", line
);
5756 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5758 uint32_t dest
, gw
, mask
;
5759 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
5760 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5761 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
5762 &mask
, &mtu
, &window
, &irtt
);
5763 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5764 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
5765 metric
, tswap32(mask
), mtu
, window
, irtt
);
5775 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
5778 const char *filename
;
5779 int (*fill
)(void *cpu_env
, int fd
);
5780 int (*cmp
)(const char *s1
, const char *s2
);
5782 const struct fake_open
*fake_open
;
5783 static const struct fake_open fakes
[] = {
5784 { "maps", open_self_maps
, is_proc_myself
},
5785 { "stat", open_self_stat
, is_proc_myself
},
5786 { "auxv", open_self_auxv
, is_proc_myself
},
5787 { "cmdline", open_self_cmdline
, is_proc_myself
},
5788 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5789 { "/proc/net/route", open_net_route
, is_proc
},
5791 { NULL
, NULL
, NULL
}
5794 if (is_proc_myself(pathname
, "exe")) {
5795 int execfd
= qemu_getauxval(AT_EXECFD
);
5796 return execfd
? execfd
: get_errno(sys_openat(dirfd
, exec_path
, flags
, mode
));
5799 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5800 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
5805 if (fake_open
->filename
) {
5807 char filename
[PATH_MAX
];
5810 /* create temporary file to map stat to */
5811 tmpdir
= getenv("TMPDIR");
5814 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5815 fd
= mkstemp(filename
);
5821 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5825 lseek(fd
, 0, SEEK_SET
);
5830 return get_errno(sys_openat(dirfd
, path(pathname
), flags
, mode
));
5833 #define TIMER_MAGIC 0x0caf0000
5834 #define TIMER_MAGIC_MASK 0xffff0000
5836 /* Convert QEMU provided timer ID back to internal 16bit index format */
5837 static target_timer_t
get_timer_id(abi_long arg
)
5839 target_timer_t timerid
= arg
;
5841 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
5842 return -TARGET_EINVAL
;
5847 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
5848 return -TARGET_EINVAL
;
5854 /* do_syscall() should always have a single exit point at the end so
5855 that actions, such as logging of syscall results, can be performed.
5856 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5857 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5858 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5859 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5862 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5869 gemu_log("syscall %d", num
);
5872 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5875 case TARGET_NR_exit
:
5876 /* In old applications this may be used to implement _exit(2).
5877 However in threaded applictions it is used for thread termination,
5878 and _exit_group is used for application termination.
5879 Do thread termination if we have more then one thread. */
5880 /* FIXME: This probably breaks if a signal arrives. We should probably
5881 be disabling signals. */
5882 if (CPU_NEXT(first_cpu
)) {
5886 /* Remove the CPU from the list. */
5887 QTAILQ_REMOVE(&cpus
, cpu
, node
);
5890 if (ts
->child_tidptr
) {
5891 put_user_u32(0, ts
->child_tidptr
);
5892 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5896 object_unref(OBJECT(cpu
));
5898 rcu_unregister_thread();
5904 gdb_exit(cpu_env
, arg1
);
5906 ret
= 0; /* avoid warning */
5908 case TARGET_NR_read
:
5912 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5914 ret
= get_errno(read(arg1
, p
, arg3
));
5916 fd_trans_host_to_target_data(arg1
)) {
5917 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
5919 unlock_user(p
, arg2
, ret
);
5922 case TARGET_NR_write
:
5923 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5925 ret
= get_errno(write(arg1
, p
, arg3
));
5926 unlock_user(p
, arg2
, 0);
5928 #ifdef TARGET_NR_open
5929 case TARGET_NR_open
:
5930 if (!(p
= lock_user_string(arg1
)))
5932 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
5933 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5935 fd_trans_unregister(ret
);
5936 unlock_user(p
, arg1
, 0);
5939 case TARGET_NR_openat
:
5940 if (!(p
= lock_user_string(arg2
)))
5942 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
5943 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5945 fd_trans_unregister(ret
);
5946 unlock_user(p
, arg2
, 0);
5948 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5949 case TARGET_NR_name_to_handle_at
:
5950 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
5953 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5954 case TARGET_NR_open_by_handle_at
:
5955 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
5956 fd_trans_unregister(ret
);
5959 case TARGET_NR_close
:
5960 fd_trans_unregister(arg1
);
5961 ret
= get_errno(close(arg1
));
5966 #ifdef TARGET_NR_fork
5967 case TARGET_NR_fork
:
5968 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5971 #ifdef TARGET_NR_waitpid
5972 case TARGET_NR_waitpid
:
5975 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5976 if (!is_error(ret
) && arg2
&& ret
5977 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5982 #ifdef TARGET_NR_waitid
5983 case TARGET_NR_waitid
:
5987 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5988 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5989 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5991 host_to_target_siginfo(p
, &info
);
5992 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5997 #ifdef TARGET_NR_creat /* not on alpha */
5998 case TARGET_NR_creat
:
5999 if (!(p
= lock_user_string(arg1
)))
6001 ret
= get_errno(creat(p
, arg2
));
6002 fd_trans_unregister(ret
);
6003 unlock_user(p
, arg1
, 0);
6006 #ifdef TARGET_NR_link
6007 case TARGET_NR_link
:
6010 p
= lock_user_string(arg1
);
6011 p2
= lock_user_string(arg2
);
6013 ret
= -TARGET_EFAULT
;
6015 ret
= get_errno(link(p
, p2
));
6016 unlock_user(p2
, arg2
, 0);
6017 unlock_user(p
, arg1
, 0);
6021 #if defined(TARGET_NR_linkat)
6022 case TARGET_NR_linkat
:
6027 p
= lock_user_string(arg2
);
6028 p2
= lock_user_string(arg4
);
6030 ret
= -TARGET_EFAULT
;
6032 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
6033 unlock_user(p
, arg2
, 0);
6034 unlock_user(p2
, arg4
, 0);
6038 #ifdef TARGET_NR_unlink
6039 case TARGET_NR_unlink
:
6040 if (!(p
= lock_user_string(arg1
)))
6042 ret
= get_errno(unlink(p
));
6043 unlock_user(p
, arg1
, 0);
6046 #if defined(TARGET_NR_unlinkat)
6047 case TARGET_NR_unlinkat
:
6048 if (!(p
= lock_user_string(arg2
)))
6050 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
6051 unlock_user(p
, arg2
, 0);
6054 case TARGET_NR_execve
:
6056 char **argp
, **envp
;
6059 abi_ulong guest_argp
;
6060 abi_ulong guest_envp
;
6067 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
6068 if (get_user_ual(addr
, gp
))
6076 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
6077 if (get_user_ual(addr
, gp
))
6084 argp
= alloca((argc
+ 1) * sizeof(void *));
6085 envp
= alloca((envc
+ 1) * sizeof(void *));
6087 for (gp
= guest_argp
, q
= argp
; gp
;
6088 gp
+= sizeof(abi_ulong
), q
++) {
6089 if (get_user_ual(addr
, gp
))
6093 if (!(*q
= lock_user_string(addr
)))
6095 total_size
+= strlen(*q
) + 1;
6099 for (gp
= guest_envp
, q
= envp
; gp
;
6100 gp
+= sizeof(abi_ulong
), q
++) {
6101 if (get_user_ual(addr
, gp
))
6105 if (!(*q
= lock_user_string(addr
)))
6107 total_size
+= strlen(*q
) + 1;
6111 if (!(p
= lock_user_string(arg1
)))
6113 ret
= get_errno(execve(p
, argp
, envp
));
6114 unlock_user(p
, arg1
, 0);
6119 ret
= -TARGET_EFAULT
;
6122 for (gp
= guest_argp
, q
= argp
; *q
;
6123 gp
+= sizeof(abi_ulong
), q
++) {
6124 if (get_user_ual(addr
, gp
)
6127 unlock_user(*q
, addr
, 0);
6129 for (gp
= guest_envp
, q
= envp
; *q
;
6130 gp
+= sizeof(abi_ulong
), q
++) {
6131 if (get_user_ual(addr
, gp
)
6134 unlock_user(*q
, addr
, 0);
6138 case TARGET_NR_chdir
:
6139 if (!(p
= lock_user_string(arg1
)))
6141 ret
= get_errno(chdir(p
));
6142 unlock_user(p
, arg1
, 0);
6144 #ifdef TARGET_NR_time
6145 case TARGET_NR_time
:
6148 ret
= get_errno(time(&host_time
));
6151 && put_user_sal(host_time
, arg1
))
6156 #ifdef TARGET_NR_mknod
6157 case TARGET_NR_mknod
:
6158 if (!(p
= lock_user_string(arg1
)))
6160 ret
= get_errno(mknod(p
, arg2
, arg3
));
6161 unlock_user(p
, arg1
, 0);
6164 #if defined(TARGET_NR_mknodat)
6165 case TARGET_NR_mknodat
:
6166 if (!(p
= lock_user_string(arg2
)))
6168 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
6169 unlock_user(p
, arg2
, 0);
6172 #ifdef TARGET_NR_chmod
6173 case TARGET_NR_chmod
:
6174 if (!(p
= lock_user_string(arg1
)))
6176 ret
= get_errno(chmod(p
, arg2
));
6177 unlock_user(p
, arg1
, 0);
6180 #ifdef TARGET_NR_break
6181 case TARGET_NR_break
:
6184 #ifdef TARGET_NR_oldstat
6185 case TARGET_NR_oldstat
:
6188 case TARGET_NR_lseek
:
6189 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
6191 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6192 /* Alpha specific */
6193 case TARGET_NR_getxpid
:
6194 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
6195 ret
= get_errno(getpid());
6198 #ifdef TARGET_NR_getpid
6199 case TARGET_NR_getpid
:
6200 ret
= get_errno(getpid());
6203 case TARGET_NR_mount
:
6205 /* need to look at the data field */
6209 p
= lock_user_string(arg1
);
6217 p2
= lock_user_string(arg2
);
6220 unlock_user(p
, arg1
, 0);
6226 p3
= lock_user_string(arg3
);
6229 unlock_user(p
, arg1
, 0);
6231 unlock_user(p2
, arg2
, 0);
6238 /* FIXME - arg5 should be locked, but it isn't clear how to
6239 * do that since it's not guaranteed to be a NULL-terminated
6243 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
6245 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
6247 ret
= get_errno(ret
);
6250 unlock_user(p
, arg1
, 0);
6252 unlock_user(p2
, arg2
, 0);
6254 unlock_user(p3
, arg3
, 0);
6258 #ifdef TARGET_NR_umount
6259 case TARGET_NR_umount
:
6260 if (!(p
= lock_user_string(arg1
)))
6262 ret
= get_errno(umount(p
));
6263 unlock_user(p
, arg1
, 0);
6266 #ifdef TARGET_NR_stime /* not on alpha */
6267 case TARGET_NR_stime
:
6270 if (get_user_sal(host_time
, arg1
))
6272 ret
= get_errno(stime(&host_time
));
6276 case TARGET_NR_ptrace
:
6278 #ifdef TARGET_NR_alarm /* not on alpha */
6279 case TARGET_NR_alarm
:
6283 #ifdef TARGET_NR_oldfstat
6284 case TARGET_NR_oldfstat
:
6287 #ifdef TARGET_NR_pause /* not on alpha */
6288 case TARGET_NR_pause
:
6289 ret
= get_errno(pause());
6292 #ifdef TARGET_NR_utime
6293 case TARGET_NR_utime
:
6295 struct utimbuf tbuf
, *host_tbuf
;
6296 struct target_utimbuf
*target_tbuf
;
6298 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
6300 tbuf
.actime
= tswapal(target_tbuf
->actime
);
6301 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
6302 unlock_user_struct(target_tbuf
, arg2
, 0);
6307 if (!(p
= lock_user_string(arg1
)))
6309 ret
= get_errno(utime(p
, host_tbuf
));
6310 unlock_user(p
, arg1
, 0);
6314 #ifdef TARGET_NR_utimes
6315 case TARGET_NR_utimes
:
6317 struct timeval
*tvp
, tv
[2];
6319 if (copy_from_user_timeval(&tv
[0], arg2
)
6320 || copy_from_user_timeval(&tv
[1],
6321 arg2
+ sizeof(struct target_timeval
)))
6327 if (!(p
= lock_user_string(arg1
)))
6329 ret
= get_errno(utimes(p
, tvp
));
6330 unlock_user(p
, arg1
, 0);
6334 #if defined(TARGET_NR_futimesat)
6335 case TARGET_NR_futimesat
:
6337 struct timeval
*tvp
, tv
[2];
6339 if (copy_from_user_timeval(&tv
[0], arg3
)
6340 || copy_from_user_timeval(&tv
[1],
6341 arg3
+ sizeof(struct target_timeval
)))
6347 if (!(p
= lock_user_string(arg2
)))
6349 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
6350 unlock_user(p
, arg2
, 0);
6354 #ifdef TARGET_NR_stty
6355 case TARGET_NR_stty
:
6358 #ifdef TARGET_NR_gtty
6359 case TARGET_NR_gtty
:
6362 #ifdef TARGET_NR_access
6363 case TARGET_NR_access
:
6364 if (!(p
= lock_user_string(arg1
)))
6366 ret
= get_errno(access(path(p
), arg2
));
6367 unlock_user(p
, arg1
, 0);
6370 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6371 case TARGET_NR_faccessat
:
6372 if (!(p
= lock_user_string(arg2
)))
6374 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
6375 unlock_user(p
, arg2
, 0);
6378 #ifdef TARGET_NR_nice /* not on alpha */
6379 case TARGET_NR_nice
:
6380 ret
= get_errno(nice(arg1
));
6383 #ifdef TARGET_NR_ftime
6384 case TARGET_NR_ftime
:
6387 case TARGET_NR_sync
:
6391 case TARGET_NR_kill
:
6392 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
6394 #ifdef TARGET_NR_rename
6395 case TARGET_NR_rename
:
6398 p
= lock_user_string(arg1
);
6399 p2
= lock_user_string(arg2
);
6401 ret
= -TARGET_EFAULT
;
6403 ret
= get_errno(rename(p
, p2
));
6404 unlock_user(p2
, arg2
, 0);
6405 unlock_user(p
, arg1
, 0);
6409 #if defined(TARGET_NR_renameat)
6410 case TARGET_NR_renameat
:
6413 p
= lock_user_string(arg2
);
6414 p2
= lock_user_string(arg4
);
6416 ret
= -TARGET_EFAULT
;
6418 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
6419 unlock_user(p2
, arg4
, 0);
6420 unlock_user(p
, arg2
, 0);
6424 #ifdef TARGET_NR_mkdir
6425 case TARGET_NR_mkdir
:
6426 if (!(p
= lock_user_string(arg1
)))
6428 ret
= get_errno(mkdir(p
, arg2
));
6429 unlock_user(p
, arg1
, 0);
6432 #if defined(TARGET_NR_mkdirat)
6433 case TARGET_NR_mkdirat
:
6434 if (!(p
= lock_user_string(arg2
)))
6436 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
6437 unlock_user(p
, arg2
, 0);
6440 #ifdef TARGET_NR_rmdir
6441 case TARGET_NR_rmdir
:
6442 if (!(p
= lock_user_string(arg1
)))
6444 ret
= get_errno(rmdir(p
));
6445 unlock_user(p
, arg1
, 0);
6449 ret
= get_errno(dup(arg1
));
6451 fd_trans_dup(arg1
, ret
);
6454 #ifdef TARGET_NR_pipe
6455 case TARGET_NR_pipe
:
6456 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
6459 #ifdef TARGET_NR_pipe2
6460 case TARGET_NR_pipe2
:
6461 ret
= do_pipe(cpu_env
, arg1
,
6462 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
6465 case TARGET_NR_times
:
6467 struct target_tms
*tmsp
;
6469 ret
= get_errno(times(&tms
));
6471 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
6474 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
6475 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
6476 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
6477 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
6480 ret
= host_to_target_clock_t(ret
);
6483 #ifdef TARGET_NR_prof
6484 case TARGET_NR_prof
:
6487 #ifdef TARGET_NR_signal
6488 case TARGET_NR_signal
:
6491 case TARGET_NR_acct
:
6493 ret
= get_errno(acct(NULL
));
6495 if (!(p
= lock_user_string(arg1
)))
6497 ret
= get_errno(acct(path(p
)));
6498 unlock_user(p
, arg1
, 0);
6501 #ifdef TARGET_NR_umount2
6502 case TARGET_NR_umount2
:
6503 if (!(p
= lock_user_string(arg1
)))
6505 ret
= get_errno(umount2(p
, arg2
));
6506 unlock_user(p
, arg1
, 0);
6509 #ifdef TARGET_NR_lock
6510 case TARGET_NR_lock
:
6513 case TARGET_NR_ioctl
:
6514 ret
= do_ioctl(arg1
, arg2
, arg3
);
6516 case TARGET_NR_fcntl
:
6517 ret
= do_fcntl(arg1
, arg2
, arg3
);
6519 #ifdef TARGET_NR_mpx
6523 case TARGET_NR_setpgid
:
6524 ret
= get_errno(setpgid(arg1
, arg2
));
6526 #ifdef TARGET_NR_ulimit
6527 case TARGET_NR_ulimit
:
6530 #ifdef TARGET_NR_oldolduname
6531 case TARGET_NR_oldolduname
:
6534 case TARGET_NR_umask
:
6535 ret
= get_errno(umask(arg1
));
6537 case TARGET_NR_chroot
:
6538 if (!(p
= lock_user_string(arg1
)))
6540 ret
= get_errno(chroot(p
));
6541 unlock_user(p
, arg1
, 0);
6543 #ifdef TARGET_NR_ustat
6544 case TARGET_NR_ustat
:
6547 #ifdef TARGET_NR_dup2
6548 case TARGET_NR_dup2
:
6549 ret
= get_errno(dup2(arg1
, arg2
));
6551 fd_trans_dup(arg1
, arg2
);
6555 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6556 case TARGET_NR_dup3
:
6557 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
6559 fd_trans_dup(arg1
, arg2
);
6563 #ifdef TARGET_NR_getppid /* not on alpha */
6564 case TARGET_NR_getppid
:
6565 ret
= get_errno(getppid());
6568 #ifdef TARGET_NR_getpgrp
6569 case TARGET_NR_getpgrp
:
6570 ret
= get_errno(getpgrp());
6573 case TARGET_NR_setsid
:
6574 ret
= get_errno(setsid());
6576 #ifdef TARGET_NR_sigaction
6577 case TARGET_NR_sigaction
:
6579 #if defined(TARGET_ALPHA)
6580 struct target_sigaction act
, oact
, *pact
= 0;
6581 struct target_old_sigaction
*old_act
;
6583 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6585 act
._sa_handler
= old_act
->_sa_handler
;
6586 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6587 act
.sa_flags
= old_act
->sa_flags
;
6588 act
.sa_restorer
= 0;
6589 unlock_user_struct(old_act
, arg2
, 0);
6592 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6593 if (!is_error(ret
) && arg3
) {
6594 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6596 old_act
->_sa_handler
= oact
._sa_handler
;
6597 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6598 old_act
->sa_flags
= oact
.sa_flags
;
6599 unlock_user_struct(old_act
, arg3
, 1);
6601 #elif defined(TARGET_MIPS)
6602 struct target_sigaction act
, oact
, *pact
, *old_act
;
6605 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6607 act
._sa_handler
= old_act
->_sa_handler
;
6608 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
6609 act
.sa_flags
= old_act
->sa_flags
;
6610 unlock_user_struct(old_act
, arg2
, 0);
6616 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6618 if (!is_error(ret
) && arg3
) {
6619 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6621 old_act
->_sa_handler
= oact
._sa_handler
;
6622 old_act
->sa_flags
= oact
.sa_flags
;
6623 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
6624 old_act
->sa_mask
.sig
[1] = 0;
6625 old_act
->sa_mask
.sig
[2] = 0;
6626 old_act
->sa_mask
.sig
[3] = 0;
6627 unlock_user_struct(old_act
, arg3
, 1);
6630 struct target_old_sigaction
*old_act
;
6631 struct target_sigaction act
, oact
, *pact
;
6633 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
6635 act
._sa_handler
= old_act
->_sa_handler
;
6636 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
6637 act
.sa_flags
= old_act
->sa_flags
;
6638 act
.sa_restorer
= old_act
->sa_restorer
;
6639 unlock_user_struct(old_act
, arg2
, 0);
6644 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6645 if (!is_error(ret
) && arg3
) {
6646 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
6648 old_act
->_sa_handler
= oact
._sa_handler
;
6649 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
6650 old_act
->sa_flags
= oact
.sa_flags
;
6651 old_act
->sa_restorer
= oact
.sa_restorer
;
6652 unlock_user_struct(old_act
, arg3
, 1);
6658 case TARGET_NR_rt_sigaction
:
6660 #if defined(TARGET_ALPHA)
6661 struct target_sigaction act
, oact
, *pact
= 0;
6662 struct target_rt_sigaction
*rt_act
;
6663 /* ??? arg4 == sizeof(sigset_t). */
6665 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
6667 act
._sa_handler
= rt_act
->_sa_handler
;
6668 act
.sa_mask
= rt_act
->sa_mask
;
6669 act
.sa_flags
= rt_act
->sa_flags
;
6670 act
.sa_restorer
= arg5
;
6671 unlock_user_struct(rt_act
, arg2
, 0);
6674 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
6675 if (!is_error(ret
) && arg3
) {
6676 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
6678 rt_act
->_sa_handler
= oact
._sa_handler
;
6679 rt_act
->sa_mask
= oact
.sa_mask
;
6680 rt_act
->sa_flags
= oact
.sa_flags
;
6681 unlock_user_struct(rt_act
, arg3
, 1);
6684 struct target_sigaction
*act
;
6685 struct target_sigaction
*oact
;
6688 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
6693 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
6694 ret
= -TARGET_EFAULT
;
6695 goto rt_sigaction_fail
;
6699 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
6702 unlock_user_struct(act
, arg2
, 0);
6704 unlock_user_struct(oact
, arg3
, 1);
6708 #ifdef TARGET_NR_sgetmask /* not on alpha */
6709 case TARGET_NR_sgetmask
:
6712 abi_ulong target_set
;
6713 do_sigprocmask(0, NULL
, &cur_set
);
6714 host_to_target_old_sigset(&target_set
, &cur_set
);
6719 #ifdef TARGET_NR_ssetmask /* not on alpha */
6720 case TARGET_NR_ssetmask
:
6722 sigset_t set
, oset
, cur_set
;
6723 abi_ulong target_set
= arg1
;
6724 do_sigprocmask(0, NULL
, &cur_set
);
6725 target_to_host_old_sigset(&set
, &target_set
);
6726 sigorset(&set
, &set
, &cur_set
);
6727 do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
6728 host_to_target_old_sigset(&target_set
, &oset
);
6733 #ifdef TARGET_NR_sigprocmask
6734 case TARGET_NR_sigprocmask
:
6736 #if defined(TARGET_ALPHA)
6737 sigset_t set
, oldset
;
6742 case TARGET_SIG_BLOCK
:
6745 case TARGET_SIG_UNBLOCK
:
6748 case TARGET_SIG_SETMASK
:
6752 ret
= -TARGET_EINVAL
;
6756 target_to_host_old_sigset(&set
, &mask
);
6758 ret
= get_errno(do_sigprocmask(how
, &set
, &oldset
));
6759 if (!is_error(ret
)) {
6760 host_to_target_old_sigset(&mask
, &oldset
);
6762 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6765 sigset_t set
, oldset
, *set_ptr
;
6770 case TARGET_SIG_BLOCK
:
6773 case TARGET_SIG_UNBLOCK
:
6776 case TARGET_SIG_SETMASK
:
6780 ret
= -TARGET_EINVAL
;
6783 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6785 target_to_host_old_sigset(&set
, p
);
6786 unlock_user(p
, arg2
, 0);
6792 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6793 if (!is_error(ret
) && arg3
) {
6794 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6796 host_to_target_old_sigset(p
, &oldset
);
6797 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6803 case TARGET_NR_rt_sigprocmask
:
6806 sigset_t set
, oldset
, *set_ptr
;
6810 case TARGET_SIG_BLOCK
:
6813 case TARGET_SIG_UNBLOCK
:
6816 case TARGET_SIG_SETMASK
:
6820 ret
= -TARGET_EINVAL
;
6823 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6825 target_to_host_sigset(&set
, p
);
6826 unlock_user(p
, arg2
, 0);
6832 ret
= get_errno(do_sigprocmask(how
, set_ptr
, &oldset
));
6833 if (!is_error(ret
) && arg3
) {
6834 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6836 host_to_target_sigset(p
, &oldset
);
6837 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6841 #ifdef TARGET_NR_sigpending
6842 case TARGET_NR_sigpending
:
6845 ret
= get_errno(sigpending(&set
));
6846 if (!is_error(ret
)) {
6847 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6849 host_to_target_old_sigset(p
, &set
);
6850 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6855 case TARGET_NR_rt_sigpending
:
6858 ret
= get_errno(sigpending(&set
));
6859 if (!is_error(ret
)) {
6860 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6862 host_to_target_sigset(p
, &set
);
6863 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6867 #ifdef TARGET_NR_sigsuspend
6868 case TARGET_NR_sigsuspend
:
6871 #if defined(TARGET_ALPHA)
6872 abi_ulong mask
= arg1
;
6873 target_to_host_old_sigset(&set
, &mask
);
6875 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6877 target_to_host_old_sigset(&set
, p
);
6878 unlock_user(p
, arg1
, 0);
6880 ret
= get_errno(sigsuspend(&set
));
6884 case TARGET_NR_rt_sigsuspend
:
6887 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6889 target_to_host_sigset(&set
, p
);
6890 unlock_user(p
, arg1
, 0);
6891 ret
= get_errno(sigsuspend(&set
));
6894 case TARGET_NR_rt_sigtimedwait
:
6897 struct timespec uts
, *puts
;
6900 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6902 target_to_host_sigset(&set
, p
);
6903 unlock_user(p
, arg1
, 0);
6906 target_to_host_timespec(puts
, arg3
);
6910 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6911 if (!is_error(ret
)) {
6913 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
6918 host_to_target_siginfo(p
, &uinfo
);
6919 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6921 ret
= host_to_target_signal(ret
);
6925 case TARGET_NR_rt_sigqueueinfo
:
6928 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6930 target_to_host_siginfo(&uinfo
, p
);
6931 unlock_user(p
, arg1
, 0);
6932 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6935 #ifdef TARGET_NR_sigreturn
6936 case TARGET_NR_sigreturn
:
6937 /* NOTE: ret is eax, so not transcoding must be done */
6938 ret
= do_sigreturn(cpu_env
);
6941 case TARGET_NR_rt_sigreturn
:
6942 /* NOTE: ret is eax, so not transcoding must be done */
6943 ret
= do_rt_sigreturn(cpu_env
);
6945 case TARGET_NR_sethostname
:
6946 if (!(p
= lock_user_string(arg1
)))
6948 ret
= get_errno(sethostname(p
, arg2
));
6949 unlock_user(p
, arg1
, 0);
6951 case TARGET_NR_setrlimit
:
6953 int resource
= target_to_host_resource(arg1
);
6954 struct target_rlimit
*target_rlim
;
6956 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6958 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6959 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6960 unlock_user_struct(target_rlim
, arg2
, 0);
6961 ret
= get_errno(setrlimit(resource
, &rlim
));
6964 case TARGET_NR_getrlimit
:
6966 int resource
= target_to_host_resource(arg1
);
6967 struct target_rlimit
*target_rlim
;
6970 ret
= get_errno(getrlimit(resource
, &rlim
));
6971 if (!is_error(ret
)) {
6972 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6974 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6975 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6976 unlock_user_struct(target_rlim
, arg2
, 1);
6980 case TARGET_NR_getrusage
:
6982 struct rusage rusage
;
6983 ret
= get_errno(getrusage(arg1
, &rusage
));
6984 if (!is_error(ret
)) {
6985 ret
= host_to_target_rusage(arg2
, &rusage
);
6989 case TARGET_NR_gettimeofday
:
6992 ret
= get_errno(gettimeofday(&tv
, NULL
));
6993 if (!is_error(ret
)) {
6994 if (copy_to_user_timeval(arg1
, &tv
))
6999 case TARGET_NR_settimeofday
:
7001 struct timeval tv
, *ptv
= NULL
;
7002 struct timezone tz
, *ptz
= NULL
;
7005 if (copy_from_user_timeval(&tv
, arg1
)) {
7012 if (copy_from_user_timezone(&tz
, arg2
)) {
7018 ret
= get_errno(settimeofday(ptv
, ptz
));
7021 #if defined(TARGET_NR_select)
7022 case TARGET_NR_select
:
7023 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7024 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7027 struct target_sel_arg_struct
*sel
;
7028 abi_ulong inp
, outp
, exp
, tvp
;
7031 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
7033 nsel
= tswapal(sel
->n
);
7034 inp
= tswapal(sel
->inp
);
7035 outp
= tswapal(sel
->outp
);
7036 exp
= tswapal(sel
->exp
);
7037 tvp
= tswapal(sel
->tvp
);
7038 unlock_user_struct(sel
, arg1
, 0);
7039 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
7044 #ifdef TARGET_NR_pselect6
7045 case TARGET_NR_pselect6
:
7047 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
7048 fd_set rfds
, wfds
, efds
;
7049 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
7050 struct timespec ts
, *ts_ptr
;
7053 * The 6th arg is actually two args smashed together,
7054 * so we cannot use the C library.
7062 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
7063 target_sigset_t
*target_sigset
;
7071 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
7075 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
7079 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
7085 * This takes a timespec, and not a timeval, so we cannot
7086 * use the do_select() helper ...
7089 if (target_to_host_timespec(&ts
, ts_addr
)) {
7097 /* Extract the two packed args for the sigset */
7100 sig
.size
= _NSIG
/ 8;
7102 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
7106 arg_sigset
= tswapal(arg7
[0]);
7107 arg_sigsize
= tswapal(arg7
[1]);
7108 unlock_user(arg7
, arg6
, 0);
7112 if (arg_sigsize
!= sizeof(*target_sigset
)) {
7113 /* Like the kernel, we enforce correct size sigsets */
7114 ret
= -TARGET_EINVAL
;
7117 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
7118 sizeof(*target_sigset
), 1);
7119 if (!target_sigset
) {
7122 target_to_host_sigset(&set
, target_sigset
);
7123 unlock_user(target_sigset
, arg_sigset
, 0);
7131 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
7134 if (!is_error(ret
)) {
7135 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
7137 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
7139 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
7142 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
7148 #ifdef TARGET_NR_symlink
7149 case TARGET_NR_symlink
:
7152 p
= lock_user_string(arg1
);
7153 p2
= lock_user_string(arg2
);
7155 ret
= -TARGET_EFAULT
;
7157 ret
= get_errno(symlink(p
, p2
));
7158 unlock_user(p2
, arg2
, 0);
7159 unlock_user(p
, arg1
, 0);
7163 #if defined(TARGET_NR_symlinkat)
7164 case TARGET_NR_symlinkat
:
7167 p
= lock_user_string(arg1
);
7168 p2
= lock_user_string(arg3
);
7170 ret
= -TARGET_EFAULT
;
7172 ret
= get_errno(symlinkat(p
, arg2
, p2
));
7173 unlock_user(p2
, arg3
, 0);
7174 unlock_user(p
, arg1
, 0);
7178 #ifdef TARGET_NR_oldlstat
7179 case TARGET_NR_oldlstat
:
7182 #ifdef TARGET_NR_readlink
7183 case TARGET_NR_readlink
:
7186 p
= lock_user_string(arg1
);
7187 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
7189 ret
= -TARGET_EFAULT
;
7191 /* Short circuit this for the magic exe check. */
7192 ret
= -TARGET_EINVAL
;
7193 } else if (is_proc_myself((const char *)p
, "exe")) {
7194 char real
[PATH_MAX
], *temp
;
7195 temp
= realpath(exec_path
, real
);
7196 /* Return value is # of bytes that we wrote to the buffer. */
7198 ret
= get_errno(-1);
7200 /* Don't worry about sign mismatch as earlier mapping
7201 * logic would have thrown a bad address error. */
7202 ret
= MIN(strlen(real
), arg3
);
7203 /* We cannot NUL terminate the string. */
7204 memcpy(p2
, real
, ret
);
7207 ret
= get_errno(readlink(path(p
), p2
, arg3
));
7209 unlock_user(p2
, arg2
, ret
);
7210 unlock_user(p
, arg1
, 0);
7214 #if defined(TARGET_NR_readlinkat)
7215 case TARGET_NR_readlinkat
:
7218 p
= lock_user_string(arg2
);
7219 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
7221 ret
= -TARGET_EFAULT
;
7222 } else if (is_proc_myself((const char *)p
, "exe")) {
7223 char real
[PATH_MAX
], *temp
;
7224 temp
= realpath(exec_path
, real
);
7225 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
7226 snprintf((char *)p2
, arg4
, "%s", real
);
7228 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
7230 unlock_user(p2
, arg3
, ret
);
7231 unlock_user(p
, arg2
, 0);
7235 #ifdef TARGET_NR_uselib
7236 case TARGET_NR_uselib
:
7239 #ifdef TARGET_NR_swapon
7240 case TARGET_NR_swapon
:
7241 if (!(p
= lock_user_string(arg1
)))
7243 ret
= get_errno(swapon(p
, arg2
));
7244 unlock_user(p
, arg1
, 0);
7247 case TARGET_NR_reboot
:
7248 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
7249 /* arg4 must be ignored in all other cases */
7250 p
= lock_user_string(arg4
);
7254 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
7255 unlock_user(p
, arg4
, 0);
7257 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
7260 #ifdef TARGET_NR_readdir
7261 case TARGET_NR_readdir
:
7264 #ifdef TARGET_NR_mmap
7265 case TARGET_NR_mmap
:
7266 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7267 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7268 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7269 || defined(TARGET_S390X)
7272 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
7273 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
7281 unlock_user(v
, arg1
, 0);
7282 ret
= get_errno(target_mmap(v1
, v2
, v3
,
7283 target_to_host_bitmask(v4
, mmap_flags_tbl
),
7287 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7288 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7294 #ifdef TARGET_NR_mmap2
7295 case TARGET_NR_mmap2
:
7297 #define MMAP_SHIFT 12
7299 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
7300 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
7302 arg6
<< MMAP_SHIFT
));
7305 case TARGET_NR_munmap
:
7306 ret
= get_errno(target_munmap(arg1
, arg2
));
7308 case TARGET_NR_mprotect
:
7310 TaskState
*ts
= cpu
->opaque
;
7311 /* Special hack to detect libc making the stack executable. */
7312 if ((arg3
& PROT_GROWSDOWN
)
7313 && arg1
>= ts
->info
->stack_limit
7314 && arg1
<= ts
->info
->start_stack
) {
7315 arg3
&= ~PROT_GROWSDOWN
;
7316 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
7317 arg1
= ts
->info
->stack_limit
;
7320 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
7322 #ifdef TARGET_NR_mremap
7323 case TARGET_NR_mremap
:
7324 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
7327 /* ??? msync/mlock/munlock are broken for softmmu. */
7328 #ifdef TARGET_NR_msync
7329 case TARGET_NR_msync
:
7330 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
7333 #ifdef TARGET_NR_mlock
7334 case TARGET_NR_mlock
:
7335 ret
= get_errno(mlock(g2h(arg1
), arg2
));
7338 #ifdef TARGET_NR_munlock
7339 case TARGET_NR_munlock
:
7340 ret
= get_errno(munlock(g2h(arg1
), arg2
));
7343 #ifdef TARGET_NR_mlockall
7344 case TARGET_NR_mlockall
:
7345 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
7348 #ifdef TARGET_NR_munlockall
7349 case TARGET_NR_munlockall
:
7350 ret
= get_errno(munlockall());
7353 case TARGET_NR_truncate
:
7354 if (!(p
= lock_user_string(arg1
)))
7356 ret
= get_errno(truncate(p
, arg2
));
7357 unlock_user(p
, arg1
, 0);
7359 case TARGET_NR_ftruncate
:
7360 ret
= get_errno(ftruncate(arg1
, arg2
));
7362 case TARGET_NR_fchmod
:
7363 ret
= get_errno(fchmod(arg1
, arg2
));
7365 #if defined(TARGET_NR_fchmodat)
7366 case TARGET_NR_fchmodat
:
7367 if (!(p
= lock_user_string(arg2
)))
7369 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
7370 unlock_user(p
, arg2
, 0);
7373 case TARGET_NR_getpriority
:
7374 /* Note that negative values are valid for getpriority, so we must
7375 differentiate based on errno settings. */
7377 ret
= getpriority(arg1
, arg2
);
7378 if (ret
== -1 && errno
!= 0) {
7379 ret
= -host_to_target_errno(errno
);
7383 /* Return value is the unbiased priority. Signal no error. */
7384 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
7386 /* Return value is a biased priority to avoid negative numbers. */
7390 case TARGET_NR_setpriority
:
7391 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
7393 #ifdef TARGET_NR_profil
7394 case TARGET_NR_profil
:
7397 case TARGET_NR_statfs
:
7398 if (!(p
= lock_user_string(arg1
)))
7400 ret
= get_errno(statfs(path(p
), &stfs
));
7401 unlock_user(p
, arg1
, 0);
7403 if (!is_error(ret
)) {
7404 struct target_statfs
*target_stfs
;
7406 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
7408 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7409 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7410 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7411 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7412 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7413 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7414 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7415 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7416 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7417 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7418 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7419 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7420 unlock_user_struct(target_stfs
, arg2
, 1);
7423 case TARGET_NR_fstatfs
:
7424 ret
= get_errno(fstatfs(arg1
, &stfs
));
7425 goto convert_statfs
;
7426 #ifdef TARGET_NR_statfs64
7427 case TARGET_NR_statfs64
:
7428 if (!(p
= lock_user_string(arg1
)))
7430 ret
= get_errno(statfs(path(p
), &stfs
));
7431 unlock_user(p
, arg1
, 0);
7433 if (!is_error(ret
)) {
7434 struct target_statfs64
*target_stfs
;
7436 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
7438 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
7439 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
7440 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
7441 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
7442 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
7443 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
7444 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
7445 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
7446 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
7447 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
7448 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
7449 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
7450 unlock_user_struct(target_stfs
, arg3
, 1);
7453 case TARGET_NR_fstatfs64
:
7454 ret
= get_errno(fstatfs(arg1
, &stfs
));
7455 goto convert_statfs64
;
7457 #ifdef TARGET_NR_ioperm
7458 case TARGET_NR_ioperm
:
7461 #ifdef TARGET_NR_socketcall
7462 case TARGET_NR_socketcall
:
7463 ret
= do_socketcall(arg1
, arg2
);
7466 #ifdef TARGET_NR_accept
7467 case TARGET_NR_accept
:
7468 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
7471 #ifdef TARGET_NR_accept4
7472 case TARGET_NR_accept4
:
7473 #ifdef CONFIG_ACCEPT4
7474 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
7480 #ifdef TARGET_NR_bind
7481 case TARGET_NR_bind
:
7482 ret
= do_bind(arg1
, arg2
, arg3
);
7485 #ifdef TARGET_NR_connect
7486 case TARGET_NR_connect
:
7487 ret
= do_connect(arg1
, arg2
, arg3
);
7490 #ifdef TARGET_NR_getpeername
7491 case TARGET_NR_getpeername
:
7492 ret
= do_getpeername(arg1
, arg2
, arg3
);
7495 #ifdef TARGET_NR_getsockname
7496 case TARGET_NR_getsockname
:
7497 ret
= do_getsockname(arg1
, arg2
, arg3
);
7500 #ifdef TARGET_NR_getsockopt
7501 case TARGET_NR_getsockopt
:
7502 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
7505 #ifdef TARGET_NR_listen
7506 case TARGET_NR_listen
:
7507 ret
= get_errno(listen(arg1
, arg2
));
7510 #ifdef TARGET_NR_recv
7511 case TARGET_NR_recv
:
7512 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
7515 #ifdef TARGET_NR_recvfrom
7516 case TARGET_NR_recvfrom
:
7517 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7520 #ifdef TARGET_NR_recvmsg
7521 case TARGET_NR_recvmsg
:
7522 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
7525 #ifdef TARGET_NR_send
7526 case TARGET_NR_send
:
7527 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
7530 #ifdef TARGET_NR_sendmsg
7531 case TARGET_NR_sendmsg
:
7532 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
7535 #ifdef TARGET_NR_sendmmsg
7536 case TARGET_NR_sendmmsg
:
7537 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
7539 case TARGET_NR_recvmmsg
:
7540 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
7543 #ifdef TARGET_NR_sendto
7544 case TARGET_NR_sendto
:
7545 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7548 #ifdef TARGET_NR_shutdown
7549 case TARGET_NR_shutdown
:
7550 ret
= get_errno(shutdown(arg1
, arg2
));
7553 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7554 case TARGET_NR_getrandom
:
7555 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
7559 ret
= get_errno(getrandom(p
, arg2
, arg3
));
7560 unlock_user(p
, arg1
, ret
);
7563 #ifdef TARGET_NR_socket
7564 case TARGET_NR_socket
:
7565 ret
= do_socket(arg1
, arg2
, arg3
);
7566 fd_trans_unregister(ret
);
7569 #ifdef TARGET_NR_socketpair
7570 case TARGET_NR_socketpair
:
7571 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
7574 #ifdef TARGET_NR_setsockopt
7575 case TARGET_NR_setsockopt
:
7576 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
7580 case TARGET_NR_syslog
:
7581 if (!(p
= lock_user_string(arg2
)))
7583 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
7584 unlock_user(p
, arg2
, 0);
7587 case TARGET_NR_setitimer
:
7589 struct itimerval value
, ovalue
, *pvalue
;
7593 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
7594 || copy_from_user_timeval(&pvalue
->it_value
,
7595 arg2
+ sizeof(struct target_timeval
)))
7600 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
7601 if (!is_error(ret
) && arg3
) {
7602 if (copy_to_user_timeval(arg3
,
7603 &ovalue
.it_interval
)
7604 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
7610 case TARGET_NR_getitimer
:
7612 struct itimerval value
;
7614 ret
= get_errno(getitimer(arg1
, &value
));
7615 if (!is_error(ret
) && arg2
) {
7616 if (copy_to_user_timeval(arg2
,
7618 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
7624 #ifdef TARGET_NR_stat
7625 case TARGET_NR_stat
:
7626 if (!(p
= lock_user_string(arg1
)))
7628 ret
= get_errno(stat(path(p
), &st
));
7629 unlock_user(p
, arg1
, 0);
7632 #ifdef TARGET_NR_lstat
7633 case TARGET_NR_lstat
:
7634 if (!(p
= lock_user_string(arg1
)))
7636 ret
= get_errno(lstat(path(p
), &st
));
7637 unlock_user(p
, arg1
, 0);
7640 case TARGET_NR_fstat
:
7642 ret
= get_errno(fstat(arg1
, &st
));
7643 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7646 if (!is_error(ret
)) {
7647 struct target_stat
*target_st
;
7649 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
7651 memset(target_st
, 0, sizeof(*target_st
));
7652 __put_user(st
.st_dev
, &target_st
->st_dev
);
7653 __put_user(st
.st_ino
, &target_st
->st_ino
);
7654 __put_user(st
.st_mode
, &target_st
->st_mode
);
7655 __put_user(st
.st_uid
, &target_st
->st_uid
);
7656 __put_user(st
.st_gid
, &target_st
->st_gid
);
7657 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
7658 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
7659 __put_user(st
.st_size
, &target_st
->st_size
);
7660 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
7661 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
7662 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
7663 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
7664 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
7665 unlock_user_struct(target_st
, arg2
, 1);
7669 #ifdef TARGET_NR_olduname
7670 case TARGET_NR_olduname
:
7673 #ifdef TARGET_NR_iopl
7674 case TARGET_NR_iopl
:
7677 case TARGET_NR_vhangup
:
7678 ret
= get_errno(vhangup());
7680 #ifdef TARGET_NR_idle
7681 case TARGET_NR_idle
:
7684 #ifdef TARGET_NR_syscall
7685 case TARGET_NR_syscall
:
7686 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
7687 arg6
, arg7
, arg8
, 0);
7690 case TARGET_NR_wait4
:
7693 abi_long status_ptr
= arg2
;
7694 struct rusage rusage
, *rusage_ptr
;
7695 abi_ulong target_rusage
= arg4
;
7696 abi_long rusage_err
;
7698 rusage_ptr
= &rusage
;
7701 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
7702 if (!is_error(ret
)) {
7703 if (status_ptr
&& ret
) {
7704 status
= host_to_target_waitstatus(status
);
7705 if (put_user_s32(status
, status_ptr
))
7708 if (target_rusage
) {
7709 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
7717 #ifdef TARGET_NR_swapoff
7718 case TARGET_NR_swapoff
:
7719 if (!(p
= lock_user_string(arg1
)))
7721 ret
= get_errno(swapoff(p
));
7722 unlock_user(p
, arg1
, 0);
7725 case TARGET_NR_sysinfo
:
7727 struct target_sysinfo
*target_value
;
7728 struct sysinfo value
;
7729 ret
= get_errno(sysinfo(&value
));
7730 if (!is_error(ret
) && arg1
)
7732 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
7734 __put_user(value
.uptime
, &target_value
->uptime
);
7735 __put_user(value
.loads
[0], &target_value
->loads
[0]);
7736 __put_user(value
.loads
[1], &target_value
->loads
[1]);
7737 __put_user(value
.loads
[2], &target_value
->loads
[2]);
7738 __put_user(value
.totalram
, &target_value
->totalram
);
7739 __put_user(value
.freeram
, &target_value
->freeram
);
7740 __put_user(value
.sharedram
, &target_value
->sharedram
);
7741 __put_user(value
.bufferram
, &target_value
->bufferram
);
7742 __put_user(value
.totalswap
, &target_value
->totalswap
);
7743 __put_user(value
.freeswap
, &target_value
->freeswap
);
7744 __put_user(value
.procs
, &target_value
->procs
);
7745 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
7746 __put_user(value
.freehigh
, &target_value
->freehigh
);
7747 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
7748 unlock_user_struct(target_value
, arg1
, 1);
7752 #ifdef TARGET_NR_ipc
7754 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7757 #ifdef TARGET_NR_semget
7758 case TARGET_NR_semget
:
7759 ret
= get_errno(semget(arg1
, arg2
, arg3
));
7762 #ifdef TARGET_NR_semop
7763 case TARGET_NR_semop
:
7764 ret
= do_semop(arg1
, arg2
, arg3
);
7767 #ifdef TARGET_NR_semctl
7768 case TARGET_NR_semctl
:
7769 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
7772 #ifdef TARGET_NR_msgctl
7773 case TARGET_NR_msgctl
:
7774 ret
= do_msgctl(arg1
, arg2
, arg3
);
7777 #ifdef TARGET_NR_msgget
7778 case TARGET_NR_msgget
:
7779 ret
= get_errno(msgget(arg1
, arg2
));
7782 #ifdef TARGET_NR_msgrcv
7783 case TARGET_NR_msgrcv
:
7784 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
7787 #ifdef TARGET_NR_msgsnd
7788 case TARGET_NR_msgsnd
:
7789 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
7792 #ifdef TARGET_NR_shmget
7793 case TARGET_NR_shmget
:
7794 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
7797 #ifdef TARGET_NR_shmctl
7798 case TARGET_NR_shmctl
:
7799 ret
= do_shmctl(arg1
, arg2
, arg3
);
7802 #ifdef TARGET_NR_shmat
7803 case TARGET_NR_shmat
:
7804 ret
= do_shmat(arg1
, arg2
, arg3
);
7807 #ifdef TARGET_NR_shmdt
7808 case TARGET_NR_shmdt
:
7809 ret
= do_shmdt(arg1
);
7812 case TARGET_NR_fsync
:
7813 ret
= get_errno(fsync(arg1
));
7815 case TARGET_NR_clone
:
7816 /* Linux manages to have three different orderings for its
7817 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7818 * match the kernel's CONFIG_CLONE_* settings.
7819 * Microblaze is further special in that it uses a sixth
7820 * implicit argument to clone for the TLS pointer.
7822 #if defined(TARGET_MICROBLAZE)
7823 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
7824 #elif defined(TARGET_CLONE_BACKWARDS)
7825 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7826 #elif defined(TARGET_CLONE_BACKWARDS2)
7827 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
7829 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
7832 #ifdef __NR_exit_group
7833 /* new thread calls */
7834 case TARGET_NR_exit_group
:
7838 gdb_exit(cpu_env
, arg1
);
7839 ret
= get_errno(exit_group(arg1
));
7842 case TARGET_NR_setdomainname
:
7843 if (!(p
= lock_user_string(arg1
)))
7845 ret
= get_errno(setdomainname(p
, arg2
));
7846 unlock_user(p
, arg1
, 0);
7848 case TARGET_NR_uname
:
7849 /* no need to transcode because we use the linux syscall */
7851 struct new_utsname
* buf
;
7853 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7855 ret
= get_errno(sys_uname(buf
));
7856 if (!is_error(ret
)) {
7857 /* Overrite the native machine name with whatever is being
7859 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7860 /* Allow the user to override the reported release. */
7861 if (qemu_uname_release
&& *qemu_uname_release
)
7862 strcpy (buf
->release
, qemu_uname_release
);
7864 unlock_user_struct(buf
, arg1
, 1);
7868 case TARGET_NR_modify_ldt
:
7869 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7871 #if !defined(TARGET_X86_64)
7872 case TARGET_NR_vm86old
:
7874 case TARGET_NR_vm86
:
7875 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7879 case TARGET_NR_adjtimex
:
7881 #ifdef TARGET_NR_create_module
7882 case TARGET_NR_create_module
:
7884 case TARGET_NR_init_module
:
7885 case TARGET_NR_delete_module
:
7886 #ifdef TARGET_NR_get_kernel_syms
7887 case TARGET_NR_get_kernel_syms
:
7890 case TARGET_NR_quotactl
:
7892 case TARGET_NR_getpgid
:
7893 ret
= get_errno(getpgid(arg1
));
7895 case TARGET_NR_fchdir
:
7896 ret
= get_errno(fchdir(arg1
));
7898 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7899 case TARGET_NR_bdflush
:
7902 #ifdef TARGET_NR_sysfs
7903 case TARGET_NR_sysfs
:
7906 case TARGET_NR_personality
:
7907 ret
= get_errno(personality(arg1
));
7909 #ifdef TARGET_NR_afs_syscall
7910 case TARGET_NR_afs_syscall
:
7913 #ifdef TARGET_NR__llseek /* Not on alpha */
7914 case TARGET_NR__llseek
:
7917 #if !defined(__NR_llseek)
7918 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7920 ret
= get_errno(res
);
7925 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7927 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7933 #ifdef TARGET_NR_getdents
7934 case TARGET_NR_getdents
:
7935 #ifdef __NR_getdents
7936 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7938 struct target_dirent
*target_dirp
;
7939 struct linux_dirent
*dirp
;
7940 abi_long count
= arg3
;
7942 dirp
= g_try_malloc(count
);
7944 ret
= -TARGET_ENOMEM
;
7948 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7949 if (!is_error(ret
)) {
7950 struct linux_dirent
*de
;
7951 struct target_dirent
*tde
;
7953 int reclen
, treclen
;
7954 int count1
, tnamelen
;
7958 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7962 reclen
= de
->d_reclen
;
7963 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7964 assert(tnamelen
>= 0);
7965 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7966 assert(count1
+ treclen
<= count
);
7967 tde
->d_reclen
= tswap16(treclen
);
7968 tde
->d_ino
= tswapal(de
->d_ino
);
7969 tde
->d_off
= tswapal(de
->d_off
);
7970 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7971 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7973 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7977 unlock_user(target_dirp
, arg2
, ret
);
7983 struct linux_dirent
*dirp
;
7984 abi_long count
= arg3
;
7986 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7988 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7989 if (!is_error(ret
)) {
7990 struct linux_dirent
*de
;
7995 reclen
= de
->d_reclen
;
7998 de
->d_reclen
= tswap16(reclen
);
7999 tswapls(&de
->d_ino
);
8000 tswapls(&de
->d_off
);
8001 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
8005 unlock_user(dirp
, arg2
, ret
);
8009 /* Implement getdents in terms of getdents64 */
8011 struct linux_dirent64
*dirp
;
8012 abi_long count
= arg3
;
8014 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8018 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8019 if (!is_error(ret
)) {
8020 /* Convert the dirent64 structs to target dirent. We do this
8021 * in-place, since we can guarantee that a target_dirent is no
8022 * larger than a dirent64; however this means we have to be
8023 * careful to read everything before writing in the new format.
8025 struct linux_dirent64
*de
;
8026 struct target_dirent
*tde
;
8031 tde
= (struct target_dirent
*)dirp
;
8033 int namelen
, treclen
;
8034 int reclen
= de
->d_reclen
;
8035 uint64_t ino
= de
->d_ino
;
8036 int64_t off
= de
->d_off
;
8037 uint8_t type
= de
->d_type
;
8039 namelen
= strlen(de
->d_name
);
8040 treclen
= offsetof(struct target_dirent
, d_name
)
8042 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
8044 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
8045 tde
->d_ino
= tswapal(ino
);
8046 tde
->d_off
= tswapal(off
);
8047 tde
->d_reclen
= tswap16(treclen
);
8048 /* The target_dirent type is in what was formerly a padding
8049 * byte at the end of the structure:
8051 *(((char *)tde
) + treclen
- 1) = type
;
8053 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8054 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
8060 unlock_user(dirp
, arg2
, ret
);
8064 #endif /* TARGET_NR_getdents */
8065 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8066 case TARGET_NR_getdents64
:
8068 struct linux_dirent64
*dirp
;
8069 abi_long count
= arg3
;
8070 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
8072 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
8073 if (!is_error(ret
)) {
8074 struct linux_dirent64
*de
;
8079 reclen
= de
->d_reclen
;
8082 de
->d_reclen
= tswap16(reclen
);
8083 tswap64s((uint64_t *)&de
->d_ino
);
8084 tswap64s((uint64_t *)&de
->d_off
);
8085 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
8089 unlock_user(dirp
, arg2
, ret
);
8092 #endif /* TARGET_NR_getdents64 */
8093 #if defined(TARGET_NR__newselect)
8094 case TARGET_NR__newselect
:
8095 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8098 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8099 # ifdef TARGET_NR_poll
8100 case TARGET_NR_poll
:
8102 # ifdef TARGET_NR_ppoll
8103 case TARGET_NR_ppoll
:
8106 struct target_pollfd
*target_pfd
;
8107 unsigned int nfds
= arg2
;
8115 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
8116 sizeof(struct target_pollfd
) * nfds
, 1);
8121 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
8122 for (i
= 0; i
< nfds
; i
++) {
8123 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
8124 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
8128 # ifdef TARGET_NR_ppoll
8129 if (num
== TARGET_NR_ppoll
) {
8130 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
8131 target_sigset_t
*target_set
;
8132 sigset_t _set
, *set
= &_set
;
8135 if (target_to_host_timespec(timeout_ts
, arg3
)) {
8136 unlock_user(target_pfd
, arg1
, 0);
8144 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
8146 unlock_user(target_pfd
, arg1
, 0);
8149 target_to_host_sigset(set
, target_set
);
8154 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
8156 if (!is_error(ret
) && arg3
) {
8157 host_to_target_timespec(arg3
, timeout_ts
);
8160 unlock_user(target_set
, arg4
, 0);
8164 ret
= get_errno(poll(pfd
, nfds
, timeout
));
8166 if (!is_error(ret
)) {
8167 for(i
= 0; i
< nfds
; i
++) {
8168 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
8171 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
8175 case TARGET_NR_flock
:
8176 /* NOTE: the flock constant seems to be the same for every
8178 ret
= get_errno(flock(arg1
, arg2
));
8180 case TARGET_NR_readv
:
8182 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
8184 ret
= get_errno(readv(arg1
, vec
, arg3
));
8185 unlock_iovec(vec
, arg2
, arg3
, 1);
8187 ret
= -host_to_target_errno(errno
);
8191 case TARGET_NR_writev
:
8193 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8195 ret
= get_errno(writev(arg1
, vec
, arg3
));
8196 unlock_iovec(vec
, arg2
, arg3
, 0);
8198 ret
= -host_to_target_errno(errno
);
8202 case TARGET_NR_getsid
:
8203 ret
= get_errno(getsid(arg1
));
8205 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8206 case TARGET_NR_fdatasync
:
8207 ret
= get_errno(fdatasync(arg1
));
8210 #ifdef TARGET_NR__sysctl
8211 case TARGET_NR__sysctl
:
8212 /* We don't implement this, but ENOTDIR is always a safe
8214 ret
= -TARGET_ENOTDIR
;
8217 case TARGET_NR_sched_getaffinity
:
8219 unsigned int mask_size
;
8220 unsigned long *mask
;
8223 * sched_getaffinity needs multiples of ulong, so need to take
8224 * care of mismatches between target ulong and host ulong sizes.
8226 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8227 ret
= -TARGET_EINVAL
;
8230 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8232 mask
= alloca(mask_size
);
8233 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
8235 if (!is_error(ret
)) {
8237 /* More data returned than the caller's buffer will fit.
8238 * This only happens if sizeof(abi_long) < sizeof(long)
8239 * and the caller passed us a buffer holding an odd number
8240 * of abi_longs. If the host kernel is actually using the
8241 * extra 4 bytes then fail EINVAL; otherwise we can just
8242 * ignore them and only copy the interesting part.
8244 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
8245 if (numcpus
> arg2
* 8) {
8246 ret
= -TARGET_EINVAL
;
8252 if (copy_to_user(arg3
, mask
, ret
)) {
8258 case TARGET_NR_sched_setaffinity
:
8260 unsigned int mask_size
;
8261 unsigned long *mask
;
8264 * sched_setaffinity needs multiples of ulong, so need to take
8265 * care of mismatches between target ulong and host ulong sizes.
8267 if (arg2
& (sizeof(abi_ulong
) - 1)) {
8268 ret
= -TARGET_EINVAL
;
8271 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
8273 mask
= alloca(mask_size
);
8274 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
8277 memcpy(mask
, p
, arg2
);
8278 unlock_user_struct(p
, arg2
, 0);
8280 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
8283 case TARGET_NR_sched_setparam
:
8285 struct sched_param
*target_schp
;
8286 struct sched_param schp
;
8289 return -TARGET_EINVAL
;
8291 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
8293 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8294 unlock_user_struct(target_schp
, arg2
, 0);
8295 ret
= get_errno(sched_setparam(arg1
, &schp
));
8298 case TARGET_NR_sched_getparam
:
8300 struct sched_param
*target_schp
;
8301 struct sched_param schp
;
8304 return -TARGET_EINVAL
;
8306 ret
= get_errno(sched_getparam(arg1
, &schp
));
8307 if (!is_error(ret
)) {
8308 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
8310 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
8311 unlock_user_struct(target_schp
, arg2
, 1);
8315 case TARGET_NR_sched_setscheduler
:
8317 struct sched_param
*target_schp
;
8318 struct sched_param schp
;
8320 return -TARGET_EINVAL
;
8322 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
8324 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
8325 unlock_user_struct(target_schp
, arg3
, 0);
8326 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
8329 case TARGET_NR_sched_getscheduler
:
8330 ret
= get_errno(sched_getscheduler(arg1
));
8332 case TARGET_NR_sched_yield
:
8333 ret
= get_errno(sched_yield());
8335 case TARGET_NR_sched_get_priority_max
:
8336 ret
= get_errno(sched_get_priority_max(arg1
));
8338 case TARGET_NR_sched_get_priority_min
:
8339 ret
= get_errno(sched_get_priority_min(arg1
));
8341 case TARGET_NR_sched_rr_get_interval
:
8344 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
8345 if (!is_error(ret
)) {
8346 ret
= host_to_target_timespec(arg2
, &ts
);
8350 case TARGET_NR_nanosleep
:
8352 struct timespec req
, rem
;
8353 target_to_host_timespec(&req
, arg1
);
8354 ret
= get_errno(nanosleep(&req
, &rem
));
8355 if (is_error(ret
) && arg2
) {
8356 host_to_target_timespec(arg2
, &rem
);
8360 #ifdef TARGET_NR_query_module
8361 case TARGET_NR_query_module
:
8364 #ifdef TARGET_NR_nfsservctl
8365 case TARGET_NR_nfsservctl
:
8368 case TARGET_NR_prctl
:
8370 case PR_GET_PDEATHSIG
:
8373 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
8374 if (!is_error(ret
) && arg2
8375 && put_user_ual(deathsig
, arg2
)) {
8383 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
8387 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8389 unlock_user(name
, arg2
, 16);
8394 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
8398 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
8400 unlock_user(name
, arg2
, 0);
8405 /* Most prctl options have no pointer arguments */
8406 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
8410 #ifdef TARGET_NR_arch_prctl
8411 case TARGET_NR_arch_prctl
:
8412 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8413 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
8419 #ifdef TARGET_NR_pread64
8420 case TARGET_NR_pread64
:
8421 if (regpairs_aligned(cpu_env
)) {
8425 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8427 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8428 unlock_user(p
, arg2
, ret
);
8430 case TARGET_NR_pwrite64
:
8431 if (regpairs_aligned(cpu_env
)) {
8435 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8437 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
8438 unlock_user(p
, arg2
, 0);
8441 case TARGET_NR_getcwd
:
8442 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
8444 ret
= get_errno(sys_getcwd1(p
, arg2
));
8445 unlock_user(p
, arg1
, ret
);
8447 case TARGET_NR_capget
:
8448 case TARGET_NR_capset
:
8450 struct target_user_cap_header
*target_header
;
8451 struct target_user_cap_data
*target_data
= NULL
;
8452 struct __user_cap_header_struct header
;
8453 struct __user_cap_data_struct data
[2];
8454 struct __user_cap_data_struct
*dataptr
= NULL
;
8455 int i
, target_datalen
;
8458 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
8461 header
.version
= tswap32(target_header
->version
);
8462 header
.pid
= tswap32(target_header
->pid
);
8464 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
8465 /* Version 2 and up takes pointer to two user_data structs */
8469 target_datalen
= sizeof(*target_data
) * data_items
;
8472 if (num
== TARGET_NR_capget
) {
8473 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
8475 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
8478 unlock_user_struct(target_header
, arg1
, 0);
8482 if (num
== TARGET_NR_capset
) {
8483 for (i
= 0; i
< data_items
; i
++) {
8484 data
[i
].effective
= tswap32(target_data
[i
].effective
);
8485 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
8486 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
8493 if (num
== TARGET_NR_capget
) {
8494 ret
= get_errno(capget(&header
, dataptr
));
8496 ret
= get_errno(capset(&header
, dataptr
));
8499 /* The kernel always updates version for both capget and capset */
8500 target_header
->version
= tswap32(header
.version
);
8501 unlock_user_struct(target_header
, arg1
, 1);
8504 if (num
== TARGET_NR_capget
) {
8505 for (i
= 0; i
< data_items
; i
++) {
8506 target_data
[i
].effective
= tswap32(data
[i
].effective
);
8507 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
8508 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
8510 unlock_user(target_data
, arg2
, target_datalen
);
8512 unlock_user(target_data
, arg2
, 0);
8517 case TARGET_NR_sigaltstack
:
8518 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
8521 #ifdef CONFIG_SENDFILE
8522 case TARGET_NR_sendfile
:
8527 ret
= get_user_sal(off
, arg3
);
8528 if (is_error(ret
)) {
8533 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8534 if (!is_error(ret
) && arg3
) {
8535 abi_long ret2
= put_user_sal(off
, arg3
);
8536 if (is_error(ret2
)) {
8542 #ifdef TARGET_NR_sendfile64
8543 case TARGET_NR_sendfile64
:
8548 ret
= get_user_s64(off
, arg3
);
8549 if (is_error(ret
)) {
8554 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
8555 if (!is_error(ret
) && arg3
) {
8556 abi_long ret2
= put_user_s64(off
, arg3
);
8557 if (is_error(ret2
)) {
8565 case TARGET_NR_sendfile
:
8566 #ifdef TARGET_NR_sendfile64
8567 case TARGET_NR_sendfile64
:
8572 #ifdef TARGET_NR_getpmsg
8573 case TARGET_NR_getpmsg
:
8576 #ifdef TARGET_NR_putpmsg
8577 case TARGET_NR_putpmsg
:
8580 #ifdef TARGET_NR_vfork
8581 case TARGET_NR_vfork
:
8582 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
8586 #ifdef TARGET_NR_ugetrlimit
8587 case TARGET_NR_ugetrlimit
:
8590 int resource
= target_to_host_resource(arg1
);
8591 ret
= get_errno(getrlimit(resource
, &rlim
));
8592 if (!is_error(ret
)) {
8593 struct target_rlimit
*target_rlim
;
8594 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8596 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8597 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8598 unlock_user_struct(target_rlim
, arg2
, 1);
8603 #ifdef TARGET_NR_truncate64
8604 case TARGET_NR_truncate64
:
8605 if (!(p
= lock_user_string(arg1
)))
8607 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
8608 unlock_user(p
, arg1
, 0);
8611 #ifdef TARGET_NR_ftruncate64
8612 case TARGET_NR_ftruncate64
:
8613 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
8616 #ifdef TARGET_NR_stat64
8617 case TARGET_NR_stat64
:
8618 if (!(p
= lock_user_string(arg1
)))
8620 ret
= get_errno(stat(path(p
), &st
));
8621 unlock_user(p
, arg1
, 0);
8623 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8626 #ifdef TARGET_NR_lstat64
8627 case TARGET_NR_lstat64
:
8628 if (!(p
= lock_user_string(arg1
)))
8630 ret
= get_errno(lstat(path(p
), &st
));
8631 unlock_user(p
, arg1
, 0);
8633 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8636 #ifdef TARGET_NR_fstat64
8637 case TARGET_NR_fstat64
:
8638 ret
= get_errno(fstat(arg1
, &st
));
8640 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
8643 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8644 #ifdef TARGET_NR_fstatat64
8645 case TARGET_NR_fstatat64
:
8647 #ifdef TARGET_NR_newfstatat
8648 case TARGET_NR_newfstatat
:
8650 if (!(p
= lock_user_string(arg2
)))
8652 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
8654 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
8657 #ifdef TARGET_NR_lchown
8658 case TARGET_NR_lchown
:
8659 if (!(p
= lock_user_string(arg1
)))
8661 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8662 unlock_user(p
, arg1
, 0);
8665 #ifdef TARGET_NR_getuid
8666 case TARGET_NR_getuid
:
8667 ret
= get_errno(high2lowuid(getuid()));
8670 #ifdef TARGET_NR_getgid
8671 case TARGET_NR_getgid
:
8672 ret
= get_errno(high2lowgid(getgid()));
8675 #ifdef TARGET_NR_geteuid
8676 case TARGET_NR_geteuid
:
8677 ret
= get_errno(high2lowuid(geteuid()));
8680 #ifdef TARGET_NR_getegid
8681 case TARGET_NR_getegid
:
8682 ret
= get_errno(high2lowgid(getegid()));
8685 case TARGET_NR_setreuid
:
8686 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
8688 case TARGET_NR_setregid
:
8689 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
8691 case TARGET_NR_getgroups
:
8693 int gidsetsize
= arg1
;
8694 target_id
*target_grouplist
;
8698 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8699 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8700 if (gidsetsize
== 0)
8702 if (!is_error(ret
)) {
8703 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
8704 if (!target_grouplist
)
8706 for(i
= 0;i
< ret
; i
++)
8707 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
8708 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
8712 case TARGET_NR_setgroups
:
8714 int gidsetsize
= arg1
;
8715 target_id
*target_grouplist
;
8716 gid_t
*grouplist
= NULL
;
8719 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8720 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
8721 if (!target_grouplist
) {
8722 ret
= -TARGET_EFAULT
;
8725 for (i
= 0; i
< gidsetsize
; i
++) {
8726 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
8728 unlock_user(target_grouplist
, arg2
, 0);
8730 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8733 case TARGET_NR_fchown
:
8734 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
8736 #if defined(TARGET_NR_fchownat)
8737 case TARGET_NR_fchownat
:
8738 if (!(p
= lock_user_string(arg2
)))
8740 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
8741 low2highgid(arg4
), arg5
));
8742 unlock_user(p
, arg2
, 0);
8745 #ifdef TARGET_NR_setresuid
8746 case TARGET_NR_setresuid
:
8747 ret
= get_errno(setresuid(low2highuid(arg1
),
8749 low2highuid(arg3
)));
8752 #ifdef TARGET_NR_getresuid
8753 case TARGET_NR_getresuid
:
8755 uid_t ruid
, euid
, suid
;
8756 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8757 if (!is_error(ret
)) {
8758 if (put_user_id(high2lowuid(ruid
), arg1
)
8759 || put_user_id(high2lowuid(euid
), arg2
)
8760 || put_user_id(high2lowuid(suid
), arg3
))
8766 #ifdef TARGET_NR_getresgid
8767 case TARGET_NR_setresgid
:
8768 ret
= get_errno(setresgid(low2highgid(arg1
),
8770 low2highgid(arg3
)));
8773 #ifdef TARGET_NR_getresgid
8774 case TARGET_NR_getresgid
:
8776 gid_t rgid
, egid
, sgid
;
8777 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8778 if (!is_error(ret
)) {
8779 if (put_user_id(high2lowgid(rgid
), arg1
)
8780 || put_user_id(high2lowgid(egid
), arg2
)
8781 || put_user_id(high2lowgid(sgid
), arg3
))
8787 #ifdef TARGET_NR_chown
8788 case TARGET_NR_chown
:
8789 if (!(p
= lock_user_string(arg1
)))
8791 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
8792 unlock_user(p
, arg1
, 0);
8795 case TARGET_NR_setuid
:
8796 ret
= get_errno(setuid(low2highuid(arg1
)));
8798 case TARGET_NR_setgid
:
8799 ret
= get_errno(setgid(low2highgid(arg1
)));
8801 case TARGET_NR_setfsuid
:
8802 ret
= get_errno(setfsuid(arg1
));
8804 case TARGET_NR_setfsgid
:
8805 ret
= get_errno(setfsgid(arg1
));
8808 #ifdef TARGET_NR_lchown32
8809 case TARGET_NR_lchown32
:
8810 if (!(p
= lock_user_string(arg1
)))
8812 ret
= get_errno(lchown(p
, arg2
, arg3
));
8813 unlock_user(p
, arg1
, 0);
8816 #ifdef TARGET_NR_getuid32
8817 case TARGET_NR_getuid32
:
8818 ret
= get_errno(getuid());
8822 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8823 /* Alpha specific */
8824 case TARGET_NR_getxuid
:
8828 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
8830 ret
= get_errno(getuid());
8833 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8834 /* Alpha specific */
8835 case TARGET_NR_getxgid
:
8839 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
8841 ret
= get_errno(getgid());
8844 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8845 /* Alpha specific */
8846 case TARGET_NR_osf_getsysinfo
:
8847 ret
= -TARGET_EOPNOTSUPP
;
8849 case TARGET_GSI_IEEE_FP_CONTROL
:
8851 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
8853 /* Copied from linux ieee_fpcr_to_swcr. */
8854 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
8855 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
8856 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
8857 | SWCR_TRAP_ENABLE_DZE
8858 | SWCR_TRAP_ENABLE_OVF
);
8859 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
8860 | SWCR_TRAP_ENABLE_INE
);
8861 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
8862 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
8864 if (put_user_u64 (swcr
, arg2
))
8870 /* case GSI_IEEE_STATE_AT_SIGNAL:
8871 -- Not implemented in linux kernel.
8873 -- Retrieves current unaligned access state; not much used.
8875 -- Retrieves implver information; surely not used.
8877 -- Grabs a copy of the HWRPB; surely not used.
8882 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8883 /* Alpha specific */
8884 case TARGET_NR_osf_setsysinfo
:
8885 ret
= -TARGET_EOPNOTSUPP
;
8887 case TARGET_SSI_IEEE_FP_CONTROL
:
8889 uint64_t swcr
, fpcr
, orig_fpcr
;
8891 if (get_user_u64 (swcr
, arg2
)) {
8894 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8895 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
8897 /* Copied from linux ieee_swcr_to_fpcr. */
8898 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
8899 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
8900 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
8901 | SWCR_TRAP_ENABLE_DZE
8902 | SWCR_TRAP_ENABLE_OVF
)) << 48;
8903 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
8904 | SWCR_TRAP_ENABLE_INE
)) << 57;
8905 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
8906 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
8908 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8913 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
8915 uint64_t exc
, fpcr
, orig_fpcr
;
8918 if (get_user_u64(exc
, arg2
)) {
8922 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
8924 /* We only add to the exception status here. */
8925 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
8927 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
8930 /* Old exceptions are not signaled. */
8931 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
8933 /* If any exceptions set by this call,
8934 and are unmasked, send a signal. */
8936 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
8937 si_code
= TARGET_FPE_FLTRES
;
8939 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
8940 si_code
= TARGET_FPE_FLTUND
;
8942 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
8943 si_code
= TARGET_FPE_FLTOVF
;
8945 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
8946 si_code
= TARGET_FPE_FLTDIV
;
8948 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
8949 si_code
= TARGET_FPE_FLTINV
;
8952 target_siginfo_t info
;
8953 info
.si_signo
= SIGFPE
;
8955 info
.si_code
= si_code
;
8956 info
._sifields
._sigfault
._addr
8957 = ((CPUArchState
*)cpu_env
)->pc
;
8958 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8963 /* case SSI_NVPAIRS:
8964 -- Used with SSIN_UACPROC to enable unaligned accesses.
8965 case SSI_IEEE_STATE_AT_SIGNAL:
8966 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8967 -- Not implemented in linux kernel
8972 #ifdef TARGET_NR_osf_sigprocmask
8973 /* Alpha specific. */
8974 case TARGET_NR_osf_sigprocmask
:
8978 sigset_t set
, oldset
;
8981 case TARGET_SIG_BLOCK
:
8984 case TARGET_SIG_UNBLOCK
:
8987 case TARGET_SIG_SETMASK
:
8991 ret
= -TARGET_EINVAL
;
8995 target_to_host_old_sigset(&set
, &mask
);
8996 do_sigprocmask(how
, &set
, &oldset
);
8997 host_to_target_old_sigset(&mask
, &oldset
);
9003 #ifdef TARGET_NR_getgid32
9004 case TARGET_NR_getgid32
:
9005 ret
= get_errno(getgid());
9008 #ifdef TARGET_NR_geteuid32
9009 case TARGET_NR_geteuid32
:
9010 ret
= get_errno(geteuid());
9013 #ifdef TARGET_NR_getegid32
9014 case TARGET_NR_getegid32
:
9015 ret
= get_errno(getegid());
9018 #ifdef TARGET_NR_setreuid32
9019 case TARGET_NR_setreuid32
:
9020 ret
= get_errno(setreuid(arg1
, arg2
));
9023 #ifdef TARGET_NR_setregid32
9024 case TARGET_NR_setregid32
:
9025 ret
= get_errno(setregid(arg1
, arg2
));
9028 #ifdef TARGET_NR_getgroups32
9029 case TARGET_NR_getgroups32
:
9031 int gidsetsize
= arg1
;
9032 uint32_t *target_grouplist
;
9036 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9037 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
9038 if (gidsetsize
== 0)
9040 if (!is_error(ret
)) {
9041 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
9042 if (!target_grouplist
) {
9043 ret
= -TARGET_EFAULT
;
9046 for(i
= 0;i
< ret
; i
++)
9047 target_grouplist
[i
] = tswap32(grouplist
[i
]);
9048 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
9053 #ifdef TARGET_NR_setgroups32
9054 case TARGET_NR_setgroups32
:
9056 int gidsetsize
= arg1
;
9057 uint32_t *target_grouplist
;
9061 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
9062 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
9063 if (!target_grouplist
) {
9064 ret
= -TARGET_EFAULT
;
9067 for(i
= 0;i
< gidsetsize
; i
++)
9068 grouplist
[i
] = tswap32(target_grouplist
[i
]);
9069 unlock_user(target_grouplist
, arg2
, 0);
9070 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
9074 #ifdef TARGET_NR_fchown32
9075 case TARGET_NR_fchown32
:
9076 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
9079 #ifdef TARGET_NR_setresuid32
9080 case TARGET_NR_setresuid32
:
9081 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
9084 #ifdef TARGET_NR_getresuid32
9085 case TARGET_NR_getresuid32
:
9087 uid_t ruid
, euid
, suid
;
9088 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
9089 if (!is_error(ret
)) {
9090 if (put_user_u32(ruid
, arg1
)
9091 || put_user_u32(euid
, arg2
)
9092 || put_user_u32(suid
, arg3
))
9098 #ifdef TARGET_NR_setresgid32
9099 case TARGET_NR_setresgid32
:
9100 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
9103 #ifdef TARGET_NR_getresgid32
9104 case TARGET_NR_getresgid32
:
9106 gid_t rgid
, egid
, sgid
;
9107 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
9108 if (!is_error(ret
)) {
9109 if (put_user_u32(rgid
, arg1
)
9110 || put_user_u32(egid
, arg2
)
9111 || put_user_u32(sgid
, arg3
))
9117 #ifdef TARGET_NR_chown32
9118 case TARGET_NR_chown32
:
9119 if (!(p
= lock_user_string(arg1
)))
9121 ret
= get_errno(chown(p
, arg2
, arg3
));
9122 unlock_user(p
, arg1
, 0);
9125 #ifdef TARGET_NR_setuid32
9126 case TARGET_NR_setuid32
:
9127 ret
= get_errno(setuid(arg1
));
9130 #ifdef TARGET_NR_setgid32
9131 case TARGET_NR_setgid32
:
9132 ret
= get_errno(setgid(arg1
));
9135 #ifdef TARGET_NR_setfsuid32
9136 case TARGET_NR_setfsuid32
:
9137 ret
= get_errno(setfsuid(arg1
));
9140 #ifdef TARGET_NR_setfsgid32
9141 case TARGET_NR_setfsgid32
:
9142 ret
= get_errno(setfsgid(arg1
));
9146 case TARGET_NR_pivot_root
:
9148 #ifdef TARGET_NR_mincore
9149 case TARGET_NR_mincore
:
9152 ret
= -TARGET_EFAULT
;
9153 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
9155 if (!(p
= lock_user_string(arg3
)))
9157 ret
= get_errno(mincore(a
, arg2
, p
));
9158 unlock_user(p
, arg3
, ret
);
9160 unlock_user(a
, arg1
, 0);
9164 #ifdef TARGET_NR_arm_fadvise64_64
9165 case TARGET_NR_arm_fadvise64_64
:
9168 * arm_fadvise64_64 looks like fadvise64_64 but
9169 * with different argument order
9177 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9178 #ifdef TARGET_NR_fadvise64_64
9179 case TARGET_NR_fadvise64_64
:
9181 #ifdef TARGET_NR_fadvise64
9182 case TARGET_NR_fadvise64
:
9186 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
9187 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
9188 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
9189 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
9193 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
9196 #ifdef TARGET_NR_madvise
9197 case TARGET_NR_madvise
:
9198 /* A straight passthrough may not be safe because qemu sometimes
9199 turns private file-backed mappings into anonymous mappings.
9200 This will break MADV_DONTNEED.
9201 This is a hint, so ignoring and returning success is ok. */
9205 #if TARGET_ABI_BITS == 32
9206 case TARGET_NR_fcntl64
:
9210 struct target_flock64
*target_fl
;
9212 struct target_eabi_flock64
*target_efl
;
9215 cmd
= target_to_host_fcntl_cmd(arg2
);
9216 if (cmd
== -TARGET_EINVAL
) {
9222 case TARGET_F_GETLK64
:
9224 if (((CPUARMState
*)cpu_env
)->eabi
) {
9225 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9227 fl
.l_type
= tswap16(target_efl
->l_type
);
9228 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9229 fl
.l_start
= tswap64(target_efl
->l_start
);
9230 fl
.l_len
= tswap64(target_efl
->l_len
);
9231 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9232 unlock_user_struct(target_efl
, arg3
, 0);
9236 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9238 fl
.l_type
= tswap16(target_fl
->l_type
);
9239 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9240 fl
.l_start
= tswap64(target_fl
->l_start
);
9241 fl
.l_len
= tswap64(target_fl
->l_len
);
9242 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9243 unlock_user_struct(target_fl
, arg3
, 0);
9245 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9248 if (((CPUARMState
*)cpu_env
)->eabi
) {
9249 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
9251 target_efl
->l_type
= tswap16(fl
.l_type
);
9252 target_efl
->l_whence
= tswap16(fl
.l_whence
);
9253 target_efl
->l_start
= tswap64(fl
.l_start
);
9254 target_efl
->l_len
= tswap64(fl
.l_len
);
9255 target_efl
->l_pid
= tswap32(fl
.l_pid
);
9256 unlock_user_struct(target_efl
, arg3
, 1);
9260 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
9262 target_fl
->l_type
= tswap16(fl
.l_type
);
9263 target_fl
->l_whence
= tswap16(fl
.l_whence
);
9264 target_fl
->l_start
= tswap64(fl
.l_start
);
9265 target_fl
->l_len
= tswap64(fl
.l_len
);
9266 target_fl
->l_pid
= tswap32(fl
.l_pid
);
9267 unlock_user_struct(target_fl
, arg3
, 1);
9272 case TARGET_F_SETLK64
:
9273 case TARGET_F_SETLKW64
:
9275 if (((CPUARMState
*)cpu_env
)->eabi
) {
9276 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
9278 fl
.l_type
= tswap16(target_efl
->l_type
);
9279 fl
.l_whence
= tswap16(target_efl
->l_whence
);
9280 fl
.l_start
= tswap64(target_efl
->l_start
);
9281 fl
.l_len
= tswap64(target_efl
->l_len
);
9282 fl
.l_pid
= tswap32(target_efl
->l_pid
);
9283 unlock_user_struct(target_efl
, arg3
, 0);
9287 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
9289 fl
.l_type
= tswap16(target_fl
->l_type
);
9290 fl
.l_whence
= tswap16(target_fl
->l_whence
);
9291 fl
.l_start
= tswap64(target_fl
->l_start
);
9292 fl
.l_len
= tswap64(target_fl
->l_len
);
9293 fl
.l_pid
= tswap32(target_fl
->l_pid
);
9294 unlock_user_struct(target_fl
, arg3
, 0);
9296 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
9299 ret
= do_fcntl(arg1
, arg2
, arg3
);
9305 #ifdef TARGET_NR_cacheflush
9306 case TARGET_NR_cacheflush
:
9307 /* self-modifying code is handled automatically, so nothing needed */
9311 #ifdef TARGET_NR_security
9312 case TARGET_NR_security
:
9315 #ifdef TARGET_NR_getpagesize
9316 case TARGET_NR_getpagesize
:
9317 ret
= TARGET_PAGE_SIZE
;
9320 case TARGET_NR_gettid
:
9321 ret
= get_errno(gettid());
9323 #ifdef TARGET_NR_readahead
9324 case TARGET_NR_readahead
:
9325 #if TARGET_ABI_BITS == 32
9326 if (regpairs_aligned(cpu_env
)) {
9331 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
9333 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
9338 #ifdef TARGET_NR_setxattr
9339 case TARGET_NR_listxattr
:
9340 case TARGET_NR_llistxattr
:
9344 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9346 ret
= -TARGET_EFAULT
;
9350 p
= lock_user_string(arg1
);
9352 if (num
== TARGET_NR_listxattr
) {
9353 ret
= get_errno(listxattr(p
, b
, arg3
));
9355 ret
= get_errno(llistxattr(p
, b
, arg3
));
9358 ret
= -TARGET_EFAULT
;
9360 unlock_user(p
, arg1
, 0);
9361 unlock_user(b
, arg2
, arg3
);
9364 case TARGET_NR_flistxattr
:
9368 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9370 ret
= -TARGET_EFAULT
;
9374 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
9375 unlock_user(b
, arg2
, arg3
);
9378 case TARGET_NR_setxattr
:
9379 case TARGET_NR_lsetxattr
:
9381 void *p
, *n
, *v
= 0;
9383 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9385 ret
= -TARGET_EFAULT
;
9389 p
= lock_user_string(arg1
);
9390 n
= lock_user_string(arg2
);
9392 if (num
== TARGET_NR_setxattr
) {
9393 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
9395 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
9398 ret
= -TARGET_EFAULT
;
9400 unlock_user(p
, arg1
, 0);
9401 unlock_user(n
, arg2
, 0);
9402 unlock_user(v
, arg3
, 0);
9405 case TARGET_NR_fsetxattr
:
9409 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
9411 ret
= -TARGET_EFAULT
;
9415 n
= lock_user_string(arg2
);
9417 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
9419 ret
= -TARGET_EFAULT
;
9421 unlock_user(n
, arg2
, 0);
9422 unlock_user(v
, arg3
, 0);
9425 case TARGET_NR_getxattr
:
9426 case TARGET_NR_lgetxattr
:
9428 void *p
, *n
, *v
= 0;
9430 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9432 ret
= -TARGET_EFAULT
;
9436 p
= lock_user_string(arg1
);
9437 n
= lock_user_string(arg2
);
9439 if (num
== TARGET_NR_getxattr
) {
9440 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
9442 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
9445 ret
= -TARGET_EFAULT
;
9447 unlock_user(p
, arg1
, 0);
9448 unlock_user(n
, arg2
, 0);
9449 unlock_user(v
, arg3
, arg4
);
9452 case TARGET_NR_fgetxattr
:
9456 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9458 ret
= -TARGET_EFAULT
;
9462 n
= lock_user_string(arg2
);
9464 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
9466 ret
= -TARGET_EFAULT
;
9468 unlock_user(n
, arg2
, 0);
9469 unlock_user(v
, arg3
, arg4
);
9472 case TARGET_NR_removexattr
:
9473 case TARGET_NR_lremovexattr
:
9476 p
= lock_user_string(arg1
);
9477 n
= lock_user_string(arg2
);
9479 if (num
== TARGET_NR_removexattr
) {
9480 ret
= get_errno(removexattr(p
, n
));
9482 ret
= get_errno(lremovexattr(p
, n
));
9485 ret
= -TARGET_EFAULT
;
9487 unlock_user(p
, arg1
, 0);
9488 unlock_user(n
, arg2
, 0);
9491 case TARGET_NR_fremovexattr
:
9494 n
= lock_user_string(arg2
);
9496 ret
= get_errno(fremovexattr(arg1
, n
));
9498 ret
= -TARGET_EFAULT
;
9500 unlock_user(n
, arg2
, 0);
9504 #endif /* CONFIG_ATTR */
9505 #ifdef TARGET_NR_set_thread_area
9506 case TARGET_NR_set_thread_area
:
9507 #if defined(TARGET_MIPS)
9508 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
9511 #elif defined(TARGET_CRIS)
9513 ret
= -TARGET_EINVAL
;
9515 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
9519 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9520 ret
= do_set_thread_area(cpu_env
, arg1
);
9522 #elif defined(TARGET_M68K)
9524 TaskState
*ts
= cpu
->opaque
;
9525 ts
->tp_value
= arg1
;
9530 goto unimplemented_nowarn
;
9533 #ifdef TARGET_NR_get_thread_area
9534 case TARGET_NR_get_thread_area
:
9535 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9536 ret
= do_get_thread_area(cpu_env
, arg1
);
9538 #elif defined(TARGET_M68K)
9540 TaskState
*ts
= cpu
->opaque
;
9545 goto unimplemented_nowarn
;
9548 #ifdef TARGET_NR_getdomainname
9549 case TARGET_NR_getdomainname
:
9550 goto unimplemented_nowarn
;
9553 #ifdef TARGET_NR_clock_gettime
9554 case TARGET_NR_clock_gettime
:
9557 ret
= get_errno(clock_gettime(arg1
, &ts
));
9558 if (!is_error(ret
)) {
9559 host_to_target_timespec(arg2
, &ts
);
9564 #ifdef TARGET_NR_clock_getres
9565 case TARGET_NR_clock_getres
:
9568 ret
= get_errno(clock_getres(arg1
, &ts
));
9569 if (!is_error(ret
)) {
9570 host_to_target_timespec(arg2
, &ts
);
9575 #ifdef TARGET_NR_clock_nanosleep
9576 case TARGET_NR_clock_nanosleep
:
9579 target_to_host_timespec(&ts
, arg3
);
9580 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
9582 host_to_target_timespec(arg4
, &ts
);
9584 #if defined(TARGET_PPC)
9585 /* clock_nanosleep is odd in that it returns positive errno values.
9586 * On PPC, CR0 bit 3 should be set in such a situation. */
9588 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
9595 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9596 case TARGET_NR_set_tid_address
:
9597 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
9601 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9602 case TARGET_NR_tkill
:
9603 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
9607 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9608 case TARGET_NR_tgkill
:
9609 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
9610 target_to_host_signal(arg3
)));
9614 #ifdef TARGET_NR_set_robust_list
9615 case TARGET_NR_set_robust_list
:
9616 case TARGET_NR_get_robust_list
:
9617 /* The ABI for supporting robust futexes has userspace pass
9618 * the kernel a pointer to a linked list which is updated by
9619 * userspace after the syscall; the list is walked by the kernel
9620 * when the thread exits. Since the linked list in QEMU guest
9621 * memory isn't a valid linked list for the host and we have
9622 * no way to reliably intercept the thread-death event, we can't
9623 * support these. Silently return ENOSYS so that guest userspace
9624 * falls back to a non-robust futex implementation (which should
9625 * be OK except in the corner case of the guest crashing while
9626 * holding a mutex that is shared with another process via
9629 goto unimplemented_nowarn
;
9632 #if defined(TARGET_NR_utimensat)
9633 case TARGET_NR_utimensat
:
9635 struct timespec
*tsp
, ts
[2];
9639 target_to_host_timespec(ts
, arg3
);
9640 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
9644 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
9646 if (!(p
= lock_user_string(arg2
))) {
9647 ret
= -TARGET_EFAULT
;
9650 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
9651 unlock_user(p
, arg2
, 0);
9656 case TARGET_NR_futex
:
9657 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9659 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9660 case TARGET_NR_inotify_init
:
9661 ret
= get_errno(sys_inotify_init());
9664 #ifdef CONFIG_INOTIFY1
9665 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9666 case TARGET_NR_inotify_init1
:
9667 ret
= get_errno(sys_inotify_init1(arg1
));
9671 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9672 case TARGET_NR_inotify_add_watch
:
9673 p
= lock_user_string(arg2
);
9674 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
9675 unlock_user(p
, arg2
, 0);
9678 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9679 case TARGET_NR_inotify_rm_watch
:
9680 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
9684 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9685 case TARGET_NR_mq_open
:
9687 struct mq_attr posix_mq_attr
, *attrp
;
9689 p
= lock_user_string(arg1
- 1);
9691 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
9692 attrp
= &posix_mq_attr
;
9696 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
9697 unlock_user (p
, arg1
, 0);
9701 case TARGET_NR_mq_unlink
:
9702 p
= lock_user_string(arg1
- 1);
9703 ret
= get_errno(mq_unlink(p
));
9704 unlock_user (p
, arg1
, 0);
9707 case TARGET_NR_mq_timedsend
:
9711 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9713 target_to_host_timespec(&ts
, arg5
);
9714 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
9715 host_to_target_timespec(arg5
, &ts
);
9718 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
9719 unlock_user (p
, arg2
, arg3
);
9723 case TARGET_NR_mq_timedreceive
:
9728 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
9730 target_to_host_timespec(&ts
, arg5
);
9731 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
9732 host_to_target_timespec(arg5
, &ts
);
9735 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
9736 unlock_user (p
, arg2
, arg3
);
9738 put_user_u32(prio
, arg4
);
9742 /* Not implemented for now... */
9743 /* case TARGET_NR_mq_notify: */
9746 case TARGET_NR_mq_getsetattr
:
9748 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
9751 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
9752 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
9755 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
9756 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
9763 #ifdef CONFIG_SPLICE
9764 #ifdef TARGET_NR_tee
9767 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
9771 #ifdef TARGET_NR_splice
9772 case TARGET_NR_splice
:
9774 loff_t loff_in
, loff_out
;
9775 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
9777 if (get_user_u64(loff_in
, arg2
)) {
9780 ploff_in
= &loff_in
;
9783 if (get_user_u64(loff_out
, arg4
)) {
9786 ploff_out
= &loff_out
;
9788 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
9790 if (put_user_u64(loff_in
, arg2
)) {
9795 if (put_user_u64(loff_out
, arg4
)) {
9802 #ifdef TARGET_NR_vmsplice
9803 case TARGET_NR_vmsplice
:
9805 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9807 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
9808 unlock_iovec(vec
, arg2
, arg3
, 0);
9810 ret
= -host_to_target_errno(errno
);
9815 #endif /* CONFIG_SPLICE */
9816 #ifdef CONFIG_EVENTFD
9817 #if defined(TARGET_NR_eventfd)
9818 case TARGET_NR_eventfd
:
9819 ret
= get_errno(eventfd(arg1
, 0));
9820 fd_trans_unregister(ret
);
9823 #if defined(TARGET_NR_eventfd2)
9824 case TARGET_NR_eventfd2
:
9826 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
9827 if (arg2
& TARGET_O_NONBLOCK
) {
9828 host_flags
|= O_NONBLOCK
;
9830 if (arg2
& TARGET_O_CLOEXEC
) {
9831 host_flags
|= O_CLOEXEC
;
9833 ret
= get_errno(eventfd(arg1
, host_flags
));
9834 fd_trans_unregister(ret
);
9838 #endif /* CONFIG_EVENTFD */
9839 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9840 case TARGET_NR_fallocate
:
9841 #if TARGET_ABI_BITS == 32
9842 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
9843 target_offset64(arg5
, arg6
)));
9845 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
9849 #if defined(CONFIG_SYNC_FILE_RANGE)
9850 #if defined(TARGET_NR_sync_file_range)
9851 case TARGET_NR_sync_file_range
:
9852 #if TARGET_ABI_BITS == 32
9853 #if defined(TARGET_MIPS)
9854 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9855 target_offset64(arg5
, arg6
), arg7
));
9857 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
9858 target_offset64(arg4
, arg5
), arg6
));
9859 #endif /* !TARGET_MIPS */
9861 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
9865 #if defined(TARGET_NR_sync_file_range2)
9866 case TARGET_NR_sync_file_range2
:
9867 /* This is like sync_file_range but the arguments are reordered */
9868 #if TARGET_ABI_BITS == 32
9869 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
9870 target_offset64(arg5
, arg6
), arg2
));
9872 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
9877 #if defined(TARGET_NR_signalfd4)
9878 case TARGET_NR_signalfd4
:
9879 ret
= do_signalfd4(arg1
, arg2
, arg4
);
9882 #if defined(TARGET_NR_signalfd)
9883 case TARGET_NR_signalfd
:
9884 ret
= do_signalfd4(arg1
, arg2
, 0);
9887 #if defined(CONFIG_EPOLL)
9888 #if defined(TARGET_NR_epoll_create)
9889 case TARGET_NR_epoll_create
:
9890 ret
= get_errno(epoll_create(arg1
));
9893 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9894 case TARGET_NR_epoll_create1
:
9895 ret
= get_errno(epoll_create1(arg1
));
9898 #if defined(TARGET_NR_epoll_ctl)
9899 case TARGET_NR_epoll_ctl
:
9901 struct epoll_event ep
;
9902 struct epoll_event
*epp
= 0;
9904 struct target_epoll_event
*target_ep
;
9905 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
9908 ep
.events
= tswap32(target_ep
->events
);
9909 /* The epoll_data_t union is just opaque data to the kernel,
9910 * so we transfer all 64 bits across and need not worry what
9911 * actual data type it is.
9913 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
9914 unlock_user_struct(target_ep
, arg4
, 0);
9917 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
9922 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9923 #define IMPLEMENT_EPOLL_PWAIT
9925 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9926 #if defined(TARGET_NR_epoll_wait)
9927 case TARGET_NR_epoll_wait
:
9929 #if defined(IMPLEMENT_EPOLL_PWAIT)
9930 case TARGET_NR_epoll_pwait
:
9933 struct target_epoll_event
*target_ep
;
9934 struct epoll_event
*ep
;
9936 int maxevents
= arg3
;
9939 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
9940 maxevents
* sizeof(struct target_epoll_event
), 1);
9945 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
9948 #if defined(IMPLEMENT_EPOLL_PWAIT)
9949 case TARGET_NR_epoll_pwait
:
9951 target_sigset_t
*target_set
;
9952 sigset_t _set
, *set
= &_set
;
9955 target_set
= lock_user(VERIFY_READ
, arg5
,
9956 sizeof(target_sigset_t
), 1);
9958 unlock_user(target_ep
, arg2
, 0);
9961 target_to_host_sigset(set
, target_set
);
9962 unlock_user(target_set
, arg5
, 0);
9967 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
9971 #if defined(TARGET_NR_epoll_wait)
9972 case TARGET_NR_epoll_wait
:
9973 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
9977 ret
= -TARGET_ENOSYS
;
9979 if (!is_error(ret
)) {
9981 for (i
= 0; i
< ret
; i
++) {
9982 target_ep
[i
].events
= tswap32(ep
[i
].events
);
9983 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
9986 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
9991 #ifdef TARGET_NR_prlimit64
9992 case TARGET_NR_prlimit64
:
9994 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9995 struct target_rlimit64
*target_rnew
, *target_rold
;
9996 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9997 int resource
= target_to_host_resource(arg2
);
9999 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
10002 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
10003 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
10004 unlock_user_struct(target_rnew
, arg3
, 0);
10008 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
10009 if (!is_error(ret
) && arg4
) {
10010 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
10013 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
10014 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
10015 unlock_user_struct(target_rold
, arg4
, 1);
10020 #ifdef TARGET_NR_gethostname
10021 case TARGET_NR_gethostname
:
10023 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10025 ret
= get_errno(gethostname(name
, arg2
));
10026 unlock_user(name
, arg1
, arg2
);
10028 ret
= -TARGET_EFAULT
;
10033 #ifdef TARGET_NR_atomic_cmpxchg_32
10034 case TARGET_NR_atomic_cmpxchg_32
:
10036 /* should use start_exclusive from main.c */
10037 abi_ulong mem_value
;
10038 if (get_user_u32(mem_value
, arg6
)) {
10039 target_siginfo_t info
;
10040 info
.si_signo
= SIGSEGV
;
10042 info
.si_code
= TARGET_SEGV_MAPERR
;
10043 info
._sifields
._sigfault
._addr
= arg6
;
10044 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10048 if (mem_value
== arg2
)
10049 put_user_u32(arg1
, arg6
);
10054 #ifdef TARGET_NR_atomic_barrier
10055 case TARGET_NR_atomic_barrier
:
10057 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10063 #ifdef TARGET_NR_timer_create
10064 case TARGET_NR_timer_create
:
10066 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10068 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
10071 int timer_index
= next_free_host_timer();
10073 if (timer_index
< 0) {
10074 ret
= -TARGET_EAGAIN
;
10076 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
10079 phost_sevp
= &host_sevp
;
10080 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
10086 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
10090 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
10099 #ifdef TARGET_NR_timer_settime
10100 case TARGET_NR_timer_settime
:
10102 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10103 * struct itimerspec * old_value */
10104 target_timer_t timerid
= get_timer_id(arg1
);
10108 } else if (arg3
== 0) {
10109 ret
= -TARGET_EINVAL
;
10111 timer_t htimer
= g_posix_timers
[timerid
];
10112 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
10114 target_to_host_itimerspec(&hspec_new
, arg3
);
10116 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
10117 host_to_target_itimerspec(arg2
, &hspec_old
);
10123 #ifdef TARGET_NR_timer_gettime
10124 case TARGET_NR_timer_gettime
:
10126 /* args: timer_t timerid, struct itimerspec *curr_value */
10127 target_timer_t timerid
= get_timer_id(arg1
);
10131 } else if (!arg2
) {
10132 ret
= -TARGET_EFAULT
;
10134 timer_t htimer
= g_posix_timers
[timerid
];
10135 struct itimerspec hspec
;
10136 ret
= get_errno(timer_gettime(htimer
, &hspec
));
10138 if (host_to_target_itimerspec(arg2
, &hspec
)) {
10139 ret
= -TARGET_EFAULT
;
10146 #ifdef TARGET_NR_timer_getoverrun
10147 case TARGET_NR_timer_getoverrun
:
10149 /* args: timer_t timerid */
10150 target_timer_t timerid
= get_timer_id(arg1
);
10155 timer_t htimer
= g_posix_timers
[timerid
];
10156 ret
= get_errno(timer_getoverrun(htimer
));
10158 fd_trans_unregister(ret
);
10163 #ifdef TARGET_NR_timer_delete
10164 case TARGET_NR_timer_delete
:
10166 /* args: timer_t timerid */
10167 target_timer_t timerid
= get_timer_id(arg1
);
10172 timer_t htimer
= g_posix_timers
[timerid
];
10173 ret
= get_errno(timer_delete(htimer
));
10174 g_posix_timers
[timerid
] = 0;
10180 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10181 case TARGET_NR_timerfd_create
:
10182 ret
= get_errno(timerfd_create(arg1
,
10183 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
10187 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10188 case TARGET_NR_timerfd_gettime
:
10190 struct itimerspec its_curr
;
10192 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
10194 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
10201 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10202 case TARGET_NR_timerfd_settime
:
10204 struct itimerspec its_new
, its_old
, *p_new
;
10207 if (target_to_host_itimerspec(&its_new
, arg3
)) {
10215 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
10217 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
10224 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10225 case TARGET_NR_ioprio_get
:
10226 ret
= get_errno(ioprio_get(arg1
, arg2
));
10230 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10231 case TARGET_NR_ioprio_set
:
10232 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
10236 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10237 case TARGET_NR_setns
:
10238 ret
= get_errno(setns(arg1
, arg2
));
10241 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10242 case TARGET_NR_unshare
:
10243 ret
= get_errno(unshare(arg1
));
10249 gemu_log("qemu: Unsupported syscall: %d\n", num
);
10250 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10251 unimplemented_nowarn
:
10253 ret
= -TARGET_ENOSYS
;
10258 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
10261 print_syscall_ret(num
, ret
);
10264 ret
= -TARGET_EFAULT
;