4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
49 int __clone2(int (*fn
)(void *), void *child_stack_base
,
50 size_t stack_size
, int flags
, void *arg
, ...);
52 #include <sys/socket.h>
56 #include <sys/times.h>
59 #include <sys/statfs.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include <linux/route.h>
109 #include "linux_loop.h"
110 #include "cpu-uname.h"
114 #if defined(CONFIG_USE_NPTL)
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 /* XXX: Hardcode the above values. */
119 #define CLONE_NPTL_FLAGS2 0
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* _syscallN(type, name, typeK, argK ...): expand to a static wrapper
 * function `name` that traps directly to the host kernel via
 * syscall(2) with __NR_<name>, bypassing any glibc wrapper.  Used for
 * host syscalls glibc does not (or did not) expose.  The opening and
 * closing braces of each generated body, and some continuation lines,
 * are not visible in this excerpt; all tokens below are unchanged. */
137 #define _syscall0(type,name) \
138 static type name (void) \
140 return syscall(__NR_##name); \
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
146 return syscall(__NR_##name, arg1); \
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
152 return syscall(__NR_##name, arg1, arg2); \
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 #define __NR__llseek __NR_lseek
204 _syscall0(int, gettid
)
206 /* This is a replacement for the host gettid() and must return a host
208 static int gettid(void) {
213 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
221 loff_t
*, res
, uint
, wh
);
223 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
224 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
225 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
226 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
228 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
229 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group
,int,error_code
)
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address
,int *,tidptr
)
237 #if defined(CONFIG_USE_NPTL)
238 #if defined(TARGET_NR_futex) && defined(__NR_futex)
239 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
240 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
243 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
244 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
245 unsigned long *, user_mask_ptr
);
246 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
247 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
249 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
252 static bitmask_transtbl fcntl_flags_tbl
[] = {
253 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
254 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
255 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
256 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
257 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
258 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
259 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
260 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
261 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
262 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
263 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
264 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
265 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
266 #if defined(O_DIRECT)
267 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
269 #if defined(O_NOATIME)
270 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
272 #if defined(O_CLOEXEC)
273 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
276 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
278 /* Don't terminate the list prematurely on 64-bit host+guest. */
279 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
280 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
285 #define COPY_UTSNAME_FIELD(dest, src) \
287 /* __NEW_UTS_LEN doesn't include terminating null */ \
288 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
289 (dest)[__NEW_UTS_LEN] = '\0'; \
292 static int sys_uname(struct new_utsname
*buf
)
294 struct utsname uts_buf
;
296 if (uname(&uts_buf
) < 0)
300 * Just in case these have some differences, we
301 * translate utsname to new_utsname (which is the
302 * struct linux kernel uses).
305 memset(buf
, 0, sizeof(*buf
));
306 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
307 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
308 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
309 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
310 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
312 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
316 #undef COPY_UTSNAME_FIELD
/* Fill buf with the current working directory of the host process.
 * Returns the length of the path string INCLUDING the trailing NUL
 * on success, or -1 on failure (getcwd has already set errno). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(cwd) + 1;
}
328 #ifdef TARGET_NR_openat
/* Host wrapper for openat(2).  The real syscall is variadic: the
 * mode argument is consumed only when O_CREAT is present in flags,
 * so we forward it only in that case. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    return (flags & O_CREAT) ? openat(dirfd, pathname, flags, mode)
                             : openat(dirfd, pathname, flags);
}
342 #ifdef TARGET_NR_utimensat
343 #ifdef CONFIG_UTIMENSAT
/* Host wrapper used when the libc provides utimensat()/futimens().
 * A NULL pathname means "operate on the object referred to by dirfd
 * itself", which maps to futimens(); otherwise the request is
 * forwarded to utimensat() unchanged. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
352 #elif defined(__NR_utimensat)
353 #define __NR_sys_utimensat __NR_utimensat
354 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
355 const struct timespec
*,tsp
,int,flags
)
357 static int sys_utimensat(int dirfd
, const char *pathname
,
358 const struct timespec times
[2], int flags
)
364 #endif /* TARGET_NR_utimensat */
366 #ifdef CONFIG_INOTIFY
367 #include <sys/inotify.h>
369 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin host wrapper: create a new inotify instance and return its
 * file descriptor (or -1 with errno set). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
375 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin host wrapper: add a watch for the events in mask on pathname
 * to the inotify instance fd; returns the watch descriptor or -1. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
381 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin host wrapper: remove watch descriptor wd from the inotify
 * instance fd; returns 0 on success, -1 with errno set otherwise. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
387 #ifdef CONFIG_INOTIFY1
388 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin host wrapper: like sys_inotify_init() but accepts the
 * inotify_init1(2) flags (e.g. IN_NONBLOCK, IN_CLOEXEC). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
396 /* Userspace can usually survive runtime without inotify */
397 #undef TARGET_NR_inotify_init
398 #undef TARGET_NR_inotify_init1
399 #undef TARGET_NR_inotify_add_watch
400 #undef TARGET_NR_inotify_rm_watch
401 #endif /* CONFIG_INOTIFY */
403 #if defined(TARGET_NR_ppoll)
405 # define __NR_ppoll -1
407 #define __NR_sys_ppoll __NR_ppoll
408 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
409 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
413 #if defined(TARGET_NR_pselect6)
414 #ifndef __NR_pselect6
415 # define __NR_pselect6 -1
417 #define __NR_sys_pselect6 __NR_pselect6
418 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
419 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
422 #if defined(TARGET_NR_prlimit64)
423 #ifndef __NR_prlimit64
424 # define __NR_prlimit64 -1
426 #define __NR_sys_prlimit64 __NR_prlimit64
427 /* The glibc rlimit structure may not be that used by the underlying syscall */
428 struct host_rlimit64
{
432 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
433 const struct host_rlimit64
*, new_limit
,
434 struct host_rlimit64
*, old_limit
)
437 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
439 static inline int regpairs_aligned(void *cpu_env
) {
440 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
442 #elif defined(TARGET_MIPS)
443 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
444 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
445 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
446 * of registers which translates to the same as ARM/MIPS, because we start with
448 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
450 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
453 #define ERRNO_TABLE_SIZE 1200
455 /* target_to_host_errno_table[] is initialized from
456 * host_to_target_errno_table[] in syscall_init(). */
457 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
461 * This list is the union of errno values overridden in asm-<arch>/errno.h
462 * minus the errnos that are not actually generic to all archs.
464 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
465 [EIDRM
] = TARGET_EIDRM
,
466 [ECHRNG
] = TARGET_ECHRNG
,
467 [EL2NSYNC
] = TARGET_EL2NSYNC
,
468 [EL3HLT
] = TARGET_EL3HLT
,
469 [EL3RST
] = TARGET_EL3RST
,
470 [ELNRNG
] = TARGET_ELNRNG
,
471 [EUNATCH
] = TARGET_EUNATCH
,
472 [ENOCSI
] = TARGET_ENOCSI
,
473 [EL2HLT
] = TARGET_EL2HLT
,
474 [EDEADLK
] = TARGET_EDEADLK
,
475 [ENOLCK
] = TARGET_ENOLCK
,
476 [EBADE
] = TARGET_EBADE
,
477 [EBADR
] = TARGET_EBADR
,
478 [EXFULL
] = TARGET_EXFULL
,
479 [ENOANO
] = TARGET_ENOANO
,
480 [EBADRQC
] = TARGET_EBADRQC
,
481 [EBADSLT
] = TARGET_EBADSLT
,
482 [EBFONT
] = TARGET_EBFONT
,
483 [ENOSTR
] = TARGET_ENOSTR
,
484 [ENODATA
] = TARGET_ENODATA
,
485 [ETIME
] = TARGET_ETIME
,
486 [ENOSR
] = TARGET_ENOSR
,
487 [ENONET
] = TARGET_ENONET
,
488 [ENOPKG
] = TARGET_ENOPKG
,
489 [EREMOTE
] = TARGET_EREMOTE
,
490 [ENOLINK
] = TARGET_ENOLINK
,
491 [EADV
] = TARGET_EADV
,
492 [ESRMNT
] = TARGET_ESRMNT
,
493 [ECOMM
] = TARGET_ECOMM
,
494 [EPROTO
] = TARGET_EPROTO
,
495 [EDOTDOT
] = TARGET_EDOTDOT
,
496 [EMULTIHOP
] = TARGET_EMULTIHOP
,
497 [EBADMSG
] = TARGET_EBADMSG
,
498 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
499 [EOVERFLOW
] = TARGET_EOVERFLOW
,
500 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
501 [EBADFD
] = TARGET_EBADFD
,
502 [EREMCHG
] = TARGET_EREMCHG
,
503 [ELIBACC
] = TARGET_ELIBACC
,
504 [ELIBBAD
] = TARGET_ELIBBAD
,
505 [ELIBSCN
] = TARGET_ELIBSCN
,
506 [ELIBMAX
] = TARGET_ELIBMAX
,
507 [ELIBEXEC
] = TARGET_ELIBEXEC
,
508 [EILSEQ
] = TARGET_EILSEQ
,
509 [ENOSYS
] = TARGET_ENOSYS
,
510 [ELOOP
] = TARGET_ELOOP
,
511 [ERESTART
] = TARGET_ERESTART
,
512 [ESTRPIPE
] = TARGET_ESTRPIPE
,
513 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
514 [EUSERS
] = TARGET_EUSERS
,
515 [ENOTSOCK
] = TARGET_ENOTSOCK
,
516 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
517 [EMSGSIZE
] = TARGET_EMSGSIZE
,
518 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
519 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
520 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
521 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
522 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
523 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
524 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
525 [EADDRINUSE
] = TARGET_EADDRINUSE
,
526 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
527 [ENETDOWN
] = TARGET_ENETDOWN
,
528 [ENETUNREACH
] = TARGET_ENETUNREACH
,
529 [ENETRESET
] = TARGET_ENETRESET
,
530 [ECONNABORTED
] = TARGET_ECONNABORTED
,
531 [ECONNRESET
] = TARGET_ECONNRESET
,
532 [ENOBUFS
] = TARGET_ENOBUFS
,
533 [EISCONN
] = TARGET_EISCONN
,
534 [ENOTCONN
] = TARGET_ENOTCONN
,
535 [EUCLEAN
] = TARGET_EUCLEAN
,
536 [ENOTNAM
] = TARGET_ENOTNAM
,
537 [ENAVAIL
] = TARGET_ENAVAIL
,
538 [EISNAM
] = TARGET_EISNAM
,
539 [EREMOTEIO
] = TARGET_EREMOTEIO
,
540 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
541 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
542 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
543 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
544 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
545 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
546 [EALREADY
] = TARGET_EALREADY
,
547 [EINPROGRESS
] = TARGET_EINPROGRESS
,
548 [ESTALE
] = TARGET_ESTALE
,
549 [ECANCELED
] = TARGET_ECANCELED
,
550 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
551 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
553 [ENOKEY
] = TARGET_ENOKEY
,
556 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
559 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
562 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
565 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
567 #ifdef ENOTRECOVERABLE
568 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
/* Translate a host errno value into the target's errno numbering,
 * using the override table populated by syscall_init().  The
 * fall-through return of err unchanged (for errnos shared by host
 * and target) is not visible in this excerpt.
 * NOTE(review): assumes 0 <= err < ERRNO_TABLE_SIZE; confirm callers
 * only pass genuine host errno values. */
572 static inline int host_to_target_errno(int err
)
574 if(host_to_target_errno_table
[err
])
575 return host_to_target_errno_table
[err
];
/* Inverse of host_to_target_errno(): map a target errno value back
 * to the host's numbering via the reverse table.  The identity
 * fall-through return is not visible in this excerpt.
 * NOTE(review): assumes 0 <= err < ERRNO_TABLE_SIZE -- see
 * target_strerror(), which range-checks before calling this. */
579 static inline int target_to_host_errno(int err
)
581 if (target_to_host_errno_table
[err
])
582 return target_to_host_errno_table
[err
];
/* Convert a host syscall result into the target convention: on host
 * failure, return the NEGATED target errno.  The success branch
 * (returning ret unchanged) is not visible in this excerpt. */
586 static inline abi_long
get_errno(abi_long ret
)
589 return -host_to_target_errno(errno
);
/* Linux syscall-return convention: values in [-4095, -1] encode
 * errnos.  The unsigned comparison catches exactly that range in a
 * single test. */
594 static inline int is_error(abi_long ret
)
596 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* Return the host strerror() text for a TARGET errno value.  Values
 * outside [0, ERRNO_TABLE_SIZE) are rejected first (the body of that
 * branch is not visible in this excerpt) so the translation table is
 * never indexed out of bounds. */
599 char *target_strerror(int err
)
601 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
604 return strerror(target_to_host_errno(err
));
607 static abi_ulong target_brk
;
608 static abi_ulong target_original_brk
;
609 static abi_ulong brk_page
;
/* Record the guest's initial program break.  Both the original and
 * the current break start at the page-aligned new_brk; brk_page
 * tracks the highest host page already reserved for the guest heap
 * and is grown later by do_brk(). */
611 void target_set_brk(abi_ulong new_brk
)
613 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
614 brk_page
= HOST_PAGE_ALIGN(target_brk
);
617 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
618 #define DEBUGF_BRK(message, args...)
620 /* do_brk() must return target values and target errnos. */
621 abi_long
do_brk(abi_ulong new_brk
)
623 abi_long mapped_addr
;
626 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
629 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
632 if (new_brk
< target_original_brk
) {
633 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
638 /* If the new brk is less than the highest page reserved to the
639 * target heap allocation, set it and we're almost done... */
640 if (new_brk
<= brk_page
) {
641 /* Heap contents are initialized to zero, as for anonymous
643 if (new_brk
> target_brk
) {
644 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
646 target_brk
= new_brk
;
647 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
651 /* We need to allocate more memory after the brk... Note that
652 * we don't use MAP_FIXED because that will map over the top of
653 * any existing mapping (like the one with the host libc or qemu
654 * itself); instead we treat "mapped but at wrong address" as
655 * a failure and unmap again.
657 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
658 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
659 PROT_READ
|PROT_WRITE
,
660 MAP_ANON
|MAP_PRIVATE
, 0, 0));
662 if (mapped_addr
== brk_page
) {
663 /* Heap contents are initialized to zero, as for anonymous
664 * mapped pages. Technically the new pages are already
665 * initialized to zero since they *are* anonymous mapped
666 * pages, however we have to take care with the contents that
667 * come from the remaining part of the previous page: it may
668 * contains garbage data due to a previous heap usage (grown
670 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
672 target_brk
= new_brk
;
673 brk_page
= HOST_PAGE_ALIGN(target_brk
);
674 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
677 } else if (mapped_addr
!= -1) {
678 /* Mapped but at wrong address, meaning there wasn't actually
679 * enough space for this brk.
681 target_munmap(mapped_addr
, new_alloc_size
);
683 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
686 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
689 #if defined(TARGET_ALPHA)
690 /* We (partially) emulate OSF/1 on Alpha, which requires we
691 return a proper errno, not an unchanged brk value. */
692 return -TARGET_ENOMEM
;
694 /* For everything else, return the previous break. */
698 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
699 abi_ulong target_fds_addr
,
703 abi_ulong b
, *target_fds
;
705 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
706 if (!(target_fds
= lock_user(VERIFY_READ
,
708 sizeof(abi_ulong
) * nw
,
710 return -TARGET_EFAULT
;
714 for (i
= 0; i
< nw
; i
++) {
715 /* grab the abi_ulong */
716 __get_user(b
, &target_fds
[i
]);
717 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
718 /* check the bit inside the abi_ulong */
725 unlock_user(target_fds
, target_fds_addr
, 0);
730 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
731 abi_ulong target_fds_addr
,
734 if (target_fds_addr
) {
735 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
736 return -TARGET_EFAULT
;
744 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
750 abi_ulong
*target_fds
;
752 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
753 if (!(target_fds
= lock_user(VERIFY_WRITE
,
755 sizeof(abi_ulong
) * nw
,
757 return -TARGET_EFAULT
;
760 for (i
= 0; i
< nw
; i
++) {
762 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
763 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
766 __put_user(v
, &target_fds
[i
]);
769 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
774 #if defined(__alpha__)
/* Rescale a clock_t tick count from the host's HZ to the target's.
 * When HOST_HZ == TARGET_HZ the conversion is the identity (that
 * early return is not visible in this excerpt); otherwise widen to
 * 64 bits before multiplying to avoid intermediate overflow. */
780 static inline abi_long
host_to_target_clock_t(long ticks
)
782 #if HOST_HZ == TARGET_HZ
785 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
789 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
790 const struct rusage
*rusage
)
792 struct target_rusage
*target_rusage
;
794 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
795 return -TARGET_EFAULT
;
796 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
797 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
798 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
799 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
800 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
801 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
802 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
803 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
804 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
805 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
806 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
807 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
808 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
809 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
810 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
811 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
812 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
813 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
814 unlock_user_struct(target_rusage
, target_addr
, 1);
819 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
821 abi_ulong target_rlim_swap
;
824 target_rlim_swap
= tswapal(target_rlim
);
825 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
826 return RLIM_INFINITY
;
828 result
= target_rlim_swap
;
829 if (target_rlim_swap
!= (rlim_t
)result
)
830 return RLIM_INFINITY
;
835 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
837 abi_ulong target_rlim_swap
;
840 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
841 target_rlim_swap
= TARGET_RLIM_INFINITY
;
843 target_rlim_swap
= rlim
;
844 result
= tswapal(target_rlim_swap
);
849 static inline int target_to_host_resource(int code
)
852 case TARGET_RLIMIT_AS
:
854 case TARGET_RLIMIT_CORE
:
856 case TARGET_RLIMIT_CPU
:
858 case TARGET_RLIMIT_DATA
:
860 case TARGET_RLIMIT_FSIZE
:
862 case TARGET_RLIMIT_LOCKS
:
864 case TARGET_RLIMIT_MEMLOCK
:
865 return RLIMIT_MEMLOCK
;
866 case TARGET_RLIMIT_MSGQUEUE
:
867 return RLIMIT_MSGQUEUE
;
868 case TARGET_RLIMIT_NICE
:
870 case TARGET_RLIMIT_NOFILE
:
871 return RLIMIT_NOFILE
;
872 case TARGET_RLIMIT_NPROC
:
874 case TARGET_RLIMIT_RSS
:
876 case TARGET_RLIMIT_RTPRIO
:
877 return RLIMIT_RTPRIO
;
878 case TARGET_RLIMIT_SIGPENDING
:
879 return RLIMIT_SIGPENDING
;
880 case TARGET_RLIMIT_STACK
:
887 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
888 abi_ulong target_tv_addr
)
890 struct target_timeval
*target_tv
;
892 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
893 return -TARGET_EFAULT
;
895 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
896 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
898 unlock_user_struct(target_tv
, target_tv_addr
, 0);
903 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
904 const struct timeval
*tv
)
906 struct target_timeval
*target_tv
;
908 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
909 return -TARGET_EFAULT
;
911 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
912 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
914 unlock_user_struct(target_tv
, target_tv_addr
, 1);
919 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
922 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
923 abi_ulong target_mq_attr_addr
)
925 struct target_mq_attr
*target_mq_attr
;
927 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
928 target_mq_attr_addr
, 1))
929 return -TARGET_EFAULT
;
931 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
932 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
933 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
934 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
936 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
941 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
942 const struct mq_attr
*attr
)
944 struct target_mq_attr
*target_mq_attr
;
946 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
947 target_mq_attr_addr
, 0))
948 return -TARGET_EFAULT
;
950 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
951 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
952 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
953 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
955 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
961 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
962 /* do_select() must return target values and target errnos. */
963 static abi_long
do_select(int n
,
964 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
965 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
967 fd_set rfds
, wfds
, efds
;
968 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
969 struct timeval tv
, *tv_ptr
;
972 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
976 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
980 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
985 if (target_tv_addr
) {
986 if (copy_from_user_timeval(&tv
, target_tv_addr
))
987 return -TARGET_EFAULT
;
993 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
995 if (!is_error(ret
)) {
996 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
997 return -TARGET_EFAULT
;
998 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
999 return -TARGET_EFAULT
;
1000 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1001 return -TARGET_EFAULT
;
1003 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1004 return -TARGET_EFAULT
;
1011 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1014 return pipe2(host_pipe
, flags
);
1020 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1021 int flags
, int is_pipe2
)
1025 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1028 return get_errno(ret
);
1030 /* Several targets have special calling conventions for the original
1031 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1033 #if defined(TARGET_ALPHA)
1034 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1035 return host_pipe
[0];
1036 #elif defined(TARGET_MIPS)
1037 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1038 return host_pipe
[0];
1039 #elif defined(TARGET_SH4)
1040 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1041 return host_pipe
[0];
1045 if (put_user_s32(host_pipe
[0], pipedes
)
1046 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1047 return -TARGET_EFAULT
;
1048 return get_errno(ret
);
1051 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1052 abi_ulong target_addr
,
1055 struct target_ip_mreqn
*target_smreqn
;
1057 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1059 return -TARGET_EFAULT
;
1060 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1061 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1062 if (len
== sizeof(struct target_ip_mreqn
))
1063 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1064 unlock_user(target_smreqn
, target_addr
, 0);
1069 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1070 abi_ulong target_addr
,
1073 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1074 sa_family_t sa_family
;
1075 struct target_sockaddr
*target_saddr
;
1077 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1079 return -TARGET_EFAULT
;
1081 sa_family
= tswap16(target_saddr
->sa_family
);
1083 /* Oops. The caller might send a incomplete sun_path; sun_path
1084 * must be terminated by \0 (see the manual page), but
1085 * unfortunately it is quite common to specify sockaddr_un
1086 * length as "strlen(x->sun_path)" while it should be
1087 * "strlen(...) + 1". We'll fix that here if needed.
1088 * Linux kernel has a similar feature.
1091 if (sa_family
== AF_UNIX
) {
1092 if (len
< unix_maxlen
&& len
> 0) {
1093 char *cp
= (char*)target_saddr
;
1095 if ( cp
[len
-1] && !cp
[len
] )
1098 if (len
> unix_maxlen
)
1102 memcpy(addr
, target_saddr
, len
);
1103 addr
->sa_family
= sa_family
;
1104 unlock_user(target_saddr
, target_addr
, 0);
1109 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1110 struct sockaddr
*addr
,
1113 struct target_sockaddr
*target_saddr
;
1115 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1117 return -TARGET_EFAULT
;
1118 memcpy(target_saddr
, addr
, len
);
1119 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1120 unlock_user(target_saddr
, target_addr
, len
);
1125 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1126 struct target_msghdr
*target_msgh
)
1128 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1129 abi_long msg_controllen
;
1130 abi_ulong target_cmsg_addr
;
1131 struct target_cmsghdr
*target_cmsg
;
1132 socklen_t space
= 0;
1134 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1135 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1137 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1138 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1140 return -TARGET_EFAULT
;
1142 while (cmsg
&& target_cmsg
) {
1143 void *data
= CMSG_DATA(cmsg
);
1144 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1146 int len
= tswapal(target_cmsg
->cmsg_len
)
1147 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1149 space
+= CMSG_SPACE(len
);
1150 if (space
> msgh
->msg_controllen
) {
1151 space
-= CMSG_SPACE(len
);
1152 gemu_log("Host cmsg overflow\n");
1156 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1157 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1158 cmsg
->cmsg_len
= CMSG_LEN(len
);
1160 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1161 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1162 memcpy(data
, target_data
, len
);
1164 int *fd
= (int *)data
;
1165 int *target_fd
= (int *)target_data
;
1166 int i
, numfds
= len
/ sizeof(int);
1168 for (i
= 0; i
< numfds
; i
++)
1169 fd
[i
] = tswap32(target_fd
[i
]);
1172 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1173 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1175 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1177 msgh
->msg_controllen
= space
;
1181 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1182 struct msghdr
*msgh
)
1184 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1185 abi_long msg_controllen
;
1186 abi_ulong target_cmsg_addr
;
1187 struct target_cmsghdr
*target_cmsg
;
1188 socklen_t space
= 0;
1190 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1191 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1193 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1194 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1196 return -TARGET_EFAULT
;
1198 while (cmsg
&& target_cmsg
) {
1199 void *data
= CMSG_DATA(cmsg
);
1200 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1202 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1204 space
+= TARGET_CMSG_SPACE(len
);
1205 if (space
> msg_controllen
) {
1206 space
-= TARGET_CMSG_SPACE(len
);
1207 gemu_log("Target cmsg overflow\n");
1211 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1212 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1213 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1215 if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1216 (cmsg
->cmsg_type
== SCM_RIGHTS
)) {
1217 int *fd
= (int *)data
;
1218 int *target_fd
= (int *)target_data
;
1219 int i
, numfds
= len
/ sizeof(int);
1221 for (i
= 0; i
< numfds
; i
++)
1222 target_fd
[i
] = tswap32(fd
[i
]);
1223 } else if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1224 (cmsg
->cmsg_type
== SO_TIMESTAMP
) &&
1225 (len
== sizeof(struct timeval
))) {
1226 /* copy struct timeval to target */
1227 struct timeval
*tv
= (struct timeval
*)data
;
1228 struct target_timeval
*target_tv
=
1229 (struct target_timeval
*)target_data
;
1231 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1232 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1234 gemu_log("Unsupported ancillary data: %d/%d\n",
1235 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1236 memcpy(target_data
, data
, len
);
1239 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1240 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1242 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1244 target_msgh
->msg_controllen
= tswapal(space
);
1248 /* do_setsockopt() Must return target values and target errnos. */
1249 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1250 abi_ulong optval_addr
, socklen_t optlen
)
1254 struct ip_mreqn
*ip_mreq
;
1255 struct ip_mreq_source
*ip_mreq_source
;
1259 /* TCP options all take an 'int' value. */
1260 if (optlen
< sizeof(uint32_t))
1261 return -TARGET_EINVAL
;
1263 if (get_user_u32(val
, optval_addr
))
1264 return -TARGET_EFAULT
;
1265 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1272 case IP_ROUTER_ALERT
:
1276 case IP_MTU_DISCOVER
:
1282 case IP_MULTICAST_TTL
:
1283 case IP_MULTICAST_LOOP
:
1285 if (optlen
>= sizeof(uint32_t)) {
1286 if (get_user_u32(val
, optval_addr
))
1287 return -TARGET_EFAULT
;
1288 } else if (optlen
>= 1) {
1289 if (get_user_u8(val
, optval_addr
))
1290 return -TARGET_EFAULT
;
1292 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1294 case IP_ADD_MEMBERSHIP
:
1295 case IP_DROP_MEMBERSHIP
:
1296 if (optlen
< sizeof (struct target_ip_mreq
) ||
1297 optlen
> sizeof (struct target_ip_mreqn
))
1298 return -TARGET_EINVAL
;
1300 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1301 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1302 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1305 case IP_BLOCK_SOURCE
:
1306 case IP_UNBLOCK_SOURCE
:
1307 case IP_ADD_SOURCE_MEMBERSHIP
:
1308 case IP_DROP_SOURCE_MEMBERSHIP
:
1309 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1310 return -TARGET_EINVAL
;
1312 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1313 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1314 unlock_user (ip_mreq_source
, optval_addr
, 0);
1324 /* struct icmp_filter takes an u32 value */
1325 if (optlen
< sizeof(uint32_t)) {
1326 return -TARGET_EINVAL
;
1329 if (get_user_u32(val
, optval_addr
)) {
1330 return -TARGET_EFAULT
;
1332 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1333 &val
, sizeof(val
)));
1340 case TARGET_SOL_SOCKET
:
1342 case TARGET_SO_RCVTIMEO
:
1346 optname
= SO_RCVTIMEO
;
1349 if (optlen
!= sizeof(struct target_timeval
)) {
1350 return -TARGET_EINVAL
;
1353 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1354 return -TARGET_EFAULT
;
1357 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1361 case TARGET_SO_SNDTIMEO
:
1362 optname
= SO_SNDTIMEO
;
1364 /* Options with 'int' argument. */
1365 case TARGET_SO_DEBUG
:
1368 case TARGET_SO_REUSEADDR
:
1369 optname
= SO_REUSEADDR
;
1371 case TARGET_SO_TYPE
:
1374 case TARGET_SO_ERROR
:
1377 case TARGET_SO_DONTROUTE
:
1378 optname
= SO_DONTROUTE
;
1380 case TARGET_SO_BROADCAST
:
1381 optname
= SO_BROADCAST
;
1383 case TARGET_SO_SNDBUF
:
1384 optname
= SO_SNDBUF
;
1386 case TARGET_SO_RCVBUF
:
1387 optname
= SO_RCVBUF
;
1389 case TARGET_SO_KEEPALIVE
:
1390 optname
= SO_KEEPALIVE
;
1392 case TARGET_SO_OOBINLINE
:
1393 optname
= SO_OOBINLINE
;
1395 case TARGET_SO_NO_CHECK
:
1396 optname
= SO_NO_CHECK
;
1398 case TARGET_SO_PRIORITY
:
1399 optname
= SO_PRIORITY
;
1402 case TARGET_SO_BSDCOMPAT
:
1403 optname
= SO_BSDCOMPAT
;
1406 case TARGET_SO_PASSCRED
:
1407 optname
= SO_PASSCRED
;
1409 case TARGET_SO_TIMESTAMP
:
1410 optname
= SO_TIMESTAMP
;
1412 case TARGET_SO_RCVLOWAT
:
1413 optname
= SO_RCVLOWAT
;
1419 if (optlen
< sizeof(uint32_t))
1420 return -TARGET_EINVAL
;
1422 if (get_user_u32(val
, optval_addr
))
1423 return -TARGET_EFAULT
;
1424 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1428 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1429 ret
= -TARGET_ENOPROTOOPT
;
1434 /* do_getsockopt() Must return target values and target errnos. */
1435 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1436 abi_ulong optval_addr
, abi_ulong optlen
)
1443 case TARGET_SOL_SOCKET
:
1446 /* These don't just return a single integer */
1447 case TARGET_SO_LINGER
:
1448 case TARGET_SO_RCVTIMEO
:
1449 case TARGET_SO_SNDTIMEO
:
1450 case TARGET_SO_PEERNAME
:
1452 case TARGET_SO_PEERCRED
: {
1455 struct target_ucred
*tcr
;
1457 if (get_user_u32(len
, optlen
)) {
1458 return -TARGET_EFAULT
;
1461 return -TARGET_EINVAL
;
1465 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1473 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1474 return -TARGET_EFAULT
;
1476 __put_user(cr
.pid
, &tcr
->pid
);
1477 __put_user(cr
.uid
, &tcr
->uid
);
1478 __put_user(cr
.gid
, &tcr
->gid
);
1479 unlock_user_struct(tcr
, optval_addr
, 1);
1480 if (put_user_u32(len
, optlen
)) {
1481 return -TARGET_EFAULT
;
1485 /* Options with 'int' argument. */
1486 case TARGET_SO_DEBUG
:
1489 case TARGET_SO_REUSEADDR
:
1490 optname
= SO_REUSEADDR
;
1492 case TARGET_SO_TYPE
:
1495 case TARGET_SO_ERROR
:
1498 case TARGET_SO_DONTROUTE
:
1499 optname
= SO_DONTROUTE
;
1501 case TARGET_SO_BROADCAST
:
1502 optname
= SO_BROADCAST
;
1504 case TARGET_SO_SNDBUF
:
1505 optname
= SO_SNDBUF
;
1507 case TARGET_SO_RCVBUF
:
1508 optname
= SO_RCVBUF
;
1510 case TARGET_SO_KEEPALIVE
:
1511 optname
= SO_KEEPALIVE
;
1513 case TARGET_SO_OOBINLINE
:
1514 optname
= SO_OOBINLINE
;
1516 case TARGET_SO_NO_CHECK
:
1517 optname
= SO_NO_CHECK
;
1519 case TARGET_SO_PRIORITY
:
1520 optname
= SO_PRIORITY
;
1523 case TARGET_SO_BSDCOMPAT
:
1524 optname
= SO_BSDCOMPAT
;
1527 case TARGET_SO_PASSCRED
:
1528 optname
= SO_PASSCRED
;
1530 case TARGET_SO_TIMESTAMP
:
1531 optname
= SO_TIMESTAMP
;
1533 case TARGET_SO_RCVLOWAT
:
1534 optname
= SO_RCVLOWAT
;
1541 /* TCP options all take an 'int' value. */
1543 if (get_user_u32(len
, optlen
))
1544 return -TARGET_EFAULT
;
1546 return -TARGET_EINVAL
;
1548 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1554 if (put_user_u32(val
, optval_addr
))
1555 return -TARGET_EFAULT
;
1557 if (put_user_u8(val
, optval_addr
))
1558 return -TARGET_EFAULT
;
1560 if (put_user_u32(len
, optlen
))
1561 return -TARGET_EFAULT
;
1568 case IP_ROUTER_ALERT
:
1572 case IP_MTU_DISCOVER
:
1578 case IP_MULTICAST_TTL
:
1579 case IP_MULTICAST_LOOP
:
1580 if (get_user_u32(len
, optlen
))
1581 return -TARGET_EFAULT
;
1583 return -TARGET_EINVAL
;
1585 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1588 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1590 if (put_user_u32(len
, optlen
)
1591 || put_user_u8(val
, optval_addr
))
1592 return -TARGET_EFAULT
;
1594 if (len
> sizeof(int))
1596 if (put_user_u32(len
, optlen
)
1597 || put_user_u32(val
, optval_addr
))
1598 return -TARGET_EFAULT
;
1602 ret
= -TARGET_ENOPROTOOPT
;
1608 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1610 ret
= -TARGET_EOPNOTSUPP
;
1616 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1617 int count
, int copy
)
1619 struct target_iovec
*target_vec
;
1621 abi_ulong total_len
, max_len
;
1628 if (count
< 0 || count
> IOV_MAX
) {
1633 vec
= calloc(count
, sizeof(struct iovec
));
1639 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1640 count
* sizeof(struct target_iovec
), 1);
1641 if (target_vec
== NULL
) {
1646 /* ??? If host page size > target page size, this will result in a
1647 value larger than what we can actually support. */
1648 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1651 for (i
= 0; i
< count
; i
++) {
1652 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1653 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1658 } else if (len
== 0) {
1659 /* Zero length pointer is ignored. */
1660 vec
[i
].iov_base
= 0;
1662 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1663 if (!vec
[i
].iov_base
) {
1667 if (len
> max_len
- total_len
) {
1668 len
= max_len
- total_len
;
1671 vec
[i
].iov_len
= len
;
1675 unlock_user(target_vec
, target_addr
, 0);
1681 unlock_user(target_vec
, target_addr
, 0);
1685 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1686 int count
, int copy
)
1688 struct target_iovec
*target_vec
;
1691 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1692 count
* sizeof(struct target_iovec
), 1);
1694 for (i
= 0; i
< count
; i
++) {
1695 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1696 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1700 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1702 unlock_user(target_vec
, target_addr
, 0);
1708 static inline void target_to_host_sock_type(int *type
)
1711 int target_type
= *type
;
1713 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
1714 case TARGET_SOCK_DGRAM
:
1715 host_type
= SOCK_DGRAM
;
1717 case TARGET_SOCK_STREAM
:
1718 host_type
= SOCK_STREAM
;
1721 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
1724 if (target_type
& TARGET_SOCK_CLOEXEC
) {
1725 host_type
|= SOCK_CLOEXEC
;
1727 if (target_type
& TARGET_SOCK_NONBLOCK
) {
1728 host_type
|= SOCK_NONBLOCK
;
1733 /* do_socket() Must return target values and target errnos. */
1734 static abi_long
do_socket(int domain
, int type
, int protocol
)
1736 target_to_host_sock_type(&type
);
1738 if (domain
== PF_NETLINK
)
1739 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1740 return get_errno(socket(domain
, type
, protocol
));
1743 /* do_bind() Must return target values and target errnos. */
1744 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1750 if ((int)addrlen
< 0) {
1751 return -TARGET_EINVAL
;
1754 addr
= alloca(addrlen
+1);
1756 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1760 return get_errno(bind(sockfd
, addr
, addrlen
));
1763 /* do_connect() Must return target values and target errnos. */
1764 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1770 if ((int)addrlen
< 0) {
1771 return -TARGET_EINVAL
;
1774 addr
= alloca(addrlen
);
1776 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1780 return get_errno(connect(sockfd
, addr
, addrlen
));
1783 /* do_sendrecvmsg() Must return target values and target errnos. */
1784 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1785 int flags
, int send
)
1788 struct target_msghdr
*msgp
;
1792 abi_ulong target_vec
;
1795 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1799 return -TARGET_EFAULT
;
1800 if (msgp
->msg_name
) {
1801 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1802 msg
.msg_name
= alloca(msg
.msg_namelen
);
1803 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1809 msg
.msg_name
= NULL
;
1810 msg
.msg_namelen
= 0;
1812 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1813 msg
.msg_control
= alloca(msg
.msg_controllen
);
1814 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1816 count
= tswapal(msgp
->msg_iovlen
);
1817 target_vec
= tswapal(msgp
->msg_iov
);
1818 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1819 target_vec
, count
, send
);
1821 ret
= -host_to_target_errno(errno
);
1824 msg
.msg_iovlen
= count
;
1828 ret
= target_to_host_cmsg(&msg
, msgp
);
1830 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1832 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1833 if (!is_error(ret
)) {
1835 ret
= host_to_target_cmsg(msgp
, &msg
);
1836 if (!is_error(ret
)) {
1837 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1838 if (msg
.msg_name
!= NULL
) {
1839 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1840 msg
.msg_name
, msg
.msg_namelen
);
1852 unlock_iovec(vec
, target_vec
, count
, !send
);
1854 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* flags is guaranteed zero by the callers (see comment above). */
    return accept(sockfd, addr, addrlen);
}
#endif
1871 /* do_accept4() Must return target values and target errnos. */
1872 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
1873 abi_ulong target_addrlen_addr
, int flags
)
1879 if (target_addr
== 0) {
1880 return get_errno(accept4(fd
, NULL
, NULL
, flags
));
1883 /* linux returns EINVAL if addrlen pointer is invalid */
1884 if (get_user_u32(addrlen
, target_addrlen_addr
))
1885 return -TARGET_EINVAL
;
1887 if ((int)addrlen
< 0) {
1888 return -TARGET_EINVAL
;
1891 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1892 return -TARGET_EINVAL
;
1894 addr
= alloca(addrlen
);
1896 ret
= get_errno(accept4(fd
, addr
, &addrlen
, flags
));
1897 if (!is_error(ret
)) {
1898 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1899 if (put_user_u32(addrlen
, target_addrlen_addr
))
1900 ret
= -TARGET_EFAULT
;
1905 /* do_getpeername() Must return target values and target errnos. */
1906 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1907 abi_ulong target_addrlen_addr
)
1913 if (get_user_u32(addrlen
, target_addrlen_addr
))
1914 return -TARGET_EFAULT
;
1916 if ((int)addrlen
< 0) {
1917 return -TARGET_EINVAL
;
1920 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1921 return -TARGET_EFAULT
;
1923 addr
= alloca(addrlen
);
1925 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1926 if (!is_error(ret
)) {
1927 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1928 if (put_user_u32(addrlen
, target_addrlen_addr
))
1929 ret
= -TARGET_EFAULT
;
1934 /* do_getsockname() Must return target values and target errnos. */
1935 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1936 abi_ulong target_addrlen_addr
)
1942 if (get_user_u32(addrlen
, target_addrlen_addr
))
1943 return -TARGET_EFAULT
;
1945 if ((int)addrlen
< 0) {
1946 return -TARGET_EINVAL
;
1949 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1950 return -TARGET_EFAULT
;
1952 addr
= alloca(addrlen
);
1954 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1955 if (!is_error(ret
)) {
1956 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1957 if (put_user_u32(addrlen
, target_addrlen_addr
))
1958 ret
= -TARGET_EFAULT
;
1963 /* do_socketpair() Must return target values and target errnos. */
1964 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1965 abi_ulong target_tab_addr
)
1970 target_to_host_sock_type(&type
);
1972 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1973 if (!is_error(ret
)) {
1974 if (put_user_s32(tab
[0], target_tab_addr
)
1975 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1976 ret
= -TARGET_EFAULT
;
1981 /* do_sendto() Must return target values and target errnos. */
1982 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1983 abi_ulong target_addr
, socklen_t addrlen
)
1989 if ((int)addrlen
< 0) {
1990 return -TARGET_EINVAL
;
1993 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1995 return -TARGET_EFAULT
;
1997 addr
= alloca(addrlen
);
1998 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2000 unlock_user(host_msg
, msg
, 0);
2003 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2005 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2007 unlock_user(host_msg
, msg
, 0);
2011 /* do_recvfrom() Must return target values and target errnos. */
2012 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2013 abi_ulong target_addr
,
2014 abi_ulong target_addrlen
)
2021 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2023 return -TARGET_EFAULT
;
2025 if (get_user_u32(addrlen
, target_addrlen
)) {
2026 ret
= -TARGET_EFAULT
;
2029 if ((int)addrlen
< 0) {
2030 ret
= -TARGET_EINVAL
;
2033 addr
= alloca(addrlen
);
2034 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2036 addr
= NULL
; /* To keep compiler quiet. */
2037 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2039 if (!is_error(ret
)) {
2041 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2042 if (put_user_u32(addrlen
, target_addrlen
)) {
2043 ret
= -TARGET_EFAULT
;
2047 unlock_user(host_msg
, msg
, len
);
2050 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
/*
 * Demultiplex the legacy socketcall(2) interface: num selects the socket
 * operation and vptr points at an abi_ulong argument vector in guest
 * memory.  Each case fetches its arguments and forwards to the matching
 * do_* helper.
 * NOTE(review): interior lines were elided in the extract; body
 * reconstructed from the visible fragments -- confirm against full file.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    const int n = sizeof(abi_ulong);
    abi_long ret;

    switch (num) {
    case SOCKOP_socket:
    {
        abi_ulong domain, type, protocol;

        if (get_user_ual(domain, vptr)
            || get_user_ual(type, vptr + n)
            || get_user_ual(protocol, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_socket(domain, type, protocol);
        break;
    }
    case SOCKOP_bind:
    {
        abi_ulong sockfd, target_addr, addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_bind(sockfd, target_addr, addrlen);
        break;
    }
    case SOCKOP_connect:
    {
        abi_ulong sockfd, target_addr, addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_connect(sockfd, target_addr, addrlen);
        break;
    }
    case SOCKOP_listen:
    {
        abi_ulong sockfd, backlog;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(backlog, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(listen(sockfd, backlog));
        break;
    }
    case SOCKOP_accept:
    {
        abi_ulong sockfd, target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_accept4(sockfd, target_addr, target_addrlen, 0);
        break;
    }
    case SOCKOP_getsockname:
    {
        abi_ulong sockfd, target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getsockname(sockfd, target_addr, target_addrlen);
        break;
    }
    case SOCKOP_getpeername:
    {
        abi_ulong sockfd, target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getpeername(sockfd, target_addr, target_addrlen);
        break;
    }
    case SOCKOP_socketpair:
    {
        abi_ulong domain, type, protocol, tab;

        if (get_user_ual(domain, vptr)
            || get_user_ual(type, vptr + n)
            || get_user_ual(protocol, vptr + 2 * n)
            || get_user_ual(tab, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_socketpair(domain, type, protocol, tab);
        break;
    }
    case SOCKOP_send:
    {
        abi_ulong sockfd, msg, len, flags;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        break;
    }
    case SOCKOP_recv:
    {
        abi_ulong sockfd, msg, len, flags;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        break;
    }
    case SOCKOP_sendto:
    {
        abi_ulong sockfd, msg, len, flags, addr, addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        break;
    }
    case SOCKOP_recvfrom:
    {
        abi_ulong sockfd, msg, len, flags, addr, addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        break;
    }
    case SOCKOP_shutdown:
    {
        abi_ulong sockfd, how;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(how, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(shutdown(sockfd, how));
        break;
    }
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
    {
        abi_ulong fd, target_msg, flags;

        if (get_user_ual(fd, vptr)
            || get_user_ual(target_msg, vptr + n)
            || get_user_ual(flags, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_sendrecvmsg(fd, target_msg, flags,
                             (num == SOCKOP_sendmsg));
        break;
    }
    case SOCKOP_setsockopt:
    {
        abi_ulong sockfd, level, optname, optval, optlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        break;
    }
    case SOCKOP_getsockopt:
    {
        abi_ulong sockfd, level, optname, optval, optlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        break;
    }
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
2311 #define N_SHM_REGIONS 32
/* Table of guest shmat() mappings, used to undo them at shmdt() time.
   Field declarations are elided in this extract (presumably start/size
   of each mapped region -- TODO confirm against the full file). */
2313 static struct shm_region
{
2316 } shm_regions
[N_SHM_REGIONS
];
/* Guest-layout equivalent of the host struct ipc_perm (SysV IPC owner /
   permission record).  Leading key/uid/gid/cuid/cgid fields are elided
   in this extract -- TODO confirm against the full file. */
2318 struct target_ipc_perm
2325 unsigned short int mode
;
2326 unsigned short int __pad1
;
2327 unsigned short int __seq
;
2328 unsigned short int __pad2
;
2329 abi_ulong __unused1
;
2330 abi_ulong __unused2
;
/* Guest-layout equivalent of the host struct semid_ds; converted by
   target_to_host_semid_ds()/host_to_target_semid_ds() below. */
2333 struct target_semid_ds
2335 struct target_ipc_perm sem_perm
;
2336 abi_ulong sem_otime
;
2337 abi_ulong __unused1
;
2338 abi_ulong sem_ctime
;
2339 abi_ulong __unused2
;
2340 abi_ulong sem_nsems
;
2341 abi_ulong __unused3
;
2342 abi_ulong __unused4
;
2345 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2346 abi_ulong target_addr
)
2348 struct target_ipc_perm
*target_ip
;
2349 struct target_semid_ds
*target_sd
;
2351 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2352 return -TARGET_EFAULT
;
2353 target_ip
= &(target_sd
->sem_perm
);
2354 host_ip
->__key
= tswapal(target_ip
->__key
);
2355 host_ip
->uid
= tswapal(target_ip
->uid
);
2356 host_ip
->gid
= tswapal(target_ip
->gid
);
2357 host_ip
->cuid
= tswapal(target_ip
->cuid
);
2358 host_ip
->cgid
= tswapal(target_ip
->cgid
);
2359 host_ip
->mode
= tswap16(target_ip
->mode
);
2360 unlock_user_struct(target_sd
, target_addr
, 0);
2364 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2365 struct ipc_perm
*host_ip
)
2367 struct target_ipc_perm
*target_ip
;
2368 struct target_semid_ds
*target_sd
;
2370 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2371 return -TARGET_EFAULT
;
2372 target_ip
= &(target_sd
->sem_perm
);
2373 target_ip
->__key
= tswapal(host_ip
->__key
);
2374 target_ip
->uid
= tswapal(host_ip
->uid
);
2375 target_ip
->gid
= tswapal(host_ip
->gid
);
2376 target_ip
->cuid
= tswapal(host_ip
->cuid
);
2377 target_ip
->cgid
= tswapal(host_ip
->cgid
);
2378 target_ip
->mode
= tswap16(host_ip
->mode
);
2379 unlock_user_struct(target_sd
, target_addr
, 1);
2383 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2384 abi_ulong target_addr
)
2386 struct target_semid_ds
*target_sd
;
2388 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2389 return -TARGET_EFAULT
;
2390 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2391 return -TARGET_EFAULT
;
2392 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2393 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2394 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2395 unlock_user_struct(target_sd
, target_addr
, 0);
2399 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2400 struct semid_ds
*host_sd
)
2402 struct target_semid_ds
*target_sd
;
2404 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2405 return -TARGET_EFAULT
;
2406 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2407 return -TARGET_EFAULT
;
2408 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2409 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2410 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2411 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout equivalent of the host struct seminfo (semctl IPC_INFO /
   SEM_INFO result); fields elided in this extract -- see
   host_to_target_seminfo() below for the full field list. */
2415 struct target_seminfo
{
2428 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2429 struct seminfo
*host_seminfo
)
2431 struct target_seminfo
*target_seminfo
;
2432 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2433 return -TARGET_EFAULT
;
2434 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2435 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2436 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2437 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2438 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2439 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2440 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2441 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2442 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2443 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2444 unlock_user_struct(target_seminfo
, target_addr
, 1);
/* Tail of the host `union semun` definition (glibc does not provide one);
   its opening lines are elided in this extract. */
2450 struct semid_ds
*buf
;
2451 unsigned short *array
;
2452 struct seminfo
*__buf
;
/* Guest-layout counterpart; members elided in this extract (presumably
   int val plus abi_ulong buf/array/__buf guest pointers -- TODO confirm
   against the full file). */
2455 union target_semun
{
2462 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2463 abi_ulong target_addr
)
2466 unsigned short *array
;
2468 struct semid_ds semid_ds
;
2471 semun
.buf
= &semid_ds
;
2473 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2475 return get_errno(ret
);
2477 nsems
= semid_ds
.sem_nsems
;
2479 *host_array
= malloc(nsems
*sizeof(unsigned short));
2480 array
= lock_user(VERIFY_READ
, target_addr
,
2481 nsems
*sizeof(unsigned short), 1);
2483 return -TARGET_EFAULT
;
2485 for(i
=0; i
<nsems
; i
++) {
2486 __get_user((*host_array
)[i
], &array
[i
]);
2488 unlock_user(array
, target_addr
, 0);
2493 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2494 unsigned short **host_array
)
2497 unsigned short *array
;
2499 struct semid_ds semid_ds
;
2502 semun
.buf
= &semid_ds
;
2504 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2506 return get_errno(ret
);
2508 nsems
= semid_ds
.sem_nsems
;
2510 array
= lock_user(VERIFY_WRITE
, target_addr
,
2511 nsems
*sizeof(unsigned short), 0);
2513 return -TARGET_EFAULT
;
2515 for(i
=0; i
<nsems
; i
++) {
2516 __put_user((*host_array
)[i
], &array
[i
]);
2519 unlock_user(array
, target_addr
, 1);
2524 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2525 union target_semun target_su
)
2528 struct semid_ds dsarg
;
2529 unsigned short *array
= NULL
;
2530 struct seminfo seminfo
;
2531 abi_long ret
= -TARGET_EINVAL
;
2538 arg
.val
= tswap32(target_su
.val
);
2539 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2540 target_su
.val
= tswap32(arg
.val
);
2544 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2548 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2549 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2556 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2560 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2561 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2567 arg
.__buf
= &seminfo
;
2568 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2569 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2577 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout equivalent of the host struct sembuf (one semop()
   operation); sem_op/sem_flg fields elided in this extract -- see
   target_to_host_sembuf() below. */
2584 struct target_sembuf
{
2585 unsigned short sem_num
;
2590 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2591 abi_ulong target_addr
,
2594 struct target_sembuf
*target_sembuf
;
2597 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2598 nsops
*sizeof(struct target_sembuf
), 1);
2600 return -TARGET_EFAULT
;
2602 for(i
=0; i
<nsops
; i
++) {
2603 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2604 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2605 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2608 unlock_user(target_sembuf
, target_addr
, 0);
2613 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2615 struct sembuf sops
[nsops
];
2617 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2618 return -TARGET_EFAULT
;
2620 return get_errno(semop(semid
, sops
, nsops
));
/* Guest-layout equivalent of the host struct msqid_ds.  On 32-bit ABIs
   each time field is followed by a padding word (the #endif lines and
   msg_qnum field are elided in this extract -- TODO confirm against the
   full file). */
2623 struct target_msqid_ds
2625 struct target_ipc_perm msg_perm
;
2626 abi_ulong msg_stime
;
2627 #if TARGET_ABI_BITS == 32
2628 abi_ulong __unused1
;
2630 abi_ulong msg_rtime
;
2631 #if TARGET_ABI_BITS == 32
2632 abi_ulong __unused2
;
2634 abi_ulong msg_ctime
;
2635 #if TARGET_ABI_BITS == 32
2636 abi_ulong __unused3
;
2638 abi_ulong __msg_cbytes
;
2640 abi_ulong msg_qbytes
;
2641 abi_ulong msg_lspid
;
2642 abi_ulong msg_lrpid
;
2643 abi_ulong __unused4
;
2644 abi_ulong __unused5
;
2647 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2648 abi_ulong target_addr
)
2650 struct target_msqid_ds
*target_md
;
2652 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2653 return -TARGET_EFAULT
;
2654 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2655 return -TARGET_EFAULT
;
2656 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2657 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2658 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2659 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2660 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2661 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2662 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2663 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2664 unlock_user_struct(target_md
, target_addr
, 0);
2668 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2669 struct msqid_ds
*host_md
)
2671 struct target_msqid_ds
*target_md
;
2673 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2674 return -TARGET_EFAULT
;
2675 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2676 return -TARGET_EFAULT
;
2677 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2678 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2679 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2680 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2681 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2682 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2683 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2684 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2685 unlock_user_struct(target_md
, target_addr
, 1);
/* Target-ABI layout of struct msginfo as returned by msgctl(IPC_INFO /
 * MSG_INFO); filled in by host_to_target_msginfo() below.  Field set is
 * taken from that converter; types mirror the host struct msginfo
 * (all int except msgseg) -- NOTE(review): confirm against the target
 * kernel headers.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2700 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2701 struct msginfo
*host_msginfo
)
2703 struct target_msginfo
*target_msginfo
;
2704 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2705 return -TARGET_EFAULT
;
2706 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2707 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2708 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2709 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2710 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2711 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2712 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2713 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2714 unlock_user_struct(target_msginfo
, target_addr
, 1);
2718 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2720 struct msqid_ds dsarg
;
2721 struct msginfo msginfo
;
2722 abi_long ret
= -TARGET_EINVAL
;
2730 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2731 return -TARGET_EFAULT
;
2732 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2733 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2734 return -TARGET_EFAULT
;
2737 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2741 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2742 if (host_to_target_msginfo(ptr
, &msginfo
))
2743 return -TARGET_EFAULT
;
2750 struct target_msgbuf
{
2755 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2756 unsigned int msgsz
, int msgflg
)
2758 struct target_msgbuf
*target_mb
;
2759 struct msgbuf
*host_mb
;
2762 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2763 return -TARGET_EFAULT
;
2764 host_mb
= malloc(msgsz
+sizeof(long));
2765 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2766 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2767 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2769 unlock_user_struct(target_mb
, msgp
, 0);
2774 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2775 unsigned int msgsz
, abi_long msgtyp
,
2778 struct target_msgbuf
*target_mb
;
2780 struct msgbuf
*host_mb
;
2783 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2784 return -TARGET_EFAULT
;
2786 host_mb
= g_malloc(msgsz
+sizeof(long));
2787 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2790 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2791 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2792 if (!target_mtext
) {
2793 ret
= -TARGET_EFAULT
;
2796 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2797 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2800 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2804 unlock_user_struct(target_mb
, msgp
, 1);
2809 struct target_shmid_ds
2811 struct target_ipc_perm shm_perm
;
2812 abi_ulong shm_segsz
;
2813 abi_ulong shm_atime
;
2814 #if TARGET_ABI_BITS == 32
2815 abi_ulong __unused1
;
2817 abi_ulong shm_dtime
;
2818 #if TARGET_ABI_BITS == 32
2819 abi_ulong __unused2
;
2821 abi_ulong shm_ctime
;
2822 #if TARGET_ABI_BITS == 32
2823 abi_ulong __unused3
;
2827 abi_ulong shm_nattch
;
2828 unsigned long int __unused4
;
2829 unsigned long int __unused5
;
2832 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2833 abi_ulong target_addr
)
2835 struct target_shmid_ds
*target_sd
;
2837 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2838 return -TARGET_EFAULT
;
2839 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2840 return -TARGET_EFAULT
;
2841 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2842 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2843 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2844 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2845 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2846 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2847 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2848 unlock_user_struct(target_sd
, target_addr
, 0);
2852 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2853 struct shmid_ds
*host_sd
)
2855 struct target_shmid_ds
*target_sd
;
2857 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2858 return -TARGET_EFAULT
;
2859 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2860 return -TARGET_EFAULT
;
2861 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2862 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2863 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2864 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2865 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2866 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2867 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2868 unlock_user_struct(target_sd
, target_addr
, 1);
2872 struct target_shminfo
{
2880 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2881 struct shminfo
*host_shminfo
)
2883 struct target_shminfo
*target_shminfo
;
2884 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2885 return -TARGET_EFAULT
;
2886 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2887 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2888 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2889 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2890 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2891 unlock_user_struct(target_shminfo
, target_addr
, 1);
2895 struct target_shm_info
{
2900 abi_ulong swap_attempts
;
2901 abi_ulong swap_successes
;
2904 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2905 struct shm_info
*host_shm_info
)
2907 struct target_shm_info
*target_shm_info
;
2908 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2909 return -TARGET_EFAULT
;
2910 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2911 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2912 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2913 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2914 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2915 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2916 unlock_user_struct(target_shm_info
, target_addr
, 1);
2920 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2922 struct shmid_ds dsarg
;
2923 struct shminfo shminfo
;
2924 struct shm_info shm_info
;
2925 abi_long ret
= -TARGET_EINVAL
;
2933 if (target_to_host_shmid_ds(&dsarg
, buf
))
2934 return -TARGET_EFAULT
;
2935 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2936 if (host_to_target_shmid_ds(buf
, &dsarg
))
2937 return -TARGET_EFAULT
;
2940 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2941 if (host_to_target_shminfo(buf
, &shminfo
))
2942 return -TARGET_EFAULT
;
2945 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2946 if (host_to_target_shm_info(buf
, &shm_info
))
2947 return -TARGET_EFAULT
;
2952 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2959 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2963 struct shmid_ds shm_info
;
2966 /* find out the length of the shared memory segment */
2967 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2968 if (is_error(ret
)) {
2969 /* can't get length, bail out */
2976 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2978 abi_ulong mmap_start
;
2980 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2982 if (mmap_start
== -1) {
2984 host_raddr
= (void *)-1;
2986 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2989 if (host_raddr
== (void *)-1) {
2991 return get_errno((long)host_raddr
);
2993 raddr
=h2g((unsigned long)host_raddr
);
2995 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2996 PAGE_VALID
| PAGE_READ
|
2997 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2999 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3000 if (shm_regions
[i
].start
== 0) {
3001 shm_regions
[i
].start
= raddr
;
3002 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3012 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3016 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3017 if (shm_regions
[i
].start
== shmaddr
) {
3018 shm_regions
[i
].start
= 0;
3019 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3024 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The multiplexed call number carries an interface version in the
     * high 16 bits and the operation in the low 16 bits.
     */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third,
                        (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old interface: msgp/msgtyp are packed in a kludge
                 * struct pointed to by ptr.
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* Store the attach address through the pointer in 'third'. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3128 /* kernel structure types definitions */
3130 #define STRUCT(name, ...) STRUCT_ ## name,
3131 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3133 #include "syscall_types.h"
3136 #undef STRUCT_SPECIAL
3138 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3139 #define STRUCT_SPECIAL(name)
3140 #include "syscall_types.h"
3142 #undef STRUCT_SPECIAL
3144 typedef struct IOCTLEntry IOCTLEntry
;
3146 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3147 int fd
, abi_long cmd
, abi_long arg
);
3150 unsigned int target_cmd
;
3151 unsigned int host_cmd
;
3154 do_ioctl_fn
*do_ioctl
;
3155 const argtype arg_type
[5];
3158 #define IOC_R 0x0001
3159 #define IOC_W 0x0002
3160 #define IOC_RW (IOC_R | IOC_W)
3162 #define MAX_STRUCT_SIZE 4096
3164 #ifdef CONFIG_FIEMAP
3165 /* So fiemap access checks don't overflow on 32 bit systems.
3166 * This is very slightly smaller than the limit imposed by
3167 * the underlying kernel.
3169 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3170 / sizeof(struct fiemap_extent))
3172 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3173 int fd
, abi_long cmd
, abi_long arg
)
3175 /* The parameter for this ioctl is a struct fiemap followed
3176 * by an array of struct fiemap_extent whose size is set
3177 * in fiemap->fm_extent_count. The array is filled in by the
3180 int target_size_in
, target_size_out
;
3182 const argtype
*arg_type
= ie
->arg_type
;
3183 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3186 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3190 assert(arg_type
[0] == TYPE_PTR
);
3191 assert(ie
->access
== IOC_RW
);
3193 target_size_in
= thunk_type_size(arg_type
, 0);
3194 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3196 return -TARGET_EFAULT
;
3198 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3199 unlock_user(argptr
, arg
, 0);
3200 fm
= (struct fiemap
*)buf_temp
;
3201 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3202 return -TARGET_EINVAL
;
3205 outbufsz
= sizeof (*fm
) +
3206 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3208 if (outbufsz
> MAX_STRUCT_SIZE
) {
3209 /* We can't fit all the extents into the fixed size buffer.
3210 * Allocate one that is large enough and use it instead.
3212 fm
= malloc(outbufsz
);
3214 return -TARGET_ENOMEM
;
3216 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3219 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3220 if (!is_error(ret
)) {
3221 target_size_out
= target_size_in
;
3222 /* An extent_count of 0 means we were only counting the extents
3223 * so there are no structs to copy
3225 if (fm
->fm_extent_count
!= 0) {
3226 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3228 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3230 ret
= -TARGET_EFAULT
;
3232 /* Convert the struct fiemap */
3233 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3234 if (fm
->fm_extent_count
!= 0) {
3235 p
= argptr
+ target_size_in
;
3236 /* ...and then all the struct fiemap_extents */
3237 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3238 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3243 unlock_user(argptr
, arg
, target_size_out
);
3253 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3254 int fd
, abi_long cmd
, abi_long arg
)
3256 const argtype
*arg_type
= ie
->arg_type
;
3260 struct ifconf
*host_ifconf
;
3262 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3263 int target_ifreq_size
;
3268 abi_long target_ifc_buf
;
3272 assert(arg_type
[0] == TYPE_PTR
);
3273 assert(ie
->access
== IOC_RW
);
3276 target_size
= thunk_type_size(arg_type
, 0);
3278 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3280 return -TARGET_EFAULT
;
3281 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3282 unlock_user(argptr
, arg
, 0);
3284 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3285 target_ifc_len
= host_ifconf
->ifc_len
;
3286 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3288 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3289 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3290 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3292 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3293 if (outbufsz
> MAX_STRUCT_SIZE
) {
3294 /* We can't fit all the extents into the fixed size buffer.
3295 * Allocate one that is large enough and use it instead.
3297 host_ifconf
= malloc(outbufsz
);
3299 return -TARGET_ENOMEM
;
3301 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3304 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3306 host_ifconf
->ifc_len
= host_ifc_len
;
3307 host_ifconf
->ifc_buf
= host_ifc_buf
;
3309 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3310 if (!is_error(ret
)) {
3311 /* convert host ifc_len to target ifc_len */
3313 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3314 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3315 host_ifconf
->ifc_len
= target_ifc_len
;
3317 /* restore target ifc_buf */
3319 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3321 /* copy struct ifconf to target user */
3323 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3325 return -TARGET_EFAULT
;
3326 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3327 unlock_user(argptr
, arg
, target_size
);
3329 /* copy ifreq[] to target user */
3331 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3332 for (i
= 0; i
< nb_ifreq
; i
++) {
3333 thunk_convert(argptr
+ i
* target_ifreq_size
,
3334 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3335 ifreq_arg_type
, THUNK_TARGET
);
3337 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3347 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3348 abi_long cmd
, abi_long arg
)
3351 struct dm_ioctl
*host_dm
;
3352 abi_long guest_data
;
3353 uint32_t guest_data_size
;
3355 const argtype
*arg_type
= ie
->arg_type
;
3357 void *big_buf
= NULL
;
3361 target_size
= thunk_type_size(arg_type
, 0);
3362 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3364 ret
= -TARGET_EFAULT
;
3367 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3368 unlock_user(argptr
, arg
, 0);
3370 /* buf_temp is too small, so fetch things into a bigger buffer */
3371 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3372 memcpy(big_buf
, buf_temp
, target_size
);
3376 guest_data
= arg
+ host_dm
->data_start
;
3377 if ((guest_data
- arg
) < 0) {
3381 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3382 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3384 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3385 switch (ie
->host_cmd
) {
3387 case DM_LIST_DEVICES
:
3390 case DM_DEV_SUSPEND
:
3393 case DM_TABLE_STATUS
:
3394 case DM_TABLE_CLEAR
:
3396 case DM_LIST_VERSIONS
:
3400 case DM_DEV_SET_GEOMETRY
:
3401 /* data contains only strings */
3402 memcpy(host_data
, argptr
, guest_data_size
);
3405 memcpy(host_data
, argptr
, guest_data_size
);
3406 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3410 void *gspec
= argptr
;
3411 void *cur_data
= host_data
;
3412 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3413 int spec_size
= thunk_type_size(arg_type
, 0);
3416 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3417 struct dm_target_spec
*spec
= cur_data
;
3421 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3422 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3424 spec
->next
= sizeof(*spec
) + slen
;
3425 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3427 cur_data
+= spec
->next
;
3432 ret
= -TARGET_EINVAL
;
3435 unlock_user(argptr
, guest_data
, 0);
3437 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3438 if (!is_error(ret
)) {
3439 guest_data
= arg
+ host_dm
->data_start
;
3440 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3441 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3442 switch (ie
->host_cmd
) {
3447 case DM_DEV_SUSPEND
:
3450 case DM_TABLE_CLEAR
:
3452 case DM_DEV_SET_GEOMETRY
:
3453 /* no return data */
3455 case DM_LIST_DEVICES
:
3457 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3458 uint32_t remaining_data
= guest_data_size
;
3459 void *cur_data
= argptr
;
3460 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3461 int nl_size
= 12; /* can't use thunk_size due to alignment */
3464 uint32_t next
= nl
->next
;
3466 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3468 if (remaining_data
< nl
->next
) {
3469 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3472 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3473 strcpy(cur_data
+ nl_size
, nl
->name
);
3474 cur_data
+= nl
->next
;
3475 remaining_data
-= nl
->next
;
3479 nl
= (void*)nl
+ next
;
3484 case DM_TABLE_STATUS
:
3486 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3487 void *cur_data
= argptr
;
3488 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3489 int spec_size
= thunk_type_size(arg_type
, 0);
3492 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3493 uint32_t next
= spec
->next
;
3494 int slen
= strlen((char*)&spec
[1]) + 1;
3495 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3496 if (guest_data_size
< spec
->next
) {
3497 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3500 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3501 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3502 cur_data
= argptr
+ spec
->next
;
3503 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3509 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3510 int count
= *(uint32_t*)hdata
;
3511 uint64_t *hdev
= hdata
+ 8;
3512 uint64_t *gdev
= argptr
+ 8;
3515 *(uint32_t*)argptr
= tswap32(count
);
3516 for (i
= 0; i
< count
; i
++) {
3517 *gdev
= tswap64(*hdev
);
3523 case DM_LIST_VERSIONS
:
3525 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3526 uint32_t remaining_data
= guest_data_size
;
3527 void *cur_data
= argptr
;
3528 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3529 int vers_size
= thunk_type_size(arg_type
, 0);
3532 uint32_t next
= vers
->next
;
3534 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3536 if (remaining_data
< vers
->next
) {
3537 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3540 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3541 strcpy(cur_data
+ vers_size
, vers
->name
);
3542 cur_data
+= vers
->next
;
3543 remaining_data
-= vers
->next
;
3547 vers
= (void*)vers
+ next
;
3552 ret
= -TARGET_EINVAL
;
3555 unlock_user(argptr
, guest_data
, guest_data_size
);
3557 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3559 ret
= -TARGET_EFAULT
;
3562 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3563 unlock_user(argptr
, arg
, target_size
);
3570 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3571 int fd
, abi_long cmd
, abi_long arg
)
3573 const argtype
*arg_type
= ie
->arg_type
;
3574 const StructEntry
*se
;
3575 const argtype
*field_types
;
3576 const int *dst_offsets
, *src_offsets
;
3579 abi_ulong
*target_rt_dev_ptr
;
3580 unsigned long *host_rt_dev_ptr
;
3584 assert(ie
->access
== IOC_W
);
3585 assert(*arg_type
== TYPE_PTR
);
3587 assert(*arg_type
== TYPE_STRUCT
);
3588 target_size
= thunk_type_size(arg_type
, 0);
3589 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3591 return -TARGET_EFAULT
;
3594 assert(*arg_type
== (int)STRUCT_rtentry
);
3595 se
= struct_entries
+ *arg_type
++;
3596 assert(se
->convert
[0] == NULL
);
3597 /* convert struct here to be able to catch rt_dev string */
3598 field_types
= se
->field_types
;
3599 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
3600 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
3601 for (i
= 0; i
< se
->nb_fields
; i
++) {
3602 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
3603 assert(*field_types
== TYPE_PTRVOID
);
3604 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
3605 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
3606 if (*target_rt_dev_ptr
!= 0) {
3607 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
3608 tswapal(*target_rt_dev_ptr
));
3609 if (!*host_rt_dev_ptr
) {
3610 unlock_user(argptr
, arg
, 0);
3611 return -TARGET_EFAULT
;
3614 *host_rt_dev_ptr
= 0;
3619 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
3620 argptr
+ src_offsets
[i
],
3621 field_types
, THUNK_HOST
);
3623 unlock_user(argptr
, arg
, 0);
3625 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3626 if (*host_rt_dev_ptr
!= 0) {
3627 unlock_user((void *)*host_rt_dev_ptr
,
3628 *target_rt_dev_ptr
, 0);
3633 static IOCTLEntry ioctl_entries
[] = {
3634 #define IOCTL(cmd, access, ...) \
3635 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3636 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3637 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3642 /* ??? Implement proper locking for ioctls. */
3643 /* do_ioctl() Must return target values and target errnos. */
3644 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3646 const IOCTLEntry
*ie
;
3647 const argtype
*arg_type
;
3649 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3655 if (ie
->target_cmd
== 0) {
3656 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3657 return -TARGET_ENOSYS
;
3659 if (ie
->target_cmd
== cmd
)
3663 arg_type
= ie
->arg_type
;
3665 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3668 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3671 switch(arg_type
[0]) {
3674 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3679 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3683 target_size
= thunk_type_size(arg_type
, 0);
3684 switch(ie
->access
) {
3686 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3687 if (!is_error(ret
)) {
3688 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3690 return -TARGET_EFAULT
;
3691 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3692 unlock_user(argptr
, arg
, target_size
);
3696 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3698 return -TARGET_EFAULT
;
3699 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3700 unlock_user(argptr
, arg
, 0);
3701 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3705 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3707 return -TARGET_EFAULT
;
3708 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3709 unlock_user(argptr
, arg
, 0);
3710 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3711 if (!is_error(ret
)) {
3712 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3714 return -TARGET_EFAULT
;
3715 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3716 unlock_user(argptr
, arg
, target_size
);
3722 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3723 (long)cmd
, arg_type
[0]);
3724 ret
= -TARGET_ENOSYS
;
3730 static const bitmask_transtbl iflag_tbl
[] = {
3731 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3732 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3733 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3734 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3735 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3736 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3737 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3738 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3739 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3740 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3741 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3742 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3743 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3744 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3748 static const bitmask_transtbl oflag_tbl
[] = {
3749 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3750 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3751 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3752 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3753 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3754 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3755 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3756 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3757 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3758 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3759 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3760 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3761 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3762 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3763 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3764 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3765 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3766 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3767 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3768 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3769 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3770 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3771 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3772 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3776 static const bitmask_transtbl cflag_tbl
[] = {
3777 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3778 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3779 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3780 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3781 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3782 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3783 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3784 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3785 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3786 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3787 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3788 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3789 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3790 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3791 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3792 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3793 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3794 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3795 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3796 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3797 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3798 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3799 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3800 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3801 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3802 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3803 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3804 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3805 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3806 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3807 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3811 static const bitmask_transtbl lflag_tbl
[] = {
3812 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3813 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3814 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3815 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3816 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3817 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3818 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3819 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3820 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3821 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3822 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3823 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3824 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3825 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3826 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3830 static void target_to_host_termios (void *dst
, const void *src
)
3832 struct host_termios
*host
= dst
;
3833 const struct target_termios
*target
= src
;
3836 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3838 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3840 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3842 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3843 host
->c_line
= target
->c_line
;
3845 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3846 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3847 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3848 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3849 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3850 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3851 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3852 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3853 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3854 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3855 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3856 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3857 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3858 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3859 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3860 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3861 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3862 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3865 static void host_to_target_termios (void *dst
, const void *src
)
3867 struct target_termios
*target
= dst
;
3868 const struct host_termios
*host
= src
;
3871 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3873 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3875 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3877 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3878 target
->c_line
= host
->c_line
;
3880 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3881 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3882 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3883 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3884 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3885 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3886 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3887 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3888 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3889 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3890 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3891 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3892 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3893 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3894 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3895 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3896 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3897 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3900 static const StructEntry struct_termios_def
= {
3901 .convert
= { host_to_target_termios
, target_to_host_termios
},
3902 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3903 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3906 static bitmask_transtbl mmap_flags_tbl
[] = {
3907 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3908 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3909 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3910 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3911 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3912 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3913 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3914 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3918 #if defined(TARGET_I386)
3920 /* NOTE: there is really one LDT for all the threads */
3921 static uint8_t *ldt_table
;
3923 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3930 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3931 if (size
> bytecount
)
3933 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3935 return -TARGET_EFAULT
;
3936 /* ??? Should this by byteswapped? */
3937 memcpy(p
, ldt_table
, size
);
3938 unlock_user(p
, ptr
, size
);
3942 /* XXX: add locking support */
3943 static abi_long
write_ldt(CPUX86State
*env
,
3944 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3946 struct target_modify_ldt_ldt_s ldt_info
;
3947 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3948 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3949 int seg_not_present
, useable
, lm
;
3950 uint32_t *lp
, entry_1
, entry_2
;
3952 if (bytecount
!= sizeof(ldt_info
))
3953 return -TARGET_EINVAL
;
3954 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3955 return -TARGET_EFAULT
;
3956 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3957 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3958 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3959 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3960 unlock_user_struct(target_ldt_info
, ptr
, 0);
3962 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3963 return -TARGET_EINVAL
;
3964 seg_32bit
= ldt_info
.flags
& 1;
3965 contents
= (ldt_info
.flags
>> 1) & 3;
3966 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3967 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3968 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3969 useable
= (ldt_info
.flags
>> 6) & 1;
3973 lm
= (ldt_info
.flags
>> 7) & 1;
3975 if (contents
== 3) {
3977 return -TARGET_EINVAL
;
3978 if (seg_not_present
== 0)
3979 return -TARGET_EINVAL
;
3981 /* allocate the LDT */
3983 env
->ldt
.base
= target_mmap(0,
3984 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3985 PROT_READ
|PROT_WRITE
,
3986 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3987 if (env
->ldt
.base
== -1)
3988 return -TARGET_ENOMEM
;
3989 memset(g2h(env
->ldt
.base
), 0,
3990 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3991 env
->ldt
.limit
= 0xffff;
3992 ldt_table
= g2h(env
->ldt
.base
);
3995 /* NOTE: same code as Linux kernel */
3996 /* Allow LDTs to be cleared by the user. */
3997 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4000 read_exec_only
== 1 &&
4002 limit_in_pages
== 0 &&
4003 seg_not_present
== 1 &&
4011 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4012 (ldt_info
.limit
& 0x0ffff);
4013 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4014 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4015 (ldt_info
.limit
& 0xf0000) |
4016 ((read_exec_only
^ 1) << 9) |
4018 ((seg_not_present
^ 1) << 15) |
4020 (limit_in_pages
<< 23) |
4024 entry_2
|= (useable
<< 20);
4026 /* Install the new entry ... */
4028 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4029 lp
[0] = tswap32(entry_1
);
4030 lp
[1] = tswap32(entry_2
);
4034 /* specific and weird i386 syscalls */
4035 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4036 unsigned long bytecount
)
4042 ret
= read_ldt(ptr
, bytecount
);
4045 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4048 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4051 ret
= -TARGET_ENOSYS
;
4057 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4058 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4060 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4061 struct target_modify_ldt_ldt_s ldt_info
;
4062 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4063 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4064 int seg_not_present
, useable
, lm
;
4065 uint32_t *lp
, entry_1
, entry_2
;
4068 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4069 if (!target_ldt_info
)
4070 return -TARGET_EFAULT
;
4071 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4072 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4073 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4074 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4075 if (ldt_info
.entry_number
== -1) {
4076 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4077 if (gdt_table
[i
] == 0) {
4078 ldt_info
.entry_number
= i
;
4079 target_ldt_info
->entry_number
= tswap32(i
);
4084 unlock_user_struct(target_ldt_info
, ptr
, 1);
4086 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4087 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4088 return -TARGET_EINVAL
;
4089 seg_32bit
= ldt_info
.flags
& 1;
4090 contents
= (ldt_info
.flags
>> 1) & 3;
4091 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4092 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4093 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4094 useable
= (ldt_info
.flags
>> 6) & 1;
4098 lm
= (ldt_info
.flags
>> 7) & 1;
4101 if (contents
== 3) {
4102 if (seg_not_present
== 0)
4103 return -TARGET_EINVAL
;
4106 /* NOTE: same code as Linux kernel */
4107 /* Allow LDTs to be cleared by the user. */
4108 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4109 if ((contents
== 0 &&
4110 read_exec_only
== 1 &&
4112 limit_in_pages
== 0 &&
4113 seg_not_present
== 1 &&
4121 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4122 (ldt_info
.limit
& 0x0ffff);
4123 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4124 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4125 (ldt_info
.limit
& 0xf0000) |
4126 ((read_exec_only
^ 1) << 9) |
4128 ((seg_not_present
^ 1) << 15) |
4130 (limit_in_pages
<< 23) |
4135 /* Install the new entry ... */
4137 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4138 lp
[0] = tswap32(entry_1
);
4139 lp
[1] = tswap32(entry_2
);
4143 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4145 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4146 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4147 uint32_t base_addr
, limit
, flags
;
4148 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4149 int seg_not_present
, useable
, lm
;
4150 uint32_t *lp
, entry_1
, entry_2
;
4152 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4153 if (!target_ldt_info
)
4154 return -TARGET_EFAULT
;
4155 idx
= tswap32(target_ldt_info
->entry_number
);
4156 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4157 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4158 unlock_user_struct(target_ldt_info
, ptr
, 1);
4159 return -TARGET_EINVAL
;
4161 lp
= (uint32_t *)(gdt_table
+ idx
);
4162 entry_1
= tswap32(lp
[0]);
4163 entry_2
= tswap32(lp
[1]);
4165 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4166 contents
= (entry_2
>> 10) & 3;
4167 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4168 seg_32bit
= (entry_2
>> 22) & 1;
4169 limit_in_pages
= (entry_2
>> 23) & 1;
4170 useable
= (entry_2
>> 20) & 1;
4174 lm
= (entry_2
>> 21) & 1;
4176 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4177 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4178 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4179 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4180 base_addr
= (entry_1
>> 16) |
4181 (entry_2
& 0xff000000) |
4182 ((entry_2
& 0xff) << 16);
4183 target_ldt_info
->base_addr
= tswapal(base_addr
);
4184 target_ldt_info
->limit
= tswap32(limit
);
4185 target_ldt_info
->flags
= tswap32(flags
);
4186 unlock_user_struct(target_ldt_info
, ptr
, 1);
4189 #endif /* TARGET_I386 && TARGET_ABI32 */
4191 #ifndef TARGET_ABI32
4192 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4199 case TARGET_ARCH_SET_GS
:
4200 case TARGET_ARCH_SET_FS
:
4201 if (code
== TARGET_ARCH_SET_GS
)
4205 cpu_x86_load_seg(env
, idx
, 0);
4206 env
->segs
[idx
].base
= addr
;
4208 case TARGET_ARCH_GET_GS
:
4209 case TARGET_ARCH_GET_FS
:
4210 if (code
== TARGET_ARCH_GET_GS
)
4214 val
= env
->segs
[idx
].base
;
4215 if (put_user(val
, addr
, abi_ulong
))
4216 ret
= -TARGET_EFAULT
;
4219 ret
= -TARGET_EINVAL
;
4226 #endif /* defined(TARGET_I386) */
4228 #define NEW_STACK_SIZE 0x40000
4230 #if defined(CONFIG_USE_NPTL)
4232 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4235 pthread_mutex_t mutex
;
4236 pthread_cond_t cond
;
4239 abi_ulong child_tidptr
;
4240 abi_ulong parent_tidptr
;
4244 static void *clone_func(void *arg
)
4246 new_thread_info
*info
= arg
;
4252 cpu
= ENV_GET_CPU(env
);
4254 ts
= (TaskState
*)env
->opaque
;
4255 info
->tid
= gettid();
4256 cpu
->host_tid
= info
->tid
;
4258 if (info
->child_tidptr
)
4259 put_user_u32(info
->tid
, info
->child_tidptr
);
4260 if (info
->parent_tidptr
)
4261 put_user_u32(info
->tid
, info
->parent_tidptr
);
4262 /* Enable signals. */
4263 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4264 /* Signal to the parent that we're ready. */
4265 pthread_mutex_lock(&info
->mutex
);
4266 pthread_cond_broadcast(&info
->cond
);
4267 pthread_mutex_unlock(&info
->mutex
);
4268 /* Wait until the parent has finshed initializing the tls state. */
4269 pthread_mutex_lock(&clone_lock
);
4270 pthread_mutex_unlock(&clone_lock
);
4277 static int clone_func(void *arg
)
4279 CPUArchState
*env
= arg
;
4286 /* do_fork() Must return host values and target errnos (unlike most
4287 do_*() functions). */
4288 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4289 abi_ulong parent_tidptr
, target_ulong newtls
,
4290 abi_ulong child_tidptr
)
4294 CPUArchState
*new_env
;
4295 #if defined(CONFIG_USE_NPTL)
4296 unsigned int nptl_flags
;
4302 /* Emulate vfork() with fork() */
4303 if (flags
& CLONE_VFORK
)
4304 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4306 if (flags
& CLONE_VM
) {
4307 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4308 #if defined(CONFIG_USE_NPTL)
4309 new_thread_info info
;
4310 pthread_attr_t attr
;
4312 ts
= g_malloc0(sizeof(TaskState
));
4313 init_task_state(ts
);
4314 /* we create a new CPU instance. */
4315 new_env
= cpu_copy(env
);
4316 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4317 cpu_reset(ENV_GET_CPU(new_env
));
4319 /* Init regs that differ from the parent. */
4320 cpu_clone_regs(new_env
, newsp
);
4321 new_env
->opaque
= ts
;
4322 ts
->bprm
= parent_ts
->bprm
;
4323 ts
->info
= parent_ts
->info
;
4324 #if defined(CONFIG_USE_NPTL)
4326 flags
&= ~CLONE_NPTL_FLAGS2
;
4328 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4329 ts
->child_tidptr
= child_tidptr
;
4332 if (nptl_flags
& CLONE_SETTLS
)
4333 cpu_set_tls (new_env
, newtls
);
4335 /* Grab a mutex so that thread setup appears atomic. */
4336 pthread_mutex_lock(&clone_lock
);
4338 memset(&info
, 0, sizeof(info
));
4339 pthread_mutex_init(&info
.mutex
, NULL
);
4340 pthread_mutex_lock(&info
.mutex
);
4341 pthread_cond_init(&info
.cond
, NULL
);
4343 if (nptl_flags
& CLONE_CHILD_SETTID
)
4344 info
.child_tidptr
= child_tidptr
;
4345 if (nptl_flags
& CLONE_PARENT_SETTID
)
4346 info
.parent_tidptr
= parent_tidptr
;
4348 ret
= pthread_attr_init(&attr
);
4349 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4350 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4351 /* It is not safe to deliver signals until the child has finished
4352 initializing, so temporarily block all signals. */
4353 sigfillset(&sigmask
);
4354 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4356 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4357 /* TODO: Free new CPU state if thread creation failed. */
4359 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4360 pthread_attr_destroy(&attr
);
4362 /* Wait for the child to initialize. */
4363 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4365 if (flags
& CLONE_PARENT_SETTID
)
4366 put_user_u32(ret
, parent_tidptr
);
4370 pthread_mutex_unlock(&info
.mutex
);
4371 pthread_cond_destroy(&info
.cond
);
4372 pthread_mutex_destroy(&info
.mutex
);
4373 pthread_mutex_unlock(&clone_lock
);
4375 if (flags
& CLONE_NPTL_FLAGS2
)
4377 /* This is probably going to die very quickly, but do it anyway. */
4378 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4380 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4382 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4386 /* if no CLONE_VM, we consider it is a fork */
4387 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4392 /* Child Process. */
4393 cpu_clone_regs(env
, newsp
);
4395 #if defined(CONFIG_USE_NPTL)
4396 /* There is a race condition here. The parent process could
4397 theoretically read the TID in the child process before the child
4398 tid is set. This would require using either ptrace
4399 (not implemented) or having *_tidptr to point at a shared memory
4400 mapping. We can't repeat the spinlock hack used above because
4401 the child process gets its own copy of the lock. */
4402 if (flags
& CLONE_CHILD_SETTID
)
4403 put_user_u32(gettid(), child_tidptr
);
4404 if (flags
& CLONE_PARENT_SETTID
)
4405 put_user_u32(gettid(), parent_tidptr
);
4406 ts
= (TaskState
*)env
->opaque
;
4407 if (flags
& CLONE_SETTLS
)
4408 cpu_set_tls (env
, newtls
);
4409 if (flags
& CLONE_CHILD_CLEARTID
)
4410 ts
->child_tidptr
= child_tidptr
;
4419 /* warning : doesn't handle linux specific flags... */
4420 static int target_to_host_fcntl_cmd(int cmd
)
4423 case TARGET_F_DUPFD
:
4424 case TARGET_F_GETFD
:
4425 case TARGET_F_SETFD
:
4426 case TARGET_F_GETFL
:
4427 case TARGET_F_SETFL
:
4429 case TARGET_F_GETLK
:
4431 case TARGET_F_SETLK
:
4433 case TARGET_F_SETLKW
:
4435 case TARGET_F_GETOWN
:
4437 case TARGET_F_SETOWN
:
4439 case TARGET_F_GETSIG
:
4441 case TARGET_F_SETSIG
:
4443 #if TARGET_ABI_BITS == 32
4444 case TARGET_F_GETLK64
:
4446 case TARGET_F_SETLK64
:
4448 case TARGET_F_SETLKW64
:
4451 case TARGET_F_SETLEASE
:
4453 case TARGET_F_GETLEASE
:
4455 #ifdef F_DUPFD_CLOEXEC
4456 case TARGET_F_DUPFD_CLOEXEC
:
4457 return F_DUPFD_CLOEXEC
;
4459 case TARGET_F_NOTIFY
:
4462 return -TARGET_EINVAL
;
4464 return -TARGET_EINVAL
;
4467 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4468 static const bitmask_transtbl flock_tbl
[] = {
4469 TRANSTBL_CONVERT(F_RDLCK
),
4470 TRANSTBL_CONVERT(F_WRLCK
),
4471 TRANSTBL_CONVERT(F_UNLCK
),
4472 TRANSTBL_CONVERT(F_EXLCK
),
4473 TRANSTBL_CONVERT(F_SHLCK
),
4477 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4480 struct target_flock
*target_fl
;
4481 struct flock64 fl64
;
4482 struct target_flock64
*target_fl64
;
4484 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4486 if (host_cmd
== -TARGET_EINVAL
)
4490 case TARGET_F_GETLK
:
4491 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4492 return -TARGET_EFAULT
;
4494 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4495 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4496 fl
.l_start
= tswapal(target_fl
->l_start
);
4497 fl
.l_len
= tswapal(target_fl
->l_len
);
4498 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4499 unlock_user_struct(target_fl
, arg
, 0);
4500 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4502 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4503 return -TARGET_EFAULT
;
4505 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4506 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4507 target_fl
->l_start
= tswapal(fl
.l_start
);
4508 target_fl
->l_len
= tswapal(fl
.l_len
);
4509 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4510 unlock_user_struct(target_fl
, arg
, 1);
4514 case TARGET_F_SETLK
:
4515 case TARGET_F_SETLKW
:
4516 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4517 return -TARGET_EFAULT
;
4519 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4520 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4521 fl
.l_start
= tswapal(target_fl
->l_start
);
4522 fl
.l_len
= tswapal(target_fl
->l_len
);
4523 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4524 unlock_user_struct(target_fl
, arg
, 0);
4525 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4528 case TARGET_F_GETLK64
:
4529 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4530 return -TARGET_EFAULT
;
4532 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4533 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4534 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4535 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4536 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4537 unlock_user_struct(target_fl64
, arg
, 0);
4538 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4540 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4541 return -TARGET_EFAULT
;
4542 target_fl64
->l_type
=
4543 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4544 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4545 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4546 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4547 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4548 unlock_user_struct(target_fl64
, arg
, 1);
4551 case TARGET_F_SETLK64
:
4552 case TARGET_F_SETLKW64
:
4553 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4554 return -TARGET_EFAULT
;
4556 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4557 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4558 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4559 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4560 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4561 unlock_user_struct(target_fl64
, arg
, 0);
4562 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4565 case TARGET_F_GETFL
:
4566 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4568 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4572 case TARGET_F_SETFL
:
4573 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4576 case TARGET_F_SETOWN
:
4577 case TARGET_F_GETOWN
:
4578 case TARGET_F_SETSIG
:
4579 case TARGET_F_GETSIG
:
4580 case TARGET_F_SETLEASE
:
4581 case TARGET_F_GETLEASE
:
4582 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4586 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4594 static inline int high2lowuid(int uid
)
4602 static inline int high2lowgid(int gid
)
4610 static inline int low2highuid(int uid
)
4612 if ((int16_t)uid
== -1)
4618 static inline int low2highgid(int gid
)
4620 if ((int16_t)gid
== -1)
4625 static inline int tswapid(int id
)
4629 #else /* !USE_UID16 */
4630 static inline int high2lowuid(int uid
)
4634 static inline int high2lowgid(int gid
)
4638 static inline int low2highuid(int uid
)
4642 static inline int low2highgid(int gid
)
4646 static inline int tswapid(int id
)
4650 #endif /* USE_UID16 */
4652 void syscall_init(void)
4655 const argtype
*arg_type
;
4659 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4660 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4661 #include "syscall_types.h"
4663 #undef STRUCT_SPECIAL
4665 /* Build target_to_host_errno_table[] table from
4666 * host_to_target_errno_table[]. */
4667 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4668 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4671 /* we patch the ioctl size if necessary. We rely on the fact that
4672 no ioctl has all the bits at '1' in the size field */
4674 while (ie
->target_cmd
!= 0) {
4675 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4676 TARGET_IOC_SIZEMASK
) {
4677 arg_type
= ie
->arg_type
;
4678 if (arg_type
[0] != TYPE_PTR
) {
4679 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4684 size
= thunk_type_size(arg_type
, 0);
4685 ie
->target_cmd
= (ie
->target_cmd
&
4686 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4687 (size
<< TARGET_IOC_SIZESHIFT
);
4690 /* automatic consistency check if same arch */
4691 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4692 (defined(__x86_64__) && defined(TARGET_X86_64))
4693 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4694 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4695 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4702 #if TARGET_ABI_BITS == 32
4703 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4705 #ifdef TARGET_WORDS_BIGENDIAN
4706 return ((uint64_t)word0
<< 32) | word1
;
4708 return ((uint64_t)word1
<< 32) | word0
;
4711 #else /* TARGET_ABI_BITS == 32 */
4712 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4716 #endif /* TARGET_ABI_BITS != 32 */
4718 #ifdef TARGET_NR_truncate64
4719 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4724 if (regpairs_aligned(cpu_env
)) {
4728 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4732 #ifdef TARGET_NR_ftruncate64
4733 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4738 if (regpairs_aligned(cpu_env
)) {
4742 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4746 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4747 abi_ulong target_addr
)
4749 struct target_timespec
*target_ts
;
4751 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4752 return -TARGET_EFAULT
;
4753 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4754 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4755 unlock_user_struct(target_ts
, target_addr
, 0);
4759 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4760 struct timespec
*host_ts
)
4762 struct target_timespec
*target_ts
;
4764 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4765 return -TARGET_EFAULT
;
4766 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4767 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4768 unlock_user_struct(target_ts
, target_addr
, 1);
4772 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4773 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4774 abi_ulong target_addr
,
4775 struct stat
*host_st
)
4778 if (((CPUARMState
*)cpu_env
)->eabi
) {
4779 struct target_eabi_stat64
*target_st
;
4781 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4782 return -TARGET_EFAULT
;
4783 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4784 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4785 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4786 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4787 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4789 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4790 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4791 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4792 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4793 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4794 __put_user(host_st
->st_size
, &target_st
->st_size
);
4795 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4796 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4797 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4798 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4799 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4800 unlock_user_struct(target_st
, target_addr
, 1);
4804 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4805 struct target_stat
*target_st
;
4807 struct target_stat64
*target_st
;
4810 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4811 return -TARGET_EFAULT
;
4812 memset(target_st
, 0, sizeof(*target_st
));
4813 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4814 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4815 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4816 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4818 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4819 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4820 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4821 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4822 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4823 /* XXX: better use of kernel struct */
4824 __put_user(host_st
->st_size
, &target_st
->st_size
);
4825 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4826 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4827 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4828 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4829 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4830 unlock_user_struct(target_st
, target_addr
, 1);
4837 #if defined(CONFIG_USE_NPTL)
4838 /* ??? Using host futex calls even when target atomic operations
4839 are not really atomic probably breaks things. However implementing
4840 futexes locally would make futexes shared between multiple processes
4841 tricky. However they're probably useless because guest atomic
4842 operations won't work either. */
4843 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4844 target_ulong uaddr2
, int val3
)
4846 struct timespec ts
, *pts
;
4849 /* ??? We assume FUTEX_* constants are the same on both host
4851 #ifdef FUTEX_CMD_MASK
4852 base_op
= op
& FUTEX_CMD_MASK
;
4858 case FUTEX_WAIT_BITSET
:
4861 target_to_host_timespec(pts
, timeout
);
4865 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4868 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4870 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4872 case FUTEX_CMP_REQUEUE
:
4874 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4875 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4876 But the prototype takes a `struct timespec *'; insert casts
4877 to satisfy the compiler. We do not need to tswap TIMEOUT
4878 since it's not compared to guest memory. */
4879 pts
= (struct timespec
*)(uintptr_t) timeout
;
4880 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4882 (base_op
== FUTEX_CMP_REQUEUE
4886 return -TARGET_ENOSYS
;
4891 /* Map host to target signal numbers for the wait family of syscalls.
4892 Assume all other status bits are the same. */
4893 int host_to_target_waitstatus(int status
)
4895 if (WIFSIGNALED(status
)) {
4896 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
4898 if (WIFSTOPPED(status
)) {
4899 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
4905 int get_osversion(void)
4907 static int osversion
;
4908 struct new_utsname buf
;
4913 if (qemu_uname_release
&& *qemu_uname_release
) {
4914 s
= qemu_uname_release
;
4916 if (sys_uname(&buf
))
4921 for (i
= 0; i
< 3; i
++) {
4923 while (*s
>= '0' && *s
<= '9') {
4928 tmp
= (tmp
<< 8) + n
;
4937 static int open_self_maps(void *cpu_env
, int fd
)
4939 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4940 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4947 fp
= fopen("/proc/self/maps", "r");
4952 while ((read
= getline(&line
, &len
, fp
)) != -1) {
4953 int fields
, dev_maj
, dev_min
, inode
;
4954 uint64_t min
, max
, offset
;
4955 char flag_r
, flag_w
, flag_x
, flag_p
;
4956 char path
[512] = "";
4957 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
4958 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
4959 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
4961 if ((fields
< 10) || (fields
> 11)) {
4964 if (!strncmp(path
, "[stack]", 7)) {
4967 if (h2g_valid(min
) && h2g_valid(max
)) {
4968 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
4969 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
4970 h2g(min
), h2g(max
), flag_r
, flag_w
,
4971 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
4972 path
[0] ? " " : "", path
);
4979 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4980 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4981 (unsigned long long)ts
->info
->stack_limit
,
4982 (unsigned long long)(ts
->info
->start_stack
+
4983 (TARGET_PAGE_SIZE
- 1)) & TARGET_PAGE_MASK
,
4984 (unsigned long long)0);
4990 static int open_self_stat(void *cpu_env
, int fd
)
4992 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4993 abi_ulong start_stack
= ts
->info
->start_stack
;
4996 for (i
= 0; i
< 44; i
++) {
5004 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5005 } else if (i
== 1) {
5007 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
5008 } else if (i
== 27) {
5011 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5013 /* for the rest, there is MasterCard */
5014 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
5018 if (write(fd
, buf
, len
) != len
) {
5026 static int open_self_auxv(void *cpu_env
, int fd
)
5028 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5029 abi_ulong auxv
= ts
->info
->saved_auxv
;
5030 abi_ulong len
= ts
->info
->auxv_len
;
5034 * Auxiliary vector is stored in target process stack.
5035 * read in whole auxv vector and copy it to file
5037 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5041 r
= write(fd
, ptr
, len
);
5048 lseek(fd
, 0, SEEK_SET
);
5049 unlock_user(ptr
, auxv
, len
);
5055 static int is_proc_myself(const char *filename
, const char *entry
)
5057 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
5058 filename
+= strlen("/proc/");
5059 if (!strncmp(filename
, "self/", strlen("self/"))) {
5060 filename
+= strlen("self/");
5061 } else if (*filename
>= '1' && *filename
<= '9') {
5063 snprintf(myself
, sizeof(myself
), "%d/", getpid());
5064 if (!strncmp(filename
, myself
, strlen(myself
))) {
5065 filename
+= strlen(myself
);
5072 if (!strcmp(filename
, entry
)) {
5079 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5082 const char *filename
;
5083 int (*fill
)(void *cpu_env
, int fd
);
5085 const struct fake_open
*fake_open
;
5086 static const struct fake_open fakes
[] = {
5087 { "maps", open_self_maps
},
5088 { "stat", open_self_stat
},
5089 { "auxv", open_self_auxv
},
5093 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5094 if (is_proc_myself(pathname
, fake_open
->filename
)) {
5099 if (fake_open
->filename
) {
5101 char filename
[PATH_MAX
];
5104 /* create temporary file to map stat to */
5105 tmpdir
= getenv("TMPDIR");
5108 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5109 fd
= mkstemp(filename
);
5115 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5119 lseek(fd
, 0, SEEK_SET
);
5124 return get_errno(open(path(pathname
), flags
, mode
));
5127 /* do_syscall() should always have a single exit point at the end so
5128 that actions, such as logging of syscall results, can be performed.
5129 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5130 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5131 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5132 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5135 #ifdef CONFIG_USE_NPTL
5136 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
5144 gemu_log("syscall %d", num
);
5147 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5150 case TARGET_NR_exit
:
5151 #ifdef CONFIG_USE_NPTL
5152 /* In old applications this may be used to implement _exit(2).
5153 However in threaded applictions it is used for thread termination,
5154 and _exit_group is used for application termination.
5155 Do thread termination if we have more then one thread. */
5156 /* FIXME: This probably breaks if a signal arrives. We should probably
5157 be disabling signals. */
5158 if (first_cpu
->next_cpu
) {
5166 while (p
&& p
!= cpu
) {
5167 lastp
= &p
->next_cpu
;
5170 /* If we didn't find the CPU for this thread then something is
5175 /* Remove the CPU from the list. */
5176 *lastp
= p
->next_cpu
;
5178 ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5179 if (ts
->child_tidptr
) {
5180 put_user_u32(0, ts
->child_tidptr
);
5181 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5185 object_unref(OBJECT(ENV_GET_CPU(cpu_env
)));
5193 gdb_exit(cpu_env
, arg1
);
5195 ret
= 0; /* avoid warning */
5197 case TARGET_NR_read
:
5201 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5203 ret
= get_errno(read(arg1
, p
, arg3
));
5204 unlock_user(p
, arg2
, ret
);
5207 case TARGET_NR_write
:
5208 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5210 ret
= get_errno(write(arg1
, p
, arg3
));
5211 unlock_user(p
, arg2
, 0);
5213 case TARGET_NR_open
:
5214 if (!(p
= lock_user_string(arg1
)))
5216 ret
= get_errno(do_open(cpu_env
, p
,
5217 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5219 unlock_user(p
, arg1
, 0);
5221 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5222 case TARGET_NR_openat
:
5223 if (!(p
= lock_user_string(arg2
)))
5225 ret
= get_errno(sys_openat(arg1
,
5227 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5229 unlock_user(p
, arg2
, 0);
5232 case TARGET_NR_close
:
5233 ret
= get_errno(close(arg1
));
5238 case TARGET_NR_fork
:
5239 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5241 #ifdef TARGET_NR_waitpid
5242 case TARGET_NR_waitpid
:
5245 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5246 if (!is_error(ret
) && arg2
&& ret
5247 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5252 #ifdef TARGET_NR_waitid
5253 case TARGET_NR_waitid
:
5257 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5258 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5259 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5261 host_to_target_siginfo(p
, &info
);
5262 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5267 #ifdef TARGET_NR_creat /* not on alpha */
5268 case TARGET_NR_creat
:
5269 if (!(p
= lock_user_string(arg1
)))
5271 ret
= get_errno(creat(p
, arg2
));
5272 unlock_user(p
, arg1
, 0);
5275 case TARGET_NR_link
:
5278 p
= lock_user_string(arg1
);
5279 p2
= lock_user_string(arg2
);
5281 ret
= -TARGET_EFAULT
;
5283 ret
= get_errno(link(p
, p2
));
5284 unlock_user(p2
, arg2
, 0);
5285 unlock_user(p
, arg1
, 0);
5288 #if defined(TARGET_NR_linkat)
5289 case TARGET_NR_linkat
:
5294 p
= lock_user_string(arg2
);
5295 p2
= lock_user_string(arg4
);
5297 ret
= -TARGET_EFAULT
;
5299 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
5300 unlock_user(p
, arg2
, 0);
5301 unlock_user(p2
, arg4
, 0);
5305 case TARGET_NR_unlink
:
5306 if (!(p
= lock_user_string(arg1
)))
5308 ret
= get_errno(unlink(p
));
5309 unlock_user(p
, arg1
, 0);
5311 #if defined(TARGET_NR_unlinkat)
5312 case TARGET_NR_unlinkat
:
5313 if (!(p
= lock_user_string(arg2
)))
5315 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
5316 unlock_user(p
, arg2
, 0);
5319 case TARGET_NR_execve
:
5321 char **argp
, **envp
;
5324 abi_ulong guest_argp
;
5325 abi_ulong guest_envp
;
5332 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5333 if (get_user_ual(addr
, gp
))
5341 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5342 if (get_user_ual(addr
, gp
))
5349 argp
= alloca((argc
+ 1) * sizeof(void *));
5350 envp
= alloca((envc
+ 1) * sizeof(void *));
5352 for (gp
= guest_argp
, q
= argp
; gp
;
5353 gp
+= sizeof(abi_ulong
), q
++) {
5354 if (get_user_ual(addr
, gp
))
5358 if (!(*q
= lock_user_string(addr
)))
5360 total_size
+= strlen(*q
) + 1;
5364 for (gp
= guest_envp
, q
= envp
; gp
;
5365 gp
+= sizeof(abi_ulong
), q
++) {
5366 if (get_user_ual(addr
, gp
))
5370 if (!(*q
= lock_user_string(addr
)))
5372 total_size
+= strlen(*q
) + 1;
5376 /* This case will not be caught by the host's execve() if its
5377 page size is bigger than the target's. */
5378 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5379 ret
= -TARGET_E2BIG
;
5382 if (!(p
= lock_user_string(arg1
)))
5384 ret
= get_errno(execve(p
, argp
, envp
));
5385 unlock_user(p
, arg1
, 0);
5390 ret
= -TARGET_EFAULT
;
5393 for (gp
= guest_argp
, q
= argp
; *q
;
5394 gp
+= sizeof(abi_ulong
), q
++) {
5395 if (get_user_ual(addr
, gp
)
5398 unlock_user(*q
, addr
, 0);
5400 for (gp
= guest_envp
, q
= envp
; *q
;
5401 gp
+= sizeof(abi_ulong
), q
++) {
5402 if (get_user_ual(addr
, gp
)
5405 unlock_user(*q
, addr
, 0);
5409 case TARGET_NR_chdir
:
5410 if (!(p
= lock_user_string(arg1
)))
5412 ret
= get_errno(chdir(p
));
5413 unlock_user(p
, arg1
, 0);
5415 #ifdef TARGET_NR_time
5416 case TARGET_NR_time
:
5419 ret
= get_errno(time(&host_time
));
5422 && put_user_sal(host_time
, arg1
))
5427 case TARGET_NR_mknod
:
5428 if (!(p
= lock_user_string(arg1
)))
5430 ret
= get_errno(mknod(p
, arg2
, arg3
));
5431 unlock_user(p
, arg1
, 0);
5433 #if defined(TARGET_NR_mknodat)
5434 case TARGET_NR_mknodat
:
5435 if (!(p
= lock_user_string(arg2
)))
5437 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
5438 unlock_user(p
, arg2
, 0);
5441 case TARGET_NR_chmod
:
5442 if (!(p
= lock_user_string(arg1
)))
5444 ret
= get_errno(chmod(p
, arg2
));
5445 unlock_user(p
, arg1
, 0);
5447 #ifdef TARGET_NR_break
5448 case TARGET_NR_break
:
5451 #ifdef TARGET_NR_oldstat
5452 case TARGET_NR_oldstat
:
5455 case TARGET_NR_lseek
:
5456 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5458 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5459 /* Alpha specific */
5460 case TARGET_NR_getxpid
:
5461 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5462 ret
= get_errno(getpid());
5465 #ifdef TARGET_NR_getpid
5466 case TARGET_NR_getpid
:
5467 ret
= get_errno(getpid());
5470 case TARGET_NR_mount
:
5472 /* need to look at the data field */
5474 p
= lock_user_string(arg1
);
5475 p2
= lock_user_string(arg2
);
5476 p3
= lock_user_string(arg3
);
5477 if (!p
|| !p2
|| !p3
)
5478 ret
= -TARGET_EFAULT
;
5480 /* FIXME - arg5 should be locked, but it isn't clear how to
5481 * do that since it's not guaranteed to be a NULL-terminated
5485 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5487 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5489 unlock_user(p
, arg1
, 0);
5490 unlock_user(p2
, arg2
, 0);
5491 unlock_user(p3
, arg3
, 0);
5494 #ifdef TARGET_NR_umount
5495 case TARGET_NR_umount
:
5496 if (!(p
= lock_user_string(arg1
)))
5498 ret
= get_errno(umount(p
));
5499 unlock_user(p
, arg1
, 0);
5502 #ifdef TARGET_NR_stime /* not on alpha */
5503 case TARGET_NR_stime
:
5506 if (get_user_sal(host_time
, arg1
))
5508 ret
= get_errno(stime(&host_time
));
5512 case TARGET_NR_ptrace
:
5514 #ifdef TARGET_NR_alarm /* not on alpha */
5515 case TARGET_NR_alarm
:
5519 #ifdef TARGET_NR_oldfstat
5520 case TARGET_NR_oldfstat
:
5523 #ifdef TARGET_NR_pause /* not on alpha */
5524 case TARGET_NR_pause
:
5525 ret
= get_errno(pause());
5528 #ifdef TARGET_NR_utime
5529 case TARGET_NR_utime
:
5531 struct utimbuf tbuf
, *host_tbuf
;
5532 struct target_utimbuf
*target_tbuf
;
5534 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5536 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5537 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5538 unlock_user_struct(target_tbuf
, arg2
, 0);
5543 if (!(p
= lock_user_string(arg1
)))
5545 ret
= get_errno(utime(p
, host_tbuf
));
5546 unlock_user(p
, arg1
, 0);
5550 case TARGET_NR_utimes
:
5552 struct timeval
*tvp
, tv
[2];
5554 if (copy_from_user_timeval(&tv
[0], arg2
)
5555 || copy_from_user_timeval(&tv
[1],
5556 arg2
+ sizeof(struct target_timeval
)))
5562 if (!(p
= lock_user_string(arg1
)))
5564 ret
= get_errno(utimes(p
, tvp
));
5565 unlock_user(p
, arg1
, 0);
5568 #if defined(TARGET_NR_futimesat)
5569 case TARGET_NR_futimesat
:
5571 struct timeval
*tvp
, tv
[2];
5573 if (copy_from_user_timeval(&tv
[0], arg3
)
5574 || copy_from_user_timeval(&tv
[1],
5575 arg3
+ sizeof(struct target_timeval
)))
5581 if (!(p
= lock_user_string(arg2
)))
5583 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
5584 unlock_user(p
, arg2
, 0);
5588 #ifdef TARGET_NR_stty
5589 case TARGET_NR_stty
:
5592 #ifdef TARGET_NR_gtty
5593 case TARGET_NR_gtty
:
5596 case TARGET_NR_access
:
5597 if (!(p
= lock_user_string(arg1
)))
5599 ret
= get_errno(access(path(p
), arg2
));
5600 unlock_user(p
, arg1
, 0);
5602 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5603 case TARGET_NR_faccessat
:
5604 if (!(p
= lock_user_string(arg2
)))
5606 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
5607 unlock_user(p
, arg2
, 0);
5610 #ifdef TARGET_NR_nice /* not on alpha */
5611 case TARGET_NR_nice
:
5612 ret
= get_errno(nice(arg1
));
5615 #ifdef TARGET_NR_ftime
5616 case TARGET_NR_ftime
:
5619 case TARGET_NR_sync
:
5623 case TARGET_NR_kill
:
5624 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5626 case TARGET_NR_rename
:
5629 p
= lock_user_string(arg1
);
5630 p2
= lock_user_string(arg2
);
5632 ret
= -TARGET_EFAULT
;
5634 ret
= get_errno(rename(p
, p2
));
5635 unlock_user(p2
, arg2
, 0);
5636 unlock_user(p
, arg1
, 0);
5639 #if defined(TARGET_NR_renameat)
5640 case TARGET_NR_renameat
:
5643 p
= lock_user_string(arg2
);
5644 p2
= lock_user_string(arg4
);
5646 ret
= -TARGET_EFAULT
;
5648 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
5649 unlock_user(p2
, arg4
, 0);
5650 unlock_user(p
, arg2
, 0);
5654 case TARGET_NR_mkdir
:
5655 if (!(p
= lock_user_string(arg1
)))
5657 ret
= get_errno(mkdir(p
, arg2
));
5658 unlock_user(p
, arg1
, 0);
5660 #if defined(TARGET_NR_mkdirat)
5661 case TARGET_NR_mkdirat
:
5662 if (!(p
= lock_user_string(arg2
)))
5664 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
5665 unlock_user(p
, arg2
, 0);
5668 case TARGET_NR_rmdir
:
5669 if (!(p
= lock_user_string(arg1
)))
5671 ret
= get_errno(rmdir(p
));
5672 unlock_user(p
, arg1
, 0);
5675 ret
= get_errno(dup(arg1
));
5677 case TARGET_NR_pipe
:
5678 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5680 #ifdef TARGET_NR_pipe2
5681 case TARGET_NR_pipe2
:
5682 ret
= do_pipe(cpu_env
, arg1
,
5683 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5686 case TARGET_NR_times
:
5688 struct target_tms
*tmsp
;
5690 ret
= get_errno(times(&tms
));
5692 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5695 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5696 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5697 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5698 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5701 ret
= host_to_target_clock_t(ret
);
5704 #ifdef TARGET_NR_prof
5705 case TARGET_NR_prof
:
5708 #ifdef TARGET_NR_signal
5709 case TARGET_NR_signal
:
5712 case TARGET_NR_acct
:
5714 ret
= get_errno(acct(NULL
));
5716 if (!(p
= lock_user_string(arg1
)))
5718 ret
= get_errno(acct(path(p
)));
5719 unlock_user(p
, arg1
, 0);
5722 #ifdef TARGET_NR_umount2 /* not on alpha */
5723 case TARGET_NR_umount2
:
5724 if (!(p
= lock_user_string(arg1
)))
5726 ret
= get_errno(umount2(p
, arg2
));
5727 unlock_user(p
, arg1
, 0);
5730 #ifdef TARGET_NR_lock
5731 case TARGET_NR_lock
:
5734 case TARGET_NR_ioctl
:
5735 ret
= do_ioctl(arg1
, arg2
, arg3
);
5737 case TARGET_NR_fcntl
:
5738 ret
= do_fcntl(arg1
, arg2
, arg3
);
5740 #ifdef TARGET_NR_mpx
5744 case TARGET_NR_setpgid
:
5745 ret
= get_errno(setpgid(arg1
, arg2
));
5747 #ifdef TARGET_NR_ulimit
5748 case TARGET_NR_ulimit
:
5751 #ifdef TARGET_NR_oldolduname
5752 case TARGET_NR_oldolduname
:
5755 case TARGET_NR_umask
:
5756 ret
= get_errno(umask(arg1
));
5758 case TARGET_NR_chroot
:
5759 if (!(p
= lock_user_string(arg1
)))
5761 ret
= get_errno(chroot(p
));
5762 unlock_user(p
, arg1
, 0);
5764 case TARGET_NR_ustat
:
5766 case TARGET_NR_dup2
:
5767 ret
= get_errno(dup2(arg1
, arg2
));
5769 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5770 case TARGET_NR_dup3
:
5771 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5774 #ifdef TARGET_NR_getppid /* not on alpha */
5775 case TARGET_NR_getppid
:
5776 ret
= get_errno(getppid());
5779 case TARGET_NR_getpgrp
:
5780 ret
= get_errno(getpgrp());
5782 case TARGET_NR_setsid
:
5783 ret
= get_errno(setsid());
5785 #ifdef TARGET_NR_sigaction
5786 case TARGET_NR_sigaction
:
5788 #if defined(TARGET_ALPHA)
5789 struct target_sigaction act
, oact
, *pact
= 0;
5790 struct target_old_sigaction
*old_act
;
5792 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5794 act
._sa_handler
= old_act
->_sa_handler
;
5795 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5796 act
.sa_flags
= old_act
->sa_flags
;
5797 act
.sa_restorer
= 0;
5798 unlock_user_struct(old_act
, arg2
, 0);
5801 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5802 if (!is_error(ret
) && arg3
) {
5803 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5805 old_act
->_sa_handler
= oact
._sa_handler
;
5806 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5807 old_act
->sa_flags
= oact
.sa_flags
;
5808 unlock_user_struct(old_act
, arg3
, 1);
5810 #elif defined(TARGET_MIPS)
5811 struct target_sigaction act
, oact
, *pact
, *old_act
;
5814 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5816 act
._sa_handler
= old_act
->_sa_handler
;
5817 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5818 act
.sa_flags
= old_act
->sa_flags
;
5819 unlock_user_struct(old_act
, arg2
, 0);
5825 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5827 if (!is_error(ret
) && arg3
) {
5828 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5830 old_act
->_sa_handler
= oact
._sa_handler
;
5831 old_act
->sa_flags
= oact
.sa_flags
;
5832 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5833 old_act
->sa_mask
.sig
[1] = 0;
5834 old_act
->sa_mask
.sig
[2] = 0;
5835 old_act
->sa_mask
.sig
[3] = 0;
5836 unlock_user_struct(old_act
, arg3
, 1);
5839 struct target_old_sigaction
*old_act
;
5840 struct target_sigaction act
, oact
, *pact
;
5842 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5844 act
._sa_handler
= old_act
->_sa_handler
;
5845 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5846 act
.sa_flags
= old_act
->sa_flags
;
5847 act
.sa_restorer
= old_act
->sa_restorer
;
5848 unlock_user_struct(old_act
, arg2
, 0);
5853 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5854 if (!is_error(ret
) && arg3
) {
5855 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5857 old_act
->_sa_handler
= oact
._sa_handler
;
5858 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5859 old_act
->sa_flags
= oact
.sa_flags
;
5860 old_act
->sa_restorer
= oact
.sa_restorer
;
5861 unlock_user_struct(old_act
, arg3
, 1);
5867 case TARGET_NR_rt_sigaction
:
5869 #if defined(TARGET_ALPHA)
5870 struct target_sigaction act
, oact
, *pact
= 0;
5871 struct target_rt_sigaction
*rt_act
;
5872 /* ??? arg4 == sizeof(sigset_t). */
5874 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5876 act
._sa_handler
= rt_act
->_sa_handler
;
5877 act
.sa_mask
= rt_act
->sa_mask
;
5878 act
.sa_flags
= rt_act
->sa_flags
;
5879 act
.sa_restorer
= arg5
;
5880 unlock_user_struct(rt_act
, arg2
, 0);
5883 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5884 if (!is_error(ret
) && arg3
) {
5885 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5887 rt_act
->_sa_handler
= oact
._sa_handler
;
5888 rt_act
->sa_mask
= oact
.sa_mask
;
5889 rt_act
->sa_flags
= oact
.sa_flags
;
5890 unlock_user_struct(rt_act
, arg3
, 1);
5893 struct target_sigaction
*act
;
5894 struct target_sigaction
*oact
;
5897 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5902 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5903 ret
= -TARGET_EFAULT
;
5904 goto rt_sigaction_fail
;
5908 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5911 unlock_user_struct(act
, arg2
, 0);
5913 unlock_user_struct(oact
, arg3
, 1);
5917 #ifdef TARGET_NR_sgetmask /* not on alpha */
5918 case TARGET_NR_sgetmask
:
5921 abi_ulong target_set
;
5922 sigprocmask(0, NULL
, &cur_set
);
5923 host_to_target_old_sigset(&target_set
, &cur_set
);
5928 #ifdef TARGET_NR_ssetmask /* not on alpha */
5929 case TARGET_NR_ssetmask
:
5931 sigset_t set
, oset
, cur_set
;
5932 abi_ulong target_set
= arg1
;
5933 sigprocmask(0, NULL
, &cur_set
);
5934 target_to_host_old_sigset(&set
, &target_set
);
5935 sigorset(&set
, &set
, &cur_set
);
5936 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5937 host_to_target_old_sigset(&target_set
, &oset
);
5942 #ifdef TARGET_NR_sigprocmask
5943 case TARGET_NR_sigprocmask
:
5945 #if defined(TARGET_ALPHA)
5946 sigset_t set
, oldset
;
5951 case TARGET_SIG_BLOCK
:
5954 case TARGET_SIG_UNBLOCK
:
5957 case TARGET_SIG_SETMASK
:
5961 ret
= -TARGET_EINVAL
;
5965 target_to_host_old_sigset(&set
, &mask
);
5967 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5968 if (!is_error(ret
)) {
5969 host_to_target_old_sigset(&mask
, &oldset
);
5971 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
5974 sigset_t set
, oldset
, *set_ptr
;
5979 case TARGET_SIG_BLOCK
:
5982 case TARGET_SIG_UNBLOCK
:
5985 case TARGET_SIG_SETMASK
:
5989 ret
= -TARGET_EINVAL
;
5992 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5994 target_to_host_old_sigset(&set
, p
);
5995 unlock_user(p
, arg2
, 0);
6001 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6002 if (!is_error(ret
) && arg3
) {
6003 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6005 host_to_target_old_sigset(p
, &oldset
);
6006 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6012 case TARGET_NR_rt_sigprocmask
:
6015 sigset_t set
, oldset
, *set_ptr
;
6019 case TARGET_SIG_BLOCK
:
6022 case TARGET_SIG_UNBLOCK
:
6025 case TARGET_SIG_SETMASK
:
6029 ret
= -TARGET_EINVAL
;
6032 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6034 target_to_host_sigset(&set
, p
);
6035 unlock_user(p
, arg2
, 0);
6041 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6042 if (!is_error(ret
) && arg3
) {
6043 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6045 host_to_target_sigset(p
, &oldset
);
6046 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6050 #ifdef TARGET_NR_sigpending
6051 case TARGET_NR_sigpending
:
6054 ret
= get_errno(sigpending(&set
));
6055 if (!is_error(ret
)) {
6056 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6058 host_to_target_old_sigset(p
, &set
);
6059 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6064 case TARGET_NR_rt_sigpending
:
6067 ret
= get_errno(sigpending(&set
));
6068 if (!is_error(ret
)) {
6069 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6071 host_to_target_sigset(p
, &set
);
6072 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6076 #ifdef TARGET_NR_sigsuspend
6077 case TARGET_NR_sigsuspend
:
6080 #if defined(TARGET_ALPHA)
6081 abi_ulong mask
= arg1
;
6082 target_to_host_old_sigset(&set
, &mask
);
6084 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6086 target_to_host_old_sigset(&set
, p
);
6087 unlock_user(p
, arg1
, 0);
6089 ret
= get_errno(sigsuspend(&set
));
6093 case TARGET_NR_rt_sigsuspend
:
6096 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6098 target_to_host_sigset(&set
, p
);
6099 unlock_user(p
, arg1
, 0);
6100 ret
= get_errno(sigsuspend(&set
));
6103 case TARGET_NR_rt_sigtimedwait
:
6106 struct timespec uts
, *puts
;
6109 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6111 target_to_host_sigset(&set
, p
);
6112 unlock_user(p
, arg1
, 0);
6115 target_to_host_timespec(puts
, arg3
);
6119 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6120 if (!is_error(ret
) && arg2
) {
6121 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
6123 host_to_target_siginfo(p
, &uinfo
);
6124 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6128 case TARGET_NR_rt_sigqueueinfo
:
6131 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6133 target_to_host_siginfo(&uinfo
, p
);
6134 unlock_user(p
, arg1
, 0);
6135 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6138 #ifdef TARGET_NR_sigreturn
6139 case TARGET_NR_sigreturn
:
6140 /* NOTE: ret is eax, so not transcoding must be done */
6141 ret
= do_sigreturn(cpu_env
);
6144 case TARGET_NR_rt_sigreturn
:
6145 /* NOTE: ret is eax, so not transcoding must be done */
6146 ret
= do_rt_sigreturn(cpu_env
);
6148 case TARGET_NR_sethostname
:
6149 if (!(p
= lock_user_string(arg1
)))
6151 ret
= get_errno(sethostname(p
, arg2
));
6152 unlock_user(p
, arg1
, 0);
6154 case TARGET_NR_setrlimit
:
6156 int resource
= target_to_host_resource(arg1
);
6157 struct target_rlimit
*target_rlim
;
6159 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6161 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6162 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6163 unlock_user_struct(target_rlim
, arg2
, 0);
6164 ret
= get_errno(setrlimit(resource
, &rlim
));
6167 case TARGET_NR_getrlimit
:
6169 int resource
= target_to_host_resource(arg1
);
6170 struct target_rlimit
*target_rlim
;
6173 ret
= get_errno(getrlimit(resource
, &rlim
));
6174 if (!is_error(ret
)) {
6175 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6177 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6178 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6179 unlock_user_struct(target_rlim
, arg2
, 1);
6183 case TARGET_NR_getrusage
:
6185 struct rusage rusage
;
6186 ret
= get_errno(getrusage(arg1
, &rusage
));
6187 if (!is_error(ret
)) {
6188 host_to_target_rusage(arg2
, &rusage
);
6192 case TARGET_NR_gettimeofday
:
6195 ret
= get_errno(gettimeofday(&tv
, NULL
));
6196 if (!is_error(ret
)) {
6197 if (copy_to_user_timeval(arg1
, &tv
))
6202 case TARGET_NR_settimeofday
:
6205 if (copy_from_user_timeval(&tv
, arg1
))
6207 ret
= get_errno(settimeofday(&tv
, NULL
));
6210 #if defined(TARGET_NR_select)
6211 case TARGET_NR_select
:
6212 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6213 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6216 struct target_sel_arg_struct
*sel
;
6217 abi_ulong inp
, outp
, exp
, tvp
;
6220 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6222 nsel
= tswapal(sel
->n
);
6223 inp
= tswapal(sel
->inp
);
6224 outp
= tswapal(sel
->outp
);
6225 exp
= tswapal(sel
->exp
);
6226 tvp
= tswapal(sel
->tvp
);
6227 unlock_user_struct(sel
, arg1
, 0);
6228 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6233 #ifdef TARGET_NR_pselect6
6234 case TARGET_NR_pselect6
:
6236 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6237 fd_set rfds
, wfds
, efds
;
6238 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6239 struct timespec ts
, *ts_ptr
;
6242 * The 6th arg is actually two args smashed together,
6243 * so we cannot use the C library.
6251 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6252 target_sigset_t
*target_sigset
;
6260 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6264 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6268 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6274 * This takes a timespec, and not a timeval, so we cannot
6275 * use the do_select() helper ...
6278 if (target_to_host_timespec(&ts
, ts_addr
)) {
6286 /* Extract the two packed args for the sigset */
6289 sig
.size
= _NSIG
/ 8;
6291 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6295 arg_sigset
= tswapal(arg7
[0]);
6296 arg_sigsize
= tswapal(arg7
[1]);
6297 unlock_user(arg7
, arg6
, 0);
6301 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6302 /* Like the kernel, we enforce correct size sigsets */
6303 ret
= -TARGET_EINVAL
;
6306 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6307 sizeof(*target_sigset
), 1);
6308 if (!target_sigset
) {
6311 target_to_host_sigset(&set
, target_sigset
);
6312 unlock_user(target_sigset
, arg_sigset
, 0);
6320 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6323 if (!is_error(ret
)) {
6324 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6326 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6328 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6331 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6337 case TARGET_NR_symlink
:
6340 p
= lock_user_string(arg1
);
6341 p2
= lock_user_string(arg2
);
6343 ret
= -TARGET_EFAULT
;
6345 ret
= get_errno(symlink(p
, p2
));
6346 unlock_user(p2
, arg2
, 0);
6347 unlock_user(p
, arg1
, 0);
6350 #if defined(TARGET_NR_symlinkat)
6351 case TARGET_NR_symlinkat
:
6354 p
= lock_user_string(arg1
);
6355 p2
= lock_user_string(arg3
);
6357 ret
= -TARGET_EFAULT
;
6359 ret
= get_errno(symlinkat(p
, arg2
, p2
));
6360 unlock_user(p2
, arg3
, 0);
6361 unlock_user(p
, arg1
, 0);
6365 #ifdef TARGET_NR_oldlstat
6366 case TARGET_NR_oldlstat
:
6369 case TARGET_NR_readlink
:
6372 p
= lock_user_string(arg1
);
6373 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6375 ret
= -TARGET_EFAULT
;
6376 } else if (is_proc_myself((const char *)p
, "exe")) {
6377 char real
[PATH_MAX
], *temp
;
6378 temp
= realpath(exec_path
, real
);
6379 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6380 snprintf((char *)p2
, arg3
, "%s", real
);
6382 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6384 unlock_user(p2
, arg2
, ret
);
6385 unlock_user(p
, arg1
, 0);
6388 #if defined(TARGET_NR_readlinkat)
6389 case TARGET_NR_readlinkat
:
6392 p
= lock_user_string(arg2
);
6393 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6395 ret
= -TARGET_EFAULT
;
6396 } else if (is_proc_myself((const char *)p
, "exe")) {
6397 char real
[PATH_MAX
], *temp
;
6398 temp
= realpath(exec_path
, real
);
6399 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
6400 snprintf((char *)p2
, arg4
, "%s", real
);
6402 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
6404 unlock_user(p2
, arg3
, ret
);
6405 unlock_user(p
, arg2
, 0);
6409 #ifdef TARGET_NR_uselib
6410 case TARGET_NR_uselib
:
6413 #ifdef TARGET_NR_swapon
6414 case TARGET_NR_swapon
:
6415 if (!(p
= lock_user_string(arg1
)))
6417 ret
= get_errno(swapon(p
, arg2
));
6418 unlock_user(p
, arg1
, 0);
6421 case TARGET_NR_reboot
:
6422 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6423 /* arg4 must be ignored in all other cases */
6424 p
= lock_user_string(arg4
);
6428 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6429 unlock_user(p
, arg4
, 0);
6431 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6434 #ifdef TARGET_NR_readdir
6435 case TARGET_NR_readdir
:
6438 #ifdef TARGET_NR_mmap
6439 case TARGET_NR_mmap
:
6440 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6441 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6442 || defined(TARGET_S390X)
6445 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6446 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6454 unlock_user(v
, arg1
, 0);
6455 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6456 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6460 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6461 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6467 #ifdef TARGET_NR_mmap2
6468 case TARGET_NR_mmap2
:
6470 #define MMAP_SHIFT 12
6472 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6473 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6475 arg6
<< MMAP_SHIFT
));
6478 case TARGET_NR_munmap
:
6479 ret
= get_errno(target_munmap(arg1
, arg2
));
6481 case TARGET_NR_mprotect
:
6483 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6484 /* Special hack to detect libc making the stack executable. */
6485 if ((arg3
& PROT_GROWSDOWN
)
6486 && arg1
>= ts
->info
->stack_limit
6487 && arg1
<= ts
->info
->start_stack
) {
6488 arg3
&= ~PROT_GROWSDOWN
;
6489 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6490 arg1
= ts
->info
->stack_limit
;
6493 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6495 #ifdef TARGET_NR_mremap
6496 case TARGET_NR_mremap
:
6497 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6500 /* ??? msync/mlock/munlock are broken for softmmu. */
6501 #ifdef TARGET_NR_msync
6502 case TARGET_NR_msync
:
6503 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6506 #ifdef TARGET_NR_mlock
6507 case TARGET_NR_mlock
:
6508 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6511 #ifdef TARGET_NR_munlock
6512 case TARGET_NR_munlock
:
6513 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6516 #ifdef TARGET_NR_mlockall
6517 case TARGET_NR_mlockall
:
6518 ret
= get_errno(mlockall(arg1
));
6521 #ifdef TARGET_NR_munlockall
6522 case TARGET_NR_munlockall
:
6523 ret
= get_errno(munlockall());
6526 case TARGET_NR_truncate
:
6527 if (!(p
= lock_user_string(arg1
)))
6529 ret
= get_errno(truncate(p
, arg2
));
6530 unlock_user(p
, arg1
, 0);
6532 case TARGET_NR_ftruncate
:
6533 ret
= get_errno(ftruncate(arg1
, arg2
));
6535 case TARGET_NR_fchmod
:
6536 ret
= get_errno(fchmod(arg1
, arg2
));
6538 #if defined(TARGET_NR_fchmodat)
6539 case TARGET_NR_fchmodat
:
6540 if (!(p
= lock_user_string(arg2
)))
6542 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
6543 unlock_user(p
, arg2
, 0);
6546 case TARGET_NR_getpriority
:
6547 /* Note that negative values are valid for getpriority, so we must
6548 differentiate based on errno settings. */
6550 ret
= getpriority(arg1
, arg2
);
6551 if (ret
== -1 && errno
!= 0) {
6552 ret
= -host_to_target_errno(errno
);
6556 /* Return value is the unbiased priority. Signal no error. */
6557 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6559 /* Return value is a biased priority to avoid negative numbers. */
6563 case TARGET_NR_setpriority
:
6564 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6566 #ifdef TARGET_NR_profil
6567 case TARGET_NR_profil
:
6570 case TARGET_NR_statfs
:
6571 if (!(p
= lock_user_string(arg1
)))
6573 ret
= get_errno(statfs(path(p
), &stfs
));
6574 unlock_user(p
, arg1
, 0);
6576 if (!is_error(ret
)) {
6577 struct target_statfs
*target_stfs
;
6579 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6581 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6582 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6583 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6584 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6585 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6586 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6587 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6588 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6589 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6590 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6591 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6592 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6593 unlock_user_struct(target_stfs
, arg2
, 1);
6596 case TARGET_NR_fstatfs
:
6597 ret
= get_errno(fstatfs(arg1
, &stfs
));
6598 goto convert_statfs
;
6599 #ifdef TARGET_NR_statfs64
6600 case TARGET_NR_statfs64
:
6601 if (!(p
= lock_user_string(arg1
)))
6603 ret
= get_errno(statfs(path(p
), &stfs
));
6604 unlock_user(p
, arg1
, 0);
6606 if (!is_error(ret
)) {
6607 struct target_statfs64
*target_stfs
;
6609 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6611 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6612 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6613 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6614 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6615 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6616 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6617 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6618 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6619 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6620 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6621 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6622 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6623 unlock_user_struct(target_stfs
, arg3
, 1);
6626 case TARGET_NR_fstatfs64
:
6627 ret
= get_errno(fstatfs(arg1
, &stfs
));
6628 goto convert_statfs64
;
6630 #ifdef TARGET_NR_ioperm
6631 case TARGET_NR_ioperm
:
6634 #ifdef TARGET_NR_socketcall
6635 case TARGET_NR_socketcall
:
6636 ret
= do_socketcall(arg1
, arg2
);
6639 #ifdef TARGET_NR_accept
6640 case TARGET_NR_accept
:
6641 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
6644 #ifdef TARGET_NR_accept4
6645 case TARGET_NR_accept4
:
6646 #ifdef CONFIG_ACCEPT4
6647 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
6653 #ifdef TARGET_NR_bind
6654 case TARGET_NR_bind
:
6655 ret
= do_bind(arg1
, arg2
, arg3
);
6658 #ifdef TARGET_NR_connect
6659 case TARGET_NR_connect
:
6660 ret
= do_connect(arg1
, arg2
, arg3
);
6663 #ifdef TARGET_NR_getpeername
6664 case TARGET_NR_getpeername
:
6665 ret
= do_getpeername(arg1
, arg2
, arg3
);
6668 #ifdef TARGET_NR_getsockname
6669 case TARGET_NR_getsockname
:
6670 ret
= do_getsockname(arg1
, arg2
, arg3
);
6673 #ifdef TARGET_NR_getsockopt
6674 case TARGET_NR_getsockopt
:
6675 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6678 #ifdef TARGET_NR_listen
6679 case TARGET_NR_listen
:
6680 ret
= get_errno(listen(arg1
, arg2
));
6683 #ifdef TARGET_NR_recv
6684 case TARGET_NR_recv
:
6685 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6688 #ifdef TARGET_NR_recvfrom
6689 case TARGET_NR_recvfrom
:
6690 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6693 #ifdef TARGET_NR_recvmsg
6694 case TARGET_NR_recvmsg
:
6695 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6698 #ifdef TARGET_NR_send
6699 case TARGET_NR_send
:
6700 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6703 #ifdef TARGET_NR_sendmsg
6704 case TARGET_NR_sendmsg
:
6705 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6708 #ifdef TARGET_NR_sendto
6709 case TARGET_NR_sendto
:
6710 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6713 #ifdef TARGET_NR_shutdown
6714 case TARGET_NR_shutdown
:
6715 ret
= get_errno(shutdown(arg1
, arg2
));
6718 #ifdef TARGET_NR_socket
6719 case TARGET_NR_socket
:
6720 ret
= do_socket(arg1
, arg2
, arg3
);
6723 #ifdef TARGET_NR_socketpair
6724 case TARGET_NR_socketpair
:
6725 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6728 #ifdef TARGET_NR_setsockopt
6729 case TARGET_NR_setsockopt
:
6730 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6734 case TARGET_NR_syslog
:
6735 if (!(p
= lock_user_string(arg2
)))
6737 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6738 unlock_user(p
, arg2
, 0);
6741 case TARGET_NR_setitimer
:
6743 struct itimerval value
, ovalue
, *pvalue
;
6747 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6748 || copy_from_user_timeval(&pvalue
->it_value
,
6749 arg2
+ sizeof(struct target_timeval
)))
6754 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6755 if (!is_error(ret
) && arg3
) {
6756 if (copy_to_user_timeval(arg3
,
6757 &ovalue
.it_interval
)
6758 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6764 case TARGET_NR_getitimer
:
6766 struct itimerval value
;
6768 ret
= get_errno(getitimer(arg1
, &value
));
6769 if (!is_error(ret
) && arg2
) {
6770 if (copy_to_user_timeval(arg2
,
6772 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6778 case TARGET_NR_stat
:
6779 if (!(p
= lock_user_string(arg1
)))
6781 ret
= get_errno(stat(path(p
), &st
));
6782 unlock_user(p
, arg1
, 0);
6784 case TARGET_NR_lstat
:
6785 if (!(p
= lock_user_string(arg1
)))
6787 ret
= get_errno(lstat(path(p
), &st
));
6788 unlock_user(p
, arg1
, 0);
6790 case TARGET_NR_fstat
:
6792 ret
= get_errno(fstat(arg1
, &st
));
6794 if (!is_error(ret
)) {
6795 struct target_stat
*target_st
;
6797 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6799 memset(target_st
, 0, sizeof(*target_st
));
6800 __put_user(st
.st_dev
, &target_st
->st_dev
);
6801 __put_user(st
.st_ino
, &target_st
->st_ino
);
6802 __put_user(st
.st_mode
, &target_st
->st_mode
);
6803 __put_user(st
.st_uid
, &target_st
->st_uid
);
6804 __put_user(st
.st_gid
, &target_st
->st_gid
);
6805 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6806 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6807 __put_user(st
.st_size
, &target_st
->st_size
);
6808 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6809 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6810 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6811 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6812 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6813 unlock_user_struct(target_st
, arg2
, 1);
6817 #ifdef TARGET_NR_olduname
6818 case TARGET_NR_olduname
:
6821 #ifdef TARGET_NR_iopl
6822 case TARGET_NR_iopl
:
6825 case TARGET_NR_vhangup
:
6826 ret
= get_errno(vhangup());
6828 #ifdef TARGET_NR_idle
6829 case TARGET_NR_idle
:
6832 #ifdef TARGET_NR_syscall
6833 case TARGET_NR_syscall
:
6834 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6835 arg6
, arg7
, arg8
, 0);
6838 case TARGET_NR_wait4
:
6841 abi_long status_ptr
= arg2
;
6842 struct rusage rusage
, *rusage_ptr
;
6843 abi_ulong target_rusage
= arg4
;
6845 rusage_ptr
= &rusage
;
6848 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6849 if (!is_error(ret
)) {
6850 if (status_ptr
&& ret
) {
6851 status
= host_to_target_waitstatus(status
);
6852 if (put_user_s32(status
, status_ptr
))
6856 host_to_target_rusage(target_rusage
, &rusage
);
6860 #ifdef TARGET_NR_swapoff
6861 case TARGET_NR_swapoff
:
6862 if (!(p
= lock_user_string(arg1
)))
6864 ret
= get_errno(swapoff(p
));
6865 unlock_user(p
, arg1
, 0);
6868 case TARGET_NR_sysinfo
:
6870 struct target_sysinfo
*target_value
;
6871 struct sysinfo value
;
6872 ret
= get_errno(sysinfo(&value
));
6873 if (!is_error(ret
) && arg1
)
6875 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6877 __put_user(value
.uptime
, &target_value
->uptime
);
6878 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6879 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6880 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6881 __put_user(value
.totalram
, &target_value
->totalram
);
6882 __put_user(value
.freeram
, &target_value
->freeram
);
6883 __put_user(value
.sharedram
, &target_value
->sharedram
);
6884 __put_user(value
.bufferram
, &target_value
->bufferram
);
6885 __put_user(value
.totalswap
, &target_value
->totalswap
);
6886 __put_user(value
.freeswap
, &target_value
->freeswap
);
6887 __put_user(value
.procs
, &target_value
->procs
);
6888 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6889 __put_user(value
.freehigh
, &target_value
->freehigh
);
6890 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6891 unlock_user_struct(target_value
, arg1
, 1);
6895 #ifdef TARGET_NR_ipc
6897 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6900 #ifdef TARGET_NR_semget
6901 case TARGET_NR_semget
:
6902 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6905 #ifdef TARGET_NR_semop
6906 case TARGET_NR_semop
:
6907 ret
= do_semop(arg1
, arg2
, arg3
);
6910 #ifdef TARGET_NR_semctl
6911 case TARGET_NR_semctl
:
6912 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6915 #ifdef TARGET_NR_msgctl
6916 case TARGET_NR_msgctl
:
6917 ret
= do_msgctl(arg1
, arg2
, arg3
);
6920 #ifdef TARGET_NR_msgget
6921 case TARGET_NR_msgget
:
6922 ret
= get_errno(msgget(arg1
, arg2
));
6925 #ifdef TARGET_NR_msgrcv
6926 case TARGET_NR_msgrcv
:
6927 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6930 #ifdef TARGET_NR_msgsnd
6931 case TARGET_NR_msgsnd
:
6932 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6935 #ifdef TARGET_NR_shmget
6936 case TARGET_NR_shmget
:
6937 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6940 #ifdef TARGET_NR_shmctl
6941 case TARGET_NR_shmctl
:
6942 ret
= do_shmctl(arg1
, arg2
, arg3
);
6945 #ifdef TARGET_NR_shmat
6946 case TARGET_NR_shmat
:
6947 ret
= do_shmat(arg1
, arg2
, arg3
);
6950 #ifdef TARGET_NR_shmdt
6951 case TARGET_NR_shmdt
:
6952 ret
= do_shmdt(arg1
);
6955 case TARGET_NR_fsync
:
6956 ret
= get_errno(fsync(arg1
));
6958 case TARGET_NR_clone
:
6959 /* Linux manages to have three different orderings for its
6960 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
6961 * match the kernel's CONFIG_CLONE_* settings.
6962 * Microblaze is further special in that it uses a sixth
6963 * implicit argument to clone for the TLS pointer.
6965 #if defined(TARGET_MICROBLAZE)
6966 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
6967 #elif defined(TARGET_CLONE_BACKWARDS)
6968 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6969 #elif defined(TARGET_CLONE_BACKWARDS2)
6970 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6972 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6975 #ifdef __NR_exit_group
6976 /* new thread calls */
6977 case TARGET_NR_exit_group
:
6981 gdb_exit(cpu_env
, arg1
);
6982 ret
= get_errno(exit_group(arg1
));
6985 case TARGET_NR_setdomainname
:
6986 if (!(p
= lock_user_string(arg1
)))
6988 ret
= get_errno(setdomainname(p
, arg2
));
6989 unlock_user(p
, arg1
, 0);
6991 case TARGET_NR_uname
:
6992 /* no need to transcode because we use the linux syscall */
6994 struct new_utsname
* buf
;
6996 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6998 ret
= get_errno(sys_uname(buf
));
6999 if (!is_error(ret
)) {
7000 /* Overrite the native machine name with whatever is being
7002 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7003 /* Allow the user to override the reported release. */
7004 if (qemu_uname_release
&& *qemu_uname_release
)
7005 strcpy (buf
->release
, qemu_uname_release
);
7007 unlock_user_struct(buf
, arg1
, 1);
7011 case TARGET_NR_modify_ldt
:
7012 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7014 #if !defined(TARGET_X86_64)
7015 case TARGET_NR_vm86old
:
7017 case TARGET_NR_vm86
:
7018 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7022 case TARGET_NR_adjtimex
:
7024 #ifdef TARGET_NR_create_module
7025 case TARGET_NR_create_module
:
7027 case TARGET_NR_init_module
:
7028 case TARGET_NR_delete_module
:
7029 #ifdef TARGET_NR_get_kernel_syms
7030 case TARGET_NR_get_kernel_syms
:
7033 case TARGET_NR_quotactl
:
7035 case TARGET_NR_getpgid
:
7036 ret
= get_errno(getpgid(arg1
));
7038 case TARGET_NR_fchdir
:
7039 ret
= get_errno(fchdir(arg1
));
7041 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7042 case TARGET_NR_bdflush
:
7045 #ifdef TARGET_NR_sysfs
7046 case TARGET_NR_sysfs
:
7049 case TARGET_NR_personality
:
7050 ret
= get_errno(personality(arg1
));
7052 #ifdef TARGET_NR_afs_syscall
7053 case TARGET_NR_afs_syscall
:
7056 #ifdef TARGET_NR__llseek /* Not on alpha */
7057 case TARGET_NR__llseek
:
7060 #if !defined(__NR_llseek)
7061 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7063 ret
= get_errno(res
);
7068 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7070 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7076 case TARGET_NR_getdents
:
7077 #ifdef __NR_getdents
7078 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7080 struct target_dirent
*target_dirp
;
7081 struct linux_dirent
*dirp
;
7082 abi_long count
= arg3
;
7084 dirp
= malloc(count
);
7086 ret
= -TARGET_ENOMEM
;
7090 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7091 if (!is_error(ret
)) {
7092 struct linux_dirent
*de
;
7093 struct target_dirent
*tde
;
7095 int reclen
, treclen
;
7096 int count1
, tnamelen
;
7100 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7104 reclen
= de
->d_reclen
;
7105 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7106 assert(tnamelen
>= 0);
7107 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7108 assert(count1
+ treclen
<= count
);
7109 tde
->d_reclen
= tswap16(treclen
);
7110 tde
->d_ino
= tswapal(de
->d_ino
);
7111 tde
->d_off
= tswapal(de
->d_off
);
7112 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7113 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7115 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7119 unlock_user(target_dirp
, arg2
, ret
);
7125 struct linux_dirent
*dirp
;
7126 abi_long count
= arg3
;
7128 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7130 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7131 if (!is_error(ret
)) {
7132 struct linux_dirent
*de
;
7137 reclen
= de
->d_reclen
;
7140 de
->d_reclen
= tswap16(reclen
);
7141 tswapls(&de
->d_ino
);
7142 tswapls(&de
->d_off
);
7143 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7147 unlock_user(dirp
, arg2
, ret
);
7151 /* Implement getdents in terms of getdents64 */
7153 struct linux_dirent64
*dirp
;
7154 abi_long count
= arg3
;
7156 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
7160 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7161 if (!is_error(ret
)) {
7162 /* Convert the dirent64 structs to target dirent. We do this
7163 * in-place, since we can guarantee that a target_dirent is no
7164 * larger than a dirent64; however this means we have to be
7165 * careful to read everything before writing in the new format.
7167 struct linux_dirent64
*de
;
7168 struct target_dirent
*tde
;
7173 tde
= (struct target_dirent
*)dirp
;
7175 int namelen
, treclen
;
7176 int reclen
= de
->d_reclen
;
7177 uint64_t ino
= de
->d_ino
;
7178 int64_t off
= de
->d_off
;
7179 uint8_t type
= de
->d_type
;
7181 namelen
= strlen(de
->d_name
);
7182 treclen
= offsetof(struct target_dirent
, d_name
)
7184 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
7186 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
7187 tde
->d_ino
= tswapal(ino
);
7188 tde
->d_off
= tswapal(off
);
7189 tde
->d_reclen
= tswap16(treclen
);
7190 /* The target_dirent type is in what was formerly a padding
7191 * byte at the end of the structure:
7193 *(((char *)tde
) + treclen
- 1) = type
;
7195 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7196 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7202 unlock_user(dirp
, arg2
, ret
);
7206 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7207 case TARGET_NR_getdents64
:
7209 struct linux_dirent64
*dirp
;
7210 abi_long count
= arg3
;
7211 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7213 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7214 if (!is_error(ret
)) {
7215 struct linux_dirent64
*de
;
7220 reclen
= de
->d_reclen
;
7223 de
->d_reclen
= tswap16(reclen
);
7224 tswap64s((uint64_t *)&de
->d_ino
);
7225 tswap64s((uint64_t *)&de
->d_off
);
7226 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7230 unlock_user(dirp
, arg2
, ret
);
7233 #endif /* TARGET_NR_getdents64 */
7234 #if defined(TARGET_NR__newselect)
7235 case TARGET_NR__newselect
:
7236 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7239 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7240 # ifdef TARGET_NR_poll
7241 case TARGET_NR_poll
:
7243 # ifdef TARGET_NR_ppoll
7244 case TARGET_NR_ppoll
:
7247 struct target_pollfd
*target_pfd
;
7248 unsigned int nfds
= arg2
;
7253 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7257 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7258 for(i
= 0; i
< nfds
; i
++) {
7259 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7260 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7263 # ifdef TARGET_NR_ppoll
7264 if (num
== TARGET_NR_ppoll
) {
7265 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7266 target_sigset_t
*target_set
;
7267 sigset_t _set
, *set
= &_set
;
7270 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7271 unlock_user(target_pfd
, arg1
, 0);
7279 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7281 unlock_user(target_pfd
, arg1
, 0);
7284 target_to_host_sigset(set
, target_set
);
7289 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7291 if (!is_error(ret
) && arg3
) {
7292 host_to_target_timespec(arg3
, timeout_ts
);
7295 unlock_user(target_set
, arg4
, 0);
7299 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7301 if (!is_error(ret
)) {
7302 for(i
= 0; i
< nfds
; i
++) {
7303 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7306 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7310 case TARGET_NR_flock
:
7311 /* NOTE: the flock constant seems to be the same for every
7313 ret
= get_errno(flock(arg1
, arg2
));
7315 case TARGET_NR_readv
:
7317 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7319 ret
= get_errno(readv(arg1
, vec
, arg3
));
7320 unlock_iovec(vec
, arg2
, arg3
, 1);
7322 ret
= -host_to_target_errno(errno
);
7326 case TARGET_NR_writev
:
7328 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7330 ret
= get_errno(writev(arg1
, vec
, arg3
));
7331 unlock_iovec(vec
, arg2
, arg3
, 0);
7333 ret
= -host_to_target_errno(errno
);
7337 case TARGET_NR_getsid
:
7338 ret
= get_errno(getsid(arg1
));
7340 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7341 case TARGET_NR_fdatasync
:
7342 ret
= get_errno(fdatasync(arg1
));
7345 case TARGET_NR__sysctl
:
7346 /* We don't implement this, but ENOTDIR is always a safe
7348 ret
= -TARGET_ENOTDIR
;
7350 case TARGET_NR_sched_getaffinity
:
7352 unsigned int mask_size
;
7353 unsigned long *mask
;
7356 * sched_getaffinity needs multiples of ulong, so need to take
7357 * care of mismatches between target ulong and host ulong sizes.
7359 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7360 ret
= -TARGET_EINVAL
;
7363 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7365 mask
= alloca(mask_size
);
7366 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7368 if (!is_error(ret
)) {
7369 if (copy_to_user(arg3
, mask
, ret
)) {
7375 case TARGET_NR_sched_setaffinity
:
7377 unsigned int mask_size
;
7378 unsigned long *mask
;
7381 * sched_setaffinity needs multiples of ulong, so need to take
7382 * care of mismatches between target ulong and host ulong sizes.
7384 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7385 ret
= -TARGET_EINVAL
;
7388 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7390 mask
= alloca(mask_size
);
7391 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7394 memcpy(mask
, p
, arg2
);
7395 unlock_user_struct(p
, arg2
, 0);
7397 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7400 case TARGET_NR_sched_setparam
:
7402 struct sched_param
*target_schp
;
7403 struct sched_param schp
;
7405 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7407 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7408 unlock_user_struct(target_schp
, arg2
, 0);
7409 ret
= get_errno(sched_setparam(arg1
, &schp
));
7412 case TARGET_NR_sched_getparam
:
7414 struct sched_param
*target_schp
;
7415 struct sched_param schp
;
7416 ret
= get_errno(sched_getparam(arg1
, &schp
));
7417 if (!is_error(ret
)) {
7418 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7420 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7421 unlock_user_struct(target_schp
, arg2
, 1);
7425 case TARGET_NR_sched_setscheduler
:
7427 struct sched_param
*target_schp
;
7428 struct sched_param schp
;
7429 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7431 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7432 unlock_user_struct(target_schp
, arg3
, 0);
7433 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7436 case TARGET_NR_sched_getscheduler
:
7437 ret
= get_errno(sched_getscheduler(arg1
));
7439 case TARGET_NR_sched_yield
:
7440 ret
= get_errno(sched_yield());
7442 case TARGET_NR_sched_get_priority_max
:
7443 ret
= get_errno(sched_get_priority_max(arg1
));
7445 case TARGET_NR_sched_get_priority_min
:
7446 ret
= get_errno(sched_get_priority_min(arg1
));
7448 case TARGET_NR_sched_rr_get_interval
:
7451 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7452 if (!is_error(ret
)) {
7453 host_to_target_timespec(arg2
, &ts
);
7457 case TARGET_NR_nanosleep
:
7459 struct timespec req
, rem
;
7460 target_to_host_timespec(&req
, arg1
);
7461 ret
= get_errno(nanosleep(&req
, &rem
));
7462 if (is_error(ret
) && arg2
) {
7463 host_to_target_timespec(arg2
, &rem
);
7467 #ifdef TARGET_NR_query_module
7468 case TARGET_NR_query_module
:
7471 #ifdef TARGET_NR_nfsservctl
7472 case TARGET_NR_nfsservctl
:
7475 case TARGET_NR_prctl
:
7477 case PR_GET_PDEATHSIG
:
7480 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7481 if (!is_error(ret
) && arg2
7482 && put_user_ual(deathsig
, arg2
)) {
7490 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7494 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7496 unlock_user(name
, arg2
, 16);
7501 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7505 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7507 unlock_user(name
, arg2
, 0);
7512 /* Most prctl options have no pointer arguments */
7513 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7517 #ifdef TARGET_NR_arch_prctl
7518 case TARGET_NR_arch_prctl
:
7519 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7520 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7526 #ifdef TARGET_NR_pread64
7527 case TARGET_NR_pread64
:
7528 if (regpairs_aligned(cpu_env
)) {
7532 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7534 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7535 unlock_user(p
, arg2
, ret
);
7537 case TARGET_NR_pwrite64
:
7538 if (regpairs_aligned(cpu_env
)) {
7542 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7544 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7545 unlock_user(p
, arg2
, 0);
7548 case TARGET_NR_getcwd
:
7549 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7551 ret
= get_errno(sys_getcwd1(p
, arg2
));
7552 unlock_user(p
, arg1
, ret
);
7554 case TARGET_NR_capget
:
7556 case TARGET_NR_capset
:
7558 case TARGET_NR_sigaltstack
:
7559 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7560 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7561 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7562 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7568 #ifdef CONFIG_SENDFILE
7569 case TARGET_NR_sendfile
:
7574 ret
= get_user_sal(off
, arg3
);
7575 if (is_error(ret
)) {
7580 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7581 if (!is_error(ret
) && arg3
) {
7582 abi_long ret2
= put_user_sal(off
, arg3
);
7583 if (is_error(ret2
)) {
7589 #ifdef TARGET_NR_sendfile64
7590 case TARGET_NR_sendfile64
:
7595 ret
= get_user_s64(off
, arg3
);
7596 if (is_error(ret
)) {
7601 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7602 if (!is_error(ret
) && arg3
) {
7603 abi_long ret2
= put_user_s64(off
, arg3
);
7604 if (is_error(ret2
)) {
7612 case TARGET_NR_sendfile
:
7613 #ifdef TARGET_NR_sendfile64
7614 case TARGET_NR_sendfile64
:
7619 #ifdef TARGET_NR_getpmsg
7620 case TARGET_NR_getpmsg
:
7623 #ifdef TARGET_NR_putpmsg
7624 case TARGET_NR_putpmsg
:
7627 #ifdef TARGET_NR_vfork
7628 case TARGET_NR_vfork
:
7629 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7633 #ifdef TARGET_NR_ugetrlimit
7634 case TARGET_NR_ugetrlimit
:
7637 int resource
= target_to_host_resource(arg1
);
7638 ret
= get_errno(getrlimit(resource
, &rlim
));
7639 if (!is_error(ret
)) {
7640 struct target_rlimit
*target_rlim
;
7641 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7643 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7644 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7645 unlock_user_struct(target_rlim
, arg2
, 1);
7650 #ifdef TARGET_NR_truncate64
7651 case TARGET_NR_truncate64
:
7652 if (!(p
= lock_user_string(arg1
)))
7654 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7655 unlock_user(p
, arg1
, 0);
7658 #ifdef TARGET_NR_ftruncate64
7659 case TARGET_NR_ftruncate64
:
7660 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7663 #ifdef TARGET_NR_stat64
7664 case TARGET_NR_stat64
:
7665 if (!(p
= lock_user_string(arg1
)))
7667 ret
= get_errno(stat(path(p
), &st
));
7668 unlock_user(p
, arg1
, 0);
7670 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7673 #ifdef TARGET_NR_lstat64
7674 case TARGET_NR_lstat64
:
7675 if (!(p
= lock_user_string(arg1
)))
7677 ret
= get_errno(lstat(path(p
), &st
));
7678 unlock_user(p
, arg1
, 0);
7680 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7683 #ifdef TARGET_NR_fstat64
7684 case TARGET_NR_fstat64
:
7685 ret
= get_errno(fstat(arg1
, &st
));
7687 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7690 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7691 #ifdef TARGET_NR_fstatat64
7692 case TARGET_NR_fstatat64
:
7694 #ifdef TARGET_NR_newfstatat
7695 case TARGET_NR_newfstatat
:
7697 if (!(p
= lock_user_string(arg2
)))
7699 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
7701 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7704 case TARGET_NR_lchown
:
7705 if (!(p
= lock_user_string(arg1
)))
7707 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7708 unlock_user(p
, arg1
, 0);
7710 #ifdef TARGET_NR_getuid
7711 case TARGET_NR_getuid
:
7712 ret
= get_errno(high2lowuid(getuid()));
7715 #ifdef TARGET_NR_getgid
7716 case TARGET_NR_getgid
:
7717 ret
= get_errno(high2lowgid(getgid()));
7720 #ifdef TARGET_NR_geteuid
7721 case TARGET_NR_geteuid
:
7722 ret
= get_errno(high2lowuid(geteuid()));
7725 #ifdef TARGET_NR_getegid
7726 case TARGET_NR_getegid
:
7727 ret
= get_errno(high2lowgid(getegid()));
7730 case TARGET_NR_setreuid
:
7731 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7733 case TARGET_NR_setregid
:
7734 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7736 case TARGET_NR_getgroups
:
7738 int gidsetsize
= arg1
;
7739 target_id
*target_grouplist
;
7743 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7744 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7745 if (gidsetsize
== 0)
7747 if (!is_error(ret
)) {
7748 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
7749 if (!target_grouplist
)
7751 for(i
= 0;i
< ret
; i
++)
7752 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7753 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
7757 case TARGET_NR_setgroups
:
7759 int gidsetsize
= arg1
;
7760 target_id
*target_grouplist
;
7761 gid_t
*grouplist
= NULL
;
7764 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7765 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
7766 if (!target_grouplist
) {
7767 ret
= -TARGET_EFAULT
;
7770 for (i
= 0; i
< gidsetsize
; i
++) {
7771 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7773 unlock_user(target_grouplist
, arg2
, 0);
7775 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7778 case TARGET_NR_fchown
:
7779 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7781 #if defined(TARGET_NR_fchownat)
7782 case TARGET_NR_fchownat
:
7783 if (!(p
= lock_user_string(arg2
)))
7785 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
7786 low2highgid(arg4
), arg5
));
7787 unlock_user(p
, arg2
, 0);
7790 #ifdef TARGET_NR_setresuid
7791 case TARGET_NR_setresuid
:
7792 ret
= get_errno(setresuid(low2highuid(arg1
),
7794 low2highuid(arg3
)));
7797 #ifdef TARGET_NR_getresuid
7798 case TARGET_NR_getresuid
:
7800 uid_t ruid
, euid
, suid
;
7801 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7802 if (!is_error(ret
)) {
7803 if (put_user_u16(high2lowuid(ruid
), arg1
)
7804 || put_user_u16(high2lowuid(euid
), arg2
)
7805 || put_user_u16(high2lowuid(suid
), arg3
))
7811 #ifdef TARGET_NR_getresgid
7812 case TARGET_NR_setresgid
:
7813 ret
= get_errno(setresgid(low2highgid(arg1
),
7815 low2highgid(arg3
)));
7818 #ifdef TARGET_NR_getresgid
7819 case TARGET_NR_getresgid
:
7821 gid_t rgid
, egid
, sgid
;
7822 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7823 if (!is_error(ret
)) {
7824 if (put_user_u16(high2lowgid(rgid
), arg1
)
7825 || put_user_u16(high2lowgid(egid
), arg2
)
7826 || put_user_u16(high2lowgid(sgid
), arg3
))
7832 case TARGET_NR_chown
:
7833 if (!(p
= lock_user_string(arg1
)))
7835 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7836 unlock_user(p
, arg1
, 0);
7838 case TARGET_NR_setuid
:
7839 ret
= get_errno(setuid(low2highuid(arg1
)));
7841 case TARGET_NR_setgid
:
7842 ret
= get_errno(setgid(low2highgid(arg1
)));
7844 case TARGET_NR_setfsuid
:
7845 ret
= get_errno(setfsuid(arg1
));
7847 case TARGET_NR_setfsgid
:
7848 ret
= get_errno(setfsgid(arg1
));
7851 #ifdef TARGET_NR_lchown32
7852 case TARGET_NR_lchown32
:
7853 if (!(p
= lock_user_string(arg1
)))
7855 ret
= get_errno(lchown(p
, arg2
, arg3
));
7856 unlock_user(p
, arg1
, 0);
7859 #ifdef TARGET_NR_getuid32
7860 case TARGET_NR_getuid32
:
7861 ret
= get_errno(getuid());
7865 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7866 /* Alpha specific */
7867 case TARGET_NR_getxuid
:
7871 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7873 ret
= get_errno(getuid());
7876 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7877 /* Alpha specific */
7878 case TARGET_NR_getxgid
:
7882 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7884 ret
= get_errno(getgid());
7887 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7888 /* Alpha specific */
7889 case TARGET_NR_osf_getsysinfo
:
7890 ret
= -TARGET_EOPNOTSUPP
;
7892 case TARGET_GSI_IEEE_FP_CONTROL
:
7894 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7896 /* Copied from linux ieee_fpcr_to_swcr. */
7897 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7898 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7899 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7900 | SWCR_TRAP_ENABLE_DZE
7901 | SWCR_TRAP_ENABLE_OVF
);
7902 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7903 | SWCR_TRAP_ENABLE_INE
);
7904 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7905 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7907 if (put_user_u64 (swcr
, arg2
))
7913 /* case GSI_IEEE_STATE_AT_SIGNAL:
7914 -- Not implemented in linux kernel.
7916 -- Retrieves current unaligned access state; not much used.
7918 -- Retrieves implver information; surely not used.
7920 -- Grabs a copy of the HWRPB; surely not used.
7925 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7926 /* Alpha specific */
7927 case TARGET_NR_osf_setsysinfo
:
7928 ret
= -TARGET_EOPNOTSUPP
;
7930 case TARGET_SSI_IEEE_FP_CONTROL
:
7932 uint64_t swcr
, fpcr
, orig_fpcr
;
7934 if (get_user_u64 (swcr
, arg2
)) {
7937 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7938 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7940 /* Copied from linux ieee_swcr_to_fpcr. */
7941 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7942 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7943 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7944 | SWCR_TRAP_ENABLE_DZE
7945 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7946 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7947 | SWCR_TRAP_ENABLE_INE
)) << 57;
7948 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7949 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7951 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7956 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7958 uint64_t exc
, fpcr
, orig_fpcr
;
7961 if (get_user_u64(exc
, arg2
)) {
7965 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7967 /* We only add to the exception status here. */
7968 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
7970 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7973 /* Old exceptions are not signaled. */
7974 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7976 /* If any exceptions set by this call,
7977 and are unmasked, send a signal. */
7979 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
7980 si_code
= TARGET_FPE_FLTRES
;
7982 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
7983 si_code
= TARGET_FPE_FLTUND
;
7985 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
7986 si_code
= TARGET_FPE_FLTOVF
;
7988 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
7989 si_code
= TARGET_FPE_FLTDIV
;
7991 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
7992 si_code
= TARGET_FPE_FLTINV
;
7995 target_siginfo_t info
;
7996 info
.si_signo
= SIGFPE
;
7998 info
.si_code
= si_code
;
7999 info
._sifields
._sigfault
._addr
8000 = ((CPUArchState
*)cpu_env
)->pc
;
8001 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
8006 /* case SSI_NVPAIRS:
8007 -- Used with SSIN_UACPROC to enable unaligned accesses.
8008 case SSI_IEEE_STATE_AT_SIGNAL:
8009 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8010 -- Not implemented in linux kernel
8015 #ifdef TARGET_NR_osf_sigprocmask
8016 /* Alpha specific. */
8017 case TARGET_NR_osf_sigprocmask
:
8021 sigset_t set
, oldset
;
8024 case TARGET_SIG_BLOCK
:
8027 case TARGET_SIG_UNBLOCK
:
8030 case TARGET_SIG_SETMASK
:
8034 ret
= -TARGET_EINVAL
;
8038 target_to_host_old_sigset(&set
, &mask
);
8039 sigprocmask(how
, &set
, &oldset
);
8040 host_to_target_old_sigset(&mask
, &oldset
);
8046 #ifdef TARGET_NR_getgid32
8047 case TARGET_NR_getgid32
:
8048 ret
= get_errno(getgid());
8051 #ifdef TARGET_NR_geteuid32
8052 case TARGET_NR_geteuid32
:
8053 ret
= get_errno(geteuid());
8056 #ifdef TARGET_NR_getegid32
8057 case TARGET_NR_getegid32
:
8058 ret
= get_errno(getegid());
8061 #ifdef TARGET_NR_setreuid32
8062 case TARGET_NR_setreuid32
:
8063 ret
= get_errno(setreuid(arg1
, arg2
));
8066 #ifdef TARGET_NR_setregid32
8067 case TARGET_NR_setregid32
:
8068 ret
= get_errno(setregid(arg1
, arg2
));
8071 #ifdef TARGET_NR_getgroups32
8072 case TARGET_NR_getgroups32
:
8074 int gidsetsize
= arg1
;
8075 uint32_t *target_grouplist
;
8079 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8080 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8081 if (gidsetsize
== 0)
8083 if (!is_error(ret
)) {
8084 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8085 if (!target_grouplist
) {
8086 ret
= -TARGET_EFAULT
;
8089 for(i
= 0;i
< ret
; i
++)
8090 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8091 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8096 #ifdef TARGET_NR_setgroups32
8097 case TARGET_NR_setgroups32
:
8099 int gidsetsize
= arg1
;
8100 uint32_t *target_grouplist
;
8104 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8105 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8106 if (!target_grouplist
) {
8107 ret
= -TARGET_EFAULT
;
8110 for(i
= 0;i
< gidsetsize
; i
++)
8111 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8112 unlock_user(target_grouplist
, arg2
, 0);
8113 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8117 #ifdef TARGET_NR_fchown32
8118 case TARGET_NR_fchown32
:
8119 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8122 #ifdef TARGET_NR_setresuid32
8123 case TARGET_NR_setresuid32
:
8124 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8127 #ifdef TARGET_NR_getresuid32
8128 case TARGET_NR_getresuid32
:
8130 uid_t ruid
, euid
, suid
;
8131 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8132 if (!is_error(ret
)) {
8133 if (put_user_u32(ruid
, arg1
)
8134 || put_user_u32(euid
, arg2
)
8135 || put_user_u32(suid
, arg3
))
8141 #ifdef TARGET_NR_setresgid32
8142 case TARGET_NR_setresgid32
:
8143 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8146 #ifdef TARGET_NR_getresgid32
8147 case TARGET_NR_getresgid32
:
8149 gid_t rgid
, egid
, sgid
;
8150 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8151 if (!is_error(ret
)) {
8152 if (put_user_u32(rgid
, arg1
)
8153 || put_user_u32(egid
, arg2
)
8154 || put_user_u32(sgid
, arg3
))
8160 #ifdef TARGET_NR_chown32
8161 case TARGET_NR_chown32
:
8162 if (!(p
= lock_user_string(arg1
)))
8164 ret
= get_errno(chown(p
, arg2
, arg3
));
8165 unlock_user(p
, arg1
, 0);
8168 #ifdef TARGET_NR_setuid32
8169 case TARGET_NR_setuid32
:
8170 ret
= get_errno(setuid(arg1
));
8173 #ifdef TARGET_NR_setgid32
8174 case TARGET_NR_setgid32
:
8175 ret
= get_errno(setgid(arg1
));
8178 #ifdef TARGET_NR_setfsuid32
8179 case TARGET_NR_setfsuid32
:
8180 ret
= get_errno(setfsuid(arg1
));
8183 #ifdef TARGET_NR_setfsgid32
8184 case TARGET_NR_setfsgid32
:
8185 ret
= get_errno(setfsgid(arg1
));
8189 case TARGET_NR_pivot_root
:
8191 #ifdef TARGET_NR_mincore
8192 case TARGET_NR_mincore
:
8195 ret
= -TARGET_EFAULT
;
8196 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8198 if (!(p
= lock_user_string(arg3
)))
8200 ret
= get_errno(mincore(a
, arg2
, p
));
8201 unlock_user(p
, arg3
, ret
);
8203 unlock_user(a
, arg1
, 0);
8207 #ifdef TARGET_NR_arm_fadvise64_64
8208 case TARGET_NR_arm_fadvise64_64
:
8211 * arm_fadvise64_64 looks like fadvise64_64 but
8212 * with different argument order
8220 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8221 #ifdef TARGET_NR_fadvise64_64
8222 case TARGET_NR_fadvise64_64
:
8224 #ifdef TARGET_NR_fadvise64
8225 case TARGET_NR_fadvise64
:
8229 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8230 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8231 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8232 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8236 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8239 #ifdef TARGET_NR_madvise
8240 case TARGET_NR_madvise
:
8241 /* A straight passthrough may not be safe because qemu sometimes
8242 turns private file-backed mappings into anonymous mappings.
8243 This will break MADV_DONTNEED.
8244 This is a hint, so ignoring and returning success is ok. */
8248 #if TARGET_ABI_BITS == 32
8249 case TARGET_NR_fcntl64
:
8253 struct target_flock64
*target_fl
;
8255 struct target_eabi_flock64
*target_efl
;
8258 cmd
= target_to_host_fcntl_cmd(arg2
);
8259 if (cmd
== -TARGET_EINVAL
) {
8265 case TARGET_F_GETLK64
:
8267 if (((CPUARMState
*)cpu_env
)->eabi
) {
8268 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8270 fl
.l_type
= tswap16(target_efl
->l_type
);
8271 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8272 fl
.l_start
= tswap64(target_efl
->l_start
);
8273 fl
.l_len
= tswap64(target_efl
->l_len
);
8274 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8275 unlock_user_struct(target_efl
, arg3
, 0);
8279 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8281 fl
.l_type
= tswap16(target_fl
->l_type
);
8282 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8283 fl
.l_start
= tswap64(target_fl
->l_start
);
8284 fl
.l_len
= tswap64(target_fl
->l_len
);
8285 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8286 unlock_user_struct(target_fl
, arg3
, 0);
8288 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8291 if (((CPUARMState
*)cpu_env
)->eabi
) {
8292 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8294 target_efl
->l_type
= tswap16(fl
.l_type
);
8295 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8296 target_efl
->l_start
= tswap64(fl
.l_start
);
8297 target_efl
->l_len
= tswap64(fl
.l_len
);
8298 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8299 unlock_user_struct(target_efl
, arg3
, 1);
8303 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8305 target_fl
->l_type
= tswap16(fl
.l_type
);
8306 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8307 target_fl
->l_start
= tswap64(fl
.l_start
);
8308 target_fl
->l_len
= tswap64(fl
.l_len
);
8309 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8310 unlock_user_struct(target_fl
, arg3
, 1);
8315 case TARGET_F_SETLK64
:
8316 case TARGET_F_SETLKW64
:
8318 if (((CPUARMState
*)cpu_env
)->eabi
) {
8319 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8321 fl
.l_type
= tswap16(target_efl
->l_type
);
8322 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8323 fl
.l_start
= tswap64(target_efl
->l_start
);
8324 fl
.l_len
= tswap64(target_efl
->l_len
);
8325 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8326 unlock_user_struct(target_efl
, arg3
, 0);
8330 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8332 fl
.l_type
= tswap16(target_fl
->l_type
);
8333 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8334 fl
.l_start
= tswap64(target_fl
->l_start
);
8335 fl
.l_len
= tswap64(target_fl
->l_len
);
8336 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8337 unlock_user_struct(target_fl
, arg3
, 0);
8339 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8342 ret
= do_fcntl(arg1
, arg2
, arg3
);
8348 #ifdef TARGET_NR_cacheflush
8349 case TARGET_NR_cacheflush
:
8350 /* self-modifying code is handled automatically, so nothing needed */
8354 #ifdef TARGET_NR_security
8355 case TARGET_NR_security
:
8358 #ifdef TARGET_NR_getpagesize
8359 case TARGET_NR_getpagesize
:
8360 ret
= TARGET_PAGE_SIZE
;
8363 case TARGET_NR_gettid
:
8364 ret
= get_errno(gettid());
8366 #ifdef TARGET_NR_readahead
8367 case TARGET_NR_readahead
:
8368 #if TARGET_ABI_BITS == 32
8369 if (regpairs_aligned(cpu_env
)) {
8374 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8376 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8381 #ifdef TARGET_NR_setxattr
8382 case TARGET_NR_listxattr
:
8383 case TARGET_NR_llistxattr
:
8387 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8389 ret
= -TARGET_EFAULT
;
8393 p
= lock_user_string(arg1
);
8395 if (num
== TARGET_NR_listxattr
) {
8396 ret
= get_errno(listxattr(p
, b
, arg3
));
8398 ret
= get_errno(llistxattr(p
, b
, arg3
));
8401 ret
= -TARGET_EFAULT
;
8403 unlock_user(p
, arg1
, 0);
8404 unlock_user(b
, arg2
, arg3
);
8407 case TARGET_NR_flistxattr
:
8411 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8413 ret
= -TARGET_EFAULT
;
8417 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8418 unlock_user(b
, arg2
, arg3
);
8421 case TARGET_NR_setxattr
:
8422 case TARGET_NR_lsetxattr
:
8424 void *p
, *n
, *v
= 0;
8426 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8428 ret
= -TARGET_EFAULT
;
8432 p
= lock_user_string(arg1
);
8433 n
= lock_user_string(arg2
);
8435 if (num
== TARGET_NR_setxattr
) {
8436 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8438 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8441 ret
= -TARGET_EFAULT
;
8443 unlock_user(p
, arg1
, 0);
8444 unlock_user(n
, arg2
, 0);
8445 unlock_user(v
, arg3
, 0);
8448 case TARGET_NR_fsetxattr
:
8452 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8454 ret
= -TARGET_EFAULT
;
8458 n
= lock_user_string(arg2
);
8460 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8462 ret
= -TARGET_EFAULT
;
8464 unlock_user(n
, arg2
, 0);
8465 unlock_user(v
, arg3
, 0);
8468 case TARGET_NR_getxattr
:
8469 case TARGET_NR_lgetxattr
:
8471 void *p
, *n
, *v
= 0;
8473 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8475 ret
= -TARGET_EFAULT
;
8479 p
= lock_user_string(arg1
);
8480 n
= lock_user_string(arg2
);
8482 if (num
== TARGET_NR_getxattr
) {
8483 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8485 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8488 ret
= -TARGET_EFAULT
;
8490 unlock_user(p
, arg1
, 0);
8491 unlock_user(n
, arg2
, 0);
8492 unlock_user(v
, arg3
, arg4
);
8495 case TARGET_NR_fgetxattr
:
8499 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8501 ret
= -TARGET_EFAULT
;
8505 n
= lock_user_string(arg2
);
8507 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8509 ret
= -TARGET_EFAULT
;
8511 unlock_user(n
, arg2
, 0);
8512 unlock_user(v
, arg3
, arg4
);
8515 case TARGET_NR_removexattr
:
8516 case TARGET_NR_lremovexattr
:
8519 p
= lock_user_string(arg1
);
8520 n
= lock_user_string(arg2
);
8522 if (num
== TARGET_NR_removexattr
) {
8523 ret
= get_errno(removexattr(p
, n
));
8525 ret
= get_errno(lremovexattr(p
, n
));
8528 ret
= -TARGET_EFAULT
;
8530 unlock_user(p
, arg1
, 0);
8531 unlock_user(n
, arg2
, 0);
8534 case TARGET_NR_fremovexattr
:
8537 n
= lock_user_string(arg2
);
8539 ret
= get_errno(fremovexattr(arg1
, n
));
8541 ret
= -TARGET_EFAULT
;
8543 unlock_user(n
, arg2
, 0);
8547 #endif /* CONFIG_ATTR */
8548 #ifdef TARGET_NR_set_thread_area
8549 case TARGET_NR_set_thread_area
:
8550 #if defined(TARGET_MIPS)
8551 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8554 #elif defined(TARGET_CRIS)
8556 ret
= -TARGET_EINVAL
;
8558 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8562 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8563 ret
= do_set_thread_area(cpu_env
, arg1
);
8565 #elif defined(TARGET_M68K)
8567 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
8568 ts
->tp_value
= arg1
;
8572 goto unimplemented_nowarn
;
8575 #ifdef TARGET_NR_get_thread_area
8576 case TARGET_NR_get_thread_area
:
8577 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8578 ret
= do_get_thread_area(cpu_env
, arg1
);
8580 #elif defined(TARGET_M68K)
8582 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
8587 goto unimplemented_nowarn
;
8590 #ifdef TARGET_NR_getdomainname
8591 case TARGET_NR_getdomainname
:
8592 goto unimplemented_nowarn
;
8595 #ifdef TARGET_NR_clock_gettime
8596 case TARGET_NR_clock_gettime
:
8599 ret
= get_errno(clock_gettime(arg1
, &ts
));
8600 if (!is_error(ret
)) {
8601 host_to_target_timespec(arg2
, &ts
);
8606 #ifdef TARGET_NR_clock_getres
8607 case TARGET_NR_clock_getres
:
8610 ret
= get_errno(clock_getres(arg1
, &ts
));
8611 if (!is_error(ret
)) {
8612 host_to_target_timespec(arg2
, &ts
);
8617 #ifdef TARGET_NR_clock_nanosleep
8618 case TARGET_NR_clock_nanosleep
:
8621 target_to_host_timespec(&ts
, arg3
);
8622 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8624 host_to_target_timespec(arg4
, &ts
);
8629 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8630 case TARGET_NR_set_tid_address
:
8631 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8635 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8636 case TARGET_NR_tkill
:
8637 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8641 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8642 case TARGET_NR_tgkill
:
8643 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8644 target_to_host_signal(arg3
)));
8648 #ifdef TARGET_NR_set_robust_list
8649 case TARGET_NR_set_robust_list
:
8650 case TARGET_NR_get_robust_list
:
8651 /* The ABI for supporting robust futexes has userspace pass
8652 * the kernel a pointer to a linked list which is updated by
8653 * userspace after the syscall; the list is walked by the kernel
8654 * when the thread exits. Since the linked list in QEMU guest
8655 * memory isn't a valid linked list for the host and we have
8656 * no way to reliably intercept the thread-death event, we can't
8657 * support these. Silently return ENOSYS so that guest userspace
8658 * falls back to a non-robust futex implementation (which should
8659 * be OK except in the corner case of the guest crashing while
8660 * holding a mutex that is shared with another process via
8663 goto unimplemented_nowarn
;
8666 #if defined(TARGET_NR_utimensat)
8667 case TARGET_NR_utimensat
:
8669 struct timespec
*tsp
, ts
[2];
8673 target_to_host_timespec(ts
, arg3
);
8674 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8678 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8680 if (!(p
= lock_user_string(arg2
))) {
8681 ret
= -TARGET_EFAULT
;
8684 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8685 unlock_user(p
, arg2
, 0);
8690 #if defined(CONFIG_USE_NPTL)
8691 case TARGET_NR_futex
:
8692 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8695 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8696 case TARGET_NR_inotify_init
:
8697 ret
= get_errno(sys_inotify_init());
8700 #ifdef CONFIG_INOTIFY1
8701 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8702 case TARGET_NR_inotify_init1
:
8703 ret
= get_errno(sys_inotify_init1(arg1
));
8707 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8708 case TARGET_NR_inotify_add_watch
:
8709 p
= lock_user_string(arg2
);
8710 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8711 unlock_user(p
, arg2
, 0);
8714 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8715 case TARGET_NR_inotify_rm_watch
:
8716 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8720 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8721 case TARGET_NR_mq_open
:
8723 struct mq_attr posix_mq_attr
;
8725 p
= lock_user_string(arg1
- 1);
8727 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8728 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8729 unlock_user (p
, arg1
, 0);
8733 case TARGET_NR_mq_unlink
:
8734 p
= lock_user_string(arg1
- 1);
8735 ret
= get_errno(mq_unlink(p
));
8736 unlock_user (p
, arg1
, 0);
8739 case TARGET_NR_mq_timedsend
:
8743 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8745 target_to_host_timespec(&ts
, arg5
);
8746 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8747 host_to_target_timespec(arg5
, &ts
);
8750 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8751 unlock_user (p
, arg2
, arg3
);
8755 case TARGET_NR_mq_timedreceive
:
8760 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8762 target_to_host_timespec(&ts
, arg5
);
8763 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8764 host_to_target_timespec(arg5
, &ts
);
8767 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8768 unlock_user (p
, arg2
, arg3
);
8770 put_user_u32(prio
, arg4
);
8774 /* Not implemented for now... */
8775 /* case TARGET_NR_mq_notify: */
8778 case TARGET_NR_mq_getsetattr
:
8780 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8783 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8784 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8787 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8788 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8795 #ifdef CONFIG_SPLICE
8796 #ifdef TARGET_NR_tee
8799 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8803 #ifdef TARGET_NR_splice
8804 case TARGET_NR_splice
:
8806 loff_t loff_in
, loff_out
;
8807 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8809 get_user_u64(loff_in
, arg2
);
8810 ploff_in
= &loff_in
;
8813 get_user_u64(loff_out
, arg2
);
8814 ploff_out
= &loff_out
;
8816 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8820 #ifdef TARGET_NR_vmsplice
8821 case TARGET_NR_vmsplice
:
8823 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8825 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
8826 unlock_iovec(vec
, arg2
, arg3
, 0);
8828 ret
= -host_to_target_errno(errno
);
8833 #endif /* CONFIG_SPLICE */
8834 #ifdef CONFIG_EVENTFD
8835 #if defined(TARGET_NR_eventfd)
8836 case TARGET_NR_eventfd
:
8837 ret
= get_errno(eventfd(arg1
, 0));
8840 #if defined(TARGET_NR_eventfd2)
8841 case TARGET_NR_eventfd2
:
8843 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
8844 if (arg2
& TARGET_O_NONBLOCK
) {
8845 host_flags
|= O_NONBLOCK
;
8847 if (arg2
& TARGET_O_CLOEXEC
) {
8848 host_flags
|= O_CLOEXEC
;
8850 ret
= get_errno(eventfd(arg1
, host_flags
));
8854 #endif /* CONFIG_EVENTFD */
8855 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8856 case TARGET_NR_fallocate
:
8857 #if TARGET_ABI_BITS == 32
8858 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8859 target_offset64(arg5
, arg6
)));
8861 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
8865 #if defined(CONFIG_SYNC_FILE_RANGE)
8866 #if defined(TARGET_NR_sync_file_range)
8867 case TARGET_NR_sync_file_range
:
8868 #if TARGET_ABI_BITS == 32
8869 #if defined(TARGET_MIPS)
8870 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8871 target_offset64(arg5
, arg6
), arg7
));
8873 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
8874 target_offset64(arg4
, arg5
), arg6
));
8875 #endif /* !TARGET_MIPS */
8877 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
8881 #if defined(TARGET_NR_sync_file_range2)
8882 case TARGET_NR_sync_file_range2
:
8883 /* This is like sync_file_range but the arguments are reordered */
8884 #if TARGET_ABI_BITS == 32
8885 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8886 target_offset64(arg5
, arg6
), arg2
));
8888 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
8893 #if defined(CONFIG_EPOLL)
8894 #if defined(TARGET_NR_epoll_create)
8895 case TARGET_NR_epoll_create
:
8896 ret
= get_errno(epoll_create(arg1
));
8899 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8900 case TARGET_NR_epoll_create1
:
8901 ret
= get_errno(epoll_create1(arg1
));
8904 #if defined(TARGET_NR_epoll_ctl)
8905 case TARGET_NR_epoll_ctl
:
8907 struct epoll_event ep
;
8908 struct epoll_event
*epp
= 0;
8910 struct target_epoll_event
*target_ep
;
8911 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
8914 ep
.events
= tswap32(target_ep
->events
);
8915 /* The epoll_data_t union is just opaque data to the kernel,
8916 * so we transfer all 64 bits across and need not worry what
8917 * actual data type it is.
8919 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
8920 unlock_user_struct(target_ep
, arg4
, 0);
8923 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
8928 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8929 #define IMPLEMENT_EPOLL_PWAIT
8931 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8932 #if defined(TARGET_NR_epoll_wait)
8933 case TARGET_NR_epoll_wait
:
8935 #if defined(IMPLEMENT_EPOLL_PWAIT)
8936 case TARGET_NR_epoll_pwait
:
8939 struct target_epoll_event
*target_ep
;
8940 struct epoll_event
*ep
;
8942 int maxevents
= arg3
;
8945 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8946 maxevents
* sizeof(struct target_epoll_event
), 1);
8951 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8954 #if defined(IMPLEMENT_EPOLL_PWAIT)
8955 case TARGET_NR_epoll_pwait
:
8957 target_sigset_t
*target_set
;
8958 sigset_t _set
, *set
= &_set
;
8961 target_set
= lock_user(VERIFY_READ
, arg5
,
8962 sizeof(target_sigset_t
), 1);
8964 unlock_user(target_ep
, arg2
, 0);
8967 target_to_host_sigset(set
, target_set
);
8968 unlock_user(target_set
, arg5
, 0);
8973 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8977 #if defined(TARGET_NR_epoll_wait)
8978 case TARGET_NR_epoll_wait
:
8979 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8983 ret
= -TARGET_ENOSYS
;
8985 if (!is_error(ret
)) {
8987 for (i
= 0; i
< ret
; i
++) {
8988 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8989 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8992 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8997 #ifdef TARGET_NR_prlimit64
8998 case TARGET_NR_prlimit64
:
9000 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9001 struct target_rlimit64
*target_rnew
, *target_rold
;
9002 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
9004 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
9007 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
9008 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
9009 unlock_user_struct(target_rnew
, arg3
, 0);
9013 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
9014 if (!is_error(ret
) && arg4
) {
9015 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
9018 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
9019 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
9020 unlock_user_struct(target_rold
, arg4
, 1);
9025 #ifdef TARGET_NR_gethostname
9026 case TARGET_NR_gethostname
:
9028 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9030 ret
= get_errno(gethostname(name
, arg2
));
9031 unlock_user(name
, arg1
, arg2
);
9033 ret
= -TARGET_EFAULT
;
9040 gemu_log("qemu: Unsupported syscall: %d\n", num
);
9041 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9042 unimplemented_nowarn
:
9044 ret
= -TARGET_ENOSYS
;
9049 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
9052 print_syscall_ret(num
, ret
);
9055 ret
= -TARGET_EFAULT
;