4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #define termios host_termios
75 #define winsize host_winsize
76 #define termio host_termio
77 #define sgttyb host_sgttyb /* same as target */
78 #define tchars host_tchars /* same as target */
79 #define ltchars host_ltchars /* same as target */
81 #include <linux/termios.h>
82 #include <linux/unistd.h>
83 #include <linux/utsname.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
88 #include <linux/mtio.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
95 #include "linux_loop.h"
96 #include "cpu-uname.h"
100 #if defined(CONFIG_USE_NPTL)
101 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
102 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
104 /* XXX: Hardcode the above values. */
105 #define CLONE_NPTL_FLAGS2 0
110 //#include <linux/msdos_fs.h>
111 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
112 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
123 #define _syscall0(type,name) \
124 static type name (void) \
126 return syscall(__NR_##name); \
129 #define _syscall1(type,name,type1,arg1) \
130 static type name (type1 arg1) \
132 return syscall(__NR_##name, arg1); \
135 #define _syscall2(type,name,type1,arg1,type2,arg2) \
136 static type name (type1 arg1,type2 arg2) \
138 return syscall(__NR_##name, arg1, arg2); \
141 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
142 static type name (type1 arg1,type2 arg2,type3 arg3) \
144 return syscall(__NR_##name, arg1, arg2, arg3); \
147 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
148 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
150 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
153 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
155 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
157 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
161 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
162 type5,arg5,type6,arg6) \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
170 #define __NR_sys_uname __NR_uname
171 #define __NR_sys_faccessat __NR_faccessat
172 #define __NR_sys_fchmodat __NR_fchmodat
173 #define __NR_sys_fchownat __NR_fchownat
174 #define __NR_sys_fstatat64 __NR_fstatat64
175 #define __NR_sys_futimesat __NR_futimesat
176 #define __NR_sys_getcwd1 __NR_getcwd
177 #define __NR_sys_getdents __NR_getdents
178 #define __NR_sys_getdents64 __NR_getdents64
179 #define __NR_sys_getpriority __NR_getpriority
180 #define __NR_sys_linkat __NR_linkat
181 #define __NR_sys_mkdirat __NR_mkdirat
182 #define __NR_sys_mknodat __NR_mknodat
183 #define __NR_sys_newfstatat __NR_newfstatat
184 #define __NR_sys_openat __NR_openat
185 #define __NR_sys_readlinkat __NR_readlinkat
186 #define __NR_sys_renameat __NR_renameat
187 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
188 #define __NR_sys_symlinkat __NR_symlinkat
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_tgkill __NR_tgkill
191 #define __NR_sys_tkill __NR_tkill
192 #define __NR_sys_unlinkat __NR_unlinkat
193 #define __NR_sys_utimensat __NR_utimensat
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
201 #define __NR__llseek __NR_lseek
205 _syscall0(int, gettid
)
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    /* No host syscall number available: report "not implemented". */
    return -ENOSYS;
}
213 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
214 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
215 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
217 _syscall2(int, sys_getpriority
, int, which
, int, who
);
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
220 loff_t
*, res
, uint
, wh
);
222 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
223 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group
,int,error_code
)
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address
,int *,tidptr
)
236 #if defined(CONFIG_USE_NPTL)
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
239 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
242 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
243 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
244 unsigned long *, user_mask_ptr
);
245 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
246 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
247 unsigned long *, user_mask_ptr
);
249 static bitmask_transtbl fcntl_flags_tbl
[] = {
250 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
251 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
252 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
253 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
254 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
255 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
256 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
257 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
258 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
259 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
260 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
261 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
262 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
263 #if defined(O_DIRECT)
264 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
/* Copy one utsname field, guaranteeing NUL termination (the kernel's
 * __NEW_UTS_LEN does not include the terminating null). */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Fill a kernel-style 'struct new_utsname' from the host's uname(2).
 * Returns 0 on success, -1 on failure (errno set by uname). */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* domainname only exists in glibc's struct utsname with _GNU_SOURCE. */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}
#undef COPY_UTSNAME_FIELD
/* getcwd(2)-style helper: on success returns strlen(buf)+1 (the byte
 * count including the NUL), matching the kernel getcwd return value;
 * on failure returns -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
314 * Host system seems to have atfile syscall stubs available. We
315 * now enable them one by one as specified by target syscall_nr.h.
318 #ifdef TARGET_NR_faccessat
319 static int sys_faccessat(int dirfd
, const char *pathname
, int mode
)
321 return (faccessat(dirfd
, pathname
, mode
, 0));
324 #ifdef TARGET_NR_fchmodat
325 static int sys_fchmodat(int dirfd
, const char *pathname
, mode_t mode
)
327 return (fchmodat(dirfd
, pathname
, mode
, 0));
330 #if defined(TARGET_NR_fchownat)
331 static int sys_fchownat(int dirfd
, const char *pathname
, uid_t owner
,
332 gid_t group
, int flags
)
334 return (fchownat(dirfd
, pathname
, owner
, group
, flags
));
337 #ifdef __NR_fstatat64
338 static int sys_fstatat64(int dirfd
, const char *pathname
, struct stat
*buf
,
341 return (fstatat(dirfd
, pathname
, buf
, flags
));
344 #ifdef __NR_newfstatat
345 static int sys_newfstatat(int dirfd
, const char *pathname
, struct stat
*buf
,
348 return (fstatat(dirfd
, pathname
, buf
, flags
));
351 #ifdef TARGET_NR_futimesat
/* Thin wrapper around futimesat(2). */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
358 #ifdef TARGET_NR_linkat
359 static int sys_linkat(int olddirfd
, const char *oldpath
,
360 int newdirfd
, const char *newpath
, int flags
)
362 return (linkat(olddirfd
, oldpath
, newdirfd
, newpath
, flags
));
365 #ifdef TARGET_NR_mkdirat
366 static int sys_mkdirat(int dirfd
, const char *pathname
, mode_t mode
)
368 return (mkdirat(dirfd
, pathname
, mode
));
371 #ifdef TARGET_NR_mknodat
372 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
375 return (mknodat(dirfd
, pathname
, mode
, dev
));
378 #ifdef TARGET_NR_openat
379 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
382 * open(2) has extra parameter 'mode' when called with
385 if ((flags
& O_CREAT
) != 0) {
390 * Get the 'mode' parameter and translate it to
394 mode
= va_arg(ap
, mode_t
);
395 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
398 return (openat(dirfd
, pathname
, flags
, mode
));
400 return (openat(dirfd
, pathname
, flags
));
403 #ifdef TARGET_NR_readlinkat
404 static int sys_readlinkat(int dirfd
, const char *pathname
, char *buf
, size_t bufsiz
)
406 return (readlinkat(dirfd
, pathname
, buf
, bufsiz
));
409 #ifdef TARGET_NR_renameat
410 static int sys_renameat(int olddirfd
, const char *oldpath
,
411 int newdirfd
, const char *newpath
)
413 return (renameat(olddirfd
, oldpath
, newdirfd
, newpath
));
416 #ifdef TARGET_NR_symlinkat
417 static int sys_symlinkat(const char *oldpath
, int newdirfd
, const char *newpath
)
419 return (symlinkat(oldpath
, newdirfd
, newpath
));
422 #ifdef TARGET_NR_unlinkat
423 static int sys_unlinkat(int dirfd
, const char *pathname
, int flags
)
425 return (unlinkat(dirfd
, pathname
, flags
));
428 #else /* !CONFIG_ATFILE */
431 * Try direct syscalls instead
433 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
434 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
436 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
437 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
439 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
440 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
441 uid_t
,owner
,gid_t
,group
,int,flags
)
443 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
444 defined(__NR_fstatat64)
445 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
446 struct stat
*,buf
,int,flags
)
448 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
449 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
450 const struct timeval
*,times
)
452 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
453 defined(__NR_newfstatat)
454 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
455 struct stat
*,buf
,int,flags
)
457 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
458 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
459 int,newdirfd
,const char *,newpath
,int,flags
)
461 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
462 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
464 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
465 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
466 mode_t
,mode
,dev_t
,dev
)
468 #if defined(TARGET_NR_openat) && defined(__NR_openat)
469 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
471 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
472 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
473 char *,buf
,size_t,bufsize
)
475 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
476 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
477 int,newdirfd
,const char *,newpath
)
479 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
480 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
481 int,newdirfd
,const char *,newpath
)
483 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
484 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
487 #endif /* CONFIG_ATFILE */
489 #ifdef CONFIG_UTIMENSAT
490 static int sys_utimensat(int dirfd
, const char *pathname
,
491 const struct timespec times
[2], int flags
)
493 if (pathname
== NULL
)
494 return futimens(dirfd
, times
);
496 return utimensat(dirfd
, pathname
, times
, flags
);
499 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
500 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
501 const struct timespec
*,tsp
,int,flags
)
503 #endif /* CONFIG_UTIMENSAT */
505 #ifdef CONFIG_INOTIFY
506 #include <sys/inotify.h>
508 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper around inotify_init(2). */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
514 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around inotify_add_watch(2). */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
520 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around inotify_rm_watch(2). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
526 #ifdef CONFIG_INOTIFY1
527 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around inotify_init1(2). */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
535 /* Userspace can usually survive runtime without inotify */
536 #undef TARGET_NR_inotify_init
537 #undef TARGET_NR_inotify_init1
538 #undef TARGET_NR_inotify_add_watch
539 #undef TARGET_NR_inotify_rm_watch
540 #endif /* CONFIG_INOTIFY */
542 #if defined(TARGET_NR_ppoll)
544 # define __NR_ppoll -1
546 #define __NR_sys_ppoll __NR_ppoll
547 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
548 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
552 #if defined(TARGET_NR_pselect6)
553 #ifndef __NR_pselect6
554 # define __NR_pselect6 -1
556 #define __NR_sys_pselect6 __NR_pselect6
557 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
558 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
561 #if defined(TARGET_NR_prlimit64)
562 #ifndef __NR_prlimit64
563 # define __NR_prlimit64 -1
565 #define __NR_sys_prlimit64 __NR_prlimit64
566 /* The glibc rlimit structure may not be that used by the underlying syscall */
567 struct host_rlimit64
{
571 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
572 const struct host_rlimit64
*, new_limit
,
573 struct host_rlimit64
*, old_limit
)
576 extern int personality(int);
577 extern int flock(int, int);
578 extern int setfsuid(int);
579 extern int setfsgid(int);
580 extern int setgroups(int, gid_t
*);
582 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#if defined(TARGET_ARM)
/* ARM EABI requires 64-bit arguments aligned to an even register pair. */
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
/* MIPS always aligns 64-bit arguments on even register pairs. */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
/* Other targets impose no pair alignment. */
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
593 #define ERRNO_TABLE_SIZE 1200
595 /* target_to_host_errno_table[] is initialized from
596 * host_to_target_errno_table[] in syscall_init(). */
597 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
601 * This list is the union of errno values overridden in asm-<arch>/errno.h
602 * minus the errnos that are not actually generic to all archs.
604 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
605 [EIDRM
] = TARGET_EIDRM
,
606 [ECHRNG
] = TARGET_ECHRNG
,
607 [EL2NSYNC
] = TARGET_EL2NSYNC
,
608 [EL3HLT
] = TARGET_EL3HLT
,
609 [EL3RST
] = TARGET_EL3RST
,
610 [ELNRNG
] = TARGET_ELNRNG
,
611 [EUNATCH
] = TARGET_EUNATCH
,
612 [ENOCSI
] = TARGET_ENOCSI
,
613 [EL2HLT
] = TARGET_EL2HLT
,
614 [EDEADLK
] = TARGET_EDEADLK
,
615 [ENOLCK
] = TARGET_ENOLCK
,
616 [EBADE
] = TARGET_EBADE
,
617 [EBADR
] = TARGET_EBADR
,
618 [EXFULL
] = TARGET_EXFULL
,
619 [ENOANO
] = TARGET_ENOANO
,
620 [EBADRQC
] = TARGET_EBADRQC
,
621 [EBADSLT
] = TARGET_EBADSLT
,
622 [EBFONT
] = TARGET_EBFONT
,
623 [ENOSTR
] = TARGET_ENOSTR
,
624 [ENODATA
] = TARGET_ENODATA
,
625 [ETIME
] = TARGET_ETIME
,
626 [ENOSR
] = TARGET_ENOSR
,
627 [ENONET
] = TARGET_ENONET
,
628 [ENOPKG
] = TARGET_ENOPKG
,
629 [EREMOTE
] = TARGET_EREMOTE
,
630 [ENOLINK
] = TARGET_ENOLINK
,
631 [EADV
] = TARGET_EADV
,
632 [ESRMNT
] = TARGET_ESRMNT
,
633 [ECOMM
] = TARGET_ECOMM
,
634 [EPROTO
] = TARGET_EPROTO
,
635 [EDOTDOT
] = TARGET_EDOTDOT
,
636 [EMULTIHOP
] = TARGET_EMULTIHOP
,
637 [EBADMSG
] = TARGET_EBADMSG
,
638 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
639 [EOVERFLOW
] = TARGET_EOVERFLOW
,
640 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
641 [EBADFD
] = TARGET_EBADFD
,
642 [EREMCHG
] = TARGET_EREMCHG
,
643 [ELIBACC
] = TARGET_ELIBACC
,
644 [ELIBBAD
] = TARGET_ELIBBAD
,
645 [ELIBSCN
] = TARGET_ELIBSCN
,
646 [ELIBMAX
] = TARGET_ELIBMAX
,
647 [ELIBEXEC
] = TARGET_ELIBEXEC
,
648 [EILSEQ
] = TARGET_EILSEQ
,
649 [ENOSYS
] = TARGET_ENOSYS
,
650 [ELOOP
] = TARGET_ELOOP
,
651 [ERESTART
] = TARGET_ERESTART
,
652 [ESTRPIPE
] = TARGET_ESTRPIPE
,
653 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
654 [EUSERS
] = TARGET_EUSERS
,
655 [ENOTSOCK
] = TARGET_ENOTSOCK
,
656 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
657 [EMSGSIZE
] = TARGET_EMSGSIZE
,
658 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
659 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
660 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
661 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
662 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
663 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
664 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
665 [EADDRINUSE
] = TARGET_EADDRINUSE
,
666 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
667 [ENETDOWN
] = TARGET_ENETDOWN
,
668 [ENETUNREACH
] = TARGET_ENETUNREACH
,
669 [ENETRESET
] = TARGET_ENETRESET
,
670 [ECONNABORTED
] = TARGET_ECONNABORTED
,
671 [ECONNRESET
] = TARGET_ECONNRESET
,
672 [ENOBUFS
] = TARGET_ENOBUFS
,
673 [EISCONN
] = TARGET_EISCONN
,
674 [ENOTCONN
] = TARGET_ENOTCONN
,
675 [EUCLEAN
] = TARGET_EUCLEAN
,
676 [ENOTNAM
] = TARGET_ENOTNAM
,
677 [ENAVAIL
] = TARGET_ENAVAIL
,
678 [EISNAM
] = TARGET_EISNAM
,
679 [EREMOTEIO
] = TARGET_EREMOTEIO
,
680 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
681 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
682 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
683 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
684 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
685 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
686 [EALREADY
] = TARGET_EALREADY
,
687 [EINPROGRESS
] = TARGET_EINPROGRESS
,
688 [ESTALE
] = TARGET_ESTALE
,
689 [ECANCELED
] = TARGET_ECANCELED
,
690 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
691 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
693 [ENOKEY
] = TARGET_ENOKEY
,
696 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
699 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
702 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
705 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
707 #ifdef ENOTRECOVERABLE
708 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
712 static inline int host_to_target_errno(int err
)
714 if(host_to_target_errno_table
[err
])
715 return host_to_target_errno_table
[err
];
719 static inline int target_to_host_errno(int err
)
721 if (target_to_host_errno_table
[err
])
722 return target_to_host_errno_table
[err
];
726 static inline abi_long
get_errno(abi_long ret
)
729 return -host_to_target_errno(errno
);
734 static inline int is_error(abi_long ret
)
736 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* strerror() for a *target* errno value: translate to the host errno
 * first, then use the host's message table. */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
744 static abi_ulong target_brk
;
745 static abi_ulong target_original_brk
;
746 static abi_ulong brk_page
;
748 void target_set_brk(abi_ulong new_brk
)
750 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
751 brk_page
= HOST_PAGE_ALIGN(target_brk
);
754 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
755 #define DEBUGF_BRK(message, args...)
757 /* do_brk() must return target values and target errnos. */
758 abi_long
do_brk(abi_ulong new_brk
)
760 abi_long mapped_addr
;
763 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk
);
766 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk
);
769 if (new_brk
< target_original_brk
) {
770 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk
);
774 /* If the new brk is less than the highest page reserved to the
775 * target heap allocation, set it and we're almost done... */
776 if (new_brk
<= brk_page
) {
777 /* Heap contents are initialized to zero, as for anonymous
779 if (new_brk
> target_brk
) {
780 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
782 target_brk
= new_brk
;
783 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk
);
787 /* We need to allocate more memory after the brk... Note that
788 * we don't use MAP_FIXED because that will map over the top of
789 * any existing mapping (like the one with the host libc or qemu
790 * itself); instead we treat "mapped but at wrong address" as
791 * a failure and unmap again.
793 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
794 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
795 PROT_READ
|PROT_WRITE
,
796 MAP_ANON
|MAP_PRIVATE
, 0, 0));
798 if (mapped_addr
== brk_page
) {
799 target_brk
= new_brk
;
800 brk_page
= HOST_PAGE_ALIGN(target_brk
);
801 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk
);
803 } else if (mapped_addr
!= -1) {
804 /* Mapped but at wrong address, meaning there wasn't actually
805 * enough space for this brk.
807 target_munmap(mapped_addr
, new_alloc_size
);
809 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk
);
812 DEBUGF_BRK("%#010x (otherwise)\n", target_brk
);
815 #if defined(TARGET_ALPHA)
816 /* We (partially) emulate OSF/1 on Alpha, which requires we
817 return a proper errno, not an unchanged brk value. */
818 return -TARGET_ENOMEM
;
820 /* For everything else, return the previous break. */
824 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
825 abi_ulong target_fds_addr
,
829 abi_ulong b
, *target_fds
;
831 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
832 if (!(target_fds
= lock_user(VERIFY_READ
,
834 sizeof(abi_ulong
) * nw
,
836 return -TARGET_EFAULT
;
840 for (i
= 0; i
< nw
; i
++) {
841 /* grab the abi_ulong */
842 __get_user(b
, &target_fds
[i
]);
843 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
844 /* check the bit inside the abi_ulong */
851 unlock_user(target_fds
, target_fds_addr
, 0);
856 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
857 abi_ulong target_fds_addr
,
860 if (target_fds_addr
) {
861 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
862 return -TARGET_EFAULT
;
870 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
876 abi_ulong
*target_fds
;
878 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
879 if (!(target_fds
= lock_user(VERIFY_WRITE
,
881 sizeof(abi_ulong
) * nw
,
883 return -TARGET_EFAULT
;
886 for (i
= 0; i
< nw
; i
++) {
888 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
889 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
892 __put_user(v
, &target_fds
[i
]);
895 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
900 #if defined(__alpha__)
906 static inline abi_long
host_to_target_clock_t(long ticks
)
908 #if HOST_HZ == TARGET_HZ
911 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
915 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
916 const struct rusage
*rusage
)
918 struct target_rusage
*target_rusage
;
920 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
921 return -TARGET_EFAULT
;
922 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
923 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
924 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
925 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
926 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
927 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
928 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
929 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
930 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
931 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
932 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
933 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
934 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
935 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
936 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
937 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
938 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
939 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
940 unlock_user_struct(target_rusage
, target_addr
, 1);
945 static inline rlim_t
target_to_host_rlim(target_ulong target_rlim
)
947 target_ulong target_rlim_swap
;
950 target_rlim_swap
= tswapl(target_rlim
);
951 if (target_rlim_swap
== TARGET_RLIM_INFINITY
|| target_rlim_swap
!= (rlim_t
)target_rlim_swap
)
952 result
= RLIM_INFINITY
;
954 result
= target_rlim_swap
;
959 static inline target_ulong
host_to_target_rlim(rlim_t rlim
)
961 target_ulong target_rlim_swap
;
964 if (rlim
== RLIM_INFINITY
|| rlim
!= (target_long
)rlim
)
965 target_rlim_swap
= TARGET_RLIM_INFINITY
;
967 target_rlim_swap
= rlim
;
968 result
= tswapl(target_rlim_swap
);
973 static inline int target_to_host_resource(int code
)
976 case TARGET_RLIMIT_AS
:
978 case TARGET_RLIMIT_CORE
:
980 case TARGET_RLIMIT_CPU
:
982 case TARGET_RLIMIT_DATA
:
984 case TARGET_RLIMIT_FSIZE
:
986 case TARGET_RLIMIT_LOCKS
:
988 case TARGET_RLIMIT_MEMLOCK
:
989 return RLIMIT_MEMLOCK
;
990 case TARGET_RLIMIT_MSGQUEUE
:
991 return RLIMIT_MSGQUEUE
;
992 case TARGET_RLIMIT_NICE
:
994 case TARGET_RLIMIT_NOFILE
:
995 return RLIMIT_NOFILE
;
996 case TARGET_RLIMIT_NPROC
:
998 case TARGET_RLIMIT_RSS
:
1000 case TARGET_RLIMIT_RTPRIO
:
1001 return RLIMIT_RTPRIO
;
1002 case TARGET_RLIMIT_SIGPENDING
:
1003 return RLIMIT_SIGPENDING
;
1004 case TARGET_RLIMIT_STACK
:
1005 return RLIMIT_STACK
;
1011 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1012 abi_ulong target_tv_addr
)
1014 struct target_timeval
*target_tv
;
1016 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1017 return -TARGET_EFAULT
;
1019 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1020 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1022 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1027 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1028 const struct timeval
*tv
)
1030 struct target_timeval
*target_tv
;
1032 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1033 return -TARGET_EFAULT
;
1035 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1036 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1038 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1043 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1046 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1047 abi_ulong target_mq_attr_addr
)
1049 struct target_mq_attr
*target_mq_attr
;
1051 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1052 target_mq_attr_addr
, 1))
1053 return -TARGET_EFAULT
;
1055 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1056 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1057 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1058 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1060 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1065 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1066 const struct mq_attr
*attr
)
1068 struct target_mq_attr
*target_mq_attr
;
1070 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1071 target_mq_attr_addr
, 0))
1072 return -TARGET_EFAULT
;
1074 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1075 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1076 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1077 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1079 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1085 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1086 /* do_select() must return target values and target errnos. */
1087 static abi_long
do_select(int n
,
1088 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1089 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1091 fd_set rfds
, wfds
, efds
;
1092 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1093 struct timeval tv
, *tv_ptr
;
1096 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1100 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1104 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1109 if (target_tv_addr
) {
1110 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1111 return -TARGET_EFAULT
;
1117 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1119 if (!is_error(ret
)) {
1120 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1121 return -TARGET_EFAULT
;
1122 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1123 return -TARGET_EFAULT
;
1124 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1125 return -TARGET_EFAULT
;
1127 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1128 return -TARGET_EFAULT
;
1135 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1138 return pipe2(host_pipe
, flags
);
1144 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1145 int flags
, int is_pipe2
)
1149 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1152 return get_errno(ret
);
1154 /* Several targets have special calling conventions for the original
1155 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1157 #if defined(TARGET_ALPHA)
1158 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1159 return host_pipe
[0];
1160 #elif defined(TARGET_MIPS)
1161 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1162 return host_pipe
[0];
1163 #elif defined(TARGET_SH4)
1164 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1165 return host_pipe
[0];
1169 if (put_user_s32(host_pipe
[0], pipedes
)
1170 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1171 return -TARGET_EFAULT
;
1172 return get_errno(ret
);
1175 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1176 abi_ulong target_addr
,
1179 struct target_ip_mreqn
*target_smreqn
;
1181 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1183 return -TARGET_EFAULT
;
1184 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1185 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1186 if (len
== sizeof(struct target_ip_mreqn
))
1187 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
1188 unlock_user(target_smreqn
, target_addr
, 0);
1193 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1194 abi_ulong target_addr
,
1197 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1198 sa_family_t sa_family
;
1199 struct target_sockaddr
*target_saddr
;
1201 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1203 return -TARGET_EFAULT
;
1205 sa_family
= tswap16(target_saddr
->sa_family
);
1207 /* Oops. The caller might send a incomplete sun_path; sun_path
1208 * must be terminated by \0 (see the manual page), but
1209 * unfortunately it is quite common to specify sockaddr_un
1210 * length as "strlen(x->sun_path)" while it should be
1211 * "strlen(...) + 1". We'll fix that here if needed.
1212 * Linux kernel has a similar feature.
1215 if (sa_family
== AF_UNIX
) {
1216 if (len
< unix_maxlen
&& len
> 0) {
1217 char *cp
= (char*)target_saddr
;
1219 if ( cp
[len
-1] && !cp
[len
] )
1222 if (len
> unix_maxlen
)
1226 memcpy(addr
, target_saddr
, len
);
1227 addr
->sa_family
= sa_family
;
1228 unlock_user(target_saddr
, target_addr
, 0);
1233 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1234 struct sockaddr
*addr
,
1237 struct target_sockaddr
*target_saddr
;
1239 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1241 return -TARGET_EFAULT
;
1242 memcpy(target_saddr
, addr
, len
);
1243 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1244 unlock_user(target_saddr
, target_addr
, len
);
1249 /* ??? Should this also swap msgh->name? */
1250 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1251 struct target_msghdr
*target_msgh
)
1253 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1254 abi_long msg_controllen
;
1255 abi_ulong target_cmsg_addr
;
1256 struct target_cmsghdr
*target_cmsg
;
1257 socklen_t space
= 0;
1259 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1260 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1262 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1263 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1265 return -TARGET_EFAULT
;
1267 while (cmsg
&& target_cmsg
) {
1268 void *data
= CMSG_DATA(cmsg
);
1269 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1271 int len
= tswapl(target_cmsg
->cmsg_len
)
1272 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1274 space
+= CMSG_SPACE(len
);
1275 if (space
> msgh
->msg_controllen
) {
1276 space
-= CMSG_SPACE(len
);
1277 gemu_log("Host cmsg overflow\n");
1281 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1282 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1283 cmsg
->cmsg_len
= CMSG_LEN(len
);
1285 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1286 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1287 memcpy(data
, target_data
, len
);
1289 int *fd
= (int *)data
;
1290 int *target_fd
= (int *)target_data
;
1291 int i
, numfds
= len
/ sizeof(int);
1293 for (i
= 0; i
< numfds
; i
++)
1294 fd
[i
] = tswap32(target_fd
[i
]);
1297 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1298 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1300 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1302 msgh
->msg_controllen
= space
;
1306 /* ??? Should this also swap msgh->name? */
1307 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1308 struct msghdr
*msgh
)
1310 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1311 abi_long msg_controllen
;
1312 abi_ulong target_cmsg_addr
;
1313 struct target_cmsghdr
*target_cmsg
;
1314 socklen_t space
= 0;
1316 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1317 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1319 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1320 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1322 return -TARGET_EFAULT
;
1324 while (cmsg
&& target_cmsg
) {
1325 void *data
= CMSG_DATA(cmsg
);
1326 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1328 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1330 space
+= TARGET_CMSG_SPACE(len
);
1331 if (space
> msg_controllen
) {
1332 space
-= TARGET_CMSG_SPACE(len
);
1333 gemu_log("Target cmsg overflow\n");
1337 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1338 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1339 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1341 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1342 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1343 memcpy(target_data
, data
, len
);
1345 int *fd
= (int *)data
;
1346 int *target_fd
= (int *)target_data
;
1347 int i
, numfds
= len
/ sizeof(int);
1349 for (i
= 0; i
< numfds
; i
++)
1350 target_fd
[i
] = tswap32(fd
[i
]);
1353 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1354 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1356 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1358 target_msgh
->msg_controllen
= tswapl(space
);
1362 /* do_setsockopt() Must return target values and target errnos. */
1363 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1364 abi_ulong optval_addr
, socklen_t optlen
)
1368 struct ip_mreqn
*ip_mreq
;
1369 struct ip_mreq_source
*ip_mreq_source
;
1373 /* TCP options all take an 'int' value. */
1374 if (optlen
< sizeof(uint32_t))
1375 return -TARGET_EINVAL
;
1377 if (get_user_u32(val
, optval_addr
))
1378 return -TARGET_EFAULT
;
1379 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1386 case IP_ROUTER_ALERT
:
1390 case IP_MTU_DISCOVER
:
1396 case IP_MULTICAST_TTL
:
1397 case IP_MULTICAST_LOOP
:
1399 if (optlen
>= sizeof(uint32_t)) {
1400 if (get_user_u32(val
, optval_addr
))
1401 return -TARGET_EFAULT
;
1402 } else if (optlen
>= 1) {
1403 if (get_user_u8(val
, optval_addr
))
1404 return -TARGET_EFAULT
;
1406 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1408 case IP_ADD_MEMBERSHIP
:
1409 case IP_DROP_MEMBERSHIP
:
1410 if (optlen
< sizeof (struct target_ip_mreq
) ||
1411 optlen
> sizeof (struct target_ip_mreqn
))
1412 return -TARGET_EINVAL
;
1414 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1415 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1416 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1419 case IP_BLOCK_SOURCE
:
1420 case IP_UNBLOCK_SOURCE
:
1421 case IP_ADD_SOURCE_MEMBERSHIP
:
1422 case IP_DROP_SOURCE_MEMBERSHIP
:
1423 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1424 return -TARGET_EINVAL
;
1426 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1427 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1428 unlock_user (ip_mreq_source
, optval_addr
, 0);
1435 case TARGET_SOL_SOCKET
:
1437 /* Options with 'int' argument. */
1438 case TARGET_SO_DEBUG
:
1441 case TARGET_SO_REUSEADDR
:
1442 optname
= SO_REUSEADDR
;
1444 case TARGET_SO_TYPE
:
1447 case TARGET_SO_ERROR
:
1450 case TARGET_SO_DONTROUTE
:
1451 optname
= SO_DONTROUTE
;
1453 case TARGET_SO_BROADCAST
:
1454 optname
= SO_BROADCAST
;
1456 case TARGET_SO_SNDBUF
:
1457 optname
= SO_SNDBUF
;
1459 case TARGET_SO_RCVBUF
:
1460 optname
= SO_RCVBUF
;
1462 case TARGET_SO_KEEPALIVE
:
1463 optname
= SO_KEEPALIVE
;
1465 case TARGET_SO_OOBINLINE
:
1466 optname
= SO_OOBINLINE
;
1468 case TARGET_SO_NO_CHECK
:
1469 optname
= SO_NO_CHECK
;
1471 case TARGET_SO_PRIORITY
:
1472 optname
= SO_PRIORITY
;
1475 case TARGET_SO_BSDCOMPAT
:
1476 optname
= SO_BSDCOMPAT
;
1479 case TARGET_SO_PASSCRED
:
1480 optname
= SO_PASSCRED
;
1482 case TARGET_SO_TIMESTAMP
:
1483 optname
= SO_TIMESTAMP
;
1485 case TARGET_SO_RCVLOWAT
:
1486 optname
= SO_RCVLOWAT
;
1488 case TARGET_SO_RCVTIMEO
:
1489 optname
= SO_RCVTIMEO
;
1491 case TARGET_SO_SNDTIMEO
:
1492 optname
= SO_SNDTIMEO
;
1498 if (optlen
< sizeof(uint32_t))
1499 return -TARGET_EINVAL
;
1501 if (get_user_u32(val
, optval_addr
))
1502 return -TARGET_EFAULT
;
1503 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1507 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1508 ret
= -TARGET_ENOPROTOOPT
;
1513 /* do_getsockopt() Must return target values and target errnos. */
1514 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1515 abi_ulong optval_addr
, abi_ulong optlen
)
1522 case TARGET_SOL_SOCKET
:
1525 /* These don't just return a single integer */
1526 case TARGET_SO_LINGER
:
1527 case TARGET_SO_RCVTIMEO
:
1528 case TARGET_SO_SNDTIMEO
:
1529 case TARGET_SO_PEERCRED
:
1530 case TARGET_SO_PEERNAME
:
1532 /* Options with 'int' argument. */
1533 case TARGET_SO_DEBUG
:
1536 case TARGET_SO_REUSEADDR
:
1537 optname
= SO_REUSEADDR
;
1539 case TARGET_SO_TYPE
:
1542 case TARGET_SO_ERROR
:
1545 case TARGET_SO_DONTROUTE
:
1546 optname
= SO_DONTROUTE
;
1548 case TARGET_SO_BROADCAST
:
1549 optname
= SO_BROADCAST
;
1551 case TARGET_SO_SNDBUF
:
1552 optname
= SO_SNDBUF
;
1554 case TARGET_SO_RCVBUF
:
1555 optname
= SO_RCVBUF
;
1557 case TARGET_SO_KEEPALIVE
:
1558 optname
= SO_KEEPALIVE
;
1560 case TARGET_SO_OOBINLINE
:
1561 optname
= SO_OOBINLINE
;
1563 case TARGET_SO_NO_CHECK
:
1564 optname
= SO_NO_CHECK
;
1566 case TARGET_SO_PRIORITY
:
1567 optname
= SO_PRIORITY
;
1570 case TARGET_SO_BSDCOMPAT
:
1571 optname
= SO_BSDCOMPAT
;
1574 case TARGET_SO_PASSCRED
:
1575 optname
= SO_PASSCRED
;
1577 case TARGET_SO_TIMESTAMP
:
1578 optname
= SO_TIMESTAMP
;
1580 case TARGET_SO_RCVLOWAT
:
1581 optname
= SO_RCVLOWAT
;
1588 /* TCP options all take an 'int' value. */
1590 if (get_user_u32(len
, optlen
))
1591 return -TARGET_EFAULT
;
1593 return -TARGET_EINVAL
;
1595 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1601 if (put_user_u32(val
, optval_addr
))
1602 return -TARGET_EFAULT
;
1604 if (put_user_u8(val
, optval_addr
))
1605 return -TARGET_EFAULT
;
1607 if (put_user_u32(len
, optlen
))
1608 return -TARGET_EFAULT
;
1615 case IP_ROUTER_ALERT
:
1619 case IP_MTU_DISCOVER
:
1625 case IP_MULTICAST_TTL
:
1626 case IP_MULTICAST_LOOP
:
1627 if (get_user_u32(len
, optlen
))
1628 return -TARGET_EFAULT
;
1630 return -TARGET_EINVAL
;
1632 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1635 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1637 if (put_user_u32(len
, optlen
)
1638 || put_user_u8(val
, optval_addr
))
1639 return -TARGET_EFAULT
;
1641 if (len
> sizeof(int))
1643 if (put_user_u32(len
, optlen
)
1644 || put_user_u32(val
, optval_addr
))
1645 return -TARGET_EFAULT
;
1649 ret
= -TARGET_ENOPROTOOPT
;
1655 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1657 ret
= -TARGET_EOPNOTSUPP
;
1664 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1665 * other lock functions have a return code of 0 for failure.
1667 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1668 int count
, int copy
)
1670 struct target_iovec
*target_vec
;
1674 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1676 return -TARGET_EFAULT
;
1677 for(i
= 0;i
< count
; i
++) {
1678 base
= tswapl(target_vec
[i
].iov_base
);
1679 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1680 if (vec
[i
].iov_len
!= 0) {
1681 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1682 /* Don't check lock_user return value. We must call writev even
1683 if a element has invalid base address. */
1685 /* zero length pointer is ignored */
1686 vec
[i
].iov_base
= NULL
;
1689 unlock_user (target_vec
, target_addr
, 0);
1693 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1694 int count
, int copy
)
1696 struct target_iovec
*target_vec
;
1700 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1702 return -TARGET_EFAULT
;
1703 for(i
= 0;i
< count
; i
++) {
1704 if (target_vec
[i
].iov_base
) {
1705 base
= tswapl(target_vec
[i
].iov_base
);
1706 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1709 unlock_user (target_vec
, target_addr
, 0);
1714 /* do_socket() Must return target values and target errnos. */
1715 static abi_long
do_socket(int domain
, int type
, int protocol
)
1717 #if defined(TARGET_MIPS)
1719 case TARGET_SOCK_DGRAM
:
1722 case TARGET_SOCK_STREAM
:
1725 case TARGET_SOCK_RAW
:
1728 case TARGET_SOCK_RDM
:
1731 case TARGET_SOCK_SEQPACKET
:
1732 type
= SOCK_SEQPACKET
;
1734 case TARGET_SOCK_PACKET
:
1739 if (domain
== PF_NETLINK
)
1740 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1741 return get_errno(socket(domain
, type
, protocol
));
1744 /* do_bind() Must return target values and target errnos. */
1745 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1751 if ((int)addrlen
< 0) {
1752 return -TARGET_EINVAL
;
1755 addr
= alloca(addrlen
+1);
1757 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1761 return get_errno(bind(sockfd
, addr
, addrlen
));
1764 /* do_connect() Must return target values and target errnos. */
1765 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1771 if ((int)addrlen
< 0) {
1772 return -TARGET_EINVAL
;
1775 addr
= alloca(addrlen
);
1777 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1781 return get_errno(connect(sockfd
, addr
, addrlen
));
1784 /* do_sendrecvmsg() Must return target values and target errnos. */
1785 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1786 int flags
, int send
)
1789 struct target_msghdr
*msgp
;
1793 abi_ulong target_vec
;
1796 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1800 return -TARGET_EFAULT
;
1801 if (msgp
->msg_name
) {
1802 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1803 msg
.msg_name
= alloca(msg
.msg_namelen
);
1804 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1807 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1811 msg
.msg_name
= NULL
;
1812 msg
.msg_namelen
= 0;
1814 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1815 msg
.msg_control
= alloca(msg
.msg_controllen
);
1816 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1818 count
= tswapl(msgp
->msg_iovlen
);
1819 vec
= alloca(count
* sizeof(struct iovec
));
1820 target_vec
= tswapl(msgp
->msg_iov
);
1821 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1822 msg
.msg_iovlen
= count
;
1826 ret
= target_to_host_cmsg(&msg
, msgp
);
1828 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1830 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1831 if (!is_error(ret
)) {
1833 ret
= host_to_target_cmsg(msgp
, &msg
);
1838 unlock_iovec(vec
, target_vec
, count
, !send
);
1839 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1843 /* do_accept() Must return target values and target errnos. */
1844 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1845 abi_ulong target_addrlen_addr
)
1851 if (target_addr
== 0)
1852 return get_errno(accept(fd
, NULL
, NULL
));
1854 /* linux returns EINVAL if addrlen pointer is invalid */
1855 if (get_user_u32(addrlen
, target_addrlen_addr
))
1856 return -TARGET_EINVAL
;
1858 if ((int)addrlen
< 0) {
1859 return -TARGET_EINVAL
;
1862 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1863 return -TARGET_EINVAL
;
1865 addr
= alloca(addrlen
);
1867 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1868 if (!is_error(ret
)) {
1869 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1870 if (put_user_u32(addrlen
, target_addrlen_addr
))
1871 ret
= -TARGET_EFAULT
;
1876 /* do_getpeername() Must return target values and target errnos. */
1877 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1878 abi_ulong target_addrlen_addr
)
1884 if (get_user_u32(addrlen
, target_addrlen_addr
))
1885 return -TARGET_EFAULT
;
1887 if ((int)addrlen
< 0) {
1888 return -TARGET_EINVAL
;
1891 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1892 return -TARGET_EFAULT
;
1894 addr
= alloca(addrlen
);
1896 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1897 if (!is_error(ret
)) {
1898 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1899 if (put_user_u32(addrlen
, target_addrlen_addr
))
1900 ret
= -TARGET_EFAULT
;
1905 /* do_getsockname() Must return target values and target errnos. */
1906 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1907 abi_ulong target_addrlen_addr
)
1913 if (get_user_u32(addrlen
, target_addrlen_addr
))
1914 return -TARGET_EFAULT
;
1916 if ((int)addrlen
< 0) {
1917 return -TARGET_EINVAL
;
1920 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1921 return -TARGET_EFAULT
;
1923 addr
= alloca(addrlen
);
1925 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1926 if (!is_error(ret
)) {
1927 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1928 if (put_user_u32(addrlen
, target_addrlen_addr
))
1929 ret
= -TARGET_EFAULT
;
1934 /* do_socketpair() Must return target values and target errnos. */
1935 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1936 abi_ulong target_tab_addr
)
1941 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1942 if (!is_error(ret
)) {
1943 if (put_user_s32(tab
[0], target_tab_addr
)
1944 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1945 ret
= -TARGET_EFAULT
;
1950 /* do_sendto() Must return target values and target errnos. */
1951 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1952 abi_ulong target_addr
, socklen_t addrlen
)
1958 if ((int)addrlen
< 0) {
1959 return -TARGET_EINVAL
;
1962 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1964 return -TARGET_EFAULT
;
1966 addr
= alloca(addrlen
);
1967 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1969 unlock_user(host_msg
, msg
, 0);
1972 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1974 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1976 unlock_user(host_msg
, msg
, 0);
1980 /* do_recvfrom() Must return target values and target errnos. */
1981 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1982 abi_ulong target_addr
,
1983 abi_ulong target_addrlen
)
1990 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1992 return -TARGET_EFAULT
;
1994 if (get_user_u32(addrlen
, target_addrlen
)) {
1995 ret
= -TARGET_EFAULT
;
1998 if ((int)addrlen
< 0) {
1999 ret
= -TARGET_EINVAL
;
2002 addr
= alloca(addrlen
);
2003 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2005 addr
= NULL
; /* To keep compiler quiet. */
2006 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2008 if (!is_error(ret
)) {
2010 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2011 if (put_user_u32(addrlen
, target_addrlen
)) {
2012 ret
= -TARGET_EFAULT
;
2016 unlock_user(host_msg
, msg
, len
);
2019 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * Demultiplexes the single socketcall syscall: 'vptr' points to an
 * array of abi_ulong arguments in guest memory whose count depends on
 * the operation 'num'.
 * NOTE(review): reconstructed from a lossy extraction — confirm against
 * upstream qemu linux-user/syscall.c. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            int fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
2280 #define N_SHM_REGIONS 32
2282 static struct shm_region
{
2285 } shm_regions
[N_SHM_REGIONS
];
2287 struct target_ipc_perm
2294 unsigned short int mode
;
2295 unsigned short int __pad1
;
2296 unsigned short int __seq
;
2297 unsigned short int __pad2
;
2298 abi_ulong __unused1
;
2299 abi_ulong __unused2
;
2302 struct target_semid_ds
2304 struct target_ipc_perm sem_perm
;
2305 abi_ulong sem_otime
;
2306 abi_ulong __unused1
;
2307 abi_ulong sem_ctime
;
2308 abi_ulong __unused2
;
2309 abi_ulong sem_nsems
;
2310 abi_ulong __unused3
;
2311 abi_ulong __unused4
;
2314 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2315 abi_ulong target_addr
)
2317 struct target_ipc_perm
*target_ip
;
2318 struct target_semid_ds
*target_sd
;
2320 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2321 return -TARGET_EFAULT
;
2322 target_ip
= &(target_sd
->sem_perm
);
2323 host_ip
->__key
= tswapl(target_ip
->__key
);
2324 host_ip
->uid
= tswapl(target_ip
->uid
);
2325 host_ip
->gid
= tswapl(target_ip
->gid
);
2326 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2327 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2328 host_ip
->mode
= tswapl(target_ip
->mode
);
2329 unlock_user_struct(target_sd
, target_addr
, 0);
2333 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2334 struct ipc_perm
*host_ip
)
2336 struct target_ipc_perm
*target_ip
;
2337 struct target_semid_ds
*target_sd
;
2339 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2340 return -TARGET_EFAULT
;
2341 target_ip
= &(target_sd
->sem_perm
);
2342 target_ip
->__key
= tswapl(host_ip
->__key
);
2343 target_ip
->uid
= tswapl(host_ip
->uid
);
2344 target_ip
->gid
= tswapl(host_ip
->gid
);
2345 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2346 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2347 target_ip
->mode
= tswapl(host_ip
->mode
);
2348 unlock_user_struct(target_sd
, target_addr
, 1);
2352 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2353 abi_ulong target_addr
)
2355 struct target_semid_ds
*target_sd
;
2357 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2358 return -TARGET_EFAULT
;
2359 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2360 return -TARGET_EFAULT
;
2361 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2362 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2363 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2364 unlock_user_struct(target_sd
, target_addr
, 0);
2368 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2369 struct semid_ds
*host_sd
)
2371 struct target_semid_ds
*target_sd
;
2373 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2374 return -TARGET_EFAULT
;
2375 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2376 return -TARGET_EFAULT
;;
2377 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2378 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2379 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2380 unlock_user_struct(target_sd
, target_addr
, 1);
2384 struct target_seminfo
{
2397 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2398 struct seminfo
*host_seminfo
)
2400 struct target_seminfo
*target_seminfo
;
2401 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2402 return -TARGET_EFAULT
;
2403 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2404 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2405 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2406 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2407 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2408 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2409 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2410 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2411 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2412 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2413 unlock_user_struct(target_seminfo
, target_addr
, 1);
2419 struct semid_ds
*buf
;
2420 unsigned short *array
;
2421 struct seminfo
*__buf
;
2424 union target_semun
{
2431 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2432 abi_ulong target_addr
)
2435 unsigned short *array
;
2437 struct semid_ds semid_ds
;
2440 semun
.buf
= &semid_ds
;
2442 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2444 return get_errno(ret
);
2446 nsems
= semid_ds
.sem_nsems
;
2448 *host_array
= malloc(nsems
*sizeof(unsigned short));
2449 array
= lock_user(VERIFY_READ
, target_addr
,
2450 nsems
*sizeof(unsigned short), 1);
2452 return -TARGET_EFAULT
;
2454 for(i
=0; i
<nsems
; i
++) {
2455 __get_user((*host_array
)[i
], &array
[i
]);
2457 unlock_user(array
, target_addr
, 0);
2462 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2463 unsigned short **host_array
)
2466 unsigned short *array
;
2468 struct semid_ds semid_ds
;
2471 semun
.buf
= &semid_ds
;
2473 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2475 return get_errno(ret
);
2477 nsems
= semid_ds
.sem_nsems
;
2479 array
= lock_user(VERIFY_WRITE
, target_addr
,
2480 nsems
*sizeof(unsigned short), 0);
2482 return -TARGET_EFAULT
;
2484 for(i
=0; i
<nsems
; i
++) {
2485 __put_user((*host_array
)[i
], &array
[i
]);
2488 unlock_user(array
, target_addr
, 1);
2493 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2494 union target_semun target_su
)
2497 struct semid_ds dsarg
;
2498 unsigned short *array
= NULL
;
2499 struct seminfo seminfo
;
2500 abi_long ret
= -TARGET_EINVAL
;
2507 arg
.val
= tswapl(target_su
.val
);
2508 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2509 target_su
.val
= tswapl(arg
.val
);
2513 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2517 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2518 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2525 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2529 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2530 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2536 arg
.__buf
= &seminfo
;
2537 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2538 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2546 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2553 struct target_sembuf
{
2554 unsigned short sem_num
;
2559 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2560 abi_ulong target_addr
,
2563 struct target_sembuf
*target_sembuf
;
2566 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2567 nsops
*sizeof(struct target_sembuf
), 1);
2569 return -TARGET_EFAULT
;
2571 for(i
=0; i
<nsops
; i
++) {
2572 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2573 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2574 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2577 unlock_user(target_sembuf
, target_addr
, 0);
2582 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2584 struct sembuf sops
[nsops
];
2586 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2587 return -TARGET_EFAULT
;
2589 return semop(semid
, sops
, nsops
);
2592 struct target_msqid_ds
2594 struct target_ipc_perm msg_perm
;
2595 abi_ulong msg_stime
;
2596 #if TARGET_ABI_BITS == 32
2597 abi_ulong __unused1
;
2599 abi_ulong msg_rtime
;
2600 #if TARGET_ABI_BITS == 32
2601 abi_ulong __unused2
;
2603 abi_ulong msg_ctime
;
2604 #if TARGET_ABI_BITS == 32
2605 abi_ulong __unused3
;
2607 abi_ulong __msg_cbytes
;
2609 abi_ulong msg_qbytes
;
2610 abi_ulong msg_lspid
;
2611 abi_ulong msg_lrpid
;
2612 abi_ulong __unused4
;
2613 abi_ulong __unused5
;
2616 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2617 abi_ulong target_addr
)
2619 struct target_msqid_ds
*target_md
;
2621 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2622 return -TARGET_EFAULT
;
2623 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2624 return -TARGET_EFAULT
;
2625 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2626 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2627 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2628 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2629 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2630 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2631 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2632 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2633 unlock_user_struct(target_md
, target_addr
, 0);
2637 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2638 struct msqid_ds
*host_md
)
2640 struct target_msqid_ds
*target_md
;
2642 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2643 return -TARGET_EFAULT
;
2644 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2645 return -TARGET_EFAULT
;
2646 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2647 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2648 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2649 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2650 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2651 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2652 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2653 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2654 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout msginfo, as returned by msgctl(IPC_INFO/MSG_INFO).
 * Field set matches what host_to_target_msginfo() copies. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
2669 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2670 struct msginfo
*host_msginfo
)
2672 struct target_msginfo
*target_msginfo
;
2673 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2674 return -TARGET_EFAULT
;
2675 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2676 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2677 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2678 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2679 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2680 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2681 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2682 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2683 unlock_user_struct(target_msginfo
, target_addr
, 1);
2687 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2689 struct msqid_ds dsarg
;
2690 struct msginfo msginfo
;
2691 abi_long ret
= -TARGET_EINVAL
;
2699 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2700 return -TARGET_EFAULT
;
2701 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2702 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2703 return -TARGET_EFAULT
;
2706 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2710 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2711 if (host_to_target_msginfo(ptr
, &msginfo
))
2712 return -TARGET_EFAULT
;
2719 struct target_msgbuf
{
2724 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2725 unsigned int msgsz
, int msgflg
)
2727 struct target_msgbuf
*target_mb
;
2728 struct msgbuf
*host_mb
;
2731 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2732 return -TARGET_EFAULT
;
2733 host_mb
= malloc(msgsz
+sizeof(long));
2734 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2735 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2736 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2738 unlock_user_struct(target_mb
, msgp
, 0);
2743 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2744 unsigned int msgsz
, abi_long msgtyp
,
2747 struct target_msgbuf
*target_mb
;
2749 struct msgbuf
*host_mb
;
2752 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2753 return -TARGET_EFAULT
;
2755 host_mb
= malloc(msgsz
+sizeof(long));
2756 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2759 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2760 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2761 if (!target_mtext
) {
2762 ret
= -TARGET_EFAULT
;
2765 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2766 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2769 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2774 unlock_user_struct(target_mb
, msgp
, 1);
2778 struct target_shmid_ds
2780 struct target_ipc_perm shm_perm
;
2781 abi_ulong shm_segsz
;
2782 abi_ulong shm_atime
;
2783 #if TARGET_ABI_BITS == 32
2784 abi_ulong __unused1
;
2786 abi_ulong shm_dtime
;
2787 #if TARGET_ABI_BITS == 32
2788 abi_ulong __unused2
;
2790 abi_ulong shm_ctime
;
2791 #if TARGET_ABI_BITS == 32
2792 abi_ulong __unused3
;
2796 abi_ulong shm_nattch
;
2797 unsigned long int __unused4
;
2798 unsigned long int __unused5
;
2801 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2802 abi_ulong target_addr
)
2804 struct target_shmid_ds
*target_sd
;
2806 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2807 return -TARGET_EFAULT
;
2808 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2809 return -TARGET_EFAULT
;
2810 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2811 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2812 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2813 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2814 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2815 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2816 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2817 unlock_user_struct(target_sd
, target_addr
, 0);
2821 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2822 struct shmid_ds
*host_sd
)
2824 struct target_shmid_ds
*target_sd
;
2826 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2827 return -TARGET_EFAULT
;
2828 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2829 return -TARGET_EFAULT
;
2830 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2831 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2832 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2833 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2834 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2835 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2836 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2837 unlock_user_struct(target_sd
, target_addr
, 1);
2841 struct target_shminfo
{
2849 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2850 struct shminfo
*host_shminfo
)
2852 struct target_shminfo
*target_shminfo
;
2853 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2854 return -TARGET_EFAULT
;
2855 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2856 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2857 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2858 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2859 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2860 unlock_user_struct(target_shminfo
, target_addr
, 1);
2864 struct target_shm_info
{
2869 abi_ulong swap_attempts
;
2870 abi_ulong swap_successes
;
2873 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2874 struct shm_info
*host_shm_info
)
2876 struct target_shm_info
*target_shm_info
;
2877 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2878 return -TARGET_EFAULT
;
2879 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2880 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2881 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2882 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2883 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2884 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2885 unlock_user_struct(target_shm_info
, target_addr
, 1);
2889 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2891 struct shmid_ds dsarg
;
2892 struct shminfo shminfo
;
2893 struct shm_info shm_info
;
2894 abi_long ret
= -TARGET_EINVAL
;
2902 if (target_to_host_shmid_ds(&dsarg
, buf
))
2903 return -TARGET_EFAULT
;
2904 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2905 if (host_to_target_shmid_ds(buf
, &dsarg
))
2906 return -TARGET_EFAULT
;
2909 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2910 if (host_to_target_shminfo(buf
, &shminfo
))
2911 return -TARGET_EFAULT
;
2914 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2915 if (host_to_target_shm_info(buf
, &shm_info
))
2916 return -TARGET_EFAULT
;
2921 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2928 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2932 struct shmid_ds shm_info
;
2935 /* find out the length of the shared memory segment */
2936 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2937 if (is_error(ret
)) {
2938 /* can't get length, bail out */
2945 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2947 abi_ulong mmap_start
;
2949 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2951 if (mmap_start
== -1) {
2953 host_raddr
= (void *)-1;
2955 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2958 if (host_raddr
== (void *)-1) {
2960 return get_errno((long)host_raddr
);
2962 raddr
=h2g((unsigned long)host_raddr
);
2964 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2965 PAGE_VALID
| PAGE_READ
|
2966 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2968 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2969 if (shm_regions
[i
].start
== 0) {
2970 shm_regions
[i
].start
= raddr
;
2971 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2981 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
2985 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2986 if (shm_regions
[i
].start
== shmaddr
) {
2987 shm_regions
[i
].start
= 0;
2988 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
2993 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                /* NOTE(review): tmp->msgp/msgtyp come from guest memory and
                 * look like they may need byteswapping on cross-endian
                 * setups — confirm against do_msgrcv()'s own tswapl. */
                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported */
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3097 /* kernel structure types definitions */
3099 #define STRUCT(name, ...) STRUCT_ ## name,
3100 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3102 #include "syscall_types.h"
3105 #undef STRUCT_SPECIAL
3107 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3108 #define STRUCT_SPECIAL(name)
3109 #include "syscall_types.h"
3111 #undef STRUCT_SPECIAL
3113 typedef struct IOCTLEntry IOCTLEntry
;
3115 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3116 int fd
, abi_long cmd
, abi_long arg
);
3119 unsigned int target_cmd
;
3120 unsigned int host_cmd
;
3123 do_ioctl_fn
*do_ioctl
;
3124 const argtype arg_type
[5];
3127 #define IOC_R 0x0001
3128 #define IOC_W 0x0002
3129 #define IOC_RW (IOC_R | IOC_W)
3131 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Special-case handler for FS_IOC_FIEMAP. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif
3222 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3223 int fd
, abi_long cmd
, abi_long arg
)
3225 const argtype
*arg_type
= ie
->arg_type
;
3229 struct ifconf
*host_ifconf
;
3231 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3232 int target_ifreq_size
;
3237 abi_long target_ifc_buf
;
3241 assert(arg_type
[0] == TYPE_PTR
);
3242 assert(ie
->access
== IOC_RW
);
3245 target_size
= thunk_type_size(arg_type
, 0);
3247 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3249 return -TARGET_EFAULT
;
3250 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3251 unlock_user(argptr
, arg
, 0);
3253 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3254 target_ifc_len
= host_ifconf
->ifc_len
;
3255 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3257 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3258 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3259 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3261 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3262 if (outbufsz
> MAX_STRUCT_SIZE
) {
3263 /* We can't fit all the extents into the fixed size buffer.
3264 * Allocate one that is large enough and use it instead.
3266 host_ifconf
= malloc(outbufsz
);
3268 return -TARGET_ENOMEM
;
3270 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3273 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3275 host_ifconf
->ifc_len
= host_ifc_len
;
3276 host_ifconf
->ifc_buf
= host_ifc_buf
;
3278 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3279 if (!is_error(ret
)) {
3280 /* convert host ifc_len to target ifc_len */
3282 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3283 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3284 host_ifconf
->ifc_len
= target_ifc_len
;
3286 /* restore target ifc_buf */
3288 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3290 /* copy struct ifconf to target user */
3292 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3294 return -TARGET_EFAULT
;
3295 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3296 unlock_user(argptr
, arg
, target_size
);
3298 /* copy ifreq[] to target user */
3300 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3301 for (i
= 0; i
< nb_ifreq
; i
++) {
3302 thunk_convert(argptr
+ i
* target_ifreq_size
,
3303 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3304 ifreq_arg_type
, THUNK_TARGET
);
3306 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3316 static IOCTLEntry ioctl_entries
[] = {
3317 #define IOCTL(cmd, access, ...) \
3318 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3319 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3320 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3325 /* ??? Implement proper locking for ioctls. */
3326 /* do_ioctl() Must return target values and target errnos. */
3327 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3329 const IOCTLEntry
*ie
;
3330 const argtype
*arg_type
;
3332 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3338 if (ie
->target_cmd
== 0) {
3339 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3340 return -TARGET_ENOSYS
;
3342 if (ie
->target_cmd
== cmd
)
3346 arg_type
= ie
->arg_type
;
3348 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3351 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3354 switch(arg_type
[0]) {
3357 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3362 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3366 target_size
= thunk_type_size(arg_type
, 0);
3367 switch(ie
->access
) {
3369 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3370 if (!is_error(ret
)) {
3371 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3373 return -TARGET_EFAULT
;
3374 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3375 unlock_user(argptr
, arg
, target_size
);
3379 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3381 return -TARGET_EFAULT
;
3382 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3383 unlock_user(argptr
, arg
, 0);
3384 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3388 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3390 return -TARGET_EFAULT
;
3391 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3392 unlock_user(argptr
, arg
, 0);
3393 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3394 if (!is_error(ret
)) {
3395 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3397 return -TARGET_EFAULT
;
3398 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3399 unlock_user(argptr
, arg
, target_size
);
3405 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3406 (long)cmd
, arg_type
[0]);
3407 ret
= -TARGET_ENOSYS
;
3413 static const bitmask_transtbl iflag_tbl
[] = {
3414 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3415 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3416 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3417 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3418 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3419 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3420 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3421 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3422 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3423 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3424 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3425 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3426 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3427 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3431 static const bitmask_transtbl oflag_tbl
[] = {
3432 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3433 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3434 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3435 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3436 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3437 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3438 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3439 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3440 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3441 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3442 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3443 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3444 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3445 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3446 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3447 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3448 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3449 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3450 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3451 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3452 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3453 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3454 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3455 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3459 static const bitmask_transtbl cflag_tbl
[] = {
3460 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3461 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3462 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3463 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3464 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3465 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3466 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3467 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3468 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3469 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3470 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3471 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3472 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3473 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3474 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3475 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3476 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3477 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3478 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3479 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3480 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3481 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3482 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3483 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3484 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3485 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3486 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3487 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3488 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3489 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3490 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3494 static const bitmask_transtbl lflag_tbl
[] = {
3495 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3496 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3497 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3498 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3499 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3500 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3501 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3502 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3503 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3504 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3505 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3506 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3507 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3508 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3509 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3513 static void target_to_host_termios (void *dst
, const void *src
)
3515 struct host_termios
*host
= dst
;
3516 const struct target_termios
*target
= src
;
3519 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3521 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3523 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3525 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3526 host
->c_line
= target
->c_line
;
3528 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3529 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3530 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3531 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3532 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3533 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3534 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3535 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3536 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3537 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3538 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3539 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3540 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3541 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3542 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3543 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3544 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3545 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3548 static void host_to_target_termios (void *dst
, const void *src
)
3550 struct target_termios
*target
= dst
;
3551 const struct host_termios
*host
= src
;
3554 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3556 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3558 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3560 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3561 target
->c_line
= host
->c_line
;
3563 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3564 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3565 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3566 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3567 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3568 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3569 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3570 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3571 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3572 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3573 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3574 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3575 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3576 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3577 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3578 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3579 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3580 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3583 static const StructEntry struct_termios_def
= {
3584 .convert
= { host_to_target_termios
, target_to_host_termios
},
3585 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3586 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3589 static bitmask_transtbl mmap_flags_tbl
[] = {
3590 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3591 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3592 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3593 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3594 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3595 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3596 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3597 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3601 #if defined(TARGET_I386)
3603 /* NOTE: there is really one LDT for all the threads */
3604 static uint8_t *ldt_table
;
3606 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3613 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3614 if (size
> bytecount
)
3616 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3618 return -TARGET_EFAULT
;
3619 /* ??? Should this by byteswapped? */
3620 memcpy(p
, ldt_table
, size
);
3621 unlock_user(p
, ptr
, size
);
3625 /* XXX: add locking support */
3626 static abi_long
write_ldt(CPUX86State
*env
,
3627 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3629 struct target_modify_ldt_ldt_s ldt_info
;
3630 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3631 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3632 int seg_not_present
, useable
, lm
;
3633 uint32_t *lp
, entry_1
, entry_2
;
3635 if (bytecount
!= sizeof(ldt_info
))
3636 return -TARGET_EINVAL
;
3637 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3638 return -TARGET_EFAULT
;
3639 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3640 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3641 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3642 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3643 unlock_user_struct(target_ldt_info
, ptr
, 0);
3645 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3646 return -TARGET_EINVAL
;
3647 seg_32bit
= ldt_info
.flags
& 1;
3648 contents
= (ldt_info
.flags
>> 1) & 3;
3649 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3650 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3651 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3652 useable
= (ldt_info
.flags
>> 6) & 1;
3656 lm
= (ldt_info
.flags
>> 7) & 1;
3658 if (contents
== 3) {
3660 return -TARGET_EINVAL
;
3661 if (seg_not_present
== 0)
3662 return -TARGET_EINVAL
;
3664 /* allocate the LDT */
3666 env
->ldt
.base
= target_mmap(0,
3667 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3668 PROT_READ
|PROT_WRITE
,
3669 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3670 if (env
->ldt
.base
== -1)
3671 return -TARGET_ENOMEM
;
3672 memset(g2h(env
->ldt
.base
), 0,
3673 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3674 env
->ldt
.limit
= 0xffff;
3675 ldt_table
= g2h(env
->ldt
.base
);
3678 /* NOTE: same code as Linux kernel */
3679 /* Allow LDTs to be cleared by the user. */
3680 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3683 read_exec_only
== 1 &&
3685 limit_in_pages
== 0 &&
3686 seg_not_present
== 1 &&
3694 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3695 (ldt_info
.limit
& 0x0ffff);
3696 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3697 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3698 (ldt_info
.limit
& 0xf0000) |
3699 ((read_exec_only
^ 1) << 9) |
3701 ((seg_not_present
^ 1) << 15) |
3703 (limit_in_pages
<< 23) |
3707 entry_2
|= (useable
<< 20);
3709 /* Install the new entry ... */
3711 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3712 lp
[0] = tswap32(entry_1
);
3713 lp
[1] = tswap32(entry_2
);
3717 /* specific and weird i386 syscalls */
/*
 * do_modify_ldt: front end for the i386 modify_ldt(2) syscall.
 * func selects the operation, ptr/bytecount describe the guest buffer.
 * Returns the byte count read, 0 on success, or -TARGET_ENOSYS for an
 * unknown func.
 * NOTE(review): the switch(func)/case scaffolding around the
 * assignments below was elided by the extraction; func is presumably
 * 0 = read, 1 = old-style write, 0x11 = new-style write -- confirm
 * against upstream.
 */
3718 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3719 unsigned long bytecount
)
/* read the LDT out to the guest buffer */
3725 ret
= read_ldt(ptr
, bytecount
);
/* legacy write: oldmode flag = 1 */
3728 ret
= write_ldt(env
, ptr
, bytecount
, 1);
/* new-style write: oldmode flag = 0 */
3731 ret
= write_ldt(env
, ptr
, bytecount
, 0);
/* unrecognised function code */
3734 ret
= -TARGET_ENOSYS
;
3740 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3741 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3743 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3744 struct target_modify_ldt_ldt_s ldt_info
;
3745 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3746 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3747 int seg_not_present
, useable
, lm
;
3748 uint32_t *lp
, entry_1
, entry_2
;
3751 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3752 if (!target_ldt_info
)
3753 return -TARGET_EFAULT
;
3754 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3755 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3756 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3757 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3758 if (ldt_info
.entry_number
== -1) {
3759 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3760 if (gdt_table
[i
] == 0) {
3761 ldt_info
.entry_number
= i
;
3762 target_ldt_info
->entry_number
= tswap32(i
);
3767 unlock_user_struct(target_ldt_info
, ptr
, 1);
3769 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3770 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3771 return -TARGET_EINVAL
;
3772 seg_32bit
= ldt_info
.flags
& 1;
3773 contents
= (ldt_info
.flags
>> 1) & 3;
3774 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3775 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3776 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3777 useable
= (ldt_info
.flags
>> 6) & 1;
3781 lm
= (ldt_info
.flags
>> 7) & 1;
3784 if (contents
== 3) {
3785 if (seg_not_present
== 0)
3786 return -TARGET_EINVAL
;
3789 /* NOTE: same code as Linux kernel */
3790 /* Allow LDTs to be cleared by the user. */
3791 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3792 if ((contents
== 0 &&
3793 read_exec_only
== 1 &&
3795 limit_in_pages
== 0 &&
3796 seg_not_present
== 1 &&
3804 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3805 (ldt_info
.limit
& 0x0ffff);
3806 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3807 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3808 (ldt_info
.limit
& 0xf0000) |
3809 ((read_exec_only
^ 1) << 9) |
3811 ((seg_not_present
^ 1) << 15) |
3813 (limit_in_pages
<< 23) |
3818 /* Install the new entry ... */
3820 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3821 lp
[0] = tswap32(entry_1
);
3822 lp
[1] = tswap32(entry_2
);
3826 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3828 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3829 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3830 uint32_t base_addr
, limit
, flags
;
3831 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3832 int seg_not_present
, useable
, lm
;
3833 uint32_t *lp
, entry_1
, entry_2
;
3835 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3836 if (!target_ldt_info
)
3837 return -TARGET_EFAULT
;
3838 idx
= tswap32(target_ldt_info
->entry_number
);
3839 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3840 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3841 unlock_user_struct(target_ldt_info
, ptr
, 1);
3842 return -TARGET_EINVAL
;
3844 lp
= (uint32_t *)(gdt_table
+ idx
);
3845 entry_1
= tswap32(lp
[0]);
3846 entry_2
= tswap32(lp
[1]);
3848 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3849 contents
= (entry_2
>> 10) & 3;
3850 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3851 seg_32bit
= (entry_2
>> 22) & 1;
3852 limit_in_pages
= (entry_2
>> 23) & 1;
3853 useable
= (entry_2
>> 20) & 1;
3857 lm
= (entry_2
>> 21) & 1;
3859 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3860 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3861 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3862 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3863 base_addr
= (entry_1
>> 16) |
3864 (entry_2
& 0xff000000) |
3865 ((entry_2
& 0xff) << 16);
3866 target_ldt_info
->base_addr
= tswapl(base_addr
);
3867 target_ldt_info
->limit
= tswap32(limit
);
3868 target_ldt_info
->flags
= tswap32(flags
);
3869 unlock_user_struct(target_ldt_info
, ptr
, 1);
3872 #endif /* TARGET_I386 && TARGET_ABI32 */
3874 #ifndef TARGET_ABI32
/*
 * do_arch_prctl: x86-64 arch_prctl(2) emulation.
 * ARCH_SET_FS/ARCH_SET_GS install addr as the FS/GS segment base;
 * ARCH_GET_FS/ARCH_GET_GS store the current base to guest memory at
 * addr.  Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL.
 * NOTE(review): the switch statement and the idx/val/ret declarations
 * were elided by the extraction; idx is presumably R_GS or R_FS
 * depending on code -- confirm against upstream.
 */
3875 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3882 case TARGET_ARCH_SET_GS
:
3883 case TARGET_ARCH_SET_FS
:
3884 if (code
== TARGET_ARCH_SET_GS
)
/* load a null selector, then force the segment base directly */
3888 cpu_x86_load_seg(env
, idx
, 0);
3889 env
->segs
[idx
].base
= addr
;
3891 case TARGET_ARCH_GET_GS
:
3892 case TARGET_ARCH_GET_FS
:
3893 if (code
== TARGET_ARCH_GET_GS
)
3897 val
= env
->segs
[idx
].base
;
/* copy the segment base out to guest memory */
3898 if (put_user(val
, addr
, abi_ulong
))
3899 ret
= -TARGET_EFAULT
;
/* unknown code value */
3902 ret
= -TARGET_EINVAL
;
3909 #endif /* defined(TARGET_I386) */
3911 #define NEW_STACK_SIZE 0x40000
3913 #if defined(CONFIG_USE_NPTL)
3915 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3918 pthread_mutex_t mutex
;
3919 pthread_cond_t cond
;
3922 abi_ulong child_tidptr
;
3923 abi_ulong parent_tidptr
;
/*
 * clone_func: start routine of a new guest thread created through the
 * NPTL clone() emulation.  Runs on the freshly created host pthread:
 * records the host TID, publishes it through the child/parent tid
 * pointers when requested, re-enables signals, wakes the parent via
 * the condvar so it can finish setup, then blocks on clone_lock until
 * the parent has completed TLS initialization.
 * NOTE(review): the declarations of env/ts and the trailing cpu_loop()
 * call were elided by the extraction.
 */
3927 static void *clone_func(void *arg
)
3929 new_thread_info
*info
= arg
;
3935 ts
= (TaskState
*)thread_env
->opaque
;
3936 info
->tid
= gettid();
3937 env
->host_tid
= info
->tid
;
3939 if (info
->child_tidptr
)
3940 put_user_u32(info
->tid
, info
->child_tidptr
);
3941 if (info
->parent_tidptr
)
3942 put_user_u32(info
->tid
, info
->parent_tidptr
);
3943 /* Enable signals. */
3944 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3945 /* Signal to the parent that we're ready. */
3946 pthread_mutex_lock(&info
->mutex
);
3947 pthread_cond_broadcast(&info
->cond
);
3948 pthread_mutex_unlock(&info
->mutex
);
3949 /* Wait until the parent has finished initializing the tls state. */
3950 pthread_mutex_lock(&clone_lock
);
3951 pthread_mutex_unlock(&clone_lock
);
3958 static int clone_func(void *arg
)
3960 CPUState
*env
= arg
;
3967 /* do_fork() Must return host values and target errnos (unlike most
3968 do_*() functions). */
3969 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3970 abi_ulong parent_tidptr
, target_ulong newtls
,
3971 abi_ulong child_tidptr
)
3976 #if defined(CONFIG_USE_NPTL)
3977 unsigned int nptl_flags
;
3983 /* Emulate vfork() with fork() */
3984 if (flags
& CLONE_VFORK
)
3985 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3987 if (flags
& CLONE_VM
) {
3988 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3989 #if defined(CONFIG_USE_NPTL)
3990 new_thread_info info
;
3991 pthread_attr_t attr
;
3993 ts
= g_malloc0(sizeof(TaskState
));
3994 init_task_state(ts
);
3995 /* we create a new CPU instance. */
3996 new_env
= cpu_copy(env
);
3997 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4000 /* Init regs that differ from the parent. */
4001 cpu_clone_regs(new_env
, newsp
);
4002 new_env
->opaque
= ts
;
4003 ts
->bprm
= parent_ts
->bprm
;
4004 ts
->info
= parent_ts
->info
;
4005 #if defined(CONFIG_USE_NPTL)
4007 flags
&= ~CLONE_NPTL_FLAGS2
;
4009 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4010 ts
->child_tidptr
= child_tidptr
;
4013 if (nptl_flags
& CLONE_SETTLS
)
4014 cpu_set_tls (new_env
, newtls
);
4016 /* Grab a mutex so that thread setup appears atomic. */
4017 pthread_mutex_lock(&clone_lock
);
4019 memset(&info
, 0, sizeof(info
));
4020 pthread_mutex_init(&info
.mutex
, NULL
);
4021 pthread_mutex_lock(&info
.mutex
);
4022 pthread_cond_init(&info
.cond
, NULL
);
4024 if (nptl_flags
& CLONE_CHILD_SETTID
)
4025 info
.child_tidptr
= child_tidptr
;
4026 if (nptl_flags
& CLONE_PARENT_SETTID
)
4027 info
.parent_tidptr
= parent_tidptr
;
4029 ret
= pthread_attr_init(&attr
);
4030 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4031 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4032 /* It is not safe to deliver signals until the child has finished
4033 initializing, so temporarily block all signals. */
4034 sigfillset(&sigmask
);
4035 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4037 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4038 /* TODO: Free new CPU state if thread creation failed. */
4040 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4041 pthread_attr_destroy(&attr
);
4043 /* Wait for the child to initialize. */
4044 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4046 if (flags
& CLONE_PARENT_SETTID
)
4047 put_user_u32(ret
, parent_tidptr
);
4051 pthread_mutex_unlock(&info
.mutex
);
4052 pthread_cond_destroy(&info
.cond
);
4053 pthread_mutex_destroy(&info
.mutex
);
4054 pthread_mutex_unlock(&clone_lock
);
4056 if (flags
& CLONE_NPTL_FLAGS2
)
4058 /* This is probably going to die very quickly, but do it anyway. */
4059 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4061 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4063 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4067 /* if no CLONE_VM, we consider it is a fork */
4068 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4073 /* Child Process. */
4074 cpu_clone_regs(env
, newsp
);
4076 #if defined(CONFIG_USE_NPTL)
4077 /* There is a race condition here. The parent process could
4078 theoretically read the TID in the child process before the child
4079 tid is set. This would require using either ptrace
4080 (not implemented) or having *_tidptr to point at a shared memory
4081 mapping. We can't repeat the spinlock hack used above because
4082 the child process gets its own copy of the lock. */
4083 if (flags
& CLONE_CHILD_SETTID
)
4084 put_user_u32(gettid(), child_tidptr
);
4085 if (flags
& CLONE_PARENT_SETTID
)
4086 put_user_u32(gettid(), parent_tidptr
);
4087 ts
= (TaskState
*)env
->opaque
;
4088 if (flags
& CLONE_SETTLS
)
4089 cpu_set_tls (env
, newtls
);
4090 if (flags
& CLONE_CHILD_CLEARTID
)
4091 ts
->child_tidptr
= child_tidptr
;
4100 /* warning : doesn't handle linux specific flags... */
/*
 * target_to_host_fcntl_cmd: map a target fcntl(2) command number to
 * the host's equivalent.  Returns -TARGET_EINVAL for commands with no
 * host mapping.
 * NOTE(review): the individual `return F_*;` lines for most cases were
 * elided by the extraction.
 */
4101 static int target_to_host_fcntl_cmd(int cmd
)
4104 case TARGET_F_DUPFD
:
4105 case TARGET_F_GETFD
:
4106 case TARGET_F_SETFD
:
4107 case TARGET_F_GETFL
:
4108 case TARGET_F_SETFL
:
4110 case TARGET_F_GETLK
:
4112 case TARGET_F_SETLK
:
4114 case TARGET_F_SETLKW
:
4116 case TARGET_F_GETOWN
:
4118 case TARGET_F_SETOWN
:
4120 case TARGET_F_GETSIG
:
4122 case TARGET_F_SETSIG
:
/* the 64-bit lock commands only exist on 32-bit ABIs */
4124 #if TARGET_ABI_BITS == 32
4125 case TARGET_F_GETLK64
:
4127 case TARGET_F_SETLK64
:
4129 case TARGET_F_SETLKW64
:
4132 case TARGET_F_SETLEASE
:
4134 case TARGET_F_GETLEASE
:
/* only when the host libc/kernel provides it */
4136 #ifdef F_DUPFD_CLOEXEC
4137 case TARGET_F_DUPFD_CLOEXEC
:
4138 return F_DUPFD_CLOEXEC
;
4140 case TARGET_F_NOTIFY
:
/* default: unknown command */
4143 return -TARGET_EINVAL
;
4145 return -TARGET_EINVAL
;
4148 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4151 struct target_flock
*target_fl
;
4152 struct flock64 fl64
;
4153 struct target_flock64
*target_fl64
;
4155 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4157 if (host_cmd
== -TARGET_EINVAL
)
4161 case TARGET_F_GETLK
:
4162 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4163 return -TARGET_EFAULT
;
4164 fl
.l_type
= tswap16(target_fl
->l_type
);
4165 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4166 fl
.l_start
= tswapl(target_fl
->l_start
);
4167 fl
.l_len
= tswapl(target_fl
->l_len
);
4168 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4169 unlock_user_struct(target_fl
, arg
, 0);
4170 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4172 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4173 return -TARGET_EFAULT
;
4174 target_fl
->l_type
= tswap16(fl
.l_type
);
4175 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4176 target_fl
->l_start
= tswapl(fl
.l_start
);
4177 target_fl
->l_len
= tswapl(fl
.l_len
);
4178 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4179 unlock_user_struct(target_fl
, arg
, 1);
4183 case TARGET_F_SETLK
:
4184 case TARGET_F_SETLKW
:
4185 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4186 return -TARGET_EFAULT
;
4187 fl
.l_type
= tswap16(target_fl
->l_type
);
4188 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4189 fl
.l_start
= tswapl(target_fl
->l_start
);
4190 fl
.l_len
= tswapl(target_fl
->l_len
);
4191 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4192 unlock_user_struct(target_fl
, arg
, 0);
4193 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4196 case TARGET_F_GETLK64
:
4197 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4198 return -TARGET_EFAULT
;
4199 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4200 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4201 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4202 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4203 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4204 unlock_user_struct(target_fl64
, arg
, 0);
4205 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4207 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4208 return -TARGET_EFAULT
;
4209 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4210 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4211 target_fl64
->l_start
= tswapl(fl64
.l_start
);
4212 target_fl64
->l_len
= tswapl(fl64
.l_len
);
4213 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4214 unlock_user_struct(target_fl64
, arg
, 1);
4217 case TARGET_F_SETLK64
:
4218 case TARGET_F_SETLKW64
:
4219 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4220 return -TARGET_EFAULT
;
4221 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4222 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4223 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4224 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4225 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4226 unlock_user_struct(target_fl64
, arg
, 0);
4227 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4230 case TARGET_F_GETFL
:
4231 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4233 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4237 case TARGET_F_SETFL
:
4238 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4241 case TARGET_F_SETOWN
:
4242 case TARGET_F_GETOWN
:
4243 case TARGET_F_SETSIG
:
4244 case TARGET_F_GETSIG
:
4245 case TARGET_F_SETLEASE
:
4246 case TARGET_F_GETLEASE
:
4247 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4251 ret
= get_errno(fcntl(fd
, cmd
, arg
));
/*
 * 16-bit vs. native uid/gid conversion helpers.
 * With USE_UID16 the target ABI carries only 16-bit ids: high2low*
 * clamp a host id into 16 bits, low2high* widen a target id while
 * preserving the -1 "unchanged" sentinel, and tswapid byte-swaps the
 * 16-bit representation.  Without USE_UID16 these are all identity
 * functions.  (Function bodies were elided by the extraction.)
 */
4259 static inline int high2lowuid(int uid
)
4267 static inline int high2lowgid(int gid
)
4275 static inline int low2highuid(int uid
)
/* -1 in 16-bit form means "do not change"; keep it -1 when widening */
4277 if ((int16_t)uid
== -1)
4283 static inline int low2highgid(int gid
)
4285 if ((int16_t)gid
== -1)
4290 static inline int tswapid(int id
)
4294 #else /* !USE_UID16 */
4295 static inline int high2lowuid(int uid
)
4299 static inline int high2lowgid(int gid
)
4303 static inline int low2highuid(int uid
)
4307 static inline int low2highgid(int gid
)
4311 static inline int tswapid(int id
)
4315 #endif /* USE_UID16 */
4317 void syscall_init(void)
4320 const argtype
*arg_type
;
4324 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4325 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4326 #include "syscall_types.h"
4328 #undef STRUCT_SPECIAL
4330 /* we patch the ioctl size if necessary. We rely on the fact that
4331 no ioctl has all the bits at '1' in the size field */
4333 while (ie
->target_cmd
!= 0) {
4334 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4335 TARGET_IOC_SIZEMASK
) {
4336 arg_type
= ie
->arg_type
;
4337 if (arg_type
[0] != TYPE_PTR
) {
4338 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4343 size
= thunk_type_size(arg_type
, 0);
4344 ie
->target_cmd
= (ie
->target_cmd
&
4345 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4346 (size
<< TARGET_IOC_SIZESHIFT
);
4349 /* Build target_to_host_errno_table[] table from
4350 * host_to_target_errno_table[]. */
4351 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4352 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4354 /* automatic consistency check if same arch */
4355 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4356 (defined(__x86_64__) && defined(TARGET_X86_64))
4357 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4358 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4359 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Rebuild a 64-bit file offset from the two 32-bit registers a 32-bit
   ABI passes it in.  Target endianness decides which register carries
   the high half. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t hi, lo;

#ifdef TARGET_WORDS_BIGENDIAN
    hi = word0;
    lo = word1;
#else
    hi = word1;
    lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* On a 64-bit ABI the offset already fits in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64 for 32-bit ABIs: the 64-bit length arrives split across
   two syscall registers.  ABIs where regpairs_aligned() holds skip a
   register so the pair starts on an even index; compensate here. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(w0, w1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64 for 32-bit ABIs: same register-pair reassembly as
   target_truncate64, but on a file descriptor instead of a path. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(w0, w1)));
}
#endif
4410 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4411 abi_ulong target_addr
)
4413 struct target_timespec
*target_ts
;
4415 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4416 return -TARGET_EFAULT
;
4417 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
4418 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
4419 unlock_user_struct(target_ts
, target_addr
, 0);
4423 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4424 struct timespec
*host_ts
)
4426 struct target_timespec
*target_ts
;
4428 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4429 return -TARGET_EFAULT
;
4430 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
4431 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
4432 unlock_user_struct(target_ts
, target_addr
, 1);
4436 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4437 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4438 abi_ulong target_addr
,
4439 struct stat
*host_st
)
4442 if (((CPUARMState
*)cpu_env
)->eabi
) {
4443 struct target_eabi_stat64
*target_st
;
4445 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4446 return -TARGET_EFAULT
;
4447 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4448 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4449 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4450 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4451 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4453 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4454 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4455 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4456 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4457 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4458 __put_user(host_st
->st_size
, &target_st
->st_size
);
4459 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4460 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4461 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4462 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4463 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4464 unlock_user_struct(target_st
, target_addr
, 1);
4468 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4469 struct target_stat
*target_st
;
4471 struct target_stat64
*target_st
;
4474 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4475 return -TARGET_EFAULT
;
4476 memset(target_st
, 0, sizeof(*target_st
));
4477 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4478 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4479 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4480 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4482 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4483 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4484 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4485 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4486 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4487 /* XXX: better use of kernel struct */
4488 __put_user(host_st
->st_size
, &target_st
->st_size
);
4489 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4490 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4491 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4492 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4493 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4494 unlock_user_struct(target_st
, target_addr
, 1);
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
/* NOTE(review): the switch scaffolding was elided by the extraction;
   reconstructed here from the visible fragments -- confirm the elided
   case labels against upstream. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits hold the terminating signal; remap only those */
        int sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* the stop signal lives in bits 8..15 */
        int sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    return status;
}
4568 int get_osversion(void)
4570 static int osversion
;
4571 struct new_utsname buf
;
4576 if (qemu_uname_release
&& *qemu_uname_release
) {
4577 s
= qemu_uname_release
;
4579 if (sys_uname(&buf
))
4584 for (i
= 0; i
< 3; i
++) {
4586 while (*s
>= '0' && *s
<= '9') {
4591 tmp
= (tmp
<< 8) + n
;
4599 /* do_syscall() should always have a single exit point at the end so
4600 that actions, such as logging of syscall results, can be performed.
4601 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4602 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4603 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4604 abi_long arg5
, abi_long arg6
, abi_long arg7
,
4613 gemu_log("syscall %d", num
);
4616 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4619 case TARGET_NR_exit
:
4620 #ifdef CONFIG_USE_NPTL
4621 /* In old applications this may be used to implement _exit(2).
4622 However in threaded applictions it is used for thread termination,
4623 and _exit_group is used for application termination.
4624 Do thread termination if we have more then one thread. */
4625 /* FIXME: This probably breaks if a signal arrives. We should probably
4626 be disabling signals. */
4627 if (first_cpu
->next_cpu
) {
4635 while (p
&& p
!= (CPUState
*)cpu_env
) {
4636 lastp
= &p
->next_cpu
;
4639 /* If we didn't find the CPU for this thread then something is
4643 /* Remove the CPU from the list. */
4644 *lastp
= p
->next_cpu
;
4646 ts
= ((CPUState
*)cpu_env
)->opaque
;
4647 if (ts
->child_tidptr
) {
4648 put_user_u32(0, ts
->child_tidptr
);
4649 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4661 gdb_exit(cpu_env
, arg1
);
4663 ret
= 0; /* avoid warning */
4665 case TARGET_NR_read
:
4669 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4671 ret
= get_errno(read(arg1
, p
, arg3
));
4672 unlock_user(p
, arg2
, ret
);
4675 case TARGET_NR_write
:
4676 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4678 ret
= get_errno(write(arg1
, p
, arg3
));
4679 unlock_user(p
, arg2
, 0);
4681 case TARGET_NR_open
:
4682 if (!(p
= lock_user_string(arg1
)))
4684 ret
= get_errno(open(path(p
),
4685 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4687 unlock_user(p
, arg1
, 0);
4689 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4690 case TARGET_NR_openat
:
4691 if (!(p
= lock_user_string(arg2
)))
4693 ret
= get_errno(sys_openat(arg1
,
4695 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4697 unlock_user(p
, arg2
, 0);
4700 case TARGET_NR_close
:
4701 ret
= get_errno(close(arg1
));
4706 case TARGET_NR_fork
:
4707 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4709 #ifdef TARGET_NR_waitpid
4710 case TARGET_NR_waitpid
:
4713 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4714 if (!is_error(ret
) && arg2
4715 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4720 #ifdef TARGET_NR_waitid
4721 case TARGET_NR_waitid
:
4725 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4726 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4727 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4729 host_to_target_siginfo(p
, &info
);
4730 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4735 #ifdef TARGET_NR_creat /* not on alpha */
4736 case TARGET_NR_creat
:
4737 if (!(p
= lock_user_string(arg1
)))
4739 ret
= get_errno(creat(p
, arg2
));
4740 unlock_user(p
, arg1
, 0);
4743 case TARGET_NR_link
:
4746 p
= lock_user_string(arg1
);
4747 p2
= lock_user_string(arg2
);
4749 ret
= -TARGET_EFAULT
;
4751 ret
= get_errno(link(p
, p2
));
4752 unlock_user(p2
, arg2
, 0);
4753 unlock_user(p
, arg1
, 0);
4756 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4757 case TARGET_NR_linkat
:
4762 p
= lock_user_string(arg2
);
4763 p2
= lock_user_string(arg4
);
4765 ret
= -TARGET_EFAULT
;
4767 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4768 unlock_user(p
, arg2
, 0);
4769 unlock_user(p2
, arg4
, 0);
4773 case TARGET_NR_unlink
:
4774 if (!(p
= lock_user_string(arg1
)))
4776 ret
= get_errno(unlink(p
));
4777 unlock_user(p
, arg1
, 0);
4779 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4780 case TARGET_NR_unlinkat
:
4781 if (!(p
= lock_user_string(arg2
)))
4783 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4784 unlock_user(p
, arg2
, 0);
4787 case TARGET_NR_execve
:
4789 char **argp
, **envp
;
4792 abi_ulong guest_argp
;
4793 abi_ulong guest_envp
;
4799 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4800 if (get_user_ual(addr
, gp
))
4808 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4809 if (get_user_ual(addr
, gp
))
4816 argp
= alloca((argc
+ 1) * sizeof(void *));
4817 envp
= alloca((envc
+ 1) * sizeof(void *));
4819 for (gp
= guest_argp
, q
= argp
; gp
;
4820 gp
+= sizeof(abi_ulong
), q
++) {
4821 if (get_user_ual(addr
, gp
))
4825 if (!(*q
= lock_user_string(addr
)))
4830 for (gp
= guest_envp
, q
= envp
; gp
;
4831 gp
+= sizeof(abi_ulong
), q
++) {
4832 if (get_user_ual(addr
, gp
))
4836 if (!(*q
= lock_user_string(addr
)))
4841 if (!(p
= lock_user_string(arg1
)))
4843 ret
= get_errno(execve(p
, argp
, envp
));
4844 unlock_user(p
, arg1
, 0);
4849 ret
= -TARGET_EFAULT
;
4852 for (gp
= guest_argp
, q
= argp
; *q
;
4853 gp
+= sizeof(abi_ulong
), q
++) {
4854 if (get_user_ual(addr
, gp
)
4857 unlock_user(*q
, addr
, 0);
4859 for (gp
= guest_envp
, q
= envp
; *q
;
4860 gp
+= sizeof(abi_ulong
), q
++) {
4861 if (get_user_ual(addr
, gp
)
4864 unlock_user(*q
, addr
, 0);
4868 case TARGET_NR_chdir
:
4869 if (!(p
= lock_user_string(arg1
)))
4871 ret
= get_errno(chdir(p
));
4872 unlock_user(p
, arg1
, 0);
4874 #ifdef TARGET_NR_time
4875 case TARGET_NR_time
:
4878 ret
= get_errno(time(&host_time
));
4881 && put_user_sal(host_time
, arg1
))
4886 case TARGET_NR_mknod
:
4887 if (!(p
= lock_user_string(arg1
)))
4889 ret
= get_errno(mknod(p
, arg2
, arg3
));
4890 unlock_user(p
, arg1
, 0);
4892 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4893 case TARGET_NR_mknodat
:
4894 if (!(p
= lock_user_string(arg2
)))
4896 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4897 unlock_user(p
, arg2
, 0);
4900 case TARGET_NR_chmod
:
4901 if (!(p
= lock_user_string(arg1
)))
4903 ret
= get_errno(chmod(p
, arg2
));
4904 unlock_user(p
, arg1
, 0);
4906 #ifdef TARGET_NR_break
4907 case TARGET_NR_break
:
4910 #ifdef TARGET_NR_oldstat
4911 case TARGET_NR_oldstat
:
4914 case TARGET_NR_lseek
:
4915 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4917 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4918 /* Alpha specific */
4919 case TARGET_NR_getxpid
:
4920 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4921 ret
= get_errno(getpid());
4924 #ifdef TARGET_NR_getpid
4925 case TARGET_NR_getpid
:
4926 ret
= get_errno(getpid());
4929 case TARGET_NR_mount
:
4931 /* need to look at the data field */
4933 p
= lock_user_string(arg1
);
4934 p2
= lock_user_string(arg2
);
4935 p3
= lock_user_string(arg3
);
4936 if (!p
|| !p2
|| !p3
)
4937 ret
= -TARGET_EFAULT
;
4939 /* FIXME - arg5 should be locked, but it isn't clear how to
4940 * do that since it's not guaranteed to be a NULL-terminated
4944 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4946 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4948 unlock_user(p
, arg1
, 0);
4949 unlock_user(p2
, arg2
, 0);
4950 unlock_user(p3
, arg3
, 0);
4953 #ifdef TARGET_NR_umount
4954 case TARGET_NR_umount
:
4955 if (!(p
= lock_user_string(arg1
)))
4957 ret
= get_errno(umount(p
));
4958 unlock_user(p
, arg1
, 0);
4961 #ifdef TARGET_NR_stime /* not on alpha */
4962 case TARGET_NR_stime
:
4965 if (get_user_sal(host_time
, arg1
))
4967 ret
= get_errno(stime(&host_time
));
4971 case TARGET_NR_ptrace
:
4973 #ifdef TARGET_NR_alarm /* not on alpha */
4974 case TARGET_NR_alarm
:
4978 #ifdef TARGET_NR_oldfstat
4979 case TARGET_NR_oldfstat
:
4982 #ifdef TARGET_NR_pause /* not on alpha */
4983 case TARGET_NR_pause
:
4984 ret
= get_errno(pause());
4987 #ifdef TARGET_NR_utime
4988 case TARGET_NR_utime
:
4990 struct utimbuf tbuf
, *host_tbuf
;
4991 struct target_utimbuf
*target_tbuf
;
4993 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4995 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4996 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4997 unlock_user_struct(target_tbuf
, arg2
, 0);
5002 if (!(p
= lock_user_string(arg1
)))
5004 ret
= get_errno(utime(p
, host_tbuf
));
5005 unlock_user(p
, arg1
, 0);
5009 case TARGET_NR_utimes
:
5011 struct timeval
*tvp
, tv
[2];
5013 if (copy_from_user_timeval(&tv
[0], arg2
)
5014 || copy_from_user_timeval(&tv
[1],
5015 arg2
+ sizeof(struct target_timeval
)))
5021 if (!(p
= lock_user_string(arg1
)))
5023 ret
= get_errno(utimes(p
, tvp
));
5024 unlock_user(p
, arg1
, 0);
5027 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5028 case TARGET_NR_futimesat
:
5030 struct timeval
*tvp
, tv
[2];
5032 if (copy_from_user_timeval(&tv
[0], arg3
)
5033 || copy_from_user_timeval(&tv
[1],
5034 arg3
+ sizeof(struct target_timeval
)))
5040 if (!(p
= lock_user_string(arg2
)))
5042 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5043 unlock_user(p
, arg2
, 0);
5047 #ifdef TARGET_NR_stty
5048 case TARGET_NR_stty
:
5051 #ifdef TARGET_NR_gtty
5052 case TARGET_NR_gtty
:
5055 case TARGET_NR_access
:
5056 if (!(p
= lock_user_string(arg1
)))
5058 ret
= get_errno(access(path(p
), arg2
));
5059 unlock_user(p
, arg1
, 0);
5061 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5062 case TARGET_NR_faccessat
:
5063 if (!(p
= lock_user_string(arg2
)))
5065 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5066 unlock_user(p
, arg2
, 0);
5069 #ifdef TARGET_NR_nice /* not on alpha */
5070 case TARGET_NR_nice
:
5071 ret
= get_errno(nice(arg1
));
5074 #ifdef TARGET_NR_ftime
5075 case TARGET_NR_ftime
:
5078 case TARGET_NR_sync
:
5082 case TARGET_NR_kill
:
5083 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5085 case TARGET_NR_rename
:
5088 p
= lock_user_string(arg1
);
5089 p2
= lock_user_string(arg2
);
5091 ret
= -TARGET_EFAULT
;
5093 ret
= get_errno(rename(p
, p2
));
5094 unlock_user(p2
, arg2
, 0);
5095 unlock_user(p
, arg1
, 0);
5098 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5099 case TARGET_NR_renameat
:
5102 p
= lock_user_string(arg2
);
5103 p2
= lock_user_string(arg4
);
5105 ret
= -TARGET_EFAULT
;
5107 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5108 unlock_user(p2
, arg4
, 0);
5109 unlock_user(p
, arg2
, 0);
5113 case TARGET_NR_mkdir
:
5114 if (!(p
= lock_user_string(arg1
)))
5116 ret
= get_errno(mkdir(p
, arg2
));
5117 unlock_user(p
, arg1
, 0);
5119 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5120 case TARGET_NR_mkdirat
:
5121 if (!(p
= lock_user_string(arg2
)))
5123 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5124 unlock_user(p
, arg2
, 0);
5127 case TARGET_NR_rmdir
:
5128 if (!(p
= lock_user_string(arg1
)))
5130 ret
= get_errno(rmdir(p
));
5131 unlock_user(p
, arg1
, 0);
5134 ret
= get_errno(dup(arg1
));
5136 case TARGET_NR_pipe
:
5137 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5139 #ifdef TARGET_NR_pipe2
5140 case TARGET_NR_pipe2
:
5141 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5144 case TARGET_NR_times
:
5146 struct target_tms
*tmsp
;
5148 ret
= get_errno(times(&tms
));
5150 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5153 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
5154 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
5155 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
5156 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
5159 ret
= host_to_target_clock_t(ret
);
5162 #ifdef TARGET_NR_prof
5163 case TARGET_NR_prof
:
5166 #ifdef TARGET_NR_signal
5167 case TARGET_NR_signal
:
5170 case TARGET_NR_acct
:
5172 ret
= get_errno(acct(NULL
));
5174 if (!(p
= lock_user_string(arg1
)))
5176 ret
= get_errno(acct(path(p
)));
5177 unlock_user(p
, arg1
, 0);
5180 #ifdef TARGET_NR_umount2 /* not on alpha */
5181 case TARGET_NR_umount2
:
5182 if (!(p
= lock_user_string(arg1
)))
5184 ret
= get_errno(umount2(p
, arg2
));
5185 unlock_user(p
, arg1
, 0);
5188 #ifdef TARGET_NR_lock
5189 case TARGET_NR_lock
:
5192 case TARGET_NR_ioctl
:
5193 ret
= do_ioctl(arg1
, arg2
, arg3
);
5195 case TARGET_NR_fcntl
:
5196 ret
= do_fcntl(arg1
, arg2
, arg3
);
5198 #ifdef TARGET_NR_mpx
5202 case TARGET_NR_setpgid
:
5203 ret
= get_errno(setpgid(arg1
, arg2
));
5205 #ifdef TARGET_NR_ulimit
5206 case TARGET_NR_ulimit
:
5209 #ifdef TARGET_NR_oldolduname
5210 case TARGET_NR_oldolduname
:
5213 case TARGET_NR_umask
:
5214 ret
= get_errno(umask(arg1
));
5216 case TARGET_NR_chroot
:
5217 if (!(p
= lock_user_string(arg1
)))
5219 ret
= get_errno(chroot(p
));
5220 unlock_user(p
, arg1
, 0);
5222 case TARGET_NR_ustat
:
5224 case TARGET_NR_dup2
:
5225 ret
= get_errno(dup2(arg1
, arg2
));
5227 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5228 case TARGET_NR_dup3
:
5229 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5232 #ifdef TARGET_NR_getppid /* not on alpha */
5233 case TARGET_NR_getppid
:
5234 ret
= get_errno(getppid());
5237 case TARGET_NR_getpgrp
:
5238 ret
= get_errno(getpgrp());
5240 case TARGET_NR_setsid
:
5241 ret
= get_errno(setsid());
5243 #ifdef TARGET_NR_sigaction
5244 case TARGET_NR_sigaction
:
5246 #if defined(TARGET_ALPHA)
5247 struct target_sigaction act
, oact
, *pact
= 0;
5248 struct target_old_sigaction
*old_act
;
5250 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5252 act
._sa_handler
= old_act
->_sa_handler
;
5253 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5254 act
.sa_flags
= old_act
->sa_flags
;
5255 act
.sa_restorer
= 0;
5256 unlock_user_struct(old_act
, arg2
, 0);
5259 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5260 if (!is_error(ret
) && arg3
) {
5261 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5263 old_act
->_sa_handler
= oact
._sa_handler
;
5264 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5265 old_act
->sa_flags
= oact
.sa_flags
;
5266 unlock_user_struct(old_act
, arg3
, 1);
5268 #elif defined(TARGET_MIPS)
5269 struct target_sigaction act
, oact
, *pact
, *old_act
;
5272 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5274 act
._sa_handler
= old_act
->_sa_handler
;
5275 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5276 act
.sa_flags
= old_act
->sa_flags
;
5277 unlock_user_struct(old_act
, arg2
, 0);
5283 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5285 if (!is_error(ret
) && arg3
) {
5286 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5288 old_act
->_sa_handler
= oact
._sa_handler
;
5289 old_act
->sa_flags
= oact
.sa_flags
;
5290 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5291 old_act
->sa_mask
.sig
[1] = 0;
5292 old_act
->sa_mask
.sig
[2] = 0;
5293 old_act
->sa_mask
.sig
[3] = 0;
5294 unlock_user_struct(old_act
, arg3
, 1);
5297 struct target_old_sigaction
*old_act
;
5298 struct target_sigaction act
, oact
, *pact
;
5300 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5302 act
._sa_handler
= old_act
->_sa_handler
;
5303 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5304 act
.sa_flags
= old_act
->sa_flags
;
5305 act
.sa_restorer
= old_act
->sa_restorer
;
5306 unlock_user_struct(old_act
, arg2
, 0);
5311 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5312 if (!is_error(ret
) && arg3
) {
5313 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5315 old_act
->_sa_handler
= oact
._sa_handler
;
5316 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5317 old_act
->sa_flags
= oact
.sa_flags
;
5318 old_act
->sa_restorer
= oact
.sa_restorer
;
5319 unlock_user_struct(old_act
, arg3
, 1);
5325 case TARGET_NR_rt_sigaction
:
5327 #if defined(TARGET_ALPHA)
5328 struct target_sigaction act
, oact
, *pact
= 0;
5329 struct target_rt_sigaction
*rt_act
;
5330 /* ??? arg4 == sizeof(sigset_t). */
5332 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5334 act
._sa_handler
= rt_act
->_sa_handler
;
5335 act
.sa_mask
= rt_act
->sa_mask
;
5336 act
.sa_flags
= rt_act
->sa_flags
;
5337 act
.sa_restorer
= arg5
;
5338 unlock_user_struct(rt_act
, arg2
, 0);
5341 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5342 if (!is_error(ret
) && arg3
) {
5343 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5345 rt_act
->_sa_handler
= oact
._sa_handler
;
5346 rt_act
->sa_mask
= oact
.sa_mask
;
5347 rt_act
->sa_flags
= oact
.sa_flags
;
5348 unlock_user_struct(rt_act
, arg3
, 1);
5351 struct target_sigaction
*act
;
5352 struct target_sigaction
*oact
;
5355 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5360 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5361 ret
= -TARGET_EFAULT
;
5362 goto rt_sigaction_fail
;
5366 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5369 unlock_user_struct(act
, arg2
, 0);
5371 unlock_user_struct(oact
, arg3
, 1);
5375 #ifdef TARGET_NR_sgetmask /* not on alpha */
5376 case TARGET_NR_sgetmask
:
5379 abi_ulong target_set
;
5380 sigprocmask(0, NULL
, &cur_set
);
5381 host_to_target_old_sigset(&target_set
, &cur_set
);
5386 #ifdef TARGET_NR_ssetmask /* not on alpha */
5387 case TARGET_NR_ssetmask
:
5389 sigset_t set
, oset
, cur_set
;
5390 abi_ulong target_set
= arg1
;
5391 sigprocmask(0, NULL
, &cur_set
);
5392 target_to_host_old_sigset(&set
, &target_set
);
5393 sigorset(&set
, &set
, &cur_set
);
5394 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5395 host_to_target_old_sigset(&target_set
, &oset
);
5400 #ifdef TARGET_NR_sigprocmask
5401 case TARGET_NR_sigprocmask
:
5403 #if defined(TARGET_ALPHA)
5404 sigset_t set
, oldset
;
5409 case TARGET_SIG_BLOCK
:
5412 case TARGET_SIG_UNBLOCK
:
5415 case TARGET_SIG_SETMASK
:
5419 ret
= -TARGET_EINVAL
;
5423 target_to_host_old_sigset(&set
, &mask
);
5425 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5427 if (!is_error(ret
)) {
5428 host_to_target_old_sigset(&mask
, &oldset
);
5430 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5433 sigset_t set
, oldset
, *set_ptr
;
5438 case TARGET_SIG_BLOCK
:
5441 case TARGET_SIG_UNBLOCK
:
5444 case TARGET_SIG_SETMASK
:
5448 ret
= -TARGET_EINVAL
;
5451 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5453 target_to_host_old_sigset(&set
, p
);
5454 unlock_user(p
, arg2
, 0);
5460 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5461 if (!is_error(ret
) && arg3
) {
5462 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5464 host_to_target_old_sigset(p
, &oldset
);
5465 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5471 case TARGET_NR_rt_sigprocmask
:
5474 sigset_t set
, oldset
, *set_ptr
;
5478 case TARGET_SIG_BLOCK
:
5481 case TARGET_SIG_UNBLOCK
:
5484 case TARGET_SIG_SETMASK
:
5488 ret
= -TARGET_EINVAL
;
5491 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5493 target_to_host_sigset(&set
, p
);
5494 unlock_user(p
, arg2
, 0);
5500 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5501 if (!is_error(ret
) && arg3
) {
5502 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5504 host_to_target_sigset(p
, &oldset
);
5505 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5509 #ifdef TARGET_NR_sigpending
5510 case TARGET_NR_sigpending
:
5513 ret
= get_errno(sigpending(&set
));
5514 if (!is_error(ret
)) {
5515 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5517 host_to_target_old_sigset(p
, &set
);
5518 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5523 case TARGET_NR_rt_sigpending
:
5526 ret
= get_errno(sigpending(&set
));
5527 if (!is_error(ret
)) {
5528 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5530 host_to_target_sigset(p
, &set
);
5531 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5535 #ifdef TARGET_NR_sigsuspend
5536 case TARGET_NR_sigsuspend
:
5539 #if defined(TARGET_ALPHA)
5540 abi_ulong mask
= arg1
;
5541 target_to_host_old_sigset(&set
, &mask
);
5543 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5545 target_to_host_old_sigset(&set
, p
);
5546 unlock_user(p
, arg1
, 0);
5548 ret
= get_errno(sigsuspend(&set
));
5552 case TARGET_NR_rt_sigsuspend
:
5555 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5557 target_to_host_sigset(&set
, p
);
5558 unlock_user(p
, arg1
, 0);
5559 ret
= get_errno(sigsuspend(&set
));
5562 case TARGET_NR_rt_sigtimedwait
:
5565 struct timespec uts
, *puts
;
5568 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5570 target_to_host_sigset(&set
, p
);
5571 unlock_user(p
, arg1
, 0);
5574 target_to_host_timespec(puts
, arg3
);
5578 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5579 if (!is_error(ret
) && arg2
) {
5580 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5582 host_to_target_siginfo(p
, &uinfo
);
5583 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5587 case TARGET_NR_rt_sigqueueinfo
:
5590 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5592 target_to_host_siginfo(&uinfo
, p
);
5593 unlock_user(p
, arg1
, 0);
5594 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5597 #ifdef TARGET_NR_sigreturn
5598 case TARGET_NR_sigreturn
:
5599 /* NOTE: ret is eax, so not transcoding must be done */
5600 ret
= do_sigreturn(cpu_env
);
5603 case TARGET_NR_rt_sigreturn
:
5604 /* NOTE: ret is eax, so not transcoding must be done */
5605 ret
= do_rt_sigreturn(cpu_env
);
5607 case TARGET_NR_sethostname
:
5608 if (!(p
= lock_user_string(arg1
)))
5610 ret
= get_errno(sethostname(p
, arg2
));
5611 unlock_user(p
, arg1
, 0);
5613 case TARGET_NR_setrlimit
:
5615 int resource
= target_to_host_resource(arg1
);
5616 struct target_rlimit
*target_rlim
;
5618 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5620 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5621 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5622 unlock_user_struct(target_rlim
, arg2
, 0);
5623 ret
= get_errno(setrlimit(resource
, &rlim
));
5626 case TARGET_NR_getrlimit
:
5628 int resource
= target_to_host_resource(arg1
);
5629 struct target_rlimit
*target_rlim
;
5632 ret
= get_errno(getrlimit(resource
, &rlim
));
5633 if (!is_error(ret
)) {
5634 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5636 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5637 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5638 unlock_user_struct(target_rlim
, arg2
, 1);
5642 case TARGET_NR_getrusage
:
5644 struct rusage rusage
;
5645 ret
= get_errno(getrusage(arg1
, &rusage
));
5646 if (!is_error(ret
)) {
5647 host_to_target_rusage(arg2
, &rusage
);
5651 case TARGET_NR_gettimeofday
:
5654 ret
= get_errno(gettimeofday(&tv
, NULL
));
5655 if (!is_error(ret
)) {
5656 if (copy_to_user_timeval(arg1
, &tv
))
5661 case TARGET_NR_settimeofday
:
5664 if (copy_from_user_timeval(&tv
, arg1
))
5666 ret
= get_errno(settimeofday(&tv
, NULL
));
5669 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5670 case TARGET_NR_select
:
5672 struct target_sel_arg_struct
*sel
;
5673 abi_ulong inp
, outp
, exp
, tvp
;
5676 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5678 nsel
= tswapl(sel
->n
);
5679 inp
= tswapl(sel
->inp
);
5680 outp
= tswapl(sel
->outp
);
5681 exp
= tswapl(sel
->exp
);
5682 tvp
= tswapl(sel
->tvp
);
5683 unlock_user_struct(sel
, arg1
, 0);
5684 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5688 #ifdef TARGET_NR_pselect6
5689 case TARGET_NR_pselect6
:
5691 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
5692 fd_set rfds
, wfds
, efds
;
5693 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
5694 struct timespec ts
, *ts_ptr
;
5697 * The 6th arg is actually two args smashed together,
5698 * so we cannot use the C library.
5706 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
5707 target_sigset_t
*target_sigset
;
5715 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
5719 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
5723 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
5729 * This takes a timespec, and not a timeval, so we cannot
5730 * use the do_select() helper ...
5733 if (target_to_host_timespec(&ts
, ts_addr
)) {
5741 /* Extract the two packed args for the sigset */
5744 sig
.size
= _NSIG
/ 8;
5746 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
5750 arg_sigset
= tswapl(arg7
[0]);
5751 arg_sigsize
= tswapl(arg7
[1]);
5752 unlock_user(arg7
, arg6
, 0);
5756 if (arg_sigsize
!= sizeof(*target_sigset
)) {
5757 /* Like the kernel, we enforce correct size sigsets */
5758 ret
= -TARGET_EINVAL
;
5761 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
5762 sizeof(*target_sigset
), 1);
5763 if (!target_sigset
) {
5766 target_to_host_sigset(&set
, target_sigset
);
5767 unlock_user(target_sigset
, arg_sigset
, 0);
5775 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
5778 if (!is_error(ret
)) {
5779 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
5781 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
5783 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
5786 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
5792 case TARGET_NR_symlink
:
5795 p
= lock_user_string(arg1
);
5796 p2
= lock_user_string(arg2
);
5798 ret
= -TARGET_EFAULT
;
5800 ret
= get_errno(symlink(p
, p2
));
5801 unlock_user(p2
, arg2
, 0);
5802 unlock_user(p
, arg1
, 0);
5805 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5806 case TARGET_NR_symlinkat
:
5809 p
= lock_user_string(arg1
);
5810 p2
= lock_user_string(arg3
);
5812 ret
= -TARGET_EFAULT
;
5814 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5815 unlock_user(p2
, arg3
, 0);
5816 unlock_user(p
, arg1
, 0);
5820 #ifdef TARGET_NR_oldlstat
5821 case TARGET_NR_oldlstat
:
5824 case TARGET_NR_readlink
:
5827 p
= lock_user_string(arg1
);
5828 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5830 ret
= -TARGET_EFAULT
;
5832 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5833 char real
[PATH_MAX
];
5834 temp
= realpath(exec_path
,real
);
5835 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5836 snprintf((char *)p2
, arg3
, "%s", real
);
5839 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5841 unlock_user(p2
, arg2
, ret
);
5842 unlock_user(p
, arg1
, 0);
5845 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5846 case TARGET_NR_readlinkat
:
5849 p
= lock_user_string(arg2
);
5850 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5852 ret
= -TARGET_EFAULT
;
5854 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5855 unlock_user(p2
, arg3
, ret
);
5856 unlock_user(p
, arg2
, 0);
5860 #ifdef TARGET_NR_uselib
5861 case TARGET_NR_uselib
:
5864 #ifdef TARGET_NR_swapon
5865 case TARGET_NR_swapon
:
5866 if (!(p
= lock_user_string(arg1
)))
5868 ret
= get_errno(swapon(p
, arg2
));
5869 unlock_user(p
, arg1
, 0);
5872 case TARGET_NR_reboot
:
5874 #ifdef TARGET_NR_readdir
5875 case TARGET_NR_readdir
:
5878 #ifdef TARGET_NR_mmap
5879 case TARGET_NR_mmap
:
5880 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5881 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5882 || defined(TARGET_S390X)
5885 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5886 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5894 unlock_user(v
, arg1
, 0);
5895 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5896 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5900 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5901 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5907 #ifdef TARGET_NR_mmap2
5908 case TARGET_NR_mmap2
:
5910 #define MMAP_SHIFT 12
5912 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5913 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5915 arg6
<< MMAP_SHIFT
));
5918 case TARGET_NR_munmap
:
5919 ret
= get_errno(target_munmap(arg1
, arg2
));
5921 case TARGET_NR_mprotect
:
5923 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
5924 /* Special hack to detect libc making the stack executable. */
5925 if ((arg3
& PROT_GROWSDOWN
)
5926 && arg1
>= ts
->info
->stack_limit
5927 && arg1
<= ts
->info
->start_stack
) {
5928 arg3
&= ~PROT_GROWSDOWN
;
5929 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
5930 arg1
= ts
->info
->stack_limit
;
5933 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5935 #ifdef TARGET_NR_mremap
5936 case TARGET_NR_mremap
:
5937 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5940 /* ??? msync/mlock/munlock are broken for softmmu. */
5941 #ifdef TARGET_NR_msync
5942 case TARGET_NR_msync
:
5943 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5946 #ifdef TARGET_NR_mlock
5947 case TARGET_NR_mlock
:
5948 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5951 #ifdef TARGET_NR_munlock
5952 case TARGET_NR_munlock
:
5953 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5956 #ifdef TARGET_NR_mlockall
5957 case TARGET_NR_mlockall
:
5958 ret
= get_errno(mlockall(arg1
));
5961 #ifdef TARGET_NR_munlockall
5962 case TARGET_NR_munlockall
:
5963 ret
= get_errno(munlockall());
5966 case TARGET_NR_truncate
:
5967 if (!(p
= lock_user_string(arg1
)))
5969 ret
= get_errno(truncate(p
, arg2
));
5970 unlock_user(p
, arg1
, 0);
5972 case TARGET_NR_ftruncate
:
5973 ret
= get_errno(ftruncate(arg1
, arg2
));
5975 case TARGET_NR_fchmod
:
5976 ret
= get_errno(fchmod(arg1
, arg2
));
5978 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5979 case TARGET_NR_fchmodat
:
5980 if (!(p
= lock_user_string(arg2
)))
5982 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5983 unlock_user(p
, arg2
, 0);
5986 case TARGET_NR_getpriority
:
5987 /* libc does special remapping of the return value of
5988 * sys_getpriority() so it's just easiest to call
5989 * sys_getpriority() directly rather than through libc. */
5990 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5992 case TARGET_NR_setpriority
:
5993 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5995 #ifdef TARGET_NR_profil
5996 case TARGET_NR_profil
:
5999 case TARGET_NR_statfs
:
6000 if (!(p
= lock_user_string(arg1
)))
6002 ret
= get_errno(statfs(path(p
), &stfs
));
6003 unlock_user(p
, arg1
, 0);
6005 if (!is_error(ret
)) {
6006 struct target_statfs
*target_stfs
;
6008 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6010 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6011 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6012 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6013 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6014 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6015 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6016 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6017 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6018 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6019 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6020 unlock_user_struct(target_stfs
, arg2
, 1);
6023 case TARGET_NR_fstatfs
:
6024 ret
= get_errno(fstatfs(arg1
, &stfs
));
6025 goto convert_statfs
;
6026 #ifdef TARGET_NR_statfs64
6027 case TARGET_NR_statfs64
:
6028 if (!(p
= lock_user_string(arg1
)))
6030 ret
= get_errno(statfs(path(p
), &stfs
));
6031 unlock_user(p
, arg1
, 0);
6033 if (!is_error(ret
)) {
6034 struct target_statfs64
*target_stfs
;
6036 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6038 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6039 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6040 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6041 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6042 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6043 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6044 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6045 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6046 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6047 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6048 unlock_user_struct(target_stfs
, arg3
, 1);
6051 case TARGET_NR_fstatfs64
:
6052 ret
= get_errno(fstatfs(arg1
, &stfs
));
6053 goto convert_statfs64
;
6055 #ifdef TARGET_NR_ioperm
6056 case TARGET_NR_ioperm
:
6059 #ifdef TARGET_NR_socketcall
6060 case TARGET_NR_socketcall
:
6061 ret
= do_socketcall(arg1
, arg2
);
6064 #ifdef TARGET_NR_accept
6065 case TARGET_NR_accept
:
6066 ret
= do_accept(arg1
, arg2
, arg3
);
6069 #ifdef TARGET_NR_bind
6070 case TARGET_NR_bind
:
6071 ret
= do_bind(arg1
, arg2
, arg3
);
6074 #ifdef TARGET_NR_connect
6075 case TARGET_NR_connect
:
6076 ret
= do_connect(arg1
, arg2
, arg3
);
6079 #ifdef TARGET_NR_getpeername
6080 case TARGET_NR_getpeername
:
6081 ret
= do_getpeername(arg1
, arg2
, arg3
);
6084 #ifdef TARGET_NR_getsockname
6085 case TARGET_NR_getsockname
:
6086 ret
= do_getsockname(arg1
, arg2
, arg3
);
6089 #ifdef TARGET_NR_getsockopt
6090 case TARGET_NR_getsockopt
:
6091 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6094 #ifdef TARGET_NR_listen
6095 case TARGET_NR_listen
:
6096 ret
= get_errno(listen(arg1
, arg2
));
6099 #ifdef TARGET_NR_recv
6100 case TARGET_NR_recv
:
6101 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6104 #ifdef TARGET_NR_recvfrom
6105 case TARGET_NR_recvfrom
:
6106 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6109 #ifdef TARGET_NR_recvmsg
6110 case TARGET_NR_recvmsg
:
6111 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6114 #ifdef TARGET_NR_send
6115 case TARGET_NR_send
:
6116 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6119 #ifdef TARGET_NR_sendmsg
6120 case TARGET_NR_sendmsg
:
6121 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6124 #ifdef TARGET_NR_sendto
6125 case TARGET_NR_sendto
:
6126 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6129 #ifdef TARGET_NR_shutdown
6130 case TARGET_NR_shutdown
:
6131 ret
= get_errno(shutdown(arg1
, arg2
));
6134 #ifdef TARGET_NR_socket
6135 case TARGET_NR_socket
:
6136 ret
= do_socket(arg1
, arg2
, arg3
);
6139 #ifdef TARGET_NR_socketpair
6140 case TARGET_NR_socketpair
:
6141 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6144 #ifdef TARGET_NR_setsockopt
6145 case TARGET_NR_setsockopt
:
6146 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6150 case TARGET_NR_syslog
:
6151 if (!(p
= lock_user_string(arg2
)))
6153 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6154 unlock_user(p
, arg2
, 0);
6157 case TARGET_NR_setitimer
:
6159 struct itimerval value
, ovalue
, *pvalue
;
6163 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6164 || copy_from_user_timeval(&pvalue
->it_value
,
6165 arg2
+ sizeof(struct target_timeval
)))
6170 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6171 if (!is_error(ret
) && arg3
) {
6172 if (copy_to_user_timeval(arg3
,
6173 &ovalue
.it_interval
)
6174 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6180 case TARGET_NR_getitimer
:
6182 struct itimerval value
;
6184 ret
= get_errno(getitimer(arg1
, &value
));
6185 if (!is_error(ret
) && arg2
) {
6186 if (copy_to_user_timeval(arg2
,
6188 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6194 case TARGET_NR_stat
:
6195 if (!(p
= lock_user_string(arg1
)))
6197 ret
= get_errno(stat(path(p
), &st
));
6198 unlock_user(p
, arg1
, 0);
6200 case TARGET_NR_lstat
:
6201 if (!(p
= lock_user_string(arg1
)))
6203 ret
= get_errno(lstat(path(p
), &st
));
6204 unlock_user(p
, arg1
, 0);
6206 case TARGET_NR_fstat
:
6208 ret
= get_errno(fstat(arg1
, &st
));
6210 if (!is_error(ret
)) {
6211 struct target_stat
*target_st
;
6213 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6215 memset(target_st
, 0, sizeof(*target_st
));
6216 __put_user(st
.st_dev
, &target_st
->st_dev
);
6217 __put_user(st
.st_ino
, &target_st
->st_ino
);
6218 __put_user(st
.st_mode
, &target_st
->st_mode
);
6219 __put_user(st
.st_uid
, &target_st
->st_uid
);
6220 __put_user(st
.st_gid
, &target_st
->st_gid
);
6221 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6222 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6223 __put_user(st
.st_size
, &target_st
->st_size
);
6224 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6225 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6226 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6227 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6228 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6229 unlock_user_struct(target_st
, arg2
, 1);
6233 #ifdef TARGET_NR_olduname
6234 case TARGET_NR_olduname
:
6237 #ifdef TARGET_NR_iopl
6238 case TARGET_NR_iopl
:
6241 case TARGET_NR_vhangup
:
6242 ret
= get_errno(vhangup());
6244 #ifdef TARGET_NR_idle
6245 case TARGET_NR_idle
:
6248 #ifdef TARGET_NR_syscall
6249 case TARGET_NR_syscall
:
6250 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6251 arg6
, arg7
, arg8
, 0);
6254 case TARGET_NR_wait4
:
6257 abi_long status_ptr
= arg2
;
6258 struct rusage rusage
, *rusage_ptr
;
6259 abi_ulong target_rusage
= arg4
;
6261 rusage_ptr
= &rusage
;
6264 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6265 if (!is_error(ret
)) {
6267 status
= host_to_target_waitstatus(status
);
6268 if (put_user_s32(status
, status_ptr
))
6272 host_to_target_rusage(target_rusage
, &rusage
);
6276 #ifdef TARGET_NR_swapoff
6277 case TARGET_NR_swapoff
:
6278 if (!(p
= lock_user_string(arg1
)))
6280 ret
= get_errno(swapoff(p
));
6281 unlock_user(p
, arg1
, 0);
6284 case TARGET_NR_sysinfo
:
6286 struct target_sysinfo
*target_value
;
6287 struct sysinfo value
;
6288 ret
= get_errno(sysinfo(&value
));
6289 if (!is_error(ret
) && arg1
)
6291 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6293 __put_user(value
.uptime
, &target_value
->uptime
);
6294 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6295 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6296 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6297 __put_user(value
.totalram
, &target_value
->totalram
);
6298 __put_user(value
.freeram
, &target_value
->freeram
);
6299 __put_user(value
.sharedram
, &target_value
->sharedram
);
6300 __put_user(value
.bufferram
, &target_value
->bufferram
);
6301 __put_user(value
.totalswap
, &target_value
->totalswap
);
6302 __put_user(value
.freeswap
, &target_value
->freeswap
);
6303 __put_user(value
.procs
, &target_value
->procs
);
6304 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6305 __put_user(value
.freehigh
, &target_value
->freehigh
);
6306 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6307 unlock_user_struct(target_value
, arg1
, 1);
6311 #ifdef TARGET_NR_ipc
6313 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6316 #ifdef TARGET_NR_semget
6317 case TARGET_NR_semget
:
6318 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6321 #ifdef TARGET_NR_semop
6322 case TARGET_NR_semop
:
6323 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6326 #ifdef TARGET_NR_semctl
6327 case TARGET_NR_semctl
:
6328 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6331 #ifdef TARGET_NR_msgctl
6332 case TARGET_NR_msgctl
:
6333 ret
= do_msgctl(arg1
, arg2
, arg3
);
6336 #ifdef TARGET_NR_msgget
6337 case TARGET_NR_msgget
:
6338 ret
= get_errno(msgget(arg1
, arg2
));
6341 #ifdef TARGET_NR_msgrcv
6342 case TARGET_NR_msgrcv
:
6343 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6346 #ifdef TARGET_NR_msgsnd
6347 case TARGET_NR_msgsnd
:
6348 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6351 #ifdef TARGET_NR_shmget
6352 case TARGET_NR_shmget
:
6353 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6356 #ifdef TARGET_NR_shmctl
6357 case TARGET_NR_shmctl
:
6358 ret
= do_shmctl(arg1
, arg2
, arg3
);
6361 #ifdef TARGET_NR_shmat
6362 case TARGET_NR_shmat
:
6363 ret
= do_shmat(arg1
, arg2
, arg3
);
6366 #ifdef TARGET_NR_shmdt
6367 case TARGET_NR_shmdt
:
6368 ret
= do_shmdt(arg1
);
6371 case TARGET_NR_fsync
:
6372 ret
= get_errno(fsync(arg1
));
6374 case TARGET_NR_clone
:
6375 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6376 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6377 #elif defined(TARGET_CRIS)
6378 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6379 #elif defined(TARGET_S390X)
6380 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6382 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6385 #ifdef __NR_exit_group
6386 /* new thread calls */
6387 case TARGET_NR_exit_group
:
6391 gdb_exit(cpu_env
, arg1
);
6392 ret
= get_errno(exit_group(arg1
));
6395 case TARGET_NR_setdomainname
:
6396 if (!(p
= lock_user_string(arg1
)))
6398 ret
= get_errno(setdomainname(p
, arg2
));
6399 unlock_user(p
, arg1
, 0);
6401 case TARGET_NR_uname
:
6402 /* no need to transcode because we use the linux syscall */
6404 struct new_utsname
* buf
;
6406 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6408 ret
= get_errno(sys_uname(buf
));
6409 if (!is_error(ret
)) {
6410 /* Overrite the native machine name with whatever is being
6412 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6413 /* Allow the user to override the reported release. */
6414 if (qemu_uname_release
&& *qemu_uname_release
)
6415 strcpy (buf
->release
, qemu_uname_release
);
6417 unlock_user_struct(buf
, arg1
, 1);
6421 case TARGET_NR_modify_ldt
:
6422 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6424 #if !defined(TARGET_X86_64)
6425 case TARGET_NR_vm86old
:
6427 case TARGET_NR_vm86
:
6428 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6432 case TARGET_NR_adjtimex
:
6434 #ifdef TARGET_NR_create_module
6435 case TARGET_NR_create_module
:
6437 case TARGET_NR_init_module
:
6438 case TARGET_NR_delete_module
:
6439 #ifdef TARGET_NR_get_kernel_syms
6440 case TARGET_NR_get_kernel_syms
:
6443 case TARGET_NR_quotactl
:
6445 case TARGET_NR_getpgid
:
6446 ret
= get_errno(getpgid(arg1
));
6448 case TARGET_NR_fchdir
:
6449 ret
= get_errno(fchdir(arg1
));
6451 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6452 case TARGET_NR_bdflush
:
6455 #ifdef TARGET_NR_sysfs
6456 case TARGET_NR_sysfs
:
6459 case TARGET_NR_personality
:
6460 ret
= get_errno(personality(arg1
));
6462 #ifdef TARGET_NR_afs_syscall
6463 case TARGET_NR_afs_syscall
:
6466 #ifdef TARGET_NR__llseek /* Not on alpha */
6467 case TARGET_NR__llseek
:
6470 #if !defined(__NR_llseek)
6471 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6473 ret
= get_errno(res
);
6478 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6480 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6486 case TARGET_NR_getdents
:
6487 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6489 struct target_dirent
*target_dirp
;
6490 struct linux_dirent
*dirp
;
6491 abi_long count
= arg3
;
6493 dirp
= malloc(count
);
6495 ret
= -TARGET_ENOMEM
;
6499 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6500 if (!is_error(ret
)) {
6501 struct linux_dirent
*de
;
6502 struct target_dirent
*tde
;
6504 int reclen
, treclen
;
6505 int count1
, tnamelen
;
6509 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6513 reclen
= de
->d_reclen
;
6514 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6515 tde
->d_reclen
= tswap16(treclen
);
6516 tde
->d_ino
= tswapl(de
->d_ino
);
6517 tde
->d_off
= tswapl(de
->d_off
);
6518 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6521 /* XXX: may not be correct */
6522 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6523 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6525 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6529 unlock_user(target_dirp
, arg2
, ret
);
6535 struct linux_dirent
*dirp
;
6536 abi_long count
= arg3
;
6538 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6540 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6541 if (!is_error(ret
)) {
6542 struct linux_dirent
*de
;
6547 reclen
= de
->d_reclen
;
6550 de
->d_reclen
= tswap16(reclen
);
6551 tswapls(&de
->d_ino
);
6552 tswapls(&de
->d_off
);
6553 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6557 unlock_user(dirp
, arg2
, ret
);
6561 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6562 case TARGET_NR_getdents64
:
6564 struct linux_dirent64
*dirp
;
6565 abi_long count
= arg3
;
6566 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6568 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6569 if (!is_error(ret
)) {
6570 struct linux_dirent64
*de
;
6575 reclen
= de
->d_reclen
;
6578 de
->d_reclen
= tswap16(reclen
);
6579 tswap64s((uint64_t *)&de
->d_ino
);
6580 tswap64s((uint64_t *)&de
->d_off
);
6581 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6585 unlock_user(dirp
, arg2
, ret
);
6588 #endif /* TARGET_NR_getdents64 */
6589 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6591 case TARGET_NR_select
:
6593 case TARGET_NR__newselect
:
6595 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6598 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6599 # ifdef TARGET_NR_poll
6600 case TARGET_NR_poll
:
6602 # ifdef TARGET_NR_ppoll
6603 case TARGET_NR_ppoll
:
6606 struct target_pollfd
*target_pfd
;
6607 unsigned int nfds
= arg2
;
6612 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6616 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6617 for(i
= 0; i
< nfds
; i
++) {
6618 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6619 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6622 # ifdef TARGET_NR_ppoll
6623 if (num
== TARGET_NR_ppoll
) {
6624 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6625 target_sigset_t
*target_set
;
6626 sigset_t _set
, *set
= &_set
;
6629 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6630 unlock_user(target_pfd
, arg1
, 0);
6638 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6640 unlock_user(target_pfd
, arg1
, 0);
6643 target_to_host_sigset(set
, target_set
);
6648 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6650 if (!is_error(ret
) && arg3
) {
6651 host_to_target_timespec(arg3
, timeout_ts
);
6654 unlock_user(target_set
, arg4
, 0);
6658 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6660 if (!is_error(ret
)) {
6661 for(i
= 0; i
< nfds
; i
++) {
6662 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6665 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6669 case TARGET_NR_flock
:
6670 /* NOTE: the flock constant seems to be the same for every
6672 ret
= get_errno(flock(arg1
, arg2
));
6674 case TARGET_NR_readv
:
6679 vec
= alloca(count
* sizeof(struct iovec
));
6680 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6682 ret
= get_errno(readv(arg1
, vec
, count
));
6683 unlock_iovec(vec
, arg2
, count
, 1);
6686 case TARGET_NR_writev
:
6691 vec
= alloca(count
* sizeof(struct iovec
));
6692 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6694 ret
= get_errno(writev(arg1
, vec
, count
));
6695 unlock_iovec(vec
, arg2
, count
, 0);
6698 case TARGET_NR_getsid
:
6699 ret
= get_errno(getsid(arg1
));
6701 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6702 case TARGET_NR_fdatasync
:
6703 ret
= get_errno(fdatasync(arg1
));
6706 case TARGET_NR__sysctl
:
6707 /* We don't implement this, but ENOTDIR is always a safe
6709 ret
= -TARGET_ENOTDIR
;
6711 case TARGET_NR_sched_getaffinity
:
6713 unsigned int mask_size
;
6714 unsigned long *mask
;
6717 * sched_getaffinity needs multiples of ulong, so need to take
6718 * care of mismatches between target ulong and host ulong sizes.
6720 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6721 ret
= -TARGET_EINVAL
;
6724 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6726 mask
= alloca(mask_size
);
6727 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6729 if (!is_error(ret
)) {
6730 if (copy_to_user(arg3
, mask
, ret
)) {
6736 case TARGET_NR_sched_setaffinity
:
6738 unsigned int mask_size
;
6739 unsigned long *mask
;
6742 * sched_setaffinity needs multiples of ulong, so need to take
6743 * care of mismatches between target ulong and host ulong sizes.
6745 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6746 ret
= -TARGET_EINVAL
;
6749 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6751 mask
= alloca(mask_size
);
6752 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6755 memcpy(mask
, p
, arg2
);
6756 unlock_user_struct(p
, arg2
, 0);
6758 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
6761 case TARGET_NR_sched_setparam
:
6763 struct sched_param
*target_schp
;
6764 struct sched_param schp
;
6766 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6768 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6769 unlock_user_struct(target_schp
, arg2
, 0);
6770 ret
= get_errno(sched_setparam(arg1
, &schp
));
6773 case TARGET_NR_sched_getparam
:
6775 struct sched_param
*target_schp
;
6776 struct sched_param schp
;
6777 ret
= get_errno(sched_getparam(arg1
, &schp
));
6778 if (!is_error(ret
)) {
6779 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6781 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6782 unlock_user_struct(target_schp
, arg2
, 1);
6786 case TARGET_NR_sched_setscheduler
:
6788 struct sched_param
*target_schp
;
6789 struct sched_param schp
;
6790 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6792 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6793 unlock_user_struct(target_schp
, arg3
, 0);
6794 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6797 case TARGET_NR_sched_getscheduler
:
6798 ret
= get_errno(sched_getscheduler(arg1
));
6800 case TARGET_NR_sched_yield
:
6801 ret
= get_errno(sched_yield());
6803 case TARGET_NR_sched_get_priority_max
:
6804 ret
= get_errno(sched_get_priority_max(arg1
));
6806 case TARGET_NR_sched_get_priority_min
:
6807 ret
= get_errno(sched_get_priority_min(arg1
));
6809 case TARGET_NR_sched_rr_get_interval
:
6812 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6813 if (!is_error(ret
)) {
6814 host_to_target_timespec(arg2
, &ts
);
6818 case TARGET_NR_nanosleep
:
6820 struct timespec req
, rem
;
6821 target_to_host_timespec(&req
, arg1
);
6822 ret
= get_errno(nanosleep(&req
, &rem
));
6823 if (is_error(ret
) && arg2
) {
6824 host_to_target_timespec(arg2
, &rem
);
6828 #ifdef TARGET_NR_query_module
6829 case TARGET_NR_query_module
:
6832 #ifdef TARGET_NR_nfsservctl
6833 case TARGET_NR_nfsservctl
:
6836 case TARGET_NR_prctl
:
6839 case PR_GET_PDEATHSIG
:
6842 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6843 if (!is_error(ret
) && arg2
6844 && put_user_ual(deathsig
, arg2
))
6849 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6853 #ifdef TARGET_NR_arch_prctl
6854 case TARGET_NR_arch_prctl
:
6855 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6856 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6862 #ifdef TARGET_NR_pread
6863 case TARGET_NR_pread
:
6864 if (regpairs_aligned(cpu_env
))
6866 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6868 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6869 unlock_user(p
, arg2
, ret
);
6871 case TARGET_NR_pwrite
:
6872 if (regpairs_aligned(cpu_env
))
6874 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6876 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6877 unlock_user(p
, arg2
, 0);
6880 #ifdef TARGET_NR_pread64
6881 case TARGET_NR_pread64
:
6882 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6884 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6885 unlock_user(p
, arg2
, ret
);
6887 case TARGET_NR_pwrite64
:
6888 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6890 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6891 unlock_user(p
, arg2
, 0);
6894 case TARGET_NR_getcwd
:
6895 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6897 ret
= get_errno(sys_getcwd1(p
, arg2
));
6898 unlock_user(p
, arg1
, ret
);
6900 case TARGET_NR_capget
:
6902 case TARGET_NR_capset
:
6904 case TARGET_NR_sigaltstack
:
6905 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6906 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6907 defined(TARGET_M68K) || defined(TARGET_S390X)
6908 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6913 case TARGET_NR_sendfile
:
6915 #ifdef TARGET_NR_getpmsg
6916 case TARGET_NR_getpmsg
:
6919 #ifdef TARGET_NR_putpmsg
6920 case TARGET_NR_putpmsg
:
6923 #ifdef TARGET_NR_vfork
6924 case TARGET_NR_vfork
:
6925 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6929 #ifdef TARGET_NR_ugetrlimit
6930 case TARGET_NR_ugetrlimit
:
6933 int resource
= target_to_host_resource(arg1
);
6934 ret
= get_errno(getrlimit(resource
, &rlim
));
6935 if (!is_error(ret
)) {
6936 struct target_rlimit
*target_rlim
;
6937 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6939 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6940 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6941 unlock_user_struct(target_rlim
, arg2
, 1);
6946 #ifdef TARGET_NR_truncate64
6947 case TARGET_NR_truncate64
:
6948 if (!(p
= lock_user_string(arg1
)))
6950 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6951 unlock_user(p
, arg1
, 0);
6954 #ifdef TARGET_NR_ftruncate64
6955 case TARGET_NR_ftruncate64
:
6956 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6959 #ifdef TARGET_NR_stat64
6960 case TARGET_NR_stat64
:
6961 if (!(p
= lock_user_string(arg1
)))
6963 ret
= get_errno(stat(path(p
), &st
));
6964 unlock_user(p
, arg1
, 0);
6966 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6969 #ifdef TARGET_NR_lstat64
6970 case TARGET_NR_lstat64
:
6971 if (!(p
= lock_user_string(arg1
)))
6973 ret
= get_errno(lstat(path(p
), &st
));
6974 unlock_user(p
, arg1
, 0);
6976 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6979 #ifdef TARGET_NR_fstat64
6980 case TARGET_NR_fstat64
:
6981 ret
= get_errno(fstat(arg1
, &st
));
6983 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6986 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6987 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6988 #ifdef TARGET_NR_fstatat64
6989 case TARGET_NR_fstatat64
:
6991 #ifdef TARGET_NR_newfstatat
6992 case TARGET_NR_newfstatat
:
6994 if (!(p
= lock_user_string(arg2
)))
6996 #ifdef __NR_fstatat64
6997 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6999 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
7002 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7005 case TARGET_NR_lchown
:
7006 if (!(p
= lock_user_string(arg1
)))
7008 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7009 unlock_user(p
, arg1
, 0);
7011 #ifdef TARGET_NR_getuid
7012 case TARGET_NR_getuid
:
7013 ret
= get_errno(high2lowuid(getuid()));
7016 #ifdef TARGET_NR_getgid
7017 case TARGET_NR_getgid
:
7018 ret
= get_errno(high2lowgid(getgid()));
7021 #ifdef TARGET_NR_geteuid
7022 case TARGET_NR_geteuid
:
7023 ret
= get_errno(high2lowuid(geteuid()));
7026 #ifdef TARGET_NR_getegid
7027 case TARGET_NR_getegid
:
7028 ret
= get_errno(high2lowgid(getegid()));
7031 case TARGET_NR_setreuid
:
7032 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7034 case TARGET_NR_setregid
:
7035 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7037 case TARGET_NR_getgroups
:
7039 int gidsetsize
= arg1
;
7040 target_id
*target_grouplist
;
7044 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7045 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7046 if (gidsetsize
== 0)
7048 if (!is_error(ret
)) {
7049 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
7050 if (!target_grouplist
)
7052 for(i
= 0;i
< ret
; i
++)
7053 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7054 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7058 case TARGET_NR_setgroups
:
7060 int gidsetsize
= arg1
;
7061 target_id
*target_grouplist
;
7065 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7066 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7067 if (!target_grouplist
) {
7068 ret
= -TARGET_EFAULT
;
7071 for(i
= 0;i
< gidsetsize
; i
++)
7072 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7073 unlock_user(target_grouplist
, arg2
, 0);
7074 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7077 case TARGET_NR_fchown
:
7078 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7080 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7081 case TARGET_NR_fchownat
:
7082 if (!(p
= lock_user_string(arg2
)))
7084 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7085 unlock_user(p
, arg2
, 0);
7088 #ifdef TARGET_NR_setresuid
7089 case TARGET_NR_setresuid
:
7090 ret
= get_errno(setresuid(low2highuid(arg1
),
7092 low2highuid(arg3
)));
7095 #ifdef TARGET_NR_getresuid
7096 case TARGET_NR_getresuid
:
7098 uid_t ruid
, euid
, suid
;
7099 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7100 if (!is_error(ret
)) {
7101 if (put_user_u16(high2lowuid(ruid
), arg1
)
7102 || put_user_u16(high2lowuid(euid
), arg2
)
7103 || put_user_u16(high2lowuid(suid
), arg3
))
7109 #ifdef TARGET_NR_getresgid
7110 case TARGET_NR_setresgid
:
7111 ret
= get_errno(setresgid(low2highgid(arg1
),
7113 low2highgid(arg3
)));
7116 #ifdef TARGET_NR_getresgid
7117 case TARGET_NR_getresgid
:
7119 gid_t rgid
, egid
, sgid
;
7120 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7121 if (!is_error(ret
)) {
7122 if (put_user_u16(high2lowgid(rgid
), arg1
)
7123 || put_user_u16(high2lowgid(egid
), arg2
)
7124 || put_user_u16(high2lowgid(sgid
), arg3
))
7130 case TARGET_NR_chown
:
7131 if (!(p
= lock_user_string(arg1
)))
7133 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7134 unlock_user(p
, arg1
, 0);
7136 case TARGET_NR_setuid
:
7137 ret
= get_errno(setuid(low2highuid(arg1
)));
7139 case TARGET_NR_setgid
:
7140 ret
= get_errno(setgid(low2highgid(arg1
)));
7142 case TARGET_NR_setfsuid
:
7143 ret
= get_errno(setfsuid(arg1
));
7145 case TARGET_NR_setfsgid
:
7146 ret
= get_errno(setfsgid(arg1
));
7149 #ifdef TARGET_NR_lchown32
7150 case TARGET_NR_lchown32
:
7151 if (!(p
= lock_user_string(arg1
)))
7153 ret
= get_errno(lchown(p
, arg2
, arg3
));
7154 unlock_user(p
, arg1
, 0);
7157 #ifdef TARGET_NR_getuid32
7158 case TARGET_NR_getuid32
:
7159 ret
= get_errno(getuid());
7163 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7164 /* Alpha specific */
7165 case TARGET_NR_getxuid
:
7169 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7171 ret
= get_errno(getuid());
7174 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7175 /* Alpha specific */
7176 case TARGET_NR_getxgid
:
7180 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7182 ret
= get_errno(getgid());
7185 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7186 /* Alpha specific */
7187 case TARGET_NR_osf_getsysinfo
:
7188 ret
= -TARGET_EOPNOTSUPP
;
7190 case TARGET_GSI_IEEE_FP_CONTROL
:
7192 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7194 /* Copied from linux ieee_fpcr_to_swcr. */
7195 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7196 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7197 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7198 | SWCR_TRAP_ENABLE_DZE
7199 | SWCR_TRAP_ENABLE_OVF
);
7200 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7201 | SWCR_TRAP_ENABLE_INE
);
7202 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7203 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7205 if (put_user_u64 (swcr
, arg2
))
7211 /* case GSI_IEEE_STATE_AT_SIGNAL:
7212 -- Not implemented in linux kernel.
7214 -- Retrieves current unaligned access state; not much used.
7216 -- Retrieves implver information; surely not used.
7218 -- Grabs a copy of the HWRPB; surely not used.
7223 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7224 /* Alpha specific */
7225 case TARGET_NR_osf_setsysinfo
:
7226 ret
= -TARGET_EOPNOTSUPP
;
7228 case TARGET_SSI_IEEE_FP_CONTROL
:
7229 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7231 uint64_t swcr
, fpcr
, orig_fpcr
;
7233 if (get_user_u64 (swcr
, arg2
))
7235 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7236 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7238 /* Copied from linux ieee_swcr_to_fpcr. */
7239 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7240 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7241 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7242 | SWCR_TRAP_ENABLE_DZE
7243 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7244 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7245 | SWCR_TRAP_ENABLE_INE
)) << 57;
7246 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7247 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7249 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
7252 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
7253 /* Old exceptions are not signaled. */
7254 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7256 /* If any exceptions set by this call, and are unmasked,
7263 /* case SSI_NVPAIRS:
7264 -- Used with SSIN_UACPROC to enable unaligned accesses.
7265 case SSI_IEEE_STATE_AT_SIGNAL:
7266 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7267 -- Not implemented in linux kernel
7272 #ifdef TARGET_NR_osf_sigprocmask
7273 /* Alpha specific. */
7274 case TARGET_NR_osf_sigprocmask
:
7278 sigset_t set
, oldset
;
7281 case TARGET_SIG_BLOCK
:
7284 case TARGET_SIG_UNBLOCK
:
7287 case TARGET_SIG_SETMASK
:
7291 ret
= -TARGET_EINVAL
;
7295 target_to_host_old_sigset(&set
, &mask
);
7296 sigprocmask(how
, &set
, &oldset
);
7297 host_to_target_old_sigset(&mask
, &oldset
);
7303 #ifdef TARGET_NR_getgid32
7304 case TARGET_NR_getgid32
:
7305 ret
= get_errno(getgid());
7308 #ifdef TARGET_NR_geteuid32
7309 case TARGET_NR_geteuid32
:
7310 ret
= get_errno(geteuid());
7313 #ifdef TARGET_NR_getegid32
7314 case TARGET_NR_getegid32
:
7315 ret
= get_errno(getegid());
7318 #ifdef TARGET_NR_setreuid32
7319 case TARGET_NR_setreuid32
:
7320 ret
= get_errno(setreuid(arg1
, arg2
));
7323 #ifdef TARGET_NR_setregid32
7324 case TARGET_NR_setregid32
:
7325 ret
= get_errno(setregid(arg1
, arg2
));
7328 #ifdef TARGET_NR_getgroups32
7329 case TARGET_NR_getgroups32
:
7331 int gidsetsize
= arg1
;
7332 uint32_t *target_grouplist
;
7336 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7337 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7338 if (gidsetsize
== 0)
7340 if (!is_error(ret
)) {
7341 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7342 if (!target_grouplist
) {
7343 ret
= -TARGET_EFAULT
;
7346 for(i
= 0;i
< ret
; i
++)
7347 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7348 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
7353 #ifdef TARGET_NR_setgroups32
7354 case TARGET_NR_setgroups32
:
7356 int gidsetsize
= arg1
;
7357 uint32_t *target_grouplist
;
7361 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7362 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
7363 if (!target_grouplist
) {
7364 ret
= -TARGET_EFAULT
;
7367 for(i
= 0;i
< gidsetsize
; i
++)
7368 grouplist
[i
] = tswap32(target_grouplist
[i
]);
7369 unlock_user(target_grouplist
, arg2
, 0);
7370 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7374 #ifdef TARGET_NR_fchown32
7375 case TARGET_NR_fchown32
:
7376 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
7379 #ifdef TARGET_NR_setresuid32
7380 case TARGET_NR_setresuid32
:
7381 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
7384 #ifdef TARGET_NR_getresuid32
7385 case TARGET_NR_getresuid32
:
7387 uid_t ruid
, euid
, suid
;
7388 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7389 if (!is_error(ret
)) {
7390 if (put_user_u32(ruid
, arg1
)
7391 || put_user_u32(euid
, arg2
)
7392 || put_user_u32(suid
, arg3
))
7398 #ifdef TARGET_NR_setresgid32
7399 case TARGET_NR_setresgid32
:
7400 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
7403 #ifdef TARGET_NR_getresgid32
7404 case TARGET_NR_getresgid32
:
7406 gid_t rgid
, egid
, sgid
;
7407 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7408 if (!is_error(ret
)) {
7409 if (put_user_u32(rgid
, arg1
)
7410 || put_user_u32(egid
, arg2
)
7411 || put_user_u32(sgid
, arg3
))
7417 #ifdef TARGET_NR_chown32
7418 case TARGET_NR_chown32
:
7419 if (!(p
= lock_user_string(arg1
)))
7421 ret
= get_errno(chown(p
, arg2
, arg3
));
7422 unlock_user(p
, arg1
, 0);
7425 #ifdef TARGET_NR_setuid32
7426 case TARGET_NR_setuid32
:
7427 ret
= get_errno(setuid(arg1
));
7430 #ifdef TARGET_NR_setgid32
7431 case TARGET_NR_setgid32
:
7432 ret
= get_errno(setgid(arg1
));
7435 #ifdef TARGET_NR_setfsuid32
7436 case TARGET_NR_setfsuid32
:
7437 ret
= get_errno(setfsuid(arg1
));
7440 #ifdef TARGET_NR_setfsgid32
7441 case TARGET_NR_setfsgid32
:
7442 ret
= get_errno(setfsgid(arg1
));
7446 case TARGET_NR_pivot_root
:
7448 #ifdef TARGET_NR_mincore
7449 case TARGET_NR_mincore
:
7452 ret
= -TARGET_EFAULT
;
7453 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
7455 if (!(p
= lock_user_string(arg3
)))
7457 ret
= get_errno(mincore(a
, arg2
, p
));
7458 unlock_user(p
, arg3
, ret
);
7460 unlock_user(a
, arg1
, 0);
7464 #ifdef TARGET_NR_arm_fadvise64_64
7465 case TARGET_NR_arm_fadvise64_64
:
7468 * arm_fadvise64_64 looks like fadvise64_64 but
7469 * with different argument order
7477 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7478 #ifdef TARGET_NR_fadvise64_64
7479 case TARGET_NR_fadvise64_64
:
7481 #ifdef TARGET_NR_fadvise64
7482 case TARGET_NR_fadvise64
:
7486 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
7487 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
7488 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
7489 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
7493 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
7496 #ifdef TARGET_NR_madvise
7497 case TARGET_NR_madvise
:
7498 /* A straight passthrough may not be safe because qemu sometimes
7499 turns private flie-backed mappings into anonymous mappings.
7500 This will break MADV_DONTNEED.
7501 This is a hint, so ignoring and returning success is ok. */
7505 #if TARGET_ABI_BITS == 32
7506 case TARGET_NR_fcntl64
:
7510 struct target_flock64
*target_fl
;
7512 struct target_eabi_flock64
*target_efl
;
7515 cmd
= target_to_host_fcntl_cmd(arg2
);
7516 if (cmd
== -TARGET_EINVAL
)
7520 case TARGET_F_GETLK64
:
7522 if (((CPUARMState
*)cpu_env
)->eabi
) {
7523 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7525 fl
.l_type
= tswap16(target_efl
->l_type
);
7526 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7527 fl
.l_start
= tswap64(target_efl
->l_start
);
7528 fl
.l_len
= tswap64(target_efl
->l_len
);
7529 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7530 unlock_user_struct(target_efl
, arg3
, 0);
7534 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7536 fl
.l_type
= tswap16(target_fl
->l_type
);
7537 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7538 fl
.l_start
= tswap64(target_fl
->l_start
);
7539 fl
.l_len
= tswap64(target_fl
->l_len
);
7540 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7541 unlock_user_struct(target_fl
, arg3
, 0);
7543 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7546 if (((CPUARMState
*)cpu_env
)->eabi
) {
7547 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
7549 target_efl
->l_type
= tswap16(fl
.l_type
);
7550 target_efl
->l_whence
= tswap16(fl
.l_whence
);
7551 target_efl
->l_start
= tswap64(fl
.l_start
);
7552 target_efl
->l_len
= tswap64(fl
.l_len
);
7553 target_efl
->l_pid
= tswap32(fl
.l_pid
);
7554 unlock_user_struct(target_efl
, arg3
, 1);
7558 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
7560 target_fl
->l_type
= tswap16(fl
.l_type
);
7561 target_fl
->l_whence
= tswap16(fl
.l_whence
);
7562 target_fl
->l_start
= tswap64(fl
.l_start
);
7563 target_fl
->l_len
= tswap64(fl
.l_len
);
7564 target_fl
->l_pid
= tswap32(fl
.l_pid
);
7565 unlock_user_struct(target_fl
, arg3
, 1);
7570 case TARGET_F_SETLK64
:
7571 case TARGET_F_SETLKW64
:
7573 if (((CPUARMState
*)cpu_env
)->eabi
) {
7574 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
7576 fl
.l_type
= tswap16(target_efl
->l_type
);
7577 fl
.l_whence
= tswap16(target_efl
->l_whence
);
7578 fl
.l_start
= tswap64(target_efl
->l_start
);
7579 fl
.l_len
= tswap64(target_efl
->l_len
);
7580 fl
.l_pid
= tswap32(target_efl
->l_pid
);
7581 unlock_user_struct(target_efl
, arg3
, 0);
7585 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
7587 fl
.l_type
= tswap16(target_fl
->l_type
);
7588 fl
.l_whence
= tswap16(target_fl
->l_whence
);
7589 fl
.l_start
= tswap64(target_fl
->l_start
);
7590 fl
.l_len
= tswap64(target_fl
->l_len
);
7591 fl
.l_pid
= tswap32(target_fl
->l_pid
);
7592 unlock_user_struct(target_fl
, arg3
, 0);
7594 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
7597 ret
= do_fcntl(arg1
, arg2
, arg3
);
7603 #ifdef TARGET_NR_cacheflush
7604 case TARGET_NR_cacheflush
:
7605 /* self-modifying code is handled automatically, so nothing needed */
7609 #ifdef TARGET_NR_security
7610 case TARGET_NR_security
:
7613 #ifdef TARGET_NR_getpagesize
7614 case TARGET_NR_getpagesize
:
7615 ret
= TARGET_PAGE_SIZE
;
7618 case TARGET_NR_gettid
:
7619 ret
= get_errno(gettid());
7621 #ifdef TARGET_NR_readahead
7622 case TARGET_NR_readahead
:
7623 #if TARGET_ABI_BITS == 32
7624 if (regpairs_aligned(cpu_env
)) {
7629 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
7631 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
7635 #ifdef TARGET_NR_setxattr
7636 case TARGET_NR_setxattr
:
7637 case TARGET_NR_lsetxattr
:
7638 case TARGET_NR_fsetxattr
:
7639 case TARGET_NR_getxattr
:
7640 case TARGET_NR_lgetxattr
:
7641 case TARGET_NR_fgetxattr
:
7642 case TARGET_NR_listxattr
:
7643 case TARGET_NR_llistxattr
:
7644 case TARGET_NR_flistxattr
:
7645 case TARGET_NR_removexattr
:
7646 case TARGET_NR_lremovexattr
:
7647 case TARGET_NR_fremovexattr
:
7648 ret
= -TARGET_EOPNOTSUPP
;
7651 #ifdef TARGET_NR_set_thread_area
7652 case TARGET_NR_set_thread_area
:
7653 #if defined(TARGET_MIPS)
7654 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
7657 #elif defined(TARGET_CRIS)
7659 ret
= -TARGET_EINVAL
;
7661 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
7665 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7666 ret
= do_set_thread_area(cpu_env
, arg1
);
7669 goto unimplemented_nowarn
;
7672 #ifdef TARGET_NR_get_thread_area
7673 case TARGET_NR_get_thread_area
:
7674 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7675 ret
= do_get_thread_area(cpu_env
, arg1
);
7677 goto unimplemented_nowarn
;
7680 #ifdef TARGET_NR_getdomainname
7681 case TARGET_NR_getdomainname
:
7682 goto unimplemented_nowarn
;
7685 #ifdef TARGET_NR_clock_gettime
7686 case TARGET_NR_clock_gettime
:
7689 ret
= get_errno(clock_gettime(arg1
, &ts
));
7690 if (!is_error(ret
)) {
7691 host_to_target_timespec(arg2
, &ts
);
7696 #ifdef TARGET_NR_clock_getres
7697 case TARGET_NR_clock_getres
:
7700 ret
= get_errno(clock_getres(arg1
, &ts
));
7701 if (!is_error(ret
)) {
7702 host_to_target_timespec(arg2
, &ts
);
7707 #ifdef TARGET_NR_clock_nanosleep
7708 case TARGET_NR_clock_nanosleep
:
7711 target_to_host_timespec(&ts
, arg3
);
7712 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
7714 host_to_target_timespec(arg4
, &ts
);
7719 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7720 case TARGET_NR_set_tid_address
:
7721 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
7725 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7726 case TARGET_NR_tkill
:
7727 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
7731 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7732 case TARGET_NR_tgkill
:
7733 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
7734 target_to_host_signal(arg3
)));
7738 #ifdef TARGET_NR_set_robust_list
7739 case TARGET_NR_set_robust_list
:
7740 goto unimplemented_nowarn
;
7743 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7744 case TARGET_NR_utimensat
:
7746 struct timespec
*tsp
, ts
[2];
7750 target_to_host_timespec(ts
, arg3
);
7751 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
7755 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
7757 if (!(p
= lock_user_string(arg2
))) {
7758 ret
= -TARGET_EFAULT
;
7761 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
7762 unlock_user(p
, arg2
, 0);
7767 #if defined(CONFIG_USE_NPTL)
7768 case TARGET_NR_futex
:
7769 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7772 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7773 case TARGET_NR_inotify_init
:
7774 ret
= get_errno(sys_inotify_init());
7777 #ifdef CONFIG_INOTIFY1
7778 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7779 case TARGET_NR_inotify_init1
:
7780 ret
= get_errno(sys_inotify_init1(arg1
));
7784 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7785 case TARGET_NR_inotify_add_watch
:
7786 p
= lock_user_string(arg2
);
7787 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
7788 unlock_user(p
, arg2
, 0);
7791 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7792 case TARGET_NR_inotify_rm_watch
:
7793 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
7797 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7798 case TARGET_NR_mq_open
:
7800 struct mq_attr posix_mq_attr
;
7802 p
= lock_user_string(arg1
- 1);
7804 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
7805 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
7806 unlock_user (p
, arg1
, 0);
7810 case TARGET_NR_mq_unlink
:
7811 p
= lock_user_string(arg1
- 1);
7812 ret
= get_errno(mq_unlink(p
));
7813 unlock_user (p
, arg1
, 0);
7816 case TARGET_NR_mq_timedsend
:
7820 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7822 target_to_host_timespec(&ts
, arg5
);
7823 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
7824 host_to_target_timespec(arg5
, &ts
);
7827 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
7828 unlock_user (p
, arg2
, arg3
);
7832 case TARGET_NR_mq_timedreceive
:
7837 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
7839 target_to_host_timespec(&ts
, arg5
);
7840 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
7841 host_to_target_timespec(arg5
, &ts
);
7844 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
7845 unlock_user (p
, arg2
, arg3
);
7847 put_user_u32(prio
, arg4
);
7851 /* Not implemented for now... */
7852 /* case TARGET_NR_mq_notify: */
7855 case TARGET_NR_mq_getsetattr
:
7857 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
7860 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
7861 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
7864 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
7865 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
7872 #ifdef CONFIG_SPLICE
7873 #ifdef TARGET_NR_tee
7876 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
7880 #ifdef TARGET_NR_splice
7881 case TARGET_NR_splice
:
7883 loff_t loff_in
, loff_out
;
7884 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
7886 get_user_u64(loff_in
, arg2
);
7887 ploff_in
= &loff_in
;
7890 get_user_u64(loff_out
, arg2
);
7891 ploff_out
= &loff_out
;
7893 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
7897 #ifdef TARGET_NR_vmsplice
7898 case TARGET_NR_vmsplice
:
7903 vec
= alloca(count
* sizeof(struct iovec
));
7904 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
7906 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
7907 unlock_iovec(vec
, arg2
, count
, 0);
7911 #endif /* CONFIG_SPLICE */
7912 #ifdef CONFIG_EVENTFD
7913 #if defined(TARGET_NR_eventfd)
7914 case TARGET_NR_eventfd
:
7915 ret
= get_errno(eventfd(arg1
, 0));
7918 #if defined(TARGET_NR_eventfd2)
7919 case TARGET_NR_eventfd2
:
7920 ret
= get_errno(eventfd(arg1
, arg2
));
7923 #endif /* CONFIG_EVENTFD */
7924 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7925 case TARGET_NR_fallocate
:
7926 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
7929 #if defined(CONFIG_SYNC_FILE_RANGE)
7930 #if defined(TARGET_NR_sync_file_range)
7931 case TARGET_NR_sync_file_range
:
7932 #if TARGET_ABI_BITS == 32
7933 #if defined(TARGET_MIPS)
7934 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7935 target_offset64(arg5
, arg6
), arg7
));
7937 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
7938 target_offset64(arg4
, arg5
), arg6
));
7939 #endif /* !TARGET_MIPS */
7941 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
7945 #if defined(TARGET_NR_sync_file_range2)
7946 case TARGET_NR_sync_file_range2
:
7947 /* This is like sync_file_range but the arguments are reordered */
7948 #if TARGET_ABI_BITS == 32
7949 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
7950 target_offset64(arg5
, arg6
), arg2
));
7952 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
7957 #if defined(CONFIG_EPOLL)
7958 #if defined(TARGET_NR_epoll_create)
7959 case TARGET_NR_epoll_create
:
7960 ret
= get_errno(epoll_create(arg1
));
7963 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
7964 case TARGET_NR_epoll_create1
:
7965 ret
= get_errno(epoll_create1(arg1
));
7968 #if defined(TARGET_NR_epoll_ctl)
7969 case TARGET_NR_epoll_ctl
:
7971 struct epoll_event ep
;
7972 struct epoll_event
*epp
= 0;
7974 struct target_epoll_event
*target_ep
;
7975 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
7978 ep
.events
= tswap32(target_ep
->events
);
7979 /* The epoll_data_t union is just opaque data to the kernel,
7980 * so we transfer all 64 bits across and need not worry what
7981 * actual data type it is.
7983 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
7984 unlock_user_struct(target_ep
, arg4
, 0);
7987 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
7992 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
7993 #define IMPLEMENT_EPOLL_PWAIT
7995 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
7996 #if defined(TARGET_NR_epoll_wait)
7997 case TARGET_NR_epoll_wait
:
7999 #if defined(IMPLEMENT_EPOLL_PWAIT)
8000 case TARGET_NR_epoll_pwait
:
8003 struct target_epoll_event
*target_ep
;
8004 struct epoll_event
*ep
;
8006 int maxevents
= arg3
;
8009 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8010 maxevents
* sizeof(struct target_epoll_event
), 1);
8015 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8018 #if defined(IMPLEMENT_EPOLL_PWAIT)
8019 case TARGET_NR_epoll_pwait
:
8021 target_sigset_t
*target_set
;
8022 sigset_t _set
, *set
= &_set
;
8025 target_set
= lock_user(VERIFY_READ
, arg5
,
8026 sizeof(target_sigset_t
), 1);
8028 unlock_user(target_ep
, arg2
, 0);
8031 target_to_host_sigset(set
, target_set
);
8032 unlock_user(target_set
, arg5
, 0);
8037 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8041 #if defined(TARGET_NR_epoll_wait)
8042 case TARGET_NR_epoll_wait
:
8043 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8047 ret
= -TARGET_ENOSYS
;
8049 if (!is_error(ret
)) {
8051 for (i
= 0; i
< ret
; i
++) {
8052 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8053 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8056 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8061 #ifdef TARGET_NR_prlimit64
8062 case TARGET_NR_prlimit64
:
8064 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8065 struct target_rlimit64
*target_rnew
, *target_rold
;
8066 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
8068 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
8071 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
8072 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
8073 unlock_user_struct(target_rnew
, arg3
, 0);
8077 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
8078 if (!is_error(ret
) && arg4
) {
8079 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
8082 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
8083 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
8084 unlock_user_struct(target_rold
, arg4
, 1);
8091 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8092 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8093 unimplemented_nowarn
:
8095 ret
= -TARGET_ENOSYS
;
8100 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8103 print_syscall_ret(num
, ret
);
8106 ret
= -TARGET_EFAULT
;