4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
49 int __clone2(int (*fn
)(void *), void *child_stack_base
,
50 size_t stack_size
, int flags
, void *arg
, ...);
52 #include <sys/socket.h>
56 #include <sys/times.h>
59 #include <sys/statfs.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/utsname.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
96 #include <linux/mtio.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include "linux_loop.h"
105 #include "cpu-uname.h"
109 #if defined(CONFIG_USE_NPTL)
110 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
111 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 /* XXX: Hardcode the above values. */
114 #define CLONE_NPTL_FLAGS2 0
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generate a thin static wrapper around the raw syscall(2) entry point.
 * _syscallN(type, name, type1, arg1, ...) defines
 *     static type name(type1 arg1, ...)
 * which invokes syscall(__NR_<name>, args...) and returns its result;
 * the host errno is left in place for the caller to translate. */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_faccessat __NR_faccessat
181 #define __NR_sys_fchmodat __NR_fchmodat
182 #define __NR_sys_fchownat __NR_fchownat
183 #define __NR_sys_fstatat64 __NR_fstatat64
184 #define __NR_sys_futimesat __NR_futimesat
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_linkat __NR_linkat
190 #define __NR_sys_mkdirat __NR_mkdirat
191 #define __NR_sys_mknodat __NR_mknodat
192 #define __NR_sys_newfstatat __NR_newfstatat
193 #define __NR_sys_openat __NR_openat
194 #define __NR_sys_readlinkat __NR_readlinkat
195 #define __NR_sys_renameat __NR_renameat
196 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
197 #define __NR_sys_symlinkat __NR_symlinkat
198 #define __NR_sys_syslog __NR_syslog
199 #define __NR_sys_tgkill __NR_tgkill
200 #define __NR_sys_tkill __NR_tkill
201 #define __NR_sys_unlinkat __NR_unlinkat
202 #define __NR_sys_utimensat __NR_utimensat
203 #define __NR_sys_futex __NR_futex
204 #define __NR_sys_inotify_init __NR_inotify_init
205 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
206 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
208 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
210 #define __NR__llseek __NR_lseek
214 _syscall0(int, gettid
)
216 /* This is a replacement for the host gettid() and must return a host
218 static int gettid(void) {
222 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
223 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
224 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
226 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
227 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
228 loff_t
*, res
, uint
, wh
);
230 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
231 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
232 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
233 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
235 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
236 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
238 #ifdef __NR_exit_group
239 _syscall1(int,exit_group
,int,error_code
)
241 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
242 _syscall1(int,set_tid_address
,int *,tidptr
)
244 #if defined(CONFIG_USE_NPTL)
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
247 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
250 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
251 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
252 unsigned long *, user_mask_ptr
);
253 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
254 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
255 unsigned long *, user_mask_ptr
);
256 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Copy one utsname field, guaranteeing NUL termination: __NEW_UTS_LEN
 * doesn't include the terminating null, so we always write one. */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* uname(2) replacement: fill a kernel-style new_utsname from the host's
 * libc utsname.  Returns 0 on success, -1 (errno set by uname) on error. */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* domainname is a GNU extension of struct utsname */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}

#undef COPY_UTSNAME_FIELD
/* getcwd(2)-style helper: fills buf with the current directory and
 * returns the length of the string including the terminating NUL,
 * or -1 on failure (getcwd() sets errno). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
337 * Host system seems to have atfile syscall stubs available. We
338 * now enable them one by one as specified by target syscall_nr.h.
341 #ifdef TARGET_NR_faccessat
/* faccessat(2) wrapper; the flags argument is hard-wired to 0. */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
347 #ifdef TARGET_NR_fchmodat
/* fchmodat(2) wrapper; the flags argument is hard-wired to 0. */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
353 #if defined(TARGET_NR_fchownat)
/* fchownat(2) wrapper; forwards all arguments including flags. */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
360 #ifdef __NR_fstatat64
/* fstatat64 wrapper implemented on the host's fstatat(2). */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
367 #ifdef __NR_newfstatat
/* newfstatat wrapper implemented on the host's fstatat(2). */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
374 #ifdef TARGET_NR_futimesat
/* futimesat(2) wrapper; times == NULL sets the current time. */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
381 #ifdef TARGET_NR_linkat
/* linkat(2) wrapper; forwards all arguments including flags. */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
388 #ifdef TARGET_NR_mkdirat
/* mkdirat(2) wrapper. */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
394 #ifdef TARGET_NR_mknodat
/* mknodat(2) wrapper. */
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
401 #ifdef TARGET_NR_openat
/* openat(2) wrapper.  open(2) has the extra 'mode' parameter only when
 * called with O_CREAT, so only forward it in that case. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
414 #ifdef TARGET_NR_readlinkat
/* readlinkat(2) wrapper; returns the number of bytes placed in buf
 * (no NUL terminator is added) or -1 with errno set. */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
420 #ifdef TARGET_NR_renameat
/* renameat(2) wrapper. */
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
427 #ifdef TARGET_NR_symlinkat
/* symlinkat(2) wrapper. */
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
433 #ifdef TARGET_NR_unlinkat
/* unlinkat(2) wrapper; forwards flags (e.g. AT_REMOVEDIR). */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
439 #else /* !CONFIG_ATFILE */
442 * Try direct syscalls instead
444 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
445 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
447 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
448 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
450 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
451 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
452 uid_t
,owner
,gid_t
,group
,int,flags
)
454 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
455 defined(__NR_fstatat64)
456 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
457 struct stat
*,buf
,int,flags
)
459 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
460 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
461 const struct timeval
*,times
)
463 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
464 defined(__NR_newfstatat)
465 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
466 struct stat
*,buf
,int,flags
)
468 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
469 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
470 int,newdirfd
,const char *,newpath
,int,flags
)
472 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
473 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
475 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
476 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
477 mode_t
,mode
,dev_t
,dev
)
479 #if defined(TARGET_NR_openat) && defined(__NR_openat)
480 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
482 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
483 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
484 char *,buf
,size_t,bufsize
)
486 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
487 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
488 int,newdirfd
,const char *,newpath
)
490 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
491 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
492 int,newdirfd
,const char *,newpath
)
494 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
495 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
498 #endif /* CONFIG_ATFILE */
500 #ifdef CONFIG_UTIMENSAT
/* utimensat(2)/futimens(2) wrapper: a NULL pathname means "operate on
 * the file referred to by dirfd itself" (futimens semantics). */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
510 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
511 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
512 const struct timespec
*,tsp
,int,flags
)
514 #endif /* CONFIG_UTIMENSAT */
516 #ifdef CONFIG_INOTIFY
517 #include <sys/inotify.h>
519 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* inotify_init(2) wrapper via the host libc. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
525 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* inotify_add_watch(2) wrapper; returns the watch descriptor or -1. */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
531 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* inotify_rm_watch(2) wrapper. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
537 #ifdef CONFIG_INOTIFY1
538 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* inotify_init1(2) wrapper (flags variant, e.g. IN_NONBLOCK/IN_CLOEXEC). */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
546 /* Userspace can usually survive runtime without inotify */
547 #undef TARGET_NR_inotify_init
548 #undef TARGET_NR_inotify_init1
549 #undef TARGET_NR_inotify_add_watch
550 #undef TARGET_NR_inotify_rm_watch
551 #endif /* CONFIG_INOTIFY */
553 #if defined(TARGET_NR_ppoll)
555 # define __NR_ppoll -1
557 #define __NR_sys_ppoll __NR_ppoll
558 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
559 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
563 #if defined(TARGET_NR_pselect6)
564 #ifndef __NR_pselect6
565 # define __NR_pselect6 -1
567 #define __NR_sys_pselect6 __NR_pselect6
568 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
569 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
572 #if defined(TARGET_NR_prlimit64)
573 #ifndef __NR_prlimit64
574 # define __NR_prlimit64 -1
576 #define __NR_sys_prlimit64 __NR_prlimit64
577 /* The glibc rlimit structure may not be that used by the underlying syscall */
/* Fixed-layout rlimit64 as expected by the raw prlimit64 syscall
 * (the glibc struct rlimit may differ on 32-bit hosts). */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
582 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
583 const struct host_rlimit64
*, new_limit
,
584 struct host_rlimit64
*, old_limit
)
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
/* Upper bound for both errno translation tables below. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {0};
611 * This list is the union of errno values overridden in asm-<arch>/errno.h
612 * minus the errnos that are not actually generic to all archs.
614 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
615 [EIDRM
] = TARGET_EIDRM
,
616 [ECHRNG
] = TARGET_ECHRNG
,
617 [EL2NSYNC
] = TARGET_EL2NSYNC
,
618 [EL3HLT
] = TARGET_EL3HLT
,
619 [EL3RST
] = TARGET_EL3RST
,
620 [ELNRNG
] = TARGET_ELNRNG
,
621 [EUNATCH
] = TARGET_EUNATCH
,
622 [ENOCSI
] = TARGET_ENOCSI
,
623 [EL2HLT
] = TARGET_EL2HLT
,
624 [EDEADLK
] = TARGET_EDEADLK
,
625 [ENOLCK
] = TARGET_ENOLCK
,
626 [EBADE
] = TARGET_EBADE
,
627 [EBADR
] = TARGET_EBADR
,
628 [EXFULL
] = TARGET_EXFULL
,
629 [ENOANO
] = TARGET_ENOANO
,
630 [EBADRQC
] = TARGET_EBADRQC
,
631 [EBADSLT
] = TARGET_EBADSLT
,
632 [EBFONT
] = TARGET_EBFONT
,
633 [ENOSTR
] = TARGET_ENOSTR
,
634 [ENODATA
] = TARGET_ENODATA
,
635 [ETIME
] = TARGET_ETIME
,
636 [ENOSR
] = TARGET_ENOSR
,
637 [ENONET
] = TARGET_ENONET
,
638 [ENOPKG
] = TARGET_ENOPKG
,
639 [EREMOTE
] = TARGET_EREMOTE
,
640 [ENOLINK
] = TARGET_ENOLINK
,
641 [EADV
] = TARGET_EADV
,
642 [ESRMNT
] = TARGET_ESRMNT
,
643 [ECOMM
] = TARGET_ECOMM
,
644 [EPROTO
] = TARGET_EPROTO
,
645 [EDOTDOT
] = TARGET_EDOTDOT
,
646 [EMULTIHOP
] = TARGET_EMULTIHOP
,
647 [EBADMSG
] = TARGET_EBADMSG
,
648 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
649 [EOVERFLOW
] = TARGET_EOVERFLOW
,
650 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
651 [EBADFD
] = TARGET_EBADFD
,
652 [EREMCHG
] = TARGET_EREMCHG
,
653 [ELIBACC
] = TARGET_ELIBACC
,
654 [ELIBBAD
] = TARGET_ELIBBAD
,
655 [ELIBSCN
] = TARGET_ELIBSCN
,
656 [ELIBMAX
] = TARGET_ELIBMAX
,
657 [ELIBEXEC
] = TARGET_ELIBEXEC
,
658 [EILSEQ
] = TARGET_EILSEQ
,
659 [ENOSYS
] = TARGET_ENOSYS
,
660 [ELOOP
] = TARGET_ELOOP
,
661 [ERESTART
] = TARGET_ERESTART
,
662 [ESTRPIPE
] = TARGET_ESTRPIPE
,
663 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
664 [EUSERS
] = TARGET_EUSERS
,
665 [ENOTSOCK
] = TARGET_ENOTSOCK
,
666 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
667 [EMSGSIZE
] = TARGET_EMSGSIZE
,
668 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
669 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
670 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
671 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
672 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
673 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
674 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
675 [EADDRINUSE
] = TARGET_EADDRINUSE
,
676 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
677 [ENETDOWN
] = TARGET_ENETDOWN
,
678 [ENETUNREACH
] = TARGET_ENETUNREACH
,
679 [ENETRESET
] = TARGET_ENETRESET
,
680 [ECONNABORTED
] = TARGET_ECONNABORTED
,
681 [ECONNRESET
] = TARGET_ECONNRESET
,
682 [ENOBUFS
] = TARGET_ENOBUFS
,
683 [EISCONN
] = TARGET_EISCONN
,
684 [ENOTCONN
] = TARGET_ENOTCONN
,
685 [EUCLEAN
] = TARGET_EUCLEAN
,
686 [ENOTNAM
] = TARGET_ENOTNAM
,
687 [ENAVAIL
] = TARGET_ENAVAIL
,
688 [EISNAM
] = TARGET_EISNAM
,
689 [EREMOTEIO
] = TARGET_EREMOTEIO
,
690 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
691 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
692 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
693 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
694 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
695 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
696 [EALREADY
] = TARGET_EALREADY
,
697 [EINPROGRESS
] = TARGET_EINPROGRESS
,
698 [ESTALE
] = TARGET_ESTALE
,
699 [ECANCELED
] = TARGET_ECANCELED
,
700 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
701 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
703 [ENOKEY
] = TARGET_ENOKEY
,
706 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
709 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
712 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
715 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
717 #ifdef ENOTRECOVERABLE
718 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
722 static inline int host_to_target_errno(int err
)
724 if(host_to_target_errno_table
[err
])
725 return host_to_target_errno_table
[err
];
729 static inline int target_to_host_errno(int err
)
731 if (target_to_host_errno_table
[err
])
732 return target_to_host_errno_table
[err
];
736 static inline abi_long
get_errno(abi_long ret
)
739 return -host_to_target_errno(errno
);
744 static inline int is_error(abi_long ret
)
746 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
749 char *target_strerror(int err
)
751 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
754 return strerror(target_to_host_errno(err
));
757 static abi_ulong target_brk
;
758 static abi_ulong target_original_brk
;
759 static abi_ulong brk_page
;
761 void target_set_brk(abi_ulong new_brk
)
763 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
764 brk_page
= HOST_PAGE_ALIGN(target_brk
);
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
770 /* do_brk() must return target values and target errnos. */
771 abi_long
do_brk(abi_ulong new_brk
)
773 abi_long mapped_addr
;
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
779 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
782 if (new_brk
< target_original_brk
) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk
<= brk_page
) {
791 /* Heap contents are initialized to zero, as for anonymous
793 if (new_brk
> target_brk
) {
794 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
796 target_brk
= new_brk
;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
807 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
808 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
809 PROT_READ
|PROT_WRITE
,
810 MAP_ANON
|MAP_PRIVATE
, 0, 0));
812 if (mapped_addr
== brk_page
) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contains garbage data due to a previous heap usage (grown
820 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
822 target_brk
= new_brk
;
823 brk_page
= HOST_PAGE_ALIGN(target_brk
);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
827 } else if (mapped_addr
!= -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
831 target_munmap(mapped_addr
, new_alloc_size
);
833 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
836 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM
;
844 /* For everything else, return the previous break. */
848 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
849 abi_ulong target_fds_addr
,
853 abi_ulong b
, *target_fds
;
855 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
856 if (!(target_fds
= lock_user(VERIFY_READ
,
858 sizeof(abi_ulong
) * nw
,
860 return -TARGET_EFAULT
;
864 for (i
= 0; i
< nw
; i
++) {
865 /* grab the abi_ulong */
866 __get_user(b
, &target_fds
[i
]);
867 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
868 /* check the bit inside the abi_ulong */
875 unlock_user(target_fds
, target_fds_addr
, 0);
880 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
881 abi_ulong target_fds_addr
,
884 if (target_fds_addr
) {
885 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
886 return -TARGET_EFAULT
;
894 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
900 abi_ulong
*target_fds
;
902 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
903 if (!(target_fds
= lock_user(VERIFY_WRITE
,
905 sizeof(abi_ulong
) * nw
,
907 return -TARGET_EFAULT
;
910 for (i
= 0; i
< nw
; i
++) {
912 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
913 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
916 __put_user(v
, &target_fds
[i
]);
919 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
924 #if defined(__alpha__)
930 static inline abi_long
host_to_target_clock_t(long ticks
)
932 #if HOST_HZ == TARGET_HZ
935 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
939 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
940 const struct rusage
*rusage
)
942 struct target_rusage
*target_rusage
;
944 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
945 return -TARGET_EFAULT
;
946 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
947 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
948 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
949 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
950 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
951 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
952 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
953 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
954 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
955 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
956 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
957 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
958 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
959 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
960 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
961 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
962 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
963 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
964 unlock_user_struct(target_rusage
, target_addr
, 1);
969 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
971 abi_ulong target_rlim_swap
;
974 target_rlim_swap
= tswapal(target_rlim
);
975 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
976 return RLIM_INFINITY
;
978 result
= target_rlim_swap
;
979 if (target_rlim_swap
!= (rlim_t
)result
)
980 return RLIM_INFINITY
;
985 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
987 abi_ulong target_rlim_swap
;
990 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
991 target_rlim_swap
= TARGET_RLIM_INFINITY
;
993 target_rlim_swap
= rlim
;
994 result
= tswapal(target_rlim_swap
);
999 static inline int target_to_host_resource(int code
)
1002 case TARGET_RLIMIT_AS
:
1004 case TARGET_RLIMIT_CORE
:
1006 case TARGET_RLIMIT_CPU
:
1008 case TARGET_RLIMIT_DATA
:
1010 case TARGET_RLIMIT_FSIZE
:
1011 return RLIMIT_FSIZE
;
1012 case TARGET_RLIMIT_LOCKS
:
1013 return RLIMIT_LOCKS
;
1014 case TARGET_RLIMIT_MEMLOCK
:
1015 return RLIMIT_MEMLOCK
;
1016 case TARGET_RLIMIT_MSGQUEUE
:
1017 return RLIMIT_MSGQUEUE
;
1018 case TARGET_RLIMIT_NICE
:
1020 case TARGET_RLIMIT_NOFILE
:
1021 return RLIMIT_NOFILE
;
1022 case TARGET_RLIMIT_NPROC
:
1023 return RLIMIT_NPROC
;
1024 case TARGET_RLIMIT_RSS
:
1026 case TARGET_RLIMIT_RTPRIO
:
1027 return RLIMIT_RTPRIO
;
1028 case TARGET_RLIMIT_SIGPENDING
:
1029 return RLIMIT_SIGPENDING
;
1030 case TARGET_RLIMIT_STACK
:
1031 return RLIMIT_STACK
;
1037 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1038 abi_ulong target_tv_addr
)
1040 struct target_timeval
*target_tv
;
1042 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1043 return -TARGET_EFAULT
;
1045 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1046 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1048 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1053 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1054 const struct timeval
*tv
)
1056 struct target_timeval
*target_tv
;
1058 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1059 return -TARGET_EFAULT
;
1061 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1062 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1064 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1069 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1072 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1073 abi_ulong target_mq_attr_addr
)
1075 struct target_mq_attr
*target_mq_attr
;
1077 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1078 target_mq_attr_addr
, 1))
1079 return -TARGET_EFAULT
;
1081 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1082 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1083 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1084 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1086 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1091 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1092 const struct mq_attr
*attr
)
1094 struct target_mq_attr
*target_mq_attr
;
1096 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1097 target_mq_attr_addr
, 0))
1098 return -TARGET_EFAULT
;
1100 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1101 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1102 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1103 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1105 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1111 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1112 /* do_select() must return target values and target errnos. */
1113 static abi_long
do_select(int n
,
1114 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1115 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1117 fd_set rfds
, wfds
, efds
;
1118 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1119 struct timeval tv
, *tv_ptr
;
1122 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1126 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1130 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1135 if (target_tv_addr
) {
1136 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1137 return -TARGET_EFAULT
;
1143 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1145 if (!is_error(ret
)) {
1146 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1147 return -TARGET_EFAULT
;
1148 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1149 return -TARGET_EFAULT
;
1150 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1151 return -TARGET_EFAULT
;
1153 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1154 return -TARGET_EFAULT
;
1161 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1164 return pipe2(host_pipe
, flags
);
1170 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1171 int flags
, int is_pipe2
)
1175 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1178 return get_errno(ret
);
1180 /* Several targets have special calling conventions for the original
1181 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1183 #if defined(TARGET_ALPHA)
1184 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1185 return host_pipe
[0];
1186 #elif defined(TARGET_MIPS)
1187 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1188 return host_pipe
[0];
1189 #elif defined(TARGET_SH4)
1190 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1191 return host_pipe
[0];
1195 if (put_user_s32(host_pipe
[0], pipedes
)
1196 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1197 return -TARGET_EFAULT
;
1198 return get_errno(ret
);
1201 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1202 abi_ulong target_addr
,
1205 struct target_ip_mreqn
*target_smreqn
;
1207 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1209 return -TARGET_EFAULT
;
1210 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1211 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1212 if (len
== sizeof(struct target_ip_mreqn
))
1213 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1214 unlock_user(target_smreqn
, target_addr
, 0);
1219 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1220 abi_ulong target_addr
,
1223 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1224 sa_family_t sa_family
;
1225 struct target_sockaddr
*target_saddr
;
1227 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1229 return -TARGET_EFAULT
;
1231 sa_family
= tswap16(target_saddr
->sa_family
);
1233 /* Oops. The caller might send a incomplete sun_path; sun_path
1234 * must be terminated by \0 (see the manual page), but
1235 * unfortunately it is quite common to specify sockaddr_un
1236 * length as "strlen(x->sun_path)" while it should be
1237 * "strlen(...) + 1". We'll fix that here if needed.
1238 * Linux kernel has a similar feature.
1241 if (sa_family
== AF_UNIX
) {
1242 if (len
< unix_maxlen
&& len
> 0) {
1243 char *cp
= (char*)target_saddr
;
1245 if ( cp
[len
-1] && !cp
[len
] )
1248 if (len
> unix_maxlen
)
1252 memcpy(addr
, target_saddr
, len
);
1253 addr
->sa_family
= sa_family
;
1254 unlock_user(target_saddr
, target_addr
, 0);
1259 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1260 struct sockaddr
*addr
,
1263 struct target_sockaddr
*target_saddr
;
1265 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1267 return -TARGET_EFAULT
;
1268 memcpy(target_saddr
, addr
, len
);
1269 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1270 unlock_user(target_saddr
, target_addr
, len
);
1275 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1276 struct target_msghdr
*target_msgh
)
1278 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1279 abi_long msg_controllen
;
1280 abi_ulong target_cmsg_addr
;
1281 struct target_cmsghdr
*target_cmsg
;
1282 socklen_t space
= 0;
1284 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1285 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1287 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1288 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1290 return -TARGET_EFAULT
;
1292 while (cmsg
&& target_cmsg
) {
1293 void *data
= CMSG_DATA(cmsg
);
1294 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1296 int len
= tswapal(target_cmsg
->cmsg_len
)
1297 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1299 space
+= CMSG_SPACE(len
);
1300 if (space
> msgh
->msg_controllen
) {
1301 space
-= CMSG_SPACE(len
);
1302 gemu_log("Host cmsg overflow\n");
1306 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1307 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1308 cmsg
->cmsg_len
= CMSG_LEN(len
);
1310 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1311 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1312 memcpy(data
, target_data
, len
);
1314 int *fd
= (int *)data
;
1315 int *target_fd
= (int *)target_data
;
1316 int i
, numfds
= len
/ sizeof(int);
1318 for (i
= 0; i
< numfds
; i
++)
1319 fd
[i
] = tswap32(target_fd
[i
]);
1322 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1323 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1325 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1327 msgh
->msg_controllen
= space
;
1331 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1332 struct msghdr
*msgh
)
1334 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1335 abi_long msg_controllen
;
1336 abi_ulong target_cmsg_addr
;
1337 struct target_cmsghdr
*target_cmsg
;
1338 socklen_t space
= 0;
1340 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1341 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1343 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1344 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1346 return -TARGET_EFAULT
;
1348 while (cmsg
&& target_cmsg
) {
1349 void *data
= CMSG_DATA(cmsg
);
1350 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1352 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1354 space
+= TARGET_CMSG_SPACE(len
);
1355 if (space
> msg_controllen
) {
1356 space
-= TARGET_CMSG_SPACE(len
);
1357 gemu_log("Target cmsg overflow\n");
1361 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1362 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1363 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1365 if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1366 (cmsg
->cmsg_type
== SCM_RIGHTS
)) {
1367 int *fd
= (int *)data
;
1368 int *target_fd
= (int *)target_data
;
1369 int i
, numfds
= len
/ sizeof(int);
1371 for (i
= 0; i
< numfds
; i
++)
1372 target_fd
[i
] = tswap32(fd
[i
]);
1373 } else if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1374 (cmsg
->cmsg_type
== SO_TIMESTAMP
) &&
1375 (len
== sizeof(struct timeval
))) {
1376 /* copy struct timeval to target */
1377 struct timeval
*tv
= (struct timeval
*)data
;
1378 struct target_timeval
*target_tv
=
1379 (struct target_timeval
*)target_data
;
1381 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1382 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1384 gemu_log("Unsupported ancillary data: %d/%d\n",
1385 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1386 memcpy(target_data
, data
, len
);
1389 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1390 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1392 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1394 target_msgh
->msg_controllen
= tswapal(space
);
1398 /* do_setsockopt() Must return target values and target errnos. */
1399 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1400 abi_ulong optval_addr
, socklen_t optlen
)
1404 struct ip_mreqn
*ip_mreq
;
1405 struct ip_mreq_source
*ip_mreq_source
;
1409 /* TCP options all take an 'int' value. */
1410 if (optlen
< sizeof(uint32_t))
1411 return -TARGET_EINVAL
;
1413 if (get_user_u32(val
, optval_addr
))
1414 return -TARGET_EFAULT
;
1415 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1422 case IP_ROUTER_ALERT
:
1426 case IP_MTU_DISCOVER
:
1432 case IP_MULTICAST_TTL
:
1433 case IP_MULTICAST_LOOP
:
1435 if (optlen
>= sizeof(uint32_t)) {
1436 if (get_user_u32(val
, optval_addr
))
1437 return -TARGET_EFAULT
;
1438 } else if (optlen
>= 1) {
1439 if (get_user_u8(val
, optval_addr
))
1440 return -TARGET_EFAULT
;
1442 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1444 case IP_ADD_MEMBERSHIP
:
1445 case IP_DROP_MEMBERSHIP
:
1446 if (optlen
< sizeof (struct target_ip_mreq
) ||
1447 optlen
> sizeof (struct target_ip_mreqn
))
1448 return -TARGET_EINVAL
;
1450 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1451 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1452 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1455 case IP_BLOCK_SOURCE
:
1456 case IP_UNBLOCK_SOURCE
:
1457 case IP_ADD_SOURCE_MEMBERSHIP
:
1458 case IP_DROP_SOURCE_MEMBERSHIP
:
1459 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1460 return -TARGET_EINVAL
;
1462 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1463 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1464 unlock_user (ip_mreq_source
, optval_addr
, 0);
1474 /* struct icmp_filter takes an u32 value */
1475 if (optlen
< sizeof(uint32_t)) {
1476 return -TARGET_EINVAL
;
1479 if (get_user_u32(val
, optval_addr
)) {
1480 return -TARGET_EFAULT
;
1482 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1483 &val
, sizeof(val
)));
1490 case TARGET_SOL_SOCKET
:
1492 case TARGET_SO_RCVTIMEO
:
1496 optname
= SO_RCVTIMEO
;
1499 if (optlen
!= sizeof(struct target_timeval
)) {
1500 return -TARGET_EINVAL
;
1503 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1504 return -TARGET_EFAULT
;
1507 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1511 case TARGET_SO_SNDTIMEO
:
1512 optname
= SO_SNDTIMEO
;
1514 /* Options with 'int' argument. */
1515 case TARGET_SO_DEBUG
:
1518 case TARGET_SO_REUSEADDR
:
1519 optname
= SO_REUSEADDR
;
1521 case TARGET_SO_TYPE
:
1524 case TARGET_SO_ERROR
:
1527 case TARGET_SO_DONTROUTE
:
1528 optname
= SO_DONTROUTE
;
1530 case TARGET_SO_BROADCAST
:
1531 optname
= SO_BROADCAST
;
1533 case TARGET_SO_SNDBUF
:
1534 optname
= SO_SNDBUF
;
1536 case TARGET_SO_RCVBUF
:
1537 optname
= SO_RCVBUF
;
1539 case TARGET_SO_KEEPALIVE
:
1540 optname
= SO_KEEPALIVE
;
1542 case TARGET_SO_OOBINLINE
:
1543 optname
= SO_OOBINLINE
;
1545 case TARGET_SO_NO_CHECK
:
1546 optname
= SO_NO_CHECK
;
1548 case TARGET_SO_PRIORITY
:
1549 optname
= SO_PRIORITY
;
1552 case TARGET_SO_BSDCOMPAT
:
1553 optname
= SO_BSDCOMPAT
;
1556 case TARGET_SO_PASSCRED
:
1557 optname
= SO_PASSCRED
;
1559 case TARGET_SO_TIMESTAMP
:
1560 optname
= SO_TIMESTAMP
;
1562 case TARGET_SO_RCVLOWAT
:
1563 optname
= SO_RCVLOWAT
;
1569 if (optlen
< sizeof(uint32_t))
1570 return -TARGET_EINVAL
;
1572 if (get_user_u32(val
, optval_addr
))
1573 return -TARGET_EFAULT
;
1574 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1578 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1579 ret
= -TARGET_ENOPROTOOPT
;
1584 /* do_getsockopt() Must return target values and target errnos. */
1585 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1586 abi_ulong optval_addr
, abi_ulong optlen
)
1593 case TARGET_SOL_SOCKET
:
1596 /* These don't just return a single integer */
1597 case TARGET_SO_LINGER
:
1598 case TARGET_SO_RCVTIMEO
:
1599 case TARGET_SO_SNDTIMEO
:
1600 case TARGET_SO_PEERNAME
:
1602 case TARGET_SO_PEERCRED
: {
1605 struct target_ucred
*tcr
;
1607 if (get_user_u32(len
, optlen
)) {
1608 return -TARGET_EFAULT
;
1611 return -TARGET_EINVAL
;
1615 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1623 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1624 return -TARGET_EFAULT
;
1626 __put_user(cr
.pid
, &tcr
->pid
);
1627 __put_user(cr
.uid
, &tcr
->uid
);
1628 __put_user(cr
.gid
, &tcr
->gid
);
1629 unlock_user_struct(tcr
, optval_addr
, 1);
1630 if (put_user_u32(len
, optlen
)) {
1631 return -TARGET_EFAULT
;
1635 /* Options with 'int' argument. */
1636 case TARGET_SO_DEBUG
:
1639 case TARGET_SO_REUSEADDR
:
1640 optname
= SO_REUSEADDR
;
1642 case TARGET_SO_TYPE
:
1645 case TARGET_SO_ERROR
:
1648 case TARGET_SO_DONTROUTE
:
1649 optname
= SO_DONTROUTE
;
1651 case TARGET_SO_BROADCAST
:
1652 optname
= SO_BROADCAST
;
1654 case TARGET_SO_SNDBUF
:
1655 optname
= SO_SNDBUF
;
1657 case TARGET_SO_RCVBUF
:
1658 optname
= SO_RCVBUF
;
1660 case TARGET_SO_KEEPALIVE
:
1661 optname
= SO_KEEPALIVE
;
1663 case TARGET_SO_OOBINLINE
:
1664 optname
= SO_OOBINLINE
;
1666 case TARGET_SO_NO_CHECK
:
1667 optname
= SO_NO_CHECK
;
1669 case TARGET_SO_PRIORITY
:
1670 optname
= SO_PRIORITY
;
1673 case TARGET_SO_BSDCOMPAT
:
1674 optname
= SO_BSDCOMPAT
;
1677 case TARGET_SO_PASSCRED
:
1678 optname
= SO_PASSCRED
;
1680 case TARGET_SO_TIMESTAMP
:
1681 optname
= SO_TIMESTAMP
;
1683 case TARGET_SO_RCVLOWAT
:
1684 optname
= SO_RCVLOWAT
;
1691 /* TCP options all take an 'int' value. */
1693 if (get_user_u32(len
, optlen
))
1694 return -TARGET_EFAULT
;
1696 return -TARGET_EINVAL
;
1698 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1704 if (put_user_u32(val
, optval_addr
))
1705 return -TARGET_EFAULT
;
1707 if (put_user_u8(val
, optval_addr
))
1708 return -TARGET_EFAULT
;
1710 if (put_user_u32(len
, optlen
))
1711 return -TARGET_EFAULT
;
1718 case IP_ROUTER_ALERT
:
1722 case IP_MTU_DISCOVER
:
1728 case IP_MULTICAST_TTL
:
1729 case IP_MULTICAST_LOOP
:
1730 if (get_user_u32(len
, optlen
))
1731 return -TARGET_EFAULT
;
1733 return -TARGET_EINVAL
;
1735 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1738 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1740 if (put_user_u32(len
, optlen
)
1741 || put_user_u8(val
, optval_addr
))
1742 return -TARGET_EFAULT
;
1744 if (len
> sizeof(int))
1746 if (put_user_u32(len
, optlen
)
1747 || put_user_u32(val
, optval_addr
))
1748 return -TARGET_EFAULT
;
1752 ret
= -TARGET_ENOPROTOOPT
;
1758 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1760 ret
= -TARGET_EOPNOTSUPP
;
1766 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1767 int count
, int copy
)
1769 struct target_iovec
*target_vec
;
1771 abi_ulong total_len
, max_len
;
1778 if (count
> IOV_MAX
) {
1783 vec
= calloc(count
, sizeof(struct iovec
));
1789 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1790 count
* sizeof(struct target_iovec
), 1);
1791 if (target_vec
== NULL
) {
1796 /* ??? If host page size > target page size, this will result in a
1797 value larger than what we can actually support. */
1798 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1801 for (i
= 0; i
< count
; i
++) {
1802 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1803 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1808 } else if (len
== 0) {
1809 /* Zero length pointer is ignored. */
1810 vec
[i
].iov_base
= 0;
1812 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1813 if (!vec
[i
].iov_base
) {
1817 if (len
> max_len
- total_len
) {
1818 len
= max_len
- total_len
;
1821 vec
[i
].iov_len
= len
;
1825 unlock_user(target_vec
, target_addr
, 0);
1831 unlock_user(target_vec
, target_addr
, 0);
1835 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1836 int count
, int copy
)
1838 struct target_iovec
*target_vec
;
1841 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1842 count
* sizeof(struct target_iovec
), 1);
1844 for (i
= 0; i
< count
; i
++) {
1845 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1846 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1850 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1852 unlock_user(target_vec
, target_addr
, 0);
1858 /* do_socket() Must return target values and target errnos. */
1859 static abi_long
do_socket(int domain
, int type
, int protocol
)
1861 #if defined(TARGET_MIPS)
1863 case TARGET_SOCK_DGRAM
:
1866 case TARGET_SOCK_STREAM
:
1869 case TARGET_SOCK_RAW
:
1872 case TARGET_SOCK_RDM
:
1875 case TARGET_SOCK_SEQPACKET
:
1876 type
= SOCK_SEQPACKET
;
1878 case TARGET_SOCK_PACKET
:
1883 if (domain
== PF_NETLINK
)
1884 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1885 return get_errno(socket(domain
, type
, protocol
));
1888 /* do_bind() Must return target values and target errnos. */
1889 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1895 if ((int)addrlen
< 0) {
1896 return -TARGET_EINVAL
;
1899 addr
= alloca(addrlen
+1);
1901 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1905 return get_errno(bind(sockfd
, addr
, addrlen
));
1908 /* do_connect() Must return target values and target errnos. */
1909 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1915 if ((int)addrlen
< 0) {
1916 return -TARGET_EINVAL
;
1919 addr
= alloca(addrlen
);
1921 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1925 return get_errno(connect(sockfd
, addr
, addrlen
));
1928 /* do_sendrecvmsg() Must return target values and target errnos. */
1929 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1930 int flags
, int send
)
1933 struct target_msghdr
*msgp
;
1937 abi_ulong target_vec
;
1940 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1944 return -TARGET_EFAULT
;
1945 if (msgp
->msg_name
) {
1946 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1947 msg
.msg_name
= alloca(msg
.msg_namelen
);
1948 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1954 msg
.msg_name
= NULL
;
1955 msg
.msg_namelen
= 0;
1957 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1958 msg
.msg_control
= alloca(msg
.msg_controllen
);
1959 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1961 count
= tswapal(msgp
->msg_iovlen
);
1962 target_vec
= tswapal(msgp
->msg_iov
);
1963 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1964 target_vec
, count
, send
);
1966 ret
= -host_to_target_errno(errno
);
1969 msg
.msg_iovlen
= count
;
1973 ret
= target_to_host_cmsg(&msg
, msgp
);
1975 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1977 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1978 if (!is_error(ret
)) {
1980 ret
= host_to_target_cmsg(msgp
, &msg
);
1981 if (!is_error(ret
)) {
1982 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1983 if (msg
.msg_name
!= NULL
) {
1984 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1985 msg
.msg_name
, msg
.msg_namelen
);
1997 unlock_iovec(vec
, target_vec
, count
, !send
);
1999 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2003 /* do_accept() Must return target values and target errnos. */
2004 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
2005 abi_ulong target_addrlen_addr
)
2011 if (target_addr
== 0)
2012 return get_errno(accept(fd
, NULL
, NULL
));
2014 /* linux returns EINVAL if addrlen pointer is invalid */
2015 if (get_user_u32(addrlen
, target_addrlen_addr
))
2016 return -TARGET_EINVAL
;
2018 if ((int)addrlen
< 0) {
2019 return -TARGET_EINVAL
;
2022 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2023 return -TARGET_EINVAL
;
2025 addr
= alloca(addrlen
);
2027 ret
= get_errno(accept(fd
, addr
, &addrlen
));
2028 if (!is_error(ret
)) {
2029 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2030 if (put_user_u32(addrlen
, target_addrlen_addr
))
2031 ret
= -TARGET_EFAULT
;
2036 /* do_getpeername() Must return target values and target errnos. */
2037 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2038 abi_ulong target_addrlen_addr
)
2044 if (get_user_u32(addrlen
, target_addrlen_addr
))
2045 return -TARGET_EFAULT
;
2047 if ((int)addrlen
< 0) {
2048 return -TARGET_EINVAL
;
2051 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2052 return -TARGET_EFAULT
;
2054 addr
= alloca(addrlen
);
2056 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2057 if (!is_error(ret
)) {
2058 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2059 if (put_user_u32(addrlen
, target_addrlen_addr
))
2060 ret
= -TARGET_EFAULT
;
2065 /* do_getsockname() Must return target values and target errnos. */
2066 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2067 abi_ulong target_addrlen_addr
)
2073 if (get_user_u32(addrlen
, target_addrlen_addr
))
2074 return -TARGET_EFAULT
;
2076 if ((int)addrlen
< 0) {
2077 return -TARGET_EINVAL
;
2080 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2081 return -TARGET_EFAULT
;
2083 addr
= alloca(addrlen
);
2085 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2086 if (!is_error(ret
)) {
2087 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2088 if (put_user_u32(addrlen
, target_addrlen_addr
))
2089 ret
= -TARGET_EFAULT
;
2094 /* do_socketpair() Must return target values and target errnos. */
2095 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2096 abi_ulong target_tab_addr
)
2101 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2102 if (!is_error(ret
)) {
2103 if (put_user_s32(tab
[0], target_tab_addr
)
2104 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2105 ret
= -TARGET_EFAULT
;
2110 /* do_sendto() Must return target values and target errnos. */
2111 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2112 abi_ulong target_addr
, socklen_t addrlen
)
2118 if ((int)addrlen
< 0) {
2119 return -TARGET_EINVAL
;
2122 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2124 return -TARGET_EFAULT
;
2126 addr
= alloca(addrlen
);
2127 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2129 unlock_user(host_msg
, msg
, 0);
2132 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2134 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2136 unlock_user(host_msg
, msg
, 0);
2140 /* do_recvfrom() Must return target values and target errnos. */
2141 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2142 abi_ulong target_addr
,
2143 abi_ulong target_addrlen
)
2150 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2152 return -TARGET_EFAULT
;
2154 if (get_user_u32(addrlen
, target_addrlen
)) {
2155 ret
= -TARGET_EFAULT
;
2158 if ((int)addrlen
< 0) {
2159 ret
= -TARGET_EINVAL
;
2162 addr
= alloca(addrlen
);
2163 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2165 addr
= NULL
; /* To keep compiler quiet. */
2166 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2168 if (!is_error(ret
)) {
2170 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2171 if (put_user_u32(addrlen
, target_addrlen
)) {
2172 ret
= -TARGET_EFAULT
;
2176 unlock_user(host_msg
, msg
, len
);
2179 unlock_user(host_msg
, msg
, 0);
2184 #ifdef TARGET_NR_socketcall
2185 /* do_socketcall() Must return target values and target errnos. */
2186 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2189 const int n
= sizeof(abi_ulong
);
2194 abi_ulong domain
, type
, protocol
;
2196 if (get_user_ual(domain
, vptr
)
2197 || get_user_ual(type
, vptr
+ n
)
2198 || get_user_ual(protocol
, vptr
+ 2 * n
))
2199 return -TARGET_EFAULT
;
2201 ret
= do_socket(domain
, type
, protocol
);
2207 abi_ulong target_addr
;
2210 if (get_user_ual(sockfd
, vptr
)
2211 || get_user_ual(target_addr
, vptr
+ n
)
2212 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2213 return -TARGET_EFAULT
;
2215 ret
= do_bind(sockfd
, target_addr
, addrlen
);
2218 case SOCKOP_connect
:
2221 abi_ulong target_addr
;
2224 if (get_user_ual(sockfd
, vptr
)
2225 || get_user_ual(target_addr
, vptr
+ n
)
2226 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2227 return -TARGET_EFAULT
;
2229 ret
= do_connect(sockfd
, target_addr
, addrlen
);
2234 abi_ulong sockfd
, backlog
;
2236 if (get_user_ual(sockfd
, vptr
)
2237 || get_user_ual(backlog
, vptr
+ n
))
2238 return -TARGET_EFAULT
;
2240 ret
= get_errno(listen(sockfd
, backlog
));
2246 abi_ulong target_addr
, target_addrlen
;
2248 if (get_user_ual(sockfd
, vptr
)
2249 || get_user_ual(target_addr
, vptr
+ n
)
2250 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2251 return -TARGET_EFAULT
;
2253 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
2256 case SOCKOP_getsockname
:
2259 abi_ulong target_addr
, target_addrlen
;
2261 if (get_user_ual(sockfd
, vptr
)
2262 || get_user_ual(target_addr
, vptr
+ n
)
2263 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2264 return -TARGET_EFAULT
;
2266 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
2269 case SOCKOP_getpeername
:
2272 abi_ulong target_addr
, target_addrlen
;
2274 if (get_user_ual(sockfd
, vptr
)
2275 || get_user_ual(target_addr
, vptr
+ n
)
2276 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2277 return -TARGET_EFAULT
;
2279 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
2282 case SOCKOP_socketpair
:
2284 abi_ulong domain
, type
, protocol
;
2287 if (get_user_ual(domain
, vptr
)
2288 || get_user_ual(type
, vptr
+ n
)
2289 || get_user_ual(protocol
, vptr
+ 2 * n
)
2290 || get_user_ual(tab
, vptr
+ 3 * n
))
2291 return -TARGET_EFAULT
;
2293 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2303 if (get_user_ual(sockfd
, vptr
)
2304 || get_user_ual(msg
, vptr
+ n
)
2305 || get_user_ual(len
, vptr
+ 2 * n
)
2306 || get_user_ual(flags
, vptr
+ 3 * n
))
2307 return -TARGET_EFAULT
;
2309 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2319 if (get_user_ual(sockfd
, vptr
)
2320 || get_user_ual(msg
, vptr
+ n
)
2321 || get_user_ual(len
, vptr
+ 2 * n
)
2322 || get_user_ual(flags
, vptr
+ 3 * n
))
2323 return -TARGET_EFAULT
;
2325 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2337 if (get_user_ual(sockfd
, vptr
)
2338 || get_user_ual(msg
, vptr
+ n
)
2339 || get_user_ual(len
, vptr
+ 2 * n
)
2340 || get_user_ual(flags
, vptr
+ 3 * n
)
2341 || get_user_ual(addr
, vptr
+ 4 * n
)
2342 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2343 return -TARGET_EFAULT
;
2345 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2348 case SOCKOP_recvfrom
:
2357 if (get_user_ual(sockfd
, vptr
)
2358 || get_user_ual(msg
, vptr
+ n
)
2359 || get_user_ual(len
, vptr
+ 2 * n
)
2360 || get_user_ual(flags
, vptr
+ 3 * n
)
2361 || get_user_ual(addr
, vptr
+ 4 * n
)
2362 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2363 return -TARGET_EFAULT
;
2365 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2368 case SOCKOP_shutdown
:
2370 abi_ulong sockfd
, how
;
2372 if (get_user_ual(sockfd
, vptr
)
2373 || get_user_ual(how
, vptr
+ n
))
2374 return -TARGET_EFAULT
;
2376 ret
= get_errno(shutdown(sockfd
, how
));
2379 case SOCKOP_sendmsg
:
2380 case SOCKOP_recvmsg
:
2383 abi_ulong target_msg
;
2386 if (get_user_ual(fd
, vptr
)
2387 || get_user_ual(target_msg
, vptr
+ n
)
2388 || get_user_ual(flags
, vptr
+ 2 * n
))
2389 return -TARGET_EFAULT
;
2391 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2392 (num
== SOCKOP_sendmsg
));
2395 case SOCKOP_setsockopt
:
2403 if (get_user_ual(sockfd
, vptr
)
2404 || get_user_ual(level
, vptr
+ n
)
2405 || get_user_ual(optname
, vptr
+ 2 * n
)
2406 || get_user_ual(optval
, vptr
+ 3 * n
)
2407 || get_user_ual(optlen
, vptr
+ 4 * n
))
2408 return -TARGET_EFAULT
;
2410 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2413 case SOCKOP_getsockopt
:
2421 if (get_user_ual(sockfd
, vptr
)
2422 || get_user_ual(level
, vptr
+ n
)
2423 || get_user_ual(optname
, vptr
+ 2 * n
)
2424 || get_user_ual(optval
, vptr
+ 3 * n
)
2425 || get_user_ual(optlen
, vptr
+ 4 * n
))
2426 return -TARGET_EFAULT
;
2428 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2432 gemu_log("Unsupported socketcall: %d\n", num
);
2433 ret
= -TARGET_ENOSYS
;
2440 #define N_SHM_REGIONS 32
2442 static struct shm_region
{
2445 } shm_regions
[N_SHM_REGIONS
];
2447 struct target_ipc_perm
2454 unsigned short int mode
;
2455 unsigned short int __pad1
;
2456 unsigned short int __seq
;
2457 unsigned short int __pad2
;
2458 abi_ulong __unused1
;
2459 abi_ulong __unused2
;
2462 struct target_semid_ds
2464 struct target_ipc_perm sem_perm
;
2465 abi_ulong sem_otime
;
2466 abi_ulong __unused1
;
2467 abi_ulong sem_ctime
;
2468 abi_ulong __unused2
;
2469 abi_ulong sem_nsems
;
2470 abi_ulong __unused3
;
2471 abi_ulong __unused4
;
2474 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2475 abi_ulong target_addr
)
2477 struct target_ipc_perm
*target_ip
;
2478 struct target_semid_ds
*target_sd
;
2480 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2481 return -TARGET_EFAULT
;
2482 target_ip
= &(target_sd
->sem_perm
);
2483 host_ip
->__key
= tswapal(target_ip
->__key
);
2484 host_ip
->uid
= tswapal(target_ip
->uid
);
2485 host_ip
->gid
= tswapal(target_ip
->gid
);
2486 host_ip
->cuid
= tswapal(target_ip
->cuid
);
2487 host_ip
->cgid
= tswapal(target_ip
->cgid
);
2488 host_ip
->mode
= tswap16(target_ip
->mode
);
2489 unlock_user_struct(target_sd
, target_addr
, 0);
2493 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2494 struct ipc_perm
*host_ip
)
2496 struct target_ipc_perm
*target_ip
;
2497 struct target_semid_ds
*target_sd
;
2499 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2500 return -TARGET_EFAULT
;
2501 target_ip
= &(target_sd
->sem_perm
);
2502 target_ip
->__key
= tswapal(host_ip
->__key
);
2503 target_ip
->uid
= tswapal(host_ip
->uid
);
2504 target_ip
->gid
= tswapal(host_ip
->gid
);
2505 target_ip
->cuid
= tswapal(host_ip
->cuid
);
2506 target_ip
->cgid
= tswapal(host_ip
->cgid
);
2507 target_ip
->mode
= tswap16(host_ip
->mode
);
2508 unlock_user_struct(target_sd
, target_addr
, 1);
2512 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2513 abi_ulong target_addr
)
2515 struct target_semid_ds
*target_sd
;
2517 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2518 return -TARGET_EFAULT
;
2519 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2520 return -TARGET_EFAULT
;
2521 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2522 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2523 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2524 unlock_user_struct(target_sd
, target_addr
, 0);
2528 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2529 struct semid_ds
*host_sd
)
2531 struct target_semid_ds
*target_sd
;
2533 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2534 return -TARGET_EFAULT
;
2535 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2536 return -TARGET_EFAULT
;
2537 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2538 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2539 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2540 unlock_user_struct(target_sd
, target_addr
, 1);
2544 struct target_seminfo
{
2557 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2558 struct seminfo
*host_seminfo
)
2560 struct target_seminfo
*target_seminfo
;
2561 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2562 return -TARGET_EFAULT
;
2563 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2564 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2565 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2566 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2567 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2568 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2569 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2570 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2571 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2572 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2573 unlock_user_struct(target_seminfo
, target_addr
, 1);
2579 struct semid_ds
*buf
;
2580 unsigned short *array
;
2581 struct seminfo
*__buf
;
2584 union target_semun
{
2591 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2592 abi_ulong target_addr
)
2595 unsigned short *array
;
2597 struct semid_ds semid_ds
;
2600 semun
.buf
= &semid_ds
;
2602 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2604 return get_errno(ret
);
2606 nsems
= semid_ds
.sem_nsems
;
2608 *host_array
= malloc(nsems
*sizeof(unsigned short));
2609 array
= lock_user(VERIFY_READ
, target_addr
,
2610 nsems
*sizeof(unsigned short), 1);
2612 return -TARGET_EFAULT
;
2614 for(i
=0; i
<nsems
; i
++) {
2615 __get_user((*host_array
)[i
], &array
[i
]);
2617 unlock_user(array
, target_addr
, 0);
2622 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2623 unsigned short **host_array
)
2626 unsigned short *array
;
2628 struct semid_ds semid_ds
;
2631 semun
.buf
= &semid_ds
;
2633 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2635 return get_errno(ret
);
2637 nsems
= semid_ds
.sem_nsems
;
2639 array
= lock_user(VERIFY_WRITE
, target_addr
,
2640 nsems
*sizeof(unsigned short), 0);
2642 return -TARGET_EFAULT
;
2644 for(i
=0; i
<nsems
; i
++) {
2645 __put_user((*host_array
)[i
], &array
[i
]);
2648 unlock_user(array
, target_addr
, 1);
2653 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2654 union target_semun target_su
)
2657 struct semid_ds dsarg
;
2658 unsigned short *array
= NULL
;
2659 struct seminfo seminfo
;
2660 abi_long ret
= -TARGET_EINVAL
;
2667 arg
.val
= tswap32(target_su
.val
);
2668 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2669 target_su
.val
= tswap32(arg
.val
);
2673 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2677 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2678 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2685 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2689 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2690 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2696 arg
.__buf
= &seminfo
;
2697 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2698 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2706 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2713 struct target_sembuf
{
2714 unsigned short sem_num
;
2719 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2720 abi_ulong target_addr
,
2723 struct target_sembuf
*target_sembuf
;
2726 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2727 nsops
*sizeof(struct target_sembuf
), 1);
2729 return -TARGET_EFAULT
;
2731 for(i
=0; i
<nsops
; i
++) {
2732 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2733 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2734 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2737 unlock_user(target_sembuf
, target_addr
, 0);
2742 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2744 struct sembuf sops
[nsops
];
2746 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2747 return -TARGET_EFAULT
;
2749 return semop(semid
, sops
, nsops
);
2752 struct target_msqid_ds
2754 struct target_ipc_perm msg_perm
;
2755 abi_ulong msg_stime
;
2756 #if TARGET_ABI_BITS == 32
2757 abi_ulong __unused1
;
2759 abi_ulong msg_rtime
;
2760 #if TARGET_ABI_BITS == 32
2761 abi_ulong __unused2
;
2763 abi_ulong msg_ctime
;
2764 #if TARGET_ABI_BITS == 32
2765 abi_ulong __unused3
;
2767 abi_ulong __msg_cbytes
;
2769 abi_ulong msg_qbytes
;
2770 abi_ulong msg_lspid
;
2771 abi_ulong msg_lrpid
;
2772 abi_ulong __unused4
;
2773 abi_ulong __unused5
;
2776 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2777 abi_ulong target_addr
)
2779 struct target_msqid_ds
*target_md
;
2781 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2782 return -TARGET_EFAULT
;
2783 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2784 return -TARGET_EFAULT
;
2785 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2786 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2787 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2788 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2789 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2790 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2791 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2792 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2793 unlock_user_struct(target_md
, target_addr
, 0);
2797 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2798 struct msqid_ds
*host_md
)
2800 struct target_msqid_ds
*target_md
;
2802 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2803 return -TARGET_EFAULT
;
2804 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2805 return -TARGET_EFAULT
;
2806 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2807 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2808 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2809 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2810 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2811 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2812 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2813 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2814 unlock_user_struct(target_md
, target_addr
, 1);
2818 struct target_msginfo
{
2826 unsigned short int msgseg
;
2829 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2830 struct msginfo
*host_msginfo
)
2832 struct target_msginfo
*target_msginfo
;
2833 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2834 return -TARGET_EFAULT
;
2835 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2836 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2837 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2838 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2839 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2840 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2841 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2842 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2843 unlock_user_struct(target_msginfo
, target_addr
, 1);
2847 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2849 struct msqid_ds dsarg
;
2850 struct msginfo msginfo
;
2851 abi_long ret
= -TARGET_EINVAL
;
2859 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2860 return -TARGET_EFAULT
;
2861 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2862 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2863 return -TARGET_EFAULT
;
2866 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2870 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2871 if (host_to_target_msginfo(ptr
, &msginfo
))
2872 return -TARGET_EFAULT
;
2879 struct target_msgbuf
{
2884 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2885 unsigned int msgsz
, int msgflg
)
2887 struct target_msgbuf
*target_mb
;
2888 struct msgbuf
*host_mb
;
2891 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2892 return -TARGET_EFAULT
;
2893 host_mb
= malloc(msgsz
+sizeof(long));
2894 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2895 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2896 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2898 unlock_user_struct(target_mb
, msgp
, 0);
2903 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2904 unsigned int msgsz
, abi_long msgtyp
,
2907 struct target_msgbuf
*target_mb
;
2909 struct msgbuf
*host_mb
;
2912 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2913 return -TARGET_EFAULT
;
2915 host_mb
= g_malloc(msgsz
+sizeof(long));
2916 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2919 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2920 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2921 if (!target_mtext
) {
2922 ret
= -TARGET_EFAULT
;
2925 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2926 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2929 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2933 unlock_user_struct(target_mb
, msgp
, 1);
2938 struct target_shmid_ds
2940 struct target_ipc_perm shm_perm
;
2941 abi_ulong shm_segsz
;
2942 abi_ulong shm_atime
;
2943 #if TARGET_ABI_BITS == 32
2944 abi_ulong __unused1
;
2946 abi_ulong shm_dtime
;
2947 #if TARGET_ABI_BITS == 32
2948 abi_ulong __unused2
;
2950 abi_ulong shm_ctime
;
2951 #if TARGET_ABI_BITS == 32
2952 abi_ulong __unused3
;
2956 abi_ulong shm_nattch
;
2957 unsigned long int __unused4
;
2958 unsigned long int __unused5
;
2961 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2962 abi_ulong target_addr
)
2964 struct target_shmid_ds
*target_sd
;
2966 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2967 return -TARGET_EFAULT
;
2968 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2969 return -TARGET_EFAULT
;
2970 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2971 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2972 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2973 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2974 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2975 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2976 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2977 unlock_user_struct(target_sd
, target_addr
, 0);
2981 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2982 struct shmid_ds
*host_sd
)
2984 struct target_shmid_ds
*target_sd
;
2986 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2987 return -TARGET_EFAULT
;
2988 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2989 return -TARGET_EFAULT
;
2990 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2991 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2992 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2993 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2994 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2995 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2996 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2997 unlock_user_struct(target_sd
, target_addr
, 1);
3001 struct target_shminfo
{
3009 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3010 struct shminfo
*host_shminfo
)
3012 struct target_shminfo
*target_shminfo
;
3013 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3014 return -TARGET_EFAULT
;
3015 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3016 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3017 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3018 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3019 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3020 unlock_user_struct(target_shminfo
, target_addr
, 1);
3024 struct target_shm_info
{
3029 abi_ulong swap_attempts
;
3030 abi_ulong swap_successes
;
3033 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3034 struct shm_info
*host_shm_info
)
3036 struct target_shm_info
*target_shm_info
;
3037 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3038 return -TARGET_EFAULT
;
3039 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3040 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3041 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3042 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3043 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3044 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3045 unlock_user_struct(target_shm_info
, target_addr
, 1);
3049 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3051 struct shmid_ds dsarg
;
3052 struct shminfo shminfo
;
3053 struct shm_info shm_info
;
3054 abi_long ret
= -TARGET_EINVAL
;
3062 if (target_to_host_shmid_ds(&dsarg
, buf
))
3063 return -TARGET_EFAULT
;
3064 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3065 if (host_to_target_shmid_ds(buf
, &dsarg
))
3066 return -TARGET_EFAULT
;
3069 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3070 if (host_to_target_shminfo(buf
, &shminfo
))
3071 return -TARGET_EFAULT
;
3074 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3075 if (host_to_target_shm_info(buf
, &shm_info
))
3076 return -TARGET_EFAULT
;
3081 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3088 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3092 struct shmid_ds shm_info
;
3095 /* find out the length of the shared memory segment */
3096 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3097 if (is_error(ret
)) {
3098 /* can't get length, bail out */
3105 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3107 abi_ulong mmap_start
;
3109 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3111 if (mmap_start
== -1) {
3113 host_raddr
= (void *)-1;
3115 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3118 if (host_raddr
== (void *)-1) {
3120 return get_errno((long)host_raddr
);
3122 raddr
=h2g((unsigned long)host_raddr
);
3124 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3125 PAGE_VALID
| PAGE_READ
|
3126 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3128 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3129 if (shm_regions
[i
].start
== 0) {
3130 shm_regions
[i
].start
= raddr
;
3131 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3141 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3145 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3146 if (shm_regions
[i
].start
== shmaddr
) {
3147 shm_regions
[i
].start
= 0;
3148 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3153 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Multiplexed SysV IPC entry point (the single "ipc" syscall on some
 * targets): decode the sub-call and version from 'call' and dispatch
 * to the per-operation helpers above.
 * NOTE(review): the source for this dispatcher was garbled;
 * reconstructed from upstream — confirm the IPCOP_msgrcv version-0
 * kludge path against the tree.
 */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third,
                        (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style callers pass a kludge struct holding the
                 * real msgp and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Result address is written through the 'third' pointer. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux
         * platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3257 /* kernel structure types definitions */
3259 #define STRUCT(name, ...) STRUCT_ ## name,
3260 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3262 #include "syscall_types.h"
3265 #undef STRUCT_SPECIAL
3267 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3268 #define STRUCT_SPECIAL(name)
3269 #include "syscall_types.h"
3271 #undef STRUCT_SPECIAL
3273 typedef struct IOCTLEntry IOCTLEntry
;
3275 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3276 int fd
, abi_long cmd
, abi_long arg
);
3279 unsigned int target_cmd
;
3280 unsigned int host_cmd
;
3283 do_ioctl_fn
*do_ioctl
;
3284 const argtype arg_type
[5];
3287 #define IOC_R 0x0001
3288 #define IOC_W 0x0002
3289 #define IOC_RW (IOC_R | IOC_W)
3291 #define MAX_STRUCT_SIZE 4096
3293 #ifdef CONFIG_FIEMAP
3294 /* So fiemap access checks don't overflow on 32 bit systems.
3295 * This is very slightly smaller than the limit imposed by
3296 * the underlying kernel.
3298 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3299 / sizeof(struct fiemap_extent))
3301 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3302 int fd
, abi_long cmd
, abi_long arg
)
3304 /* The parameter for this ioctl is a struct fiemap followed
3305 * by an array of struct fiemap_extent whose size is set
3306 * in fiemap->fm_extent_count. The array is filled in by the
3309 int target_size_in
, target_size_out
;
3311 const argtype
*arg_type
= ie
->arg_type
;
3312 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3315 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3319 assert(arg_type
[0] == TYPE_PTR
);
3320 assert(ie
->access
== IOC_RW
);
3322 target_size_in
= thunk_type_size(arg_type
, 0);
3323 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3325 return -TARGET_EFAULT
;
3327 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3328 unlock_user(argptr
, arg
, 0);
3329 fm
= (struct fiemap
*)buf_temp
;
3330 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3331 return -TARGET_EINVAL
;
3334 outbufsz
= sizeof (*fm
) +
3335 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3337 if (outbufsz
> MAX_STRUCT_SIZE
) {
3338 /* We can't fit all the extents into the fixed size buffer.
3339 * Allocate one that is large enough and use it instead.
3341 fm
= malloc(outbufsz
);
3343 return -TARGET_ENOMEM
;
3345 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3348 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3349 if (!is_error(ret
)) {
3350 target_size_out
= target_size_in
;
3351 /* An extent_count of 0 means we were only counting the extents
3352 * so there are no structs to copy
3354 if (fm
->fm_extent_count
!= 0) {
3355 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3357 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3359 ret
= -TARGET_EFAULT
;
3361 /* Convert the struct fiemap */
3362 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3363 if (fm
->fm_extent_count
!= 0) {
3364 p
= argptr
+ target_size_in
;
3365 /* ...and then all the struct fiemap_extents */
3366 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3367 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3372 unlock_user(argptr
, arg
, target_size_out
);
3382 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3383 int fd
, abi_long cmd
, abi_long arg
)
3385 const argtype
*arg_type
= ie
->arg_type
;
3389 struct ifconf
*host_ifconf
;
3391 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3392 int target_ifreq_size
;
3397 abi_long target_ifc_buf
;
3401 assert(arg_type
[0] == TYPE_PTR
);
3402 assert(ie
->access
== IOC_RW
);
3405 target_size
= thunk_type_size(arg_type
, 0);
3407 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3409 return -TARGET_EFAULT
;
3410 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3411 unlock_user(argptr
, arg
, 0);
3413 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3414 target_ifc_len
= host_ifconf
->ifc_len
;
3415 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3417 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3418 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3419 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3421 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3422 if (outbufsz
> MAX_STRUCT_SIZE
) {
3423 /* We can't fit all the extents into the fixed size buffer.
3424 * Allocate one that is large enough and use it instead.
3426 host_ifconf
= malloc(outbufsz
);
3428 return -TARGET_ENOMEM
;
3430 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3433 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3435 host_ifconf
->ifc_len
= host_ifc_len
;
3436 host_ifconf
->ifc_buf
= host_ifc_buf
;
3438 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3439 if (!is_error(ret
)) {
3440 /* convert host ifc_len to target ifc_len */
3442 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3443 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3444 host_ifconf
->ifc_len
= target_ifc_len
;
3446 /* restore target ifc_buf */
3448 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3450 /* copy struct ifconf to target user */
3452 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3454 return -TARGET_EFAULT
;
3455 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3456 unlock_user(argptr
, arg
, target_size
);
3458 /* copy ifreq[] to target user */
3460 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3461 for (i
= 0; i
< nb_ifreq
; i
++) {
3462 thunk_convert(argptr
+ i
* target_ifreq_size
,
3463 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3464 ifreq_arg_type
, THUNK_TARGET
);
3466 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3476 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3477 abi_long cmd
, abi_long arg
)
3480 struct dm_ioctl
*host_dm
;
3481 abi_long guest_data
;
3482 uint32_t guest_data_size
;
3484 const argtype
*arg_type
= ie
->arg_type
;
3486 void *big_buf
= NULL
;
3490 target_size
= thunk_type_size(arg_type
, 0);
3491 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3493 ret
= -TARGET_EFAULT
;
3496 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3497 unlock_user(argptr
, arg
, 0);
3499 /* buf_temp is too small, so fetch things into a bigger buffer */
3500 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3501 memcpy(big_buf
, buf_temp
, target_size
);
3505 guest_data
= arg
+ host_dm
->data_start
;
3506 if ((guest_data
- arg
) < 0) {
3510 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3511 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3513 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3514 switch (ie
->host_cmd
) {
3516 case DM_LIST_DEVICES
:
3519 case DM_DEV_SUSPEND
:
3522 case DM_TABLE_STATUS
:
3523 case DM_TABLE_CLEAR
:
3525 case DM_LIST_VERSIONS
:
3529 case DM_DEV_SET_GEOMETRY
:
3530 /* data contains only strings */
3531 memcpy(host_data
, argptr
, guest_data_size
);
3534 memcpy(host_data
, argptr
, guest_data_size
);
3535 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3539 void *gspec
= argptr
;
3540 void *cur_data
= host_data
;
3541 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3542 int spec_size
= thunk_type_size(arg_type
, 0);
3545 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3546 struct dm_target_spec
*spec
= cur_data
;
3550 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3551 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3553 spec
->next
= sizeof(*spec
) + slen
;
3554 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3556 cur_data
+= spec
->next
;
3561 ret
= -TARGET_EINVAL
;
3564 unlock_user(argptr
, guest_data
, 0);
3566 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3567 if (!is_error(ret
)) {
3568 guest_data
= arg
+ host_dm
->data_start
;
3569 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3570 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3571 switch (ie
->host_cmd
) {
3576 case DM_DEV_SUSPEND
:
3579 case DM_TABLE_CLEAR
:
3581 case DM_DEV_SET_GEOMETRY
:
3582 /* no return data */
3584 case DM_LIST_DEVICES
:
3586 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3587 uint32_t remaining_data
= guest_data_size
;
3588 void *cur_data
= argptr
;
3589 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3590 int nl_size
= 12; /* can't use thunk_size due to alignment */
3593 uint32_t next
= nl
->next
;
3595 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3597 if (remaining_data
< nl
->next
) {
3598 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3601 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3602 strcpy(cur_data
+ nl_size
, nl
->name
);
3603 cur_data
+= nl
->next
;
3604 remaining_data
-= nl
->next
;
3608 nl
= (void*)nl
+ next
;
3613 case DM_TABLE_STATUS
:
3615 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3616 void *cur_data
= argptr
;
3617 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3618 int spec_size
= thunk_type_size(arg_type
, 0);
3621 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3622 uint32_t next
= spec
->next
;
3623 int slen
= strlen((char*)&spec
[1]) + 1;
3624 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3625 if (guest_data_size
< spec
->next
) {
3626 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3629 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3630 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3631 cur_data
= argptr
+ spec
->next
;
3632 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3638 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3639 int count
= *(uint32_t*)hdata
;
3640 uint64_t *hdev
= hdata
+ 8;
3641 uint64_t *gdev
= argptr
+ 8;
3644 *(uint32_t*)argptr
= tswap32(count
);
3645 for (i
= 0; i
< count
; i
++) {
3646 *gdev
= tswap64(*hdev
);
3652 case DM_LIST_VERSIONS
:
3654 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3655 uint32_t remaining_data
= guest_data_size
;
3656 void *cur_data
= argptr
;
3657 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3658 int vers_size
= thunk_type_size(arg_type
, 0);
3661 uint32_t next
= vers
->next
;
3663 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3665 if (remaining_data
< vers
->next
) {
3666 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3669 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3670 strcpy(cur_data
+ vers_size
, vers
->name
);
3671 cur_data
+= vers
->next
;
3672 remaining_data
-= vers
->next
;
3676 vers
= (void*)vers
+ next
;
3681 ret
= -TARGET_EINVAL
;
3684 unlock_user(argptr
, guest_data
, guest_data_size
);
3686 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3688 ret
= -TARGET_EFAULT
;
3691 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3692 unlock_user(argptr
, arg
, target_size
);
3699 static IOCTLEntry ioctl_entries
[] = {
3700 #define IOCTL(cmd, access, ...) \
3701 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3702 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3703 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3708 /* ??? Implement proper locking for ioctls. */
3709 /* do_ioctl() Must return target values and target errnos. */
3710 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3712 const IOCTLEntry
*ie
;
3713 const argtype
*arg_type
;
3715 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3721 if (ie
->target_cmd
== 0) {
3722 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3723 return -TARGET_ENOSYS
;
3725 if (ie
->target_cmd
== cmd
)
3729 arg_type
= ie
->arg_type
;
3731 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3734 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3737 switch(arg_type
[0]) {
3740 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3745 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3749 target_size
= thunk_type_size(arg_type
, 0);
3750 switch(ie
->access
) {
3752 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3753 if (!is_error(ret
)) {
3754 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3756 return -TARGET_EFAULT
;
3757 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3758 unlock_user(argptr
, arg
, target_size
);
3762 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3764 return -TARGET_EFAULT
;
3765 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3766 unlock_user(argptr
, arg
, 0);
3767 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3771 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3773 return -TARGET_EFAULT
;
3774 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3775 unlock_user(argptr
, arg
, 0);
3776 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3777 if (!is_error(ret
)) {
3778 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3780 return -TARGET_EFAULT
;
3781 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3782 unlock_user(argptr
, arg
, target_size
);
3788 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3789 (long)cmd
, arg_type
[0]);
3790 ret
= -TARGET_ENOSYS
;
3796 static const bitmask_transtbl iflag_tbl
[] = {
3797 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3798 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3799 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3800 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3801 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3802 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3803 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3804 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3805 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3806 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3807 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3808 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3809 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3810 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3814 static const bitmask_transtbl oflag_tbl
[] = {
3815 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3816 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3817 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3818 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3819 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3820 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3821 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3822 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3823 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3824 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3825 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3826 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3827 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3828 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3829 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3830 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3831 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3832 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3833 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3834 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3835 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3836 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3837 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3838 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3842 static const bitmask_transtbl cflag_tbl
[] = {
3843 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3844 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3845 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3846 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3847 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3848 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3849 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3850 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3851 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3852 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3853 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3854 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3855 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3856 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3857 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3858 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3859 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3860 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3861 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3862 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3863 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3864 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3865 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3866 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3867 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3868 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3869 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3870 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3871 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3872 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3873 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3877 static const bitmask_transtbl lflag_tbl
[] = {
3878 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3879 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3880 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3881 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3882 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3883 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3884 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3885 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3886 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3887 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3888 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3889 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3890 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3891 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3892 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3896 static void target_to_host_termios (void *dst
, const void *src
)
3898 struct host_termios
*host
= dst
;
3899 const struct target_termios
*target
= src
;
3902 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3904 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3906 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3908 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3909 host
->c_line
= target
->c_line
;
3911 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3912 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3913 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3914 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3915 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3916 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3917 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3918 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3919 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3920 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3921 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3922 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3923 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3924 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3925 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3926 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3927 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3928 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3931 static void host_to_target_termios (void *dst
, const void *src
)
3933 struct target_termios
*target
= dst
;
3934 const struct host_termios
*host
= src
;
3937 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3939 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3941 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3943 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3944 target
->c_line
= host
->c_line
;
3946 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3947 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3948 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3949 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3950 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3951 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3952 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3953 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3954 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3955 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3956 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3957 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3958 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3959 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3960 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3961 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3962 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3963 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3966 static const StructEntry struct_termios_def
= {
3967 .convert
= { host_to_target_termios
, target_to_host_termios
},
3968 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3969 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3972 static bitmask_transtbl mmap_flags_tbl
[] = {
3973 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3974 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3975 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3976 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3977 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3978 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3979 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3980 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3984 #if defined(TARGET_I386)
3986 /* NOTE: there is really one LDT for all the threads */
3987 static uint8_t *ldt_table
;
3989 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3996 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3997 if (size
> bytecount
)
3999 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4001 return -TARGET_EFAULT
;
4002 /* ??? Should this by byteswapped? */
4003 memcpy(p
, ldt_table
, size
);
4004 unlock_user(p
, ptr
, size
);
4008 /* XXX: add locking support */
4009 static abi_long
write_ldt(CPUX86State
*env
,
4010 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4012 struct target_modify_ldt_ldt_s ldt_info
;
4013 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4014 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4015 int seg_not_present
, useable
, lm
;
4016 uint32_t *lp
, entry_1
, entry_2
;
4018 if (bytecount
!= sizeof(ldt_info
))
4019 return -TARGET_EINVAL
;
4020 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4021 return -TARGET_EFAULT
;
4022 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4023 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4024 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4025 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4026 unlock_user_struct(target_ldt_info
, ptr
, 0);
4028 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4029 return -TARGET_EINVAL
;
4030 seg_32bit
= ldt_info
.flags
& 1;
4031 contents
= (ldt_info
.flags
>> 1) & 3;
4032 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4033 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4034 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4035 useable
= (ldt_info
.flags
>> 6) & 1;
4039 lm
= (ldt_info
.flags
>> 7) & 1;
4041 if (contents
== 3) {
4043 return -TARGET_EINVAL
;
4044 if (seg_not_present
== 0)
4045 return -TARGET_EINVAL
;
4047 /* allocate the LDT */
4049 env
->ldt
.base
= target_mmap(0,
4050 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4051 PROT_READ
|PROT_WRITE
,
4052 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4053 if (env
->ldt
.base
== -1)
4054 return -TARGET_ENOMEM
;
4055 memset(g2h(env
->ldt
.base
), 0,
4056 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4057 env
->ldt
.limit
= 0xffff;
4058 ldt_table
= g2h(env
->ldt
.base
);
4061 /* NOTE: same code as Linux kernel */
4062 /* Allow LDTs to be cleared by the user. */
4063 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4066 read_exec_only
== 1 &&
4068 limit_in_pages
== 0 &&
4069 seg_not_present
== 1 &&
4077 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4078 (ldt_info
.limit
& 0x0ffff);
4079 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4080 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4081 (ldt_info
.limit
& 0xf0000) |
4082 ((read_exec_only
^ 1) << 9) |
4084 ((seg_not_present
^ 1) << 15) |
4086 (limit_in_pages
<< 23) |
4090 entry_2
|= (useable
<< 20);
4092 /* Install the new entry ... */
4094 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4095 lp
[0] = tswap32(entry_1
);
4096 lp
[1] = tswap32(entry_2
);
4100 /* specific and weird i386 syscalls */
4101 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4102 unsigned long bytecount
)
4108 ret
= read_ldt(ptr
, bytecount
);
4111 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4114 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4117 ret
= -TARGET_ENOSYS
;
4123 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4124 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4126 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4127 struct target_modify_ldt_ldt_s ldt_info
;
4128 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4129 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4130 int seg_not_present
, useable
, lm
;
4131 uint32_t *lp
, entry_1
, entry_2
;
4134 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4135 if (!target_ldt_info
)
4136 return -TARGET_EFAULT
;
4137 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4138 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4139 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4140 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4141 if (ldt_info
.entry_number
== -1) {
4142 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4143 if (gdt_table
[i
] == 0) {
4144 ldt_info
.entry_number
= i
;
4145 target_ldt_info
->entry_number
= tswap32(i
);
4150 unlock_user_struct(target_ldt_info
, ptr
, 1);
4152 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4153 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4154 return -TARGET_EINVAL
;
4155 seg_32bit
= ldt_info
.flags
& 1;
4156 contents
= (ldt_info
.flags
>> 1) & 3;
4157 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4158 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4159 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4160 useable
= (ldt_info
.flags
>> 6) & 1;
4164 lm
= (ldt_info
.flags
>> 7) & 1;
4167 if (contents
== 3) {
4168 if (seg_not_present
== 0)
4169 return -TARGET_EINVAL
;
4172 /* NOTE: same code as Linux kernel */
4173 /* Allow LDTs to be cleared by the user. */
4174 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4175 if ((contents
== 0 &&
4176 read_exec_only
== 1 &&
4178 limit_in_pages
== 0 &&
4179 seg_not_present
== 1 &&
4187 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4188 (ldt_info
.limit
& 0x0ffff);
4189 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4190 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4191 (ldt_info
.limit
& 0xf0000) |
4192 ((read_exec_only
^ 1) << 9) |
4194 ((seg_not_present
^ 1) << 15) |
4196 (limit_in_pages
<< 23) |
4201 /* Install the new entry ... */
4203 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4204 lp
[0] = tswap32(entry_1
);
4205 lp
[1] = tswap32(entry_2
);
4209 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4211 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4212 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4213 uint32_t base_addr
, limit
, flags
;
4214 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4215 int seg_not_present
, useable
, lm
;
4216 uint32_t *lp
, entry_1
, entry_2
;
4218 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4219 if (!target_ldt_info
)
4220 return -TARGET_EFAULT
;
4221 idx
= tswap32(target_ldt_info
->entry_number
);
4222 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4223 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4224 unlock_user_struct(target_ldt_info
, ptr
, 1);
4225 return -TARGET_EINVAL
;
4227 lp
= (uint32_t *)(gdt_table
+ idx
);
4228 entry_1
= tswap32(lp
[0]);
4229 entry_2
= tswap32(lp
[1]);
4231 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4232 contents
= (entry_2
>> 10) & 3;
4233 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4234 seg_32bit
= (entry_2
>> 22) & 1;
4235 limit_in_pages
= (entry_2
>> 23) & 1;
4236 useable
= (entry_2
>> 20) & 1;
4240 lm
= (entry_2
>> 21) & 1;
4242 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4243 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4244 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4245 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4246 base_addr
= (entry_1
>> 16) |
4247 (entry_2
& 0xff000000) |
4248 ((entry_2
& 0xff) << 16);
4249 target_ldt_info
->base_addr
= tswapal(base_addr
);
4250 target_ldt_info
->limit
= tswap32(limit
);
4251 target_ldt_info
->flags
= tswap32(flags
);
4252 unlock_user_struct(target_ldt_info
, ptr
, 1);
4255 #endif /* TARGET_I386 && TARGET_ABI32 */
4257 #ifndef TARGET_ABI32
4258 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4265 case TARGET_ARCH_SET_GS
:
4266 case TARGET_ARCH_SET_FS
:
4267 if (code
== TARGET_ARCH_SET_GS
)
4271 cpu_x86_load_seg(env
, idx
, 0);
4272 env
->segs
[idx
].base
= addr
;
4274 case TARGET_ARCH_GET_GS
:
4275 case TARGET_ARCH_GET_FS
:
4276 if (code
== TARGET_ARCH_GET_GS
)
4280 val
= env
->segs
[idx
].base
;
4281 if (put_user(val
, addr
, abi_ulong
))
4282 ret
= -TARGET_EFAULT
;
4285 ret
= -TARGET_EINVAL
;
4292 #endif /* defined(TARGET_I386) */
4294 #define NEW_STACK_SIZE 0x40000
4296 #if defined(CONFIG_USE_NPTL)
4298 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4301 pthread_mutex_t mutex
;
4302 pthread_cond_t cond
;
4305 abi_ulong child_tidptr
;
4306 abi_ulong parent_tidptr
;
4310 static void *clone_func(void *arg
)
4312 new_thread_info
*info
= arg
;
4318 ts
= (TaskState
*)thread_env
->opaque
;
4319 info
->tid
= gettid();
4320 env
->host_tid
= info
->tid
;
4322 if (info
->child_tidptr
)
4323 put_user_u32(info
->tid
, info
->child_tidptr
);
4324 if (info
->parent_tidptr
)
4325 put_user_u32(info
->tid
, info
->parent_tidptr
);
4326 /* Enable signals. */
4327 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4328 /* Signal to the parent that we're ready. */
4329 pthread_mutex_lock(&info
->mutex
);
4330 pthread_cond_broadcast(&info
->cond
);
4331 pthread_mutex_unlock(&info
->mutex
);
4332 /* Wait until the parent has finshed initializing the tls state. */
4333 pthread_mutex_lock(&clone_lock
);
4334 pthread_mutex_unlock(&clone_lock
);
4341 static int clone_func(void *arg
)
4343 CPUArchState
*env
= arg
;
4350 /* do_fork() Must return host values and target errnos (unlike most
4351 do_*() functions). */
4352 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4353 abi_ulong parent_tidptr
, target_ulong newtls
,
4354 abi_ulong child_tidptr
)
4358 CPUArchState
*new_env
;
4359 #if defined(CONFIG_USE_NPTL)
4360 unsigned int nptl_flags
;
4366 /* Emulate vfork() with fork() */
4367 if (flags
& CLONE_VFORK
)
4368 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4370 if (flags
& CLONE_VM
) {
4371 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4372 #if defined(CONFIG_USE_NPTL)
4373 new_thread_info info
;
4374 pthread_attr_t attr
;
4376 ts
= g_malloc0(sizeof(TaskState
));
4377 init_task_state(ts
);
4378 /* we create a new CPU instance. */
4379 new_env
= cpu_copy(env
);
4380 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4381 cpu_reset(ENV_GET_CPU(new_env
));
4383 /* Init regs that differ from the parent. */
4384 cpu_clone_regs(new_env
, newsp
);
4385 new_env
->opaque
= ts
;
4386 ts
->bprm
= parent_ts
->bprm
;
4387 ts
->info
= parent_ts
->info
;
4388 #if defined(CONFIG_USE_NPTL)
4390 flags
&= ~CLONE_NPTL_FLAGS2
;
4392 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4393 ts
->child_tidptr
= child_tidptr
;
4396 if (nptl_flags
& CLONE_SETTLS
)
4397 cpu_set_tls (new_env
, newtls
);
4399 /* Grab a mutex so that thread setup appears atomic. */
4400 pthread_mutex_lock(&clone_lock
);
4402 memset(&info
, 0, sizeof(info
));
4403 pthread_mutex_init(&info
.mutex
, NULL
);
4404 pthread_mutex_lock(&info
.mutex
);
4405 pthread_cond_init(&info
.cond
, NULL
);
4407 if (nptl_flags
& CLONE_CHILD_SETTID
)
4408 info
.child_tidptr
= child_tidptr
;
4409 if (nptl_flags
& CLONE_PARENT_SETTID
)
4410 info
.parent_tidptr
= parent_tidptr
;
4412 ret
= pthread_attr_init(&attr
);
4413 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4414 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4415 /* It is not safe to deliver signals until the child has finished
4416 initializing, so temporarily block all signals. */
4417 sigfillset(&sigmask
);
4418 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4420 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4421 /* TODO: Free new CPU state if thread creation failed. */
4423 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4424 pthread_attr_destroy(&attr
);
4426 /* Wait for the child to initialize. */
4427 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4429 if (flags
& CLONE_PARENT_SETTID
)
4430 put_user_u32(ret
, parent_tidptr
);
4434 pthread_mutex_unlock(&info
.mutex
);
4435 pthread_cond_destroy(&info
.cond
);
4436 pthread_mutex_destroy(&info
.mutex
);
4437 pthread_mutex_unlock(&clone_lock
);
4439 if (flags
& CLONE_NPTL_FLAGS2
)
4441 /* This is probably going to die very quickly, but do it anyway. */
4442 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4444 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4446 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4450 /* if no CLONE_VM, we consider it is a fork */
4451 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4456 /* Child Process. */
4457 cpu_clone_regs(env
, newsp
);
4459 #if defined(CONFIG_USE_NPTL)
4460 /* There is a race condition here. The parent process could
4461 theoretically read the TID in the child process before the child
4462 tid is set. This would require using either ptrace
4463 (not implemented) or having *_tidptr to point at a shared memory
4464 mapping. We can't repeat the spinlock hack used above because
4465 the child process gets its own copy of the lock. */
4466 if (flags
& CLONE_CHILD_SETTID
)
4467 put_user_u32(gettid(), child_tidptr
);
4468 if (flags
& CLONE_PARENT_SETTID
)
4469 put_user_u32(gettid(), parent_tidptr
);
4470 ts
= (TaskState
*)env
->opaque
;
4471 if (flags
& CLONE_SETTLS
)
4472 cpu_set_tls (env
, newtls
);
4473 if (flags
& CLONE_CHILD_CLEARTID
)
4474 ts
->child_tidptr
= child_tidptr
;
4483 /* warning : doesn't handle linux specific flags... */
4484 static int target_to_host_fcntl_cmd(int cmd
)
4487 case TARGET_F_DUPFD
:
4488 case TARGET_F_GETFD
:
4489 case TARGET_F_SETFD
:
4490 case TARGET_F_GETFL
:
4491 case TARGET_F_SETFL
:
4493 case TARGET_F_GETLK
:
4495 case TARGET_F_SETLK
:
4497 case TARGET_F_SETLKW
:
4499 case TARGET_F_GETOWN
:
4501 case TARGET_F_SETOWN
:
4503 case TARGET_F_GETSIG
:
4505 case TARGET_F_SETSIG
:
4507 #if TARGET_ABI_BITS == 32
4508 case TARGET_F_GETLK64
:
4510 case TARGET_F_SETLK64
:
4512 case TARGET_F_SETLKW64
:
4515 case TARGET_F_SETLEASE
:
4517 case TARGET_F_GETLEASE
:
4519 #ifdef F_DUPFD_CLOEXEC
4520 case TARGET_F_DUPFD_CLOEXEC
:
4521 return F_DUPFD_CLOEXEC
;
4523 case TARGET_F_NOTIFY
:
4526 return -TARGET_EINVAL
;
4528 return -TARGET_EINVAL
;
4531 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4532 static const bitmask_transtbl flock_tbl
[] = {
4533 TRANSTBL_CONVERT(F_RDLCK
),
4534 TRANSTBL_CONVERT(F_WRLCK
),
4535 TRANSTBL_CONVERT(F_UNLCK
),
4536 TRANSTBL_CONVERT(F_EXLCK
),
4537 TRANSTBL_CONVERT(F_SHLCK
),
4541 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4544 struct target_flock
*target_fl
;
4545 struct flock64 fl64
;
4546 struct target_flock64
*target_fl64
;
4548 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4550 if (host_cmd
== -TARGET_EINVAL
)
4554 case TARGET_F_GETLK
:
4555 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4556 return -TARGET_EFAULT
;
4558 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4559 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4560 fl
.l_start
= tswapal(target_fl
->l_start
);
4561 fl
.l_len
= tswapal(target_fl
->l_len
);
4562 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4563 unlock_user_struct(target_fl
, arg
, 0);
4564 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4566 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4567 return -TARGET_EFAULT
;
4569 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4570 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4571 target_fl
->l_start
= tswapal(fl
.l_start
);
4572 target_fl
->l_len
= tswapal(fl
.l_len
);
4573 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4574 unlock_user_struct(target_fl
, arg
, 1);
4578 case TARGET_F_SETLK
:
4579 case TARGET_F_SETLKW
:
4580 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4581 return -TARGET_EFAULT
;
4583 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4584 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4585 fl
.l_start
= tswapal(target_fl
->l_start
);
4586 fl
.l_len
= tswapal(target_fl
->l_len
);
4587 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4588 unlock_user_struct(target_fl
, arg
, 0);
4589 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4592 case TARGET_F_GETLK64
:
4593 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4594 return -TARGET_EFAULT
;
4596 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4597 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4598 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4599 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4600 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4601 unlock_user_struct(target_fl64
, arg
, 0);
4602 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4604 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4605 return -TARGET_EFAULT
;
4606 target_fl64
->l_type
=
4607 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4608 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4609 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4610 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4611 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4612 unlock_user_struct(target_fl64
, arg
, 1);
4615 case TARGET_F_SETLK64
:
4616 case TARGET_F_SETLKW64
:
4617 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4618 return -TARGET_EFAULT
;
4620 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4621 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4622 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4623 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4624 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4625 unlock_user_struct(target_fl64
, arg
, 0);
4626 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4629 case TARGET_F_GETFL
:
4630 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4632 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4636 case TARGET_F_SETFL
:
4637 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4640 case TARGET_F_SETOWN
:
4641 case TARGET_F_GETOWN
:
4642 case TARGET_F_SETSIG
:
4643 case TARGET_F_GETSIG
:
4644 case TARGET_F_SETLEASE
:
4645 case TARGET_F_GETLEASE
:
4646 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4650 ret
= get_errno(fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* The *uid16 syscall family carries 16-bit IDs; clamp 32-bit host IDs
   into that range on the way out and sign-extend the -1 sentinel on the
   way in. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}
#else /* !USE_UID16 */
/* 32-bit ID ABIs: identity conversions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}
#endif /* USE_UID16 */
4716 void syscall_init(void)
4719 const argtype
*arg_type
;
4723 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4724 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4725 #include "syscall_types.h"
4727 #undef STRUCT_SPECIAL
4729 /* Build target_to_host_errno_table[] table from
4730 * host_to_target_errno_table[]. */
4731 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4732 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4735 /* we patch the ioctl size if necessary. We rely on the fact that
4736 no ioctl has all the bits at '1' in the size field */
4738 while (ie
->target_cmd
!= 0) {
4739 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4740 TARGET_IOC_SIZEMASK
) {
4741 arg_type
= ie
->arg_type
;
4742 if (arg_type
[0] != TYPE_PTR
) {
4743 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4748 size
= thunk_type_size(arg_type
, 0);
4749 ie
->target_cmd
= (ie
->target_cmd
&
4750 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4751 (size
<< TARGET_IOC_SIZESHIFT
);
4754 /* automatic consistency check if same arch */
4755 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4756 (defined(__x86_64__) && defined(TARGET_X86_64))
4757 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4758 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4759 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit syscall register halves into one 64-bit offset,
   respecting the guest's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in the first word; the second is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64: on ABIs that align 64-bit register pairs the
   low/high halves arrive one slot later, so shift the arguments first. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64: same register-pair realignment as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
4810 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4811 abi_ulong target_addr
)
4813 struct target_timespec
*target_ts
;
4815 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4816 return -TARGET_EFAULT
;
4817 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4818 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4819 unlock_user_struct(target_ts
, target_addr
, 0);
4823 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4824 struct timespec
*host_ts
)
4826 struct target_timespec
*target_ts
;
4828 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4829 return -TARGET_EFAULT
;
4830 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4831 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4832 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat into the guest's 64-bit stat layout at
   TARGET_ADDR.  ARM EABI guests use their own target_eabi_stat64
   layout; everything else uses target_stat / target_stat64 depending
   on the ABI width.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4968 int get_osversion(void)
4970 static int osversion
;
4971 struct new_utsname buf
;
4976 if (qemu_uname_release
&& *qemu_uname_release
) {
4977 s
= qemu_uname_release
;
4979 if (sys_uname(&buf
))
4984 for (i
= 0; i
< 3; i
++) {
4986 while (*s
>= '0' && *s
<= '9') {
4991 tmp
= (tmp
<< 8) + n
;
/* Emulate /proc/self/maps for the guest: read the HOST's maps file,
   translate host addresses to guest addresses (h2g) for ranges that are
   valid in the guest address space, and write the rewritten lines to fd.
   On targets that keep stack info in the TaskState, a synthetic [stack]
   line is appended at the end.
   NOTE(review): several lines are missing from this extraction (braces,
   error paths, fclose/free); existing code kept byte-identical. */
5000 static int open_self_maps(void *cpu_env
, int fd
)
5002 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5003 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5010 fp
= fopen("/proc/self/maps", "r");
/* Walk the host maps file one line at a time. */
5015 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5016 int fields
, dev_maj
, dev_min
, inode
;
5017 uint64_t min
, max
, offset
;
5018 char flag_r
, flag_w
, flag_x
, flag_p
;
5019 char path
[512] = "";
/* Parse the standard maps format: start-end, perms, offset, dev, inode,
   and an optional pathname (hence 10 or 11 fields accepted below). */
5020 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5021 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5022 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
/* Skip malformed lines. */
5024 if ((fields
< 10) || (fields
> 11)) {
/* The host's own [stack] entry is not meaningful to the guest (a fake one
   is emitted below on some targets). */
5027 if (!strncmp(path
, "[stack]", 7)) {
/* Only emit ranges that map back into the guest address space. */
5030 if (h2g_valid(min
) && h2g_valid(max
)) {
5031 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5032 " %c%c%c%c %08" PRIx64
" %02x:%02x %d%s%s\n",
5033 h2g(min
), h2g(max
), flag_r
, flag_w
,
5034 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
/* Separate pathname with a space only when one was parsed. */
5035 path
[0] ? " " : "", path
);
/* Append a synthetic guest [stack] line built from the image-info stack
   bounds, rounding the top up to a target page boundary. */
5042 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5043 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5044 (unsigned long long)ts
->info
->stack_limit
,
5045 (unsigned long long)(ts
->info
->start_stack
+
5046 (TARGET_PAGE_SIZE
- 1)) & TARGET_PAGE_MASK
,
5047 (unsigned long long)0);
/* Emulate /proc/self/stat for the guest: emit 44 space-separated fields,
   faking most of them as 0 and filling in only the few the guest is
   likely to care about (pid, comm, and the stack-start field).
   NOTE(review): lines are missing from this extraction (the i == 0 branch
   and loop close are not visible); existing code kept byte-identical. */
5053 static int open_self_stat(void *cpu_env
, int fd
)
5055 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5056 abi_ulong start_stack
= ts
->info
->start_stack
;
/* /proc/<pid>/stat has a fixed set of fields; emit 44 of them. */
5059 for (i
= 0; i
< 44; i
++) {
/* (field 0, presumably the pid -- the branch is missing from this
   extraction) printed as a decimal value. */
5067 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
/* Field 1: comm, the executable name in parentheses. */
5068 } else if (i
== 1) {
5070 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
/* Field 27: startstack (guest stack base from the image info). */
5071 } else if (i
== 27) {
5074 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5076 /* for the rest, there is MasterCard */
/* Every other field is faked as 0; the last one ends the line. */
5077 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
/* Short writes are treated as failure. */
5081 if (write(fd
, buf
, len
) != len
) {
/* Emulate /proc/self/auxv for the guest: copy the auxiliary vector that
   was saved on the guest stack at exec time straight into fd, then rewind
   the descriptor so the caller can read it from the start.
   NOTE(review): lines are missing from this extraction (error handling
   around lock_user/write); existing code kept byte-identical. */
5089 static int open_self_auxv(void *cpu_env
, int fd
)
5091 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
/* Guest address and length of the saved auxv, recorded at image load. */
5092 abi_ulong auxv
= ts
->info
->saved_auxv
;
5093 abi_ulong len
= ts
->info
->auxv_len
;
5097 * Auxiliary vector is stored in target process stack.
5098 * read in whole auxv vector and copy it to file
/* Map the guest memory for reading ... */
5100 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
/* ... dump it verbatim into the file ... */
5104 r
= write(fd
, ptr
, len
);
/* ... rewind so subsequent reads see the data from offset 0, and release
   the guest mapping. */
5111 lseek(fd
, 0, SEEK_SET
);
5112 unlock_user(ptr
, auxv
, len
);
/* open(2) wrapper used by the open/openat syscall paths: intercepts a few
   /proc/self files whose host contents would be wrong for the guest
   (maps, stat, auxv), synthesizes their contents into an anonymous temp
   file via the matching fill callback, and otherwise falls through to a
   plain host open().
   NOTE(review): lines are missing from this extraction (struct fake_open
   definition, table terminator, TMPDIR fallback, error paths); existing
   code kept byte-identical. */
5118 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5121 const char *filename
;
/* fill: callback that writes the emulated file contents into fd. */
5122 int (*fill
)(void *cpu_env
, int fd
);
5124 const struct fake_open
*fake_open
;
/* Table of intercepted pathnames and their content generators. */
5125 static const struct fake_open fakes
[] = {
5126 { "/proc/self/maps", open_self_maps
},
5127 { "/proc/self/stat", open_self_stat
},
5128 { "/proc/self/auxv", open_self_auxv
},
/* Prefix-match the requested pathname against the table (terminated by a
   NULL filename entry). */
5132 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5133 if (!strncmp(pathname
, fake_open
->filename
,
5134 strlen(fake_open
->filename
))) {
/* A match was found: build the emulated file. */
5139 if (fake_open
->filename
) {
5141 char filename
[PATH_MAX
];
5144 /* create temporary file to map stat to */
5145 tmpdir
= getenv("TMPDIR");
5148 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5149 fd
= mkstemp(filename
);
/* Generate the contents; a non-zero return from the fill callback is an
   error. */
5155 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
/* Rewind so the guest reads the synthesized data from the beginning. */
5159 lseek(fd
, 0, SEEK_SET
);
/* Not an intercepted path: regular host open with errno translation. */
5164 return get_errno(open(path(pathname
), flags
, mode
));
5167 /* do_syscall() should always have a single exit point at the end so
5168 that actions, such as logging of syscall results, can be performed.
5169 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5170 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5171 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5172 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5181 gemu_log("syscall %d", num
);
5184 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5187 case TARGET_NR_exit
:
5188 #ifdef CONFIG_USE_NPTL
5189 /* In old applications this may be used to implement _exit(2).
5190 However in threaded applictions it is used for thread termination,
5191 and _exit_group is used for application termination.
5192 Do thread termination if we have more then one thread. */
5193 /* FIXME: This probably breaks if a signal arrives. We should probably
5194 be disabling signals. */
5195 if (first_cpu
->next_cpu
) {
5197 CPUArchState
**lastp
;
5203 while (p
&& p
!= (CPUArchState
*)cpu_env
) {
5204 lastp
= &p
->next_cpu
;
5207 /* If we didn't find the CPU for this thread then something is
5211 /* Remove the CPU from the list. */
5212 *lastp
= p
->next_cpu
;
5214 ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5215 if (ts
->child_tidptr
) {
5216 put_user_u32(0, ts
->child_tidptr
);
5217 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5221 object_delete(OBJECT(ENV_GET_CPU(cpu_env
)));
5229 gdb_exit(cpu_env
, arg1
);
5231 ret
= 0; /* avoid warning */
5233 case TARGET_NR_read
:
5237 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5239 ret
= get_errno(read(arg1
, p
, arg3
));
5240 unlock_user(p
, arg2
, ret
);
5243 case TARGET_NR_write
:
5244 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5246 ret
= get_errno(write(arg1
, p
, arg3
));
5247 unlock_user(p
, arg2
, 0);
5249 case TARGET_NR_open
:
5250 if (!(p
= lock_user_string(arg1
)))
5252 ret
= get_errno(do_open(cpu_env
, p
,
5253 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5255 unlock_user(p
, arg1
, 0);
5257 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5258 case TARGET_NR_openat
:
5259 if (!(p
= lock_user_string(arg2
)))
5261 ret
= get_errno(sys_openat(arg1
,
5263 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5265 unlock_user(p
, arg2
, 0);
5268 case TARGET_NR_close
:
5269 ret
= get_errno(close(arg1
));
5274 case TARGET_NR_fork
:
5275 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5277 #ifdef TARGET_NR_waitpid
5278 case TARGET_NR_waitpid
:
5281 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5282 if (!is_error(ret
) && arg2
&& ret
5283 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5288 #ifdef TARGET_NR_waitid
5289 case TARGET_NR_waitid
:
5293 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5294 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5295 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5297 host_to_target_siginfo(p
, &info
);
5298 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5303 #ifdef TARGET_NR_creat /* not on alpha */
5304 case TARGET_NR_creat
:
5305 if (!(p
= lock_user_string(arg1
)))
5307 ret
= get_errno(creat(p
, arg2
));
5308 unlock_user(p
, arg1
, 0);
5311 case TARGET_NR_link
:
5314 p
= lock_user_string(arg1
);
5315 p2
= lock_user_string(arg2
);
5317 ret
= -TARGET_EFAULT
;
5319 ret
= get_errno(link(p
, p2
));
5320 unlock_user(p2
, arg2
, 0);
5321 unlock_user(p
, arg1
, 0);
5324 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5325 case TARGET_NR_linkat
:
5330 p
= lock_user_string(arg2
);
5331 p2
= lock_user_string(arg4
);
5333 ret
= -TARGET_EFAULT
;
5335 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
5336 unlock_user(p
, arg2
, 0);
5337 unlock_user(p2
, arg4
, 0);
5341 case TARGET_NR_unlink
:
5342 if (!(p
= lock_user_string(arg1
)))
5344 ret
= get_errno(unlink(p
));
5345 unlock_user(p
, arg1
, 0);
5347 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5348 case TARGET_NR_unlinkat
:
5349 if (!(p
= lock_user_string(arg2
)))
5351 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
5352 unlock_user(p
, arg2
, 0);
5355 case TARGET_NR_execve
:
5357 char **argp
, **envp
;
5360 abi_ulong guest_argp
;
5361 abi_ulong guest_envp
;
5368 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5369 if (get_user_ual(addr
, gp
))
5377 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5378 if (get_user_ual(addr
, gp
))
5385 argp
= alloca((argc
+ 1) * sizeof(void *));
5386 envp
= alloca((envc
+ 1) * sizeof(void *));
5388 for (gp
= guest_argp
, q
= argp
; gp
;
5389 gp
+= sizeof(abi_ulong
), q
++) {
5390 if (get_user_ual(addr
, gp
))
5394 if (!(*q
= lock_user_string(addr
)))
5396 total_size
+= strlen(*q
) + 1;
5400 for (gp
= guest_envp
, q
= envp
; gp
;
5401 gp
+= sizeof(abi_ulong
), q
++) {
5402 if (get_user_ual(addr
, gp
))
5406 if (!(*q
= lock_user_string(addr
)))
5408 total_size
+= strlen(*q
) + 1;
5412 /* This case will not be caught by the host's execve() if its
5413 page size is bigger than the target's. */
5414 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5415 ret
= -TARGET_E2BIG
;
5418 if (!(p
= lock_user_string(arg1
)))
5420 ret
= get_errno(execve(p
, argp
, envp
));
5421 unlock_user(p
, arg1
, 0);
5426 ret
= -TARGET_EFAULT
;
5429 for (gp
= guest_argp
, q
= argp
; *q
;
5430 gp
+= sizeof(abi_ulong
), q
++) {
5431 if (get_user_ual(addr
, gp
)
5434 unlock_user(*q
, addr
, 0);
5436 for (gp
= guest_envp
, q
= envp
; *q
;
5437 gp
+= sizeof(abi_ulong
), q
++) {
5438 if (get_user_ual(addr
, gp
)
5441 unlock_user(*q
, addr
, 0);
5445 case TARGET_NR_chdir
:
5446 if (!(p
= lock_user_string(arg1
)))
5448 ret
= get_errno(chdir(p
));
5449 unlock_user(p
, arg1
, 0);
5451 #ifdef TARGET_NR_time
5452 case TARGET_NR_time
:
5455 ret
= get_errno(time(&host_time
));
5458 && put_user_sal(host_time
, arg1
))
5463 case TARGET_NR_mknod
:
5464 if (!(p
= lock_user_string(arg1
)))
5466 ret
= get_errno(mknod(p
, arg2
, arg3
));
5467 unlock_user(p
, arg1
, 0);
5469 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5470 case TARGET_NR_mknodat
:
5471 if (!(p
= lock_user_string(arg2
)))
5473 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
5474 unlock_user(p
, arg2
, 0);
5477 case TARGET_NR_chmod
:
5478 if (!(p
= lock_user_string(arg1
)))
5480 ret
= get_errno(chmod(p
, arg2
));
5481 unlock_user(p
, arg1
, 0);
5483 #ifdef TARGET_NR_break
5484 case TARGET_NR_break
:
5487 #ifdef TARGET_NR_oldstat
5488 case TARGET_NR_oldstat
:
5491 case TARGET_NR_lseek
:
5492 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5494 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5495 /* Alpha specific */
5496 case TARGET_NR_getxpid
:
5497 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5498 ret
= get_errno(getpid());
5501 #ifdef TARGET_NR_getpid
5502 case TARGET_NR_getpid
:
5503 ret
= get_errno(getpid());
5506 case TARGET_NR_mount
:
5508 /* need to look at the data field */
5510 p
= lock_user_string(arg1
);
5511 p2
= lock_user_string(arg2
);
5512 p3
= lock_user_string(arg3
);
5513 if (!p
|| !p2
|| !p3
)
5514 ret
= -TARGET_EFAULT
;
5516 /* FIXME - arg5 should be locked, but it isn't clear how to
5517 * do that since it's not guaranteed to be a NULL-terminated
5521 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5523 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5525 unlock_user(p
, arg1
, 0);
5526 unlock_user(p2
, arg2
, 0);
5527 unlock_user(p3
, arg3
, 0);
5530 #ifdef TARGET_NR_umount
5531 case TARGET_NR_umount
:
5532 if (!(p
= lock_user_string(arg1
)))
5534 ret
= get_errno(umount(p
));
5535 unlock_user(p
, arg1
, 0);
5538 #ifdef TARGET_NR_stime /* not on alpha */
5539 case TARGET_NR_stime
:
5542 if (get_user_sal(host_time
, arg1
))
5544 ret
= get_errno(stime(&host_time
));
5548 case TARGET_NR_ptrace
:
5550 #ifdef TARGET_NR_alarm /* not on alpha */
5551 case TARGET_NR_alarm
:
5555 #ifdef TARGET_NR_oldfstat
5556 case TARGET_NR_oldfstat
:
5559 #ifdef TARGET_NR_pause /* not on alpha */
5560 case TARGET_NR_pause
:
5561 ret
= get_errno(pause());
5564 #ifdef TARGET_NR_utime
5565 case TARGET_NR_utime
:
5567 struct utimbuf tbuf
, *host_tbuf
;
5568 struct target_utimbuf
*target_tbuf
;
5570 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5572 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5573 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5574 unlock_user_struct(target_tbuf
, arg2
, 0);
5579 if (!(p
= lock_user_string(arg1
)))
5581 ret
= get_errno(utime(p
, host_tbuf
));
5582 unlock_user(p
, arg1
, 0);
5586 case TARGET_NR_utimes
:
5588 struct timeval
*tvp
, tv
[2];
5590 if (copy_from_user_timeval(&tv
[0], arg2
)
5591 || copy_from_user_timeval(&tv
[1],
5592 arg2
+ sizeof(struct target_timeval
)))
5598 if (!(p
= lock_user_string(arg1
)))
5600 ret
= get_errno(utimes(p
, tvp
));
5601 unlock_user(p
, arg1
, 0);
5604 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5605 case TARGET_NR_futimesat
:
5607 struct timeval
*tvp
, tv
[2];
5609 if (copy_from_user_timeval(&tv
[0], arg3
)
5610 || copy_from_user_timeval(&tv
[1],
5611 arg3
+ sizeof(struct target_timeval
)))
5617 if (!(p
= lock_user_string(arg2
)))
5619 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5620 unlock_user(p
, arg2
, 0);
5624 #ifdef TARGET_NR_stty
5625 case TARGET_NR_stty
:
5628 #ifdef TARGET_NR_gtty
5629 case TARGET_NR_gtty
:
5632 case TARGET_NR_access
:
5633 if (!(p
= lock_user_string(arg1
)))
5635 ret
= get_errno(access(path(p
), arg2
));
5636 unlock_user(p
, arg1
, 0);
5638 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5639 case TARGET_NR_faccessat
:
5640 if (!(p
= lock_user_string(arg2
)))
5642 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5643 unlock_user(p
, arg2
, 0);
5646 #ifdef TARGET_NR_nice /* not on alpha */
5647 case TARGET_NR_nice
:
5648 ret
= get_errno(nice(arg1
));
5651 #ifdef TARGET_NR_ftime
5652 case TARGET_NR_ftime
:
5655 case TARGET_NR_sync
:
5659 case TARGET_NR_kill
:
5660 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5662 case TARGET_NR_rename
:
5665 p
= lock_user_string(arg1
);
5666 p2
= lock_user_string(arg2
);
5668 ret
= -TARGET_EFAULT
;
5670 ret
= get_errno(rename(p
, p2
));
5671 unlock_user(p2
, arg2
, 0);
5672 unlock_user(p
, arg1
, 0);
5675 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5676 case TARGET_NR_renameat
:
5679 p
= lock_user_string(arg2
);
5680 p2
= lock_user_string(arg4
);
5682 ret
= -TARGET_EFAULT
;
5684 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5685 unlock_user(p2
, arg4
, 0);
5686 unlock_user(p
, arg2
, 0);
5690 case TARGET_NR_mkdir
:
5691 if (!(p
= lock_user_string(arg1
)))
5693 ret
= get_errno(mkdir(p
, arg2
));
5694 unlock_user(p
, arg1
, 0);
5696 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5697 case TARGET_NR_mkdirat
:
5698 if (!(p
= lock_user_string(arg2
)))
5700 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5701 unlock_user(p
, arg2
, 0);
5704 case TARGET_NR_rmdir
:
5705 if (!(p
= lock_user_string(arg1
)))
5707 ret
= get_errno(rmdir(p
));
5708 unlock_user(p
, arg1
, 0);
5711 ret
= get_errno(dup(arg1
));
5713 case TARGET_NR_pipe
:
5714 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5716 #ifdef TARGET_NR_pipe2
5717 case TARGET_NR_pipe2
:
5718 ret
= do_pipe(cpu_env
, arg1
,
5719 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5722 case TARGET_NR_times
:
5724 struct target_tms
*tmsp
;
5726 ret
= get_errno(times(&tms
));
5728 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5731 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5732 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5733 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5734 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5737 ret
= host_to_target_clock_t(ret
);
5740 #ifdef TARGET_NR_prof
5741 case TARGET_NR_prof
:
5744 #ifdef TARGET_NR_signal
5745 case TARGET_NR_signal
:
5748 case TARGET_NR_acct
:
5750 ret
= get_errno(acct(NULL
));
5752 if (!(p
= lock_user_string(arg1
)))
5754 ret
= get_errno(acct(path(p
)));
5755 unlock_user(p
, arg1
, 0);
5758 #ifdef TARGET_NR_umount2 /* not on alpha */
5759 case TARGET_NR_umount2
:
5760 if (!(p
= lock_user_string(arg1
)))
5762 ret
= get_errno(umount2(p
, arg2
));
5763 unlock_user(p
, arg1
, 0);
5766 #ifdef TARGET_NR_lock
5767 case TARGET_NR_lock
:
5770 case TARGET_NR_ioctl
:
5771 ret
= do_ioctl(arg1
, arg2
, arg3
);
5773 case TARGET_NR_fcntl
:
5774 ret
= do_fcntl(arg1
, arg2
, arg3
);
5776 #ifdef TARGET_NR_mpx
5780 case TARGET_NR_setpgid
:
5781 ret
= get_errno(setpgid(arg1
, arg2
));
5783 #ifdef TARGET_NR_ulimit
5784 case TARGET_NR_ulimit
:
5787 #ifdef TARGET_NR_oldolduname
5788 case TARGET_NR_oldolduname
:
5791 case TARGET_NR_umask
:
5792 ret
= get_errno(umask(arg1
));
5794 case TARGET_NR_chroot
:
5795 if (!(p
= lock_user_string(arg1
)))
5797 ret
= get_errno(chroot(p
));
5798 unlock_user(p
, arg1
, 0);
5800 case TARGET_NR_ustat
:
5802 case TARGET_NR_dup2
:
5803 ret
= get_errno(dup2(arg1
, arg2
));
5805 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5806 case TARGET_NR_dup3
:
5807 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5810 #ifdef TARGET_NR_getppid /* not on alpha */
5811 case TARGET_NR_getppid
:
5812 ret
= get_errno(getppid());
5815 case TARGET_NR_getpgrp
:
5816 ret
= get_errno(getpgrp());
5818 case TARGET_NR_setsid
:
5819 ret
= get_errno(setsid());
5821 #ifdef TARGET_NR_sigaction
5822 case TARGET_NR_sigaction
:
5824 #if defined(TARGET_ALPHA)
5825 struct target_sigaction act
, oact
, *pact
= 0;
5826 struct target_old_sigaction
*old_act
;
5828 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5830 act
._sa_handler
= old_act
->_sa_handler
;
5831 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5832 act
.sa_flags
= old_act
->sa_flags
;
5833 act
.sa_restorer
= 0;
5834 unlock_user_struct(old_act
, arg2
, 0);
5837 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5838 if (!is_error(ret
) && arg3
) {
5839 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5841 old_act
->_sa_handler
= oact
._sa_handler
;
5842 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5843 old_act
->sa_flags
= oact
.sa_flags
;
5844 unlock_user_struct(old_act
, arg3
, 1);
5846 #elif defined(TARGET_MIPS)
5847 struct target_sigaction act
, oact
, *pact
, *old_act
;
5850 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5852 act
._sa_handler
= old_act
->_sa_handler
;
5853 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5854 act
.sa_flags
= old_act
->sa_flags
;
5855 unlock_user_struct(old_act
, arg2
, 0);
5861 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5863 if (!is_error(ret
) && arg3
) {
5864 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5866 old_act
->_sa_handler
= oact
._sa_handler
;
5867 old_act
->sa_flags
= oact
.sa_flags
;
5868 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5869 old_act
->sa_mask
.sig
[1] = 0;
5870 old_act
->sa_mask
.sig
[2] = 0;
5871 old_act
->sa_mask
.sig
[3] = 0;
5872 unlock_user_struct(old_act
, arg3
, 1);
5875 struct target_old_sigaction
*old_act
;
5876 struct target_sigaction act
, oact
, *pact
;
5878 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5880 act
._sa_handler
= old_act
->_sa_handler
;
5881 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5882 act
.sa_flags
= old_act
->sa_flags
;
5883 act
.sa_restorer
= old_act
->sa_restorer
;
5884 unlock_user_struct(old_act
, arg2
, 0);
5889 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5890 if (!is_error(ret
) && arg3
) {
5891 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5893 old_act
->_sa_handler
= oact
._sa_handler
;
5894 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5895 old_act
->sa_flags
= oact
.sa_flags
;
5896 old_act
->sa_restorer
= oact
.sa_restorer
;
5897 unlock_user_struct(old_act
, arg3
, 1);
5903 case TARGET_NR_rt_sigaction
:
5905 #if defined(TARGET_ALPHA)
5906 struct target_sigaction act
, oact
, *pact
= 0;
5907 struct target_rt_sigaction
*rt_act
;
5908 /* ??? arg4 == sizeof(sigset_t). */
5910 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5912 act
._sa_handler
= rt_act
->_sa_handler
;
5913 act
.sa_mask
= rt_act
->sa_mask
;
5914 act
.sa_flags
= rt_act
->sa_flags
;
5915 act
.sa_restorer
= arg5
;
5916 unlock_user_struct(rt_act
, arg2
, 0);
5919 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5920 if (!is_error(ret
) && arg3
) {
5921 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5923 rt_act
->_sa_handler
= oact
._sa_handler
;
5924 rt_act
->sa_mask
= oact
.sa_mask
;
5925 rt_act
->sa_flags
= oact
.sa_flags
;
5926 unlock_user_struct(rt_act
, arg3
, 1);
5929 struct target_sigaction
*act
;
5930 struct target_sigaction
*oact
;
5933 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5938 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5939 ret
= -TARGET_EFAULT
;
5940 goto rt_sigaction_fail
;
5944 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5947 unlock_user_struct(act
, arg2
, 0);
5949 unlock_user_struct(oact
, arg3
, 1);
5953 #ifdef TARGET_NR_sgetmask /* not on alpha */
5954 case TARGET_NR_sgetmask
:
5957 abi_ulong target_set
;
5958 sigprocmask(0, NULL
, &cur_set
);
5959 host_to_target_old_sigset(&target_set
, &cur_set
);
5964 #ifdef TARGET_NR_ssetmask /* not on alpha */
5965 case TARGET_NR_ssetmask
:
5967 sigset_t set
, oset
, cur_set
;
5968 abi_ulong target_set
= arg1
;
5969 sigprocmask(0, NULL
, &cur_set
);
5970 target_to_host_old_sigset(&set
, &target_set
);
5971 sigorset(&set
, &set
, &cur_set
);
5972 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5973 host_to_target_old_sigset(&target_set
, &oset
);
5978 #ifdef TARGET_NR_sigprocmask
5979 case TARGET_NR_sigprocmask
:
5981 #if defined(TARGET_ALPHA)
5982 sigset_t set
, oldset
;
5987 case TARGET_SIG_BLOCK
:
5990 case TARGET_SIG_UNBLOCK
:
5993 case TARGET_SIG_SETMASK
:
5997 ret
= -TARGET_EINVAL
;
6001 target_to_host_old_sigset(&set
, &mask
);
6003 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
6004 if (!is_error(ret
)) {
6005 host_to_target_old_sigset(&mask
, &oldset
);
6007 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6010 sigset_t set
, oldset
, *set_ptr
;
6015 case TARGET_SIG_BLOCK
:
6018 case TARGET_SIG_UNBLOCK
:
6021 case TARGET_SIG_SETMASK
:
6025 ret
= -TARGET_EINVAL
;
6028 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6030 target_to_host_old_sigset(&set
, p
);
6031 unlock_user(p
, arg2
, 0);
6037 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6038 if (!is_error(ret
) && arg3
) {
6039 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6041 host_to_target_old_sigset(p
, &oldset
);
6042 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6048 case TARGET_NR_rt_sigprocmask
:
6051 sigset_t set
, oldset
, *set_ptr
;
6055 case TARGET_SIG_BLOCK
:
6058 case TARGET_SIG_UNBLOCK
:
6061 case TARGET_SIG_SETMASK
:
6065 ret
= -TARGET_EINVAL
;
6068 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6070 target_to_host_sigset(&set
, p
);
6071 unlock_user(p
, arg2
, 0);
6077 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6078 if (!is_error(ret
) && arg3
) {
6079 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6081 host_to_target_sigset(p
, &oldset
);
6082 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6086 #ifdef TARGET_NR_sigpending
6087 case TARGET_NR_sigpending
:
6090 ret
= get_errno(sigpending(&set
));
6091 if (!is_error(ret
)) {
6092 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6094 host_to_target_old_sigset(p
, &set
);
6095 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6100 case TARGET_NR_rt_sigpending
:
6103 ret
= get_errno(sigpending(&set
));
6104 if (!is_error(ret
)) {
6105 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6107 host_to_target_sigset(p
, &set
);
6108 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6112 #ifdef TARGET_NR_sigsuspend
6113 case TARGET_NR_sigsuspend
:
6116 #if defined(TARGET_ALPHA)
6117 abi_ulong mask
= arg1
;
6118 target_to_host_old_sigset(&set
, &mask
);
6120 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6122 target_to_host_old_sigset(&set
, p
);
6123 unlock_user(p
, arg1
, 0);
6125 ret
= get_errno(sigsuspend(&set
));
6129 case TARGET_NR_rt_sigsuspend
:
6132 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6134 target_to_host_sigset(&set
, p
);
6135 unlock_user(p
, arg1
, 0);
6136 ret
= get_errno(sigsuspend(&set
));
6139 case TARGET_NR_rt_sigtimedwait
:
6142 struct timespec uts
, *puts
;
6145 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6147 target_to_host_sigset(&set
, p
);
6148 unlock_user(p
, arg1
, 0);
6151 target_to_host_timespec(puts
, arg3
);
6155 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6156 if (!is_error(ret
) && arg2
) {
6157 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
6159 host_to_target_siginfo(p
, &uinfo
);
6160 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6164 case TARGET_NR_rt_sigqueueinfo
:
6167 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6169 target_to_host_siginfo(&uinfo
, p
);
6170 unlock_user(p
, arg1
, 0);
6171 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6174 #ifdef TARGET_NR_sigreturn
6175 case TARGET_NR_sigreturn
:
6176 /* NOTE: ret is eax, so not transcoding must be done */
6177 ret
= do_sigreturn(cpu_env
);
6180 case TARGET_NR_rt_sigreturn
:
6181 /* NOTE: ret is eax, so not transcoding must be done */
6182 ret
= do_rt_sigreturn(cpu_env
);
6184 case TARGET_NR_sethostname
:
6185 if (!(p
= lock_user_string(arg1
)))
6187 ret
= get_errno(sethostname(p
, arg2
));
6188 unlock_user(p
, arg1
, 0);
6190 case TARGET_NR_setrlimit
:
6192 int resource
= target_to_host_resource(arg1
);
6193 struct target_rlimit
*target_rlim
;
6195 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6197 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6198 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6199 unlock_user_struct(target_rlim
, arg2
, 0);
6200 ret
= get_errno(setrlimit(resource
, &rlim
));
6203 case TARGET_NR_getrlimit
:
6205 int resource
= target_to_host_resource(arg1
);
6206 struct target_rlimit
*target_rlim
;
6209 ret
= get_errno(getrlimit(resource
, &rlim
));
6210 if (!is_error(ret
)) {
6211 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6213 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6214 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6215 unlock_user_struct(target_rlim
, arg2
, 1);
6219 case TARGET_NR_getrusage
:
6221 struct rusage rusage
;
6222 ret
= get_errno(getrusage(arg1
, &rusage
));
6223 if (!is_error(ret
)) {
6224 host_to_target_rusage(arg2
, &rusage
);
6228 case TARGET_NR_gettimeofday
:
6231 ret
= get_errno(gettimeofday(&tv
, NULL
));
6232 if (!is_error(ret
)) {
6233 if (copy_to_user_timeval(arg1
, &tv
))
6238 case TARGET_NR_settimeofday
:
6241 if (copy_from_user_timeval(&tv
, arg1
))
6243 ret
= get_errno(settimeofday(&tv
, NULL
));
6246 #if defined(TARGET_NR_select)
6247 case TARGET_NR_select
:
6248 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6249 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6252 struct target_sel_arg_struct
*sel
;
6253 abi_ulong inp
, outp
, exp
, tvp
;
6256 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6258 nsel
= tswapal(sel
->n
);
6259 inp
= tswapal(sel
->inp
);
6260 outp
= tswapal(sel
->outp
);
6261 exp
= tswapal(sel
->exp
);
6262 tvp
= tswapal(sel
->tvp
);
6263 unlock_user_struct(sel
, arg1
, 0);
6264 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6269 #ifdef TARGET_NR_pselect6
6270 case TARGET_NR_pselect6
:
6272 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6273 fd_set rfds
, wfds
, efds
;
6274 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6275 struct timespec ts
, *ts_ptr
;
6278 * The 6th arg is actually two args smashed together,
6279 * so we cannot use the C library.
6287 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6288 target_sigset_t
*target_sigset
;
6296 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6300 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6304 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6310 * This takes a timespec, and not a timeval, so we cannot
6311 * use the do_select() helper ...
6314 if (target_to_host_timespec(&ts
, ts_addr
)) {
6322 /* Extract the two packed args for the sigset */
6325 sig
.size
= _NSIG
/ 8;
6327 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6331 arg_sigset
= tswapal(arg7
[0]);
6332 arg_sigsize
= tswapal(arg7
[1]);
6333 unlock_user(arg7
, arg6
, 0);
6337 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6338 /* Like the kernel, we enforce correct size sigsets */
6339 ret
= -TARGET_EINVAL
;
6342 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6343 sizeof(*target_sigset
), 1);
6344 if (!target_sigset
) {
6347 target_to_host_sigset(&set
, target_sigset
);
6348 unlock_user(target_sigset
, arg_sigset
, 0);
6356 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6359 if (!is_error(ret
)) {
6360 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6362 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6364 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6367 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6373 case TARGET_NR_symlink
:
6376 p
= lock_user_string(arg1
);
6377 p2
= lock_user_string(arg2
);
6379 ret
= -TARGET_EFAULT
;
6381 ret
= get_errno(symlink(p
, p2
));
6382 unlock_user(p2
, arg2
, 0);
6383 unlock_user(p
, arg1
, 0);
6386 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6387 case TARGET_NR_symlinkat
:
6390 p
= lock_user_string(arg1
);
6391 p2
= lock_user_string(arg3
);
6393 ret
= -TARGET_EFAULT
;
6395 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6396 unlock_user(p2
, arg3
, 0);
6397 unlock_user(p
, arg1
, 0);
6401 #ifdef TARGET_NR_oldlstat
6402 case TARGET_NR_oldlstat
:
6405 case TARGET_NR_readlink
:
6408 p
= lock_user_string(arg1
);
6409 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6411 ret
= -TARGET_EFAULT
;
6413 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6414 char real
[PATH_MAX
];
6415 temp
= realpath(exec_path
,real
);
6416 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6417 snprintf((char *)p2
, arg3
, "%s", real
);
6420 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6422 unlock_user(p2
, arg2
, ret
);
6423 unlock_user(p
, arg1
, 0);
6426 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6427 case TARGET_NR_readlinkat
:
6430 p
= lock_user_string(arg2
);
6431 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6433 ret
= -TARGET_EFAULT
;
6435 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6436 unlock_user(p2
, arg3
, ret
);
6437 unlock_user(p
, arg2
, 0);
6441 #ifdef TARGET_NR_uselib
6442 case TARGET_NR_uselib
:
6445 #ifdef TARGET_NR_swapon
6446 case TARGET_NR_swapon
:
6447 if (!(p
= lock_user_string(arg1
)))
6449 ret
= get_errno(swapon(p
, arg2
));
6450 unlock_user(p
, arg1
, 0);
6453 case TARGET_NR_reboot
:
6454 if (!(p
= lock_user_string(arg4
)))
6456 ret
= reboot(arg1
, arg2
, arg3
, p
);
6457 unlock_user(p
, arg4
, 0);
6459 #ifdef TARGET_NR_readdir
6460 case TARGET_NR_readdir
:
6463 #ifdef TARGET_NR_mmap
6464 case TARGET_NR_mmap
:
6465 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6466 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6467 || defined(TARGET_S390X)
6470 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6471 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6479 unlock_user(v
, arg1
, 0);
6480 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6481 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6485 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6486 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6492 #ifdef TARGET_NR_mmap2
6493 case TARGET_NR_mmap2
:
6495 #define MMAP_SHIFT 12
6497 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6498 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6500 arg6
<< MMAP_SHIFT
));
6503 case TARGET_NR_munmap
:
6504 ret
= get_errno(target_munmap(arg1
, arg2
));
6506 case TARGET_NR_mprotect
:
6508 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6509 /* Special hack to detect libc making the stack executable. */
6510 if ((arg3
& PROT_GROWSDOWN
)
6511 && arg1
>= ts
->info
->stack_limit
6512 && arg1
<= ts
->info
->start_stack
) {
6513 arg3
&= ~PROT_GROWSDOWN
;
6514 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6515 arg1
= ts
->info
->stack_limit
;
6518 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6520 #ifdef TARGET_NR_mremap
6521 case TARGET_NR_mremap
:
6522 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6525 /* ??? msync/mlock/munlock are broken for softmmu. */
6526 #ifdef TARGET_NR_msync
6527 case TARGET_NR_msync
:
6528 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6531 #ifdef TARGET_NR_mlock
6532 case TARGET_NR_mlock
:
6533 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6536 #ifdef TARGET_NR_munlock
6537 case TARGET_NR_munlock
:
6538 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6541 #ifdef TARGET_NR_mlockall
6542 case TARGET_NR_mlockall
:
6543 ret
= get_errno(mlockall(arg1
));
6546 #ifdef TARGET_NR_munlockall
6547 case TARGET_NR_munlockall
:
6548 ret
= get_errno(munlockall());
6551 case TARGET_NR_truncate
:
6552 if (!(p
= lock_user_string(arg1
)))
6554 ret
= get_errno(truncate(p
, arg2
));
6555 unlock_user(p
, arg1
, 0);
6557 case TARGET_NR_ftruncate
:
6558 ret
= get_errno(ftruncate(arg1
, arg2
));
6560 case TARGET_NR_fchmod
:
6561 ret
= get_errno(fchmod(arg1
, arg2
));
6563 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6564 case TARGET_NR_fchmodat
:
6565 if (!(p
= lock_user_string(arg2
)))
6567 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
6568 unlock_user(p
, arg2
, 0);
6571 case TARGET_NR_getpriority
:
6572 /* Note that negative values are valid for getpriority, so we must
6573 differentiate based on errno settings. */
6575 ret
= getpriority(arg1
, arg2
);
6576 if (ret
== -1 && errno
!= 0) {
6577 ret
= -host_to_target_errno(errno
);
6581 /* Return value is the unbiased priority. Signal no error. */
6582 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6584 /* Return value is a biased priority to avoid negative numbers. */
6588 case TARGET_NR_setpriority
:
6589 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6591 #ifdef TARGET_NR_profil
6592 case TARGET_NR_profil
:
6595 case TARGET_NR_statfs
:
6596 if (!(p
= lock_user_string(arg1
)))
6598 ret
= get_errno(statfs(path(p
), &stfs
));
6599 unlock_user(p
, arg1
, 0);
6601 if (!is_error(ret
)) {
6602 struct target_statfs
*target_stfs
;
6604 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6606 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6607 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6608 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6609 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6610 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6611 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6612 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6613 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6614 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6615 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6616 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6617 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6618 unlock_user_struct(target_stfs
, arg2
, 1);
6621 case TARGET_NR_fstatfs
:
6622 ret
= get_errno(fstatfs(arg1
, &stfs
));
6623 goto convert_statfs
;
6624 #ifdef TARGET_NR_statfs64
6625 case TARGET_NR_statfs64
:
6626 if (!(p
= lock_user_string(arg1
)))
6628 ret
= get_errno(statfs(path(p
), &stfs
));
6629 unlock_user(p
, arg1
, 0);
6631 if (!is_error(ret
)) {
6632 struct target_statfs64
*target_stfs
;
6634 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6636 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6637 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6638 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6639 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6640 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6641 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6642 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6643 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6644 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6645 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6646 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6647 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6648 unlock_user_struct(target_stfs
, arg3
, 1);
6651 case TARGET_NR_fstatfs64
:
6652 ret
= get_errno(fstatfs(arg1
, &stfs
));
6653 goto convert_statfs64
;
6655 #ifdef TARGET_NR_ioperm
6656 case TARGET_NR_ioperm
:
6659 #ifdef TARGET_NR_socketcall
6660 case TARGET_NR_socketcall
:
6661 ret
= do_socketcall(arg1
, arg2
);
6664 #ifdef TARGET_NR_accept
6665 case TARGET_NR_accept
:
6666 ret
= do_accept(arg1
, arg2
, arg3
);
6669 #ifdef TARGET_NR_bind
6670 case TARGET_NR_bind
:
6671 ret
= do_bind(arg1
, arg2
, arg3
);
6674 #ifdef TARGET_NR_connect
6675 case TARGET_NR_connect
:
6676 ret
= do_connect(arg1
, arg2
, arg3
);
6679 #ifdef TARGET_NR_getpeername
6680 case TARGET_NR_getpeername
:
6681 ret
= do_getpeername(arg1
, arg2
, arg3
);
6684 #ifdef TARGET_NR_getsockname
6685 case TARGET_NR_getsockname
:
6686 ret
= do_getsockname(arg1
, arg2
, arg3
);
6689 #ifdef TARGET_NR_getsockopt
6690 case TARGET_NR_getsockopt
:
6691 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6694 #ifdef TARGET_NR_listen
6695 case TARGET_NR_listen
:
6696 ret
= get_errno(listen(arg1
, arg2
));
6699 #ifdef TARGET_NR_recv
6700 case TARGET_NR_recv
:
6701 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6704 #ifdef TARGET_NR_recvfrom
6705 case TARGET_NR_recvfrom
:
6706 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6709 #ifdef TARGET_NR_recvmsg
6710 case TARGET_NR_recvmsg
:
6711 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6714 #ifdef TARGET_NR_send
6715 case TARGET_NR_send
:
6716 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6719 #ifdef TARGET_NR_sendmsg
6720 case TARGET_NR_sendmsg
:
6721 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6724 #ifdef TARGET_NR_sendto
6725 case TARGET_NR_sendto
:
6726 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6729 #ifdef TARGET_NR_shutdown
6730 case TARGET_NR_shutdown
:
6731 ret
= get_errno(shutdown(arg1
, arg2
));
6734 #ifdef TARGET_NR_socket
6735 case TARGET_NR_socket
:
6736 ret
= do_socket(arg1
, arg2
, arg3
);
6739 #ifdef TARGET_NR_socketpair
6740 case TARGET_NR_socketpair
:
6741 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6744 #ifdef TARGET_NR_setsockopt
6745 case TARGET_NR_setsockopt
:
6746 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6750 case TARGET_NR_syslog
:
6751 if (!(p
= lock_user_string(arg2
)))
6753 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6754 unlock_user(p
, arg2
, 0);
6757 case TARGET_NR_setitimer
:
6759 struct itimerval value
, ovalue
, *pvalue
;
6763 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6764 || copy_from_user_timeval(&pvalue
->it_value
,
6765 arg2
+ sizeof(struct target_timeval
)))
6770 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6771 if (!is_error(ret
) && arg3
) {
6772 if (copy_to_user_timeval(arg3
,
6773 &ovalue
.it_interval
)
6774 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6780 case TARGET_NR_getitimer
:
6782 struct itimerval value
;
6784 ret
= get_errno(getitimer(arg1
, &value
));
6785 if (!is_error(ret
) && arg2
) {
6786 if (copy_to_user_timeval(arg2
,
6788 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6794 case TARGET_NR_stat
:
6795 if (!(p
= lock_user_string(arg1
)))
6797 ret
= get_errno(stat(path(p
), &st
));
6798 unlock_user(p
, arg1
, 0);
6800 case TARGET_NR_lstat
:
6801 if (!(p
= lock_user_string(arg1
)))
6803 ret
= get_errno(lstat(path(p
), &st
));
6804 unlock_user(p
, arg1
, 0);
6806 case TARGET_NR_fstat
:
6808 ret
= get_errno(fstat(arg1
, &st
));
6810 if (!is_error(ret
)) {
6811 struct target_stat
*target_st
;
6813 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6815 memset(target_st
, 0, sizeof(*target_st
));
6816 __put_user(st
.st_dev
, &target_st
->st_dev
);
6817 __put_user(st
.st_ino
, &target_st
->st_ino
);
6818 __put_user(st
.st_mode
, &target_st
->st_mode
);
6819 __put_user(st
.st_uid
, &target_st
->st_uid
);
6820 __put_user(st
.st_gid
, &target_st
->st_gid
);
6821 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6822 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6823 __put_user(st
.st_size
, &target_st
->st_size
);
6824 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6825 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6826 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6827 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6828 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6829 unlock_user_struct(target_st
, arg2
, 1);
6833 #ifdef TARGET_NR_olduname
6834 case TARGET_NR_olduname
:
6837 #ifdef TARGET_NR_iopl
6838 case TARGET_NR_iopl
:
6841 case TARGET_NR_vhangup
:
6842 ret
= get_errno(vhangup());
6844 #ifdef TARGET_NR_idle
6845 case TARGET_NR_idle
:
6848 #ifdef TARGET_NR_syscall
6849 case TARGET_NR_syscall
:
6850 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6851 arg6
, arg7
, arg8
, 0);
6854 case TARGET_NR_wait4
:
6857 abi_long status_ptr
= arg2
;
6858 struct rusage rusage
, *rusage_ptr
;
6859 abi_ulong target_rusage
= arg4
;
6861 rusage_ptr
= &rusage
;
6864 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6865 if (!is_error(ret
)) {
6866 if (status_ptr
&& ret
) {
6867 status
= host_to_target_waitstatus(status
);
6868 if (put_user_s32(status
, status_ptr
))
6872 host_to_target_rusage(target_rusage
, &rusage
);
6876 #ifdef TARGET_NR_swapoff
6877 case TARGET_NR_swapoff
:
6878 if (!(p
= lock_user_string(arg1
)))
6880 ret
= get_errno(swapoff(p
));
6881 unlock_user(p
, arg1
, 0);
6884 case TARGET_NR_sysinfo
:
6886 struct target_sysinfo
*target_value
;
6887 struct sysinfo value
;
6888 ret
= get_errno(sysinfo(&value
));
6889 if (!is_error(ret
) && arg1
)
6891 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6893 __put_user(value
.uptime
, &target_value
->uptime
);
6894 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6895 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6896 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6897 __put_user(value
.totalram
, &target_value
->totalram
);
6898 __put_user(value
.freeram
, &target_value
->freeram
);
6899 __put_user(value
.sharedram
, &target_value
->sharedram
);
6900 __put_user(value
.bufferram
, &target_value
->bufferram
);
6901 __put_user(value
.totalswap
, &target_value
->totalswap
);
6902 __put_user(value
.freeswap
, &target_value
->freeswap
);
6903 __put_user(value
.procs
, &target_value
->procs
);
6904 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6905 __put_user(value
.freehigh
, &target_value
->freehigh
);
6906 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6907 unlock_user_struct(target_value
, arg1
, 1);
6911 #ifdef TARGET_NR_ipc
6913 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6916 #ifdef TARGET_NR_semget
6917 case TARGET_NR_semget
:
6918 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6921 #ifdef TARGET_NR_semop
6922 case TARGET_NR_semop
:
6923 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6926 #ifdef TARGET_NR_semctl
6927 case TARGET_NR_semctl
:
6928 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6931 #ifdef TARGET_NR_msgctl
6932 case TARGET_NR_msgctl
:
6933 ret
= do_msgctl(arg1
, arg2
, arg3
);
6936 #ifdef TARGET_NR_msgget
6937 case TARGET_NR_msgget
:
6938 ret
= get_errno(msgget(arg1
, arg2
));
6941 #ifdef TARGET_NR_msgrcv
6942 case TARGET_NR_msgrcv
:
6943 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6946 #ifdef TARGET_NR_msgsnd
6947 case TARGET_NR_msgsnd
:
6948 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6951 #ifdef TARGET_NR_shmget
6952 case TARGET_NR_shmget
:
6953 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6956 #ifdef TARGET_NR_shmctl
6957 case TARGET_NR_shmctl
:
6958 ret
= do_shmctl(arg1
, arg2
, arg3
);
6961 #ifdef TARGET_NR_shmat
6962 case TARGET_NR_shmat
:
6963 ret
= do_shmat(arg1
, arg2
, arg3
);
6966 #ifdef TARGET_NR_shmdt
6967 case TARGET_NR_shmdt
:
6968 ret
= do_shmdt(arg1
);
6971 case TARGET_NR_fsync
:
6972 ret
= get_errno(fsync(arg1
));
6974 case TARGET_NR_clone
:
6975 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6976 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6977 #elif defined(TARGET_CRIS)
6978 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6979 #elif defined(TARGET_MICROBLAZE)
6980 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
6981 #elif defined(TARGET_S390X)
6982 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6984 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6987 #ifdef __NR_exit_group
6988 /* new thread calls */
6989 case TARGET_NR_exit_group
:
6993 gdb_exit(cpu_env
, arg1
);
6994 ret
= get_errno(exit_group(arg1
));
6997 case TARGET_NR_setdomainname
:
6998 if (!(p
= lock_user_string(arg1
)))
7000 ret
= get_errno(setdomainname(p
, arg2
));
7001 unlock_user(p
, arg1
, 0);
7003 case TARGET_NR_uname
:
7004 /* no need to transcode because we use the linux syscall */
7006 struct new_utsname
* buf
;
7008 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7010 ret
= get_errno(sys_uname(buf
));
7011 if (!is_error(ret
)) {
7012 /* Overrite the native machine name with whatever is being
7014 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7015 /* Allow the user to override the reported release. */
7016 if (qemu_uname_release
&& *qemu_uname_release
)
7017 strcpy (buf
->release
, qemu_uname_release
);
7019 unlock_user_struct(buf
, arg1
, 1);
7023 case TARGET_NR_modify_ldt
:
7024 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7026 #if !defined(TARGET_X86_64)
7027 case TARGET_NR_vm86old
:
7029 case TARGET_NR_vm86
:
7030 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7034 case TARGET_NR_adjtimex
:
7036 #ifdef TARGET_NR_create_module
7037 case TARGET_NR_create_module
:
7039 case TARGET_NR_init_module
:
7040 case TARGET_NR_delete_module
:
7041 #ifdef TARGET_NR_get_kernel_syms
7042 case TARGET_NR_get_kernel_syms
:
7045 case TARGET_NR_quotactl
:
7047 case TARGET_NR_getpgid
:
7048 ret
= get_errno(getpgid(arg1
));
7050 case TARGET_NR_fchdir
:
7051 ret
= get_errno(fchdir(arg1
));
7053 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7054 case TARGET_NR_bdflush
:
7057 #ifdef TARGET_NR_sysfs
7058 case TARGET_NR_sysfs
:
7061 case TARGET_NR_personality
:
7062 ret
= get_errno(personality(arg1
));
7064 #ifdef TARGET_NR_afs_syscall
7065 case TARGET_NR_afs_syscall
:
7068 #ifdef TARGET_NR__llseek /* Not on alpha */
7069 case TARGET_NR__llseek
:
7072 #if !defined(__NR_llseek)
7073 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7075 ret
= get_errno(res
);
7080 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7082 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7088 case TARGET_NR_getdents
:
7089 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7091 struct target_dirent
*target_dirp
;
7092 struct linux_dirent
*dirp
;
7093 abi_long count
= arg3
;
7095 dirp
= malloc(count
);
7097 ret
= -TARGET_ENOMEM
;
7101 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7102 if (!is_error(ret
)) {
7103 struct linux_dirent
*de
;
7104 struct target_dirent
*tde
;
7106 int reclen
, treclen
;
7107 int count1
, tnamelen
;
7111 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7115 reclen
= de
->d_reclen
;
7116 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7117 assert(tnamelen
>= 0);
7118 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7119 assert(count1
+ treclen
<= count
);
7120 tde
->d_reclen
= tswap16(treclen
);
7121 tde
->d_ino
= tswapal(de
->d_ino
);
7122 tde
->d_off
= tswapal(de
->d_off
);
7123 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7124 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7126 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7130 unlock_user(target_dirp
, arg2
, ret
);
7136 struct linux_dirent
*dirp
;
7137 abi_long count
= arg3
;
7139 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7141 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7142 if (!is_error(ret
)) {
7143 struct linux_dirent
*de
;
7148 reclen
= de
->d_reclen
;
7151 de
->d_reclen
= tswap16(reclen
);
7152 tswapls(&de
->d_ino
);
7153 tswapls(&de
->d_off
);
7154 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7158 unlock_user(dirp
, arg2
, ret
);
7162 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7163 case TARGET_NR_getdents64
:
7165 struct linux_dirent64
*dirp
;
7166 abi_long count
= arg3
;
7167 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7169 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7170 if (!is_error(ret
)) {
7171 struct linux_dirent64
*de
;
7176 reclen
= de
->d_reclen
;
7179 de
->d_reclen
= tswap16(reclen
);
7180 tswap64s((uint64_t *)&de
->d_ino
);
7181 tswap64s((uint64_t *)&de
->d_off
);
7182 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7186 unlock_user(dirp
, arg2
, ret
);
7189 #endif /* TARGET_NR_getdents64 */
7190 #if defined(TARGET_NR__newselect)
7191 case TARGET_NR__newselect
:
7192 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7195 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7196 # ifdef TARGET_NR_poll
7197 case TARGET_NR_poll
:
7199 # ifdef TARGET_NR_ppoll
7200 case TARGET_NR_ppoll
:
7203 struct target_pollfd
*target_pfd
;
7204 unsigned int nfds
= arg2
;
7209 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7213 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7214 for(i
= 0; i
< nfds
; i
++) {
7215 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7216 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7219 # ifdef TARGET_NR_ppoll
7220 if (num
== TARGET_NR_ppoll
) {
7221 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7222 target_sigset_t
*target_set
;
7223 sigset_t _set
, *set
= &_set
;
7226 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7227 unlock_user(target_pfd
, arg1
, 0);
7235 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7237 unlock_user(target_pfd
, arg1
, 0);
7240 target_to_host_sigset(set
, target_set
);
7245 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7247 if (!is_error(ret
) && arg3
) {
7248 host_to_target_timespec(arg3
, timeout_ts
);
7251 unlock_user(target_set
, arg4
, 0);
7255 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7257 if (!is_error(ret
)) {
7258 for(i
= 0; i
< nfds
; i
++) {
7259 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7262 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7266 case TARGET_NR_flock
:
7267 /* NOTE: the flock constant seems to be the same for every
7269 ret
= get_errno(flock(arg1
, arg2
));
7271 case TARGET_NR_readv
:
7273 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7275 ret
= get_errno(readv(arg1
, vec
, arg3
));
7276 unlock_iovec(vec
, arg2
, arg3
, 1);
7278 ret
= -host_to_target_errno(errno
);
7282 case TARGET_NR_writev
:
7284 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7286 ret
= get_errno(writev(arg1
, vec
, arg3
));
7287 unlock_iovec(vec
, arg2
, arg3
, 0);
7289 ret
= -host_to_target_errno(errno
);
7293 case TARGET_NR_getsid
:
7294 ret
= get_errno(getsid(arg1
));
7296 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7297 case TARGET_NR_fdatasync
:
7298 ret
= get_errno(fdatasync(arg1
));
7301 case TARGET_NR__sysctl
:
7302 /* We don't implement this, but ENOTDIR is always a safe
7304 ret
= -TARGET_ENOTDIR
;
7306 case TARGET_NR_sched_getaffinity
:
7308 unsigned int mask_size
;
7309 unsigned long *mask
;
7312 * sched_getaffinity needs multiples of ulong, so need to take
7313 * care of mismatches between target ulong and host ulong sizes.
7315 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7316 ret
= -TARGET_EINVAL
;
7319 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7321 mask
= alloca(mask_size
);
7322 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7324 if (!is_error(ret
)) {
7325 if (copy_to_user(arg3
, mask
, ret
)) {
7331 case TARGET_NR_sched_setaffinity
:
7333 unsigned int mask_size
;
7334 unsigned long *mask
;
7337 * sched_setaffinity needs multiples of ulong, so need to take
7338 * care of mismatches between target ulong and host ulong sizes.
7340 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7341 ret
= -TARGET_EINVAL
;
7344 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7346 mask
= alloca(mask_size
);
7347 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7350 memcpy(mask
, p
, arg2
);
7351 unlock_user_struct(p
, arg2
, 0);
7353 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7356 case TARGET_NR_sched_setparam
:
7358 struct sched_param
*target_schp
;
7359 struct sched_param schp
;
7361 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7363 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7364 unlock_user_struct(target_schp
, arg2
, 0);
7365 ret
= get_errno(sched_setparam(arg1
, &schp
));
7368 case TARGET_NR_sched_getparam
:
7370 struct sched_param
*target_schp
;
7371 struct sched_param schp
;
7372 ret
= get_errno(sched_getparam(arg1
, &schp
));
7373 if (!is_error(ret
)) {
7374 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7376 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7377 unlock_user_struct(target_schp
, arg2
, 1);
7381 case TARGET_NR_sched_setscheduler
:
7383 struct sched_param
*target_schp
;
7384 struct sched_param schp
;
7385 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7387 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7388 unlock_user_struct(target_schp
, arg3
, 0);
7389 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7392 case TARGET_NR_sched_getscheduler
:
7393 ret
= get_errno(sched_getscheduler(arg1
));
7395 case TARGET_NR_sched_yield
:
7396 ret
= get_errno(sched_yield());
7398 case TARGET_NR_sched_get_priority_max
:
7399 ret
= get_errno(sched_get_priority_max(arg1
));
7401 case TARGET_NR_sched_get_priority_min
:
7402 ret
= get_errno(sched_get_priority_min(arg1
));
7404 case TARGET_NR_sched_rr_get_interval
:
7407 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7408 if (!is_error(ret
)) {
7409 host_to_target_timespec(arg2
, &ts
);
7413 case TARGET_NR_nanosleep
:
7415 struct timespec req
, rem
;
7416 target_to_host_timespec(&req
, arg1
);
7417 ret
= get_errno(nanosleep(&req
, &rem
));
7418 if (is_error(ret
) && arg2
) {
7419 host_to_target_timespec(arg2
, &rem
);
7423 #ifdef TARGET_NR_query_module
7424 case TARGET_NR_query_module
:
7427 #ifdef TARGET_NR_nfsservctl
7428 case TARGET_NR_nfsservctl
:
7431 case TARGET_NR_prctl
:
7433 case PR_GET_PDEATHSIG
:
7436 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7437 if (!is_error(ret
) && arg2
7438 && put_user_ual(deathsig
, arg2
)) {
7446 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7450 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7452 unlock_user(name
, arg2
, 16);
7457 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7461 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7463 unlock_user(name
, arg2
, 0);
7468 /* Most prctl options have no pointer arguments */
7469 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7473 #ifdef TARGET_NR_arch_prctl
7474 case TARGET_NR_arch_prctl
:
7475 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7476 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7482 #ifdef TARGET_NR_pread64
7483 case TARGET_NR_pread64
:
7484 if (regpairs_aligned(cpu_env
)) {
7488 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7490 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7491 unlock_user(p
, arg2
, ret
);
7493 case TARGET_NR_pwrite64
:
7494 if (regpairs_aligned(cpu_env
)) {
7498 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7500 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7501 unlock_user(p
, arg2
, 0);
7504 case TARGET_NR_getcwd
:
7505 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7507 ret
= get_errno(sys_getcwd1(p
, arg2
));
7508 unlock_user(p
, arg1
, ret
);
7510 case TARGET_NR_capget
:
7512 case TARGET_NR_capset
:
7514 case TARGET_NR_sigaltstack
:
7515 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7516 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7517 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7518 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7523 case TARGET_NR_sendfile
:
7525 #ifdef TARGET_NR_getpmsg
7526 case TARGET_NR_getpmsg
:
7529 #ifdef TARGET_NR_putpmsg
7530 case TARGET_NR_putpmsg
:
7533 #ifdef TARGET_NR_vfork
7534 case TARGET_NR_vfork
:
7535 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7539 #ifdef TARGET_NR_ugetrlimit
7540 case TARGET_NR_ugetrlimit
:
7543 int resource
= target_to_host_resource(arg1
);
7544 ret
= get_errno(getrlimit(resource
, &rlim
));
7545 if (!is_error(ret
)) {
7546 struct target_rlimit
*target_rlim
;
7547 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7549 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7550 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7551 unlock_user_struct(target_rlim
, arg2
, 1);
7556 #ifdef TARGET_NR_truncate64
7557 case TARGET_NR_truncate64
:
7558 if (!(p
= lock_user_string(arg1
)))
7560 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7561 unlock_user(p
, arg1
, 0);
7564 #ifdef TARGET_NR_ftruncate64
7565 case TARGET_NR_ftruncate64
:
7566 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7569 #ifdef TARGET_NR_stat64
7570 case TARGET_NR_stat64
:
7571 if (!(p
= lock_user_string(arg1
)))
7573 ret
= get_errno(stat(path(p
), &st
));
7574 unlock_user(p
, arg1
, 0);
7576 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7579 #ifdef TARGET_NR_lstat64
7580 case TARGET_NR_lstat64
:
7581 if (!(p
= lock_user_string(arg1
)))
7583 ret
= get_errno(lstat(path(p
), &st
));
7584 unlock_user(p
, arg1
, 0);
7586 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7589 #ifdef TARGET_NR_fstat64
7590 case TARGET_NR_fstat64
:
7591 ret
= get_errno(fstat(arg1
, &st
));
7593 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7596 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7597 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7598 #ifdef TARGET_NR_fstatat64
7599 case TARGET_NR_fstatat64
:
7601 #ifdef TARGET_NR_newfstatat
7602 case TARGET_NR_newfstatat
:
7604 if (!(p
= lock_user_string(arg2
)))
7606 #ifdef __NR_fstatat64
7607 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
7609 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
7612 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7615 case TARGET_NR_lchown
:
7616 if (!(p
= lock_user_string(arg1
)))
7618 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7619 unlock_user(p
, arg1
, 0);
7621 #ifdef TARGET_NR_getuid
7622 case TARGET_NR_getuid
:
7623 ret
= get_errno(high2lowuid(getuid()));
7626 #ifdef TARGET_NR_getgid
7627 case TARGET_NR_getgid
:
7628 ret
= get_errno(high2lowgid(getgid()));
7631 #ifdef TARGET_NR_geteuid
7632 case TARGET_NR_geteuid
:
7633 ret
= get_errno(high2lowuid(geteuid()));
7636 #ifdef TARGET_NR_getegid
7637 case TARGET_NR_getegid
:
7638 ret
= get_errno(high2lowgid(getegid()));
7641 case TARGET_NR_setreuid
:
7642 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7644 case TARGET_NR_setregid
:
7645 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7647 case TARGET_NR_getgroups
:
7649 int gidsetsize
= arg1
;
7650 target_id
*target_grouplist
;
7654 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7655 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7656 if (gidsetsize
== 0)
7658 if (!is_error(ret
)) {
7659 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
7660 if (!target_grouplist
)
7662 for(i
= 0;i
< ret
; i
++)
7663 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7664 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7668 case TARGET_NR_setgroups
:
7670 int gidsetsize
= arg1
;
7671 target_id
*target_grouplist
;
7675 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7676 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7677 if (!target_grouplist
) {
7678 ret
= -TARGET_EFAULT
;
7681 for(i
= 0;i
< gidsetsize
; i
++)
7682 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7683 unlock_user(target_grouplist
, arg2
, 0);
7684 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7687 case TARGET_NR_fchown
:
7688 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7690 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7691 case TARGET_NR_fchownat
:
7692 if (!(p
= lock_user_string(arg2
)))
7694 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7695 unlock_user(p
, arg2
, 0);
7698 #ifdef TARGET_NR_setresuid
7699 case TARGET_NR_setresuid
:
7700 ret
= get_errno(setresuid(low2highuid(arg1
),
7702 low2highuid(arg3
)));
7705 #ifdef TARGET_NR_getresuid
7706 case TARGET_NR_getresuid
:
7708 uid_t ruid
, euid
, suid
;
7709 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7710 if (!is_error(ret
)) {
7711 if (put_user_u16(high2lowuid(ruid
), arg1
)
7712 || put_user_u16(high2lowuid(euid
), arg2
)
7713 || put_user_u16(high2lowuid(suid
), arg3
))
7719 #ifdef TARGET_NR_getresgid
7720 case TARGET_NR_setresgid
:
7721 ret
= get_errno(setresgid(low2highgid(arg1
),
7723 low2highgid(arg3
)));
7726 #ifdef TARGET_NR_getresgid
7727 case TARGET_NR_getresgid
:
7729 gid_t rgid
, egid
, sgid
;
7730 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7731 if (!is_error(ret
)) {
7732 if (put_user_u16(high2lowgid(rgid
), arg1
)
7733 || put_user_u16(high2lowgid(egid
), arg2
)
7734 || put_user_u16(high2lowgid(sgid
), arg3
))
7740 case TARGET_NR_chown
:
7741 if (!(p
= lock_user_string(arg1
)))
7743 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7744 unlock_user(p
, arg1
, 0);
7746 case TARGET_NR_setuid
:
7747 ret
= get_errno(setuid(low2highuid(arg1
)));
7749 case TARGET_NR_setgid
:
7750 ret
= get_errno(setgid(low2highgid(arg1
)));
7752 case TARGET_NR_setfsuid
:
7753 ret
= get_errno(setfsuid(arg1
));
7755 case TARGET_NR_setfsgid
:
7756 ret
= get_errno(setfsgid(arg1
));
7759 #ifdef TARGET_NR_lchown32
7760 case TARGET_NR_lchown32
:
7761 if (!(p
= lock_user_string(arg1
)))
7763 ret
= get_errno(lchown(p
, arg2
, arg3
));
7764 unlock_user(p
, arg1
, 0);
7767 #ifdef TARGET_NR_getuid32
7768 case TARGET_NR_getuid32
:
7769 ret
= get_errno(getuid());
7773 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7774 /* Alpha specific */
7775 case TARGET_NR_getxuid
:
7779 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7781 ret
= get_errno(getuid());
7784 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7785 /* Alpha specific */
7786 case TARGET_NR_getxgid
:
7790 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7792 ret
= get_errno(getgid());
7795 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7796 /* Alpha specific */
7797 case TARGET_NR_osf_getsysinfo
:
7798 ret
= -TARGET_EOPNOTSUPP
;
7800 case TARGET_GSI_IEEE_FP_CONTROL
:
7802 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7804 /* Copied from linux ieee_fpcr_to_swcr. */
7805 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7806 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7807 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7808 | SWCR_TRAP_ENABLE_DZE
7809 | SWCR_TRAP_ENABLE_OVF
);
7810 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7811 | SWCR_TRAP_ENABLE_INE
);
7812 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7813 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7815 if (put_user_u64 (swcr
, arg2
))
7821 /* case GSI_IEEE_STATE_AT_SIGNAL:
7822 -- Not implemented in linux kernel.
7824 -- Retrieves current unaligned access state; not much used.
7826 -- Retrieves implver information; surely not used.
7828 -- Grabs a copy of the HWRPB; surely not used.
7833 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7834 /* Alpha specific */
7835 case TARGET_NR_osf_setsysinfo
:
7836 ret
= -TARGET_EOPNOTSUPP
;
7838 case TARGET_SSI_IEEE_FP_CONTROL
:
7840 uint64_t swcr
, fpcr
, orig_fpcr
;
7842 if (get_user_u64 (swcr
, arg2
)) {
7845 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7846 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7848 /* Copied from linux ieee_swcr_to_fpcr. */
7849 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7850 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7851 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7852 | SWCR_TRAP_ENABLE_DZE
7853 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7854 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7855 | SWCR_TRAP_ENABLE_INE
)) << 57;
7856 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7857 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7859 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7864 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7866 uint64_t exc
, fpcr
, orig_fpcr
;
7869 if (get_user_u64(exc
, arg2
)) {
7873 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7875 /* We only add to the exception status here. */
7876 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
7878 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7881 /* Old exceptions are not signaled. */
7882 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7884 /* If any exceptions set by this call,
7885 and are unmasked, send a signal. */
7887 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
7888 si_code
= TARGET_FPE_FLTRES
;
7890 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
7891 si_code
= TARGET_FPE_FLTUND
;
7893 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
7894 si_code
= TARGET_FPE_FLTOVF
;
7896 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
7897 si_code
= TARGET_FPE_FLTDIV
;
7899 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
7900 si_code
= TARGET_FPE_FLTINV
;
7903 target_siginfo_t info
;
7904 info
.si_signo
= SIGFPE
;
7906 info
.si_code
= si_code
;
7907 info
._sifields
._sigfault
._addr
7908 = ((CPUArchState
*)cpu_env
)->pc
;
7909 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
7914 /* case SSI_NVPAIRS:
7915 -- Used with SSIN_UACPROC to enable unaligned accesses.
7916 case SSI_IEEE_STATE_AT_SIGNAL:
7917 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7918 -- Not implemented in linux kernel
7923 #ifdef TARGET_NR_osf_sigprocmask
7924 /* Alpha specific. */
7925 case TARGET_NR_osf_sigprocmask
:
7929 sigset_t set
, oldset
;
7932 case TARGET_SIG_BLOCK
:
7935 case TARGET_SIG_UNBLOCK
:
7938 case TARGET_SIG_SETMASK
:
7942 ret
= -TARGET_EINVAL
;
7946 target_to_host_old_sigset(&set
, &mask
);
7947 sigprocmask(how
, &set
, &oldset
);
7948 host_to_target_old_sigset(&mask
, &oldset
);
7954 #ifdef TARGET_NR_getgid32
7955 case TARGET_NR_getgid32
:
7956 ret
= get_errno(getgid());
7959 #ifdef TARGET_NR_geteuid32
7960 case TARGET_NR_geteuid32
:
7961 ret
= get_errno(geteuid());
7964 #ifdef TARGET_NR_getegid32
7965 case TARGET_NR_getegid32
:
7966 ret
= get_errno(getegid());
7969 #ifdef TARGET_NR_setreuid32
7970 case TARGET_NR_setreuid32
:
7971 ret
= get_errno(setreuid(arg1
, arg2
));
7974 #ifdef TARGET_NR_setregid32
7975 case TARGET_NR_setregid32
:
7976 ret
= get_errno(setregid(arg1
, arg2
));
7979 #ifdef TARGET_NR_getgroups32
7980 case TARGET_NR_getgroups32
:
7982 int gidsetsize
= arg1
;
7983 uint32_t *target_grouplist
;
7987 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7988 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7989 if (gidsetsize
== 0)
7991 if (!is_error(ret
)) {
7992 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7993 if (!target_grouplist
) {
7994 ret
= -TARGET_EFAULT
;
7997 for(i
= 0;i
< ret
; i
++)
7998 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7999 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8004 #ifdef TARGET_NR_setgroups32
8005 case TARGET_NR_setgroups32
:
8007 int gidsetsize
= arg1
;
8008 uint32_t *target_grouplist
;
8012 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8013 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8014 if (!target_grouplist
) {
8015 ret
= -TARGET_EFAULT
;
8018 for(i
= 0;i
< gidsetsize
; i
++)
8019 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8020 unlock_user(target_grouplist
, arg2
, 0);
8021 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8025 #ifdef TARGET_NR_fchown32
8026 case TARGET_NR_fchown32
:
8027 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8030 #ifdef TARGET_NR_setresuid32
8031 case TARGET_NR_setresuid32
:
8032 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8035 #ifdef TARGET_NR_getresuid32
8036 case TARGET_NR_getresuid32
:
8038 uid_t ruid
, euid
, suid
;
8039 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8040 if (!is_error(ret
)) {
8041 if (put_user_u32(ruid
, arg1
)
8042 || put_user_u32(euid
, arg2
)
8043 || put_user_u32(suid
, arg3
))
8049 #ifdef TARGET_NR_setresgid32
8050 case TARGET_NR_setresgid32
:
8051 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8054 #ifdef TARGET_NR_getresgid32
8055 case TARGET_NR_getresgid32
:
8057 gid_t rgid
, egid
, sgid
;
8058 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8059 if (!is_error(ret
)) {
8060 if (put_user_u32(rgid
, arg1
)
8061 || put_user_u32(egid
, arg2
)
8062 || put_user_u32(sgid
, arg3
))
8068 #ifdef TARGET_NR_chown32
8069 case TARGET_NR_chown32
:
8070 if (!(p
= lock_user_string(arg1
)))
8072 ret
= get_errno(chown(p
, arg2
, arg3
));
8073 unlock_user(p
, arg1
, 0);
8076 #ifdef TARGET_NR_setuid32
8077 case TARGET_NR_setuid32
:
8078 ret
= get_errno(setuid(arg1
));
8081 #ifdef TARGET_NR_setgid32
8082 case TARGET_NR_setgid32
:
8083 ret
= get_errno(setgid(arg1
));
8086 #ifdef TARGET_NR_setfsuid32
8087 case TARGET_NR_setfsuid32
:
8088 ret
= get_errno(setfsuid(arg1
));
8091 #ifdef TARGET_NR_setfsgid32
8092 case TARGET_NR_setfsgid32
:
8093 ret
= get_errno(setfsgid(arg1
));
8097 case TARGET_NR_pivot_root
:
8099 #ifdef TARGET_NR_mincore
8100 case TARGET_NR_mincore
:
8103 ret
= -TARGET_EFAULT
;
8104 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8106 if (!(p
= lock_user_string(arg3
)))
8108 ret
= get_errno(mincore(a
, arg2
, p
));
8109 unlock_user(p
, arg3
, ret
);
8111 unlock_user(a
, arg1
, 0);
8115 #ifdef TARGET_NR_arm_fadvise64_64
8116 case TARGET_NR_arm_fadvise64_64
:
8119 * arm_fadvise64_64 looks like fadvise64_64 but
8120 * with different argument order
8128 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8129 #ifdef TARGET_NR_fadvise64_64
8130 case TARGET_NR_fadvise64_64
:
8132 #ifdef TARGET_NR_fadvise64
8133 case TARGET_NR_fadvise64
:
8137 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8138 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8139 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8140 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8144 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8147 #ifdef TARGET_NR_madvise
8148 case TARGET_NR_madvise
:
8149 /* A straight passthrough may not be safe because qemu sometimes
8150 turns private flie-backed mappings into anonymous mappings.
8151 This will break MADV_DONTNEED.
8152 This is a hint, so ignoring and returning success is ok. */
8156 #if TARGET_ABI_BITS == 32
8157 case TARGET_NR_fcntl64
:
8161 struct target_flock64
*target_fl
;
8163 struct target_eabi_flock64
*target_efl
;
8166 cmd
= target_to_host_fcntl_cmd(arg2
);
8167 if (cmd
== -TARGET_EINVAL
) {
8173 case TARGET_F_GETLK64
:
8175 if (((CPUARMState
*)cpu_env
)->eabi
) {
8176 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8178 fl
.l_type
= tswap16(target_efl
->l_type
);
8179 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8180 fl
.l_start
= tswap64(target_efl
->l_start
);
8181 fl
.l_len
= tswap64(target_efl
->l_len
);
8182 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8183 unlock_user_struct(target_efl
, arg3
, 0);
8187 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8189 fl
.l_type
= tswap16(target_fl
->l_type
);
8190 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8191 fl
.l_start
= tswap64(target_fl
->l_start
);
8192 fl
.l_len
= tswap64(target_fl
->l_len
);
8193 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8194 unlock_user_struct(target_fl
, arg3
, 0);
8196 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8199 if (((CPUARMState
*)cpu_env
)->eabi
) {
8200 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8202 target_efl
->l_type
= tswap16(fl
.l_type
);
8203 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8204 target_efl
->l_start
= tswap64(fl
.l_start
);
8205 target_efl
->l_len
= tswap64(fl
.l_len
);
8206 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8207 unlock_user_struct(target_efl
, arg3
, 1);
8211 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8213 target_fl
->l_type
= tswap16(fl
.l_type
);
8214 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8215 target_fl
->l_start
= tswap64(fl
.l_start
);
8216 target_fl
->l_len
= tswap64(fl
.l_len
);
8217 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8218 unlock_user_struct(target_fl
, arg3
, 1);
8223 case TARGET_F_SETLK64
:
8224 case TARGET_F_SETLKW64
:
8226 if (((CPUARMState
*)cpu_env
)->eabi
) {
8227 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8229 fl
.l_type
= tswap16(target_efl
->l_type
);
8230 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8231 fl
.l_start
= tswap64(target_efl
->l_start
);
8232 fl
.l_len
= tswap64(target_efl
->l_len
);
8233 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8234 unlock_user_struct(target_efl
, arg3
, 0);
8238 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8240 fl
.l_type
= tswap16(target_fl
->l_type
);
8241 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8242 fl
.l_start
= tswap64(target_fl
->l_start
);
8243 fl
.l_len
= tswap64(target_fl
->l_len
);
8244 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8245 unlock_user_struct(target_fl
, arg3
, 0);
8247 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8250 ret
= do_fcntl(arg1
, arg2
, arg3
);
8256 #ifdef TARGET_NR_cacheflush
8257 case TARGET_NR_cacheflush
:
8258 /* self-modifying code is handled automatically, so nothing needed */
8262 #ifdef TARGET_NR_security
8263 case TARGET_NR_security
:
8266 #ifdef TARGET_NR_getpagesize
8267 case TARGET_NR_getpagesize
:
8268 ret
= TARGET_PAGE_SIZE
;
8271 case TARGET_NR_gettid
:
8272 ret
= get_errno(gettid());
8274 #ifdef TARGET_NR_readahead
8275 case TARGET_NR_readahead
:
8276 #if TARGET_ABI_BITS == 32
8277 if (regpairs_aligned(cpu_env
)) {
8282 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8284 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8289 #ifdef TARGET_NR_setxattr
8290 case TARGET_NR_listxattr
:
8291 case TARGET_NR_llistxattr
:
8295 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8297 ret
= -TARGET_EFAULT
;
8301 p
= lock_user_string(arg1
);
8303 if (num
== TARGET_NR_listxattr
) {
8304 ret
= get_errno(listxattr(p
, b
, arg3
));
8306 ret
= get_errno(llistxattr(p
, b
, arg3
));
8309 ret
= -TARGET_EFAULT
;
8311 unlock_user(p
, arg1
, 0);
8312 unlock_user(b
, arg2
, arg3
);
8315 case TARGET_NR_flistxattr
:
8319 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8321 ret
= -TARGET_EFAULT
;
8325 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8326 unlock_user(b
, arg2
, arg3
);
8329 case TARGET_NR_setxattr
:
8330 case TARGET_NR_lsetxattr
:
8332 void *p
, *n
, *v
= 0;
8334 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8336 ret
= -TARGET_EFAULT
;
8340 p
= lock_user_string(arg1
);
8341 n
= lock_user_string(arg2
);
8343 if (num
== TARGET_NR_setxattr
) {
8344 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8346 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8349 ret
= -TARGET_EFAULT
;
8351 unlock_user(p
, arg1
, 0);
8352 unlock_user(n
, arg2
, 0);
8353 unlock_user(v
, arg3
, 0);
8356 case TARGET_NR_fsetxattr
:
8360 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8362 ret
= -TARGET_EFAULT
;
8366 n
= lock_user_string(arg2
);
8368 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8370 ret
= -TARGET_EFAULT
;
8372 unlock_user(n
, arg2
, 0);
8373 unlock_user(v
, arg3
, 0);
8376 case TARGET_NR_getxattr
:
8377 case TARGET_NR_lgetxattr
:
8379 void *p
, *n
, *v
= 0;
8381 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8383 ret
= -TARGET_EFAULT
;
8387 p
= lock_user_string(arg1
);
8388 n
= lock_user_string(arg2
);
8390 if (num
== TARGET_NR_getxattr
) {
8391 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8393 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8396 ret
= -TARGET_EFAULT
;
8398 unlock_user(p
, arg1
, 0);
8399 unlock_user(n
, arg2
, 0);
8400 unlock_user(v
, arg3
, arg4
);
8403 case TARGET_NR_fgetxattr
:
8407 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8409 ret
= -TARGET_EFAULT
;
8413 n
= lock_user_string(arg2
);
8415 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8417 ret
= -TARGET_EFAULT
;
8419 unlock_user(n
, arg2
, 0);
8420 unlock_user(v
, arg3
, arg4
);
8423 case TARGET_NR_removexattr
:
8424 case TARGET_NR_lremovexattr
:
8427 p
= lock_user_string(arg1
);
8428 n
= lock_user_string(arg2
);
8430 if (num
== TARGET_NR_removexattr
) {
8431 ret
= get_errno(removexattr(p
, n
));
8433 ret
= get_errno(lremovexattr(p
, n
));
8436 ret
= -TARGET_EFAULT
;
8438 unlock_user(p
, arg1
, 0);
8439 unlock_user(n
, arg2
, 0);
8442 case TARGET_NR_fremovexattr
:
8445 n
= lock_user_string(arg2
);
8447 ret
= get_errno(fremovexattr(arg1
, n
));
8449 ret
= -TARGET_EFAULT
;
8451 unlock_user(n
, arg2
, 0);
8455 #endif /* CONFIG_ATTR */
8456 #ifdef TARGET_NR_set_thread_area
8457 case TARGET_NR_set_thread_area
:
8458 #if defined(TARGET_MIPS)
8459 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8462 #elif defined(TARGET_CRIS)
8464 ret
= -TARGET_EINVAL
;
8466 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8470 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8471 ret
= do_set_thread_area(cpu_env
, arg1
);
8474 goto unimplemented_nowarn
;
8477 #ifdef TARGET_NR_get_thread_area
8478 case TARGET_NR_get_thread_area
:
8479 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8480 ret
= do_get_thread_area(cpu_env
, arg1
);
8482 goto unimplemented_nowarn
;
8485 #ifdef TARGET_NR_getdomainname
8486 case TARGET_NR_getdomainname
:
8487 goto unimplemented_nowarn
;
8490 #ifdef TARGET_NR_clock_gettime
8491 case TARGET_NR_clock_gettime
:
8494 ret
= get_errno(clock_gettime(arg1
, &ts
));
8495 if (!is_error(ret
)) {
8496 host_to_target_timespec(arg2
, &ts
);
8501 #ifdef TARGET_NR_clock_getres
8502 case TARGET_NR_clock_getres
:
8505 ret
= get_errno(clock_getres(arg1
, &ts
));
8506 if (!is_error(ret
)) {
8507 host_to_target_timespec(arg2
, &ts
);
8512 #ifdef TARGET_NR_clock_nanosleep
8513 case TARGET_NR_clock_nanosleep
:
8516 target_to_host_timespec(&ts
, arg3
);
8517 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8519 host_to_target_timespec(arg4
, &ts
);
8524 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8525 case TARGET_NR_set_tid_address
:
8526 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8530 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8531 case TARGET_NR_tkill
:
8532 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8536 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8537 case TARGET_NR_tgkill
:
8538 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8539 target_to_host_signal(arg3
)));
8543 #ifdef TARGET_NR_set_robust_list
8544 case TARGET_NR_set_robust_list
:
8545 goto unimplemented_nowarn
;
8548 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8549 case TARGET_NR_utimensat
:
8551 struct timespec
*tsp
, ts
[2];
8555 target_to_host_timespec(ts
, arg3
);
8556 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8560 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8562 if (!(p
= lock_user_string(arg2
))) {
8563 ret
= -TARGET_EFAULT
;
8566 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8567 unlock_user(p
, arg2
, 0);
8572 #if defined(CONFIG_USE_NPTL)
8573 case TARGET_NR_futex
:
8574 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8577 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8578 case TARGET_NR_inotify_init
:
8579 ret
= get_errno(sys_inotify_init());
8582 #ifdef CONFIG_INOTIFY1
8583 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8584 case TARGET_NR_inotify_init1
:
8585 ret
= get_errno(sys_inotify_init1(arg1
));
8589 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8590 case TARGET_NR_inotify_add_watch
:
8591 p
= lock_user_string(arg2
);
8592 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8593 unlock_user(p
, arg2
, 0);
8596 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8597 case TARGET_NR_inotify_rm_watch
:
8598 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8602 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8603 case TARGET_NR_mq_open
:
8605 struct mq_attr posix_mq_attr
;
8607 p
= lock_user_string(arg1
- 1);
8609 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8610 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8611 unlock_user (p
, arg1
, 0);
8615 case TARGET_NR_mq_unlink
:
8616 p
= lock_user_string(arg1
- 1);
8617 ret
= get_errno(mq_unlink(p
));
8618 unlock_user (p
, arg1
, 0);
8621 case TARGET_NR_mq_timedsend
:
8625 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8627 target_to_host_timespec(&ts
, arg5
);
8628 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8629 host_to_target_timespec(arg5
, &ts
);
8632 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8633 unlock_user (p
, arg2
, arg3
);
8637 case TARGET_NR_mq_timedreceive
:
8642 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8644 target_to_host_timespec(&ts
, arg5
);
8645 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8646 host_to_target_timespec(arg5
, &ts
);
8649 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8650 unlock_user (p
, arg2
, arg3
);
8652 put_user_u32(prio
, arg4
);
8656 /* Not implemented for now... */
8657 /* case TARGET_NR_mq_notify: */
8660 case TARGET_NR_mq_getsetattr
:
8662 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8665 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8666 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8669 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8670 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8677 #ifdef CONFIG_SPLICE
8678 #ifdef TARGET_NR_tee
8681 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8685 #ifdef TARGET_NR_splice
8686 case TARGET_NR_splice
:
8688 loff_t loff_in
, loff_out
;
8689 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8691 get_user_u64(loff_in
, arg2
);
8692 ploff_in
= &loff_in
;
8695 get_user_u64(loff_out
, arg2
);
8696 ploff_out
= &loff_out
;
8698 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8702 #ifdef TARGET_NR_vmsplice
8703 case TARGET_NR_vmsplice
:
8705 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8707 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
8708 unlock_iovec(vec
, arg2
, arg3
, 0);
8710 ret
= -host_to_target_errno(errno
);
8715 #endif /* CONFIG_SPLICE */
8716 #ifdef CONFIG_EVENTFD
8717 #if defined(TARGET_NR_eventfd)
8718 case TARGET_NR_eventfd
:
8719 ret
= get_errno(eventfd(arg1
, 0));
8722 #if defined(TARGET_NR_eventfd2)
8723 case TARGET_NR_eventfd2
:
8724 ret
= get_errno(eventfd(arg1
, arg2
));
8727 #endif /* CONFIG_EVENTFD */
8728 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8729 case TARGET_NR_fallocate
:
8730 #if TARGET_ABI_BITS == 32
8731 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8732 target_offset64(arg5
, arg6
)));
8734 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
8738 #if defined(CONFIG_SYNC_FILE_RANGE)
8739 #if defined(TARGET_NR_sync_file_range)
8740 case TARGET_NR_sync_file_range
:
8741 #if TARGET_ABI_BITS == 32
8742 #if defined(TARGET_MIPS)
8743 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8744 target_offset64(arg5
, arg6
), arg7
));
8746 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
8747 target_offset64(arg4
, arg5
), arg6
));
8748 #endif /* !TARGET_MIPS */
8750 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
8754 #if defined(TARGET_NR_sync_file_range2)
8755 case TARGET_NR_sync_file_range2
:
8756 /* This is like sync_file_range but the arguments are reordered */
8757 #if TARGET_ABI_BITS == 32
8758 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8759 target_offset64(arg5
, arg6
), arg2
));
8761 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
8766 #if defined(CONFIG_EPOLL)
8767 #if defined(TARGET_NR_epoll_create)
8768 case TARGET_NR_epoll_create
:
8769 ret
= get_errno(epoll_create(arg1
));
8772 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8773 case TARGET_NR_epoll_create1
:
8774 ret
= get_errno(epoll_create1(arg1
));
8777 #if defined(TARGET_NR_epoll_ctl)
8778 case TARGET_NR_epoll_ctl
:
8780 struct epoll_event ep
;
8781 struct epoll_event
*epp
= 0;
8783 struct target_epoll_event
*target_ep
;
8784 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
8787 ep
.events
= tswap32(target_ep
->events
);
8788 /* The epoll_data_t union is just opaque data to the kernel,
8789 * so we transfer all 64 bits across and need not worry what
8790 * actual data type it is.
8792 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
8793 unlock_user_struct(target_ep
, arg4
, 0);
8796 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
8801 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8802 #define IMPLEMENT_EPOLL_PWAIT
8804 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8805 #if defined(TARGET_NR_epoll_wait)
8806 case TARGET_NR_epoll_wait
:
8808 #if defined(IMPLEMENT_EPOLL_PWAIT)
8809 case TARGET_NR_epoll_pwait
:
8812 struct target_epoll_event
*target_ep
;
8813 struct epoll_event
*ep
;
8815 int maxevents
= arg3
;
8818 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8819 maxevents
* sizeof(struct target_epoll_event
), 1);
8824 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8827 #if defined(IMPLEMENT_EPOLL_PWAIT)
8828 case TARGET_NR_epoll_pwait
:
8830 target_sigset_t
*target_set
;
8831 sigset_t _set
, *set
= &_set
;
8834 target_set
= lock_user(VERIFY_READ
, arg5
,
8835 sizeof(target_sigset_t
), 1);
8837 unlock_user(target_ep
, arg2
, 0);
8840 target_to_host_sigset(set
, target_set
);
8841 unlock_user(target_set
, arg5
, 0);
8846 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8850 #if defined(TARGET_NR_epoll_wait)
8851 case TARGET_NR_epoll_wait
:
8852 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8856 ret
= -TARGET_ENOSYS
;
8858 if (!is_error(ret
)) {
8860 for (i
= 0; i
< ret
; i
++) {
8861 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8862 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8865 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8870 #ifdef TARGET_NR_prlimit64
8871 case TARGET_NR_prlimit64
:
8873 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8874 struct target_rlimit64
*target_rnew
, *target_rold
;
8875 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
8877 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
8880 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
8881 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
8882 unlock_user_struct(target_rnew
, arg3
, 0);
8886 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
8887 if (!is_error(ret
) && arg4
) {
8888 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
8891 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
8892 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
8893 unlock_user_struct(target_rold
, arg4
, 1);
8898 #ifdef TARGET_NR_gethostname
8899 case TARGET_NR_gethostname
:
8901 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8903 ret
= get_errno(gethostname(name
, arg2
));
8904 unlock_user(name
, arg1
, arg2
);
8906 ret
= -TARGET_EFAULT
;
8913 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8914 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8915 unimplemented_nowarn
:
8917 ret
= -TARGET_ENOSYS
;
8922 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8925 print_syscall_ret(num
, ret
);
8928 ret
= -TARGET_EFAULT
;