4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
49 int __clone2(int (*fn
)(void *), void *child_stack_base
,
50 size_t stack_size
, int flags
, void *arg
, ...);
52 #include <sys/socket.h>
56 #include <sys/times.h>
59 #include <sys/statfs.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/utsname.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
104 #include <linux/fb.h>
105 #include <linux/vt.h>
106 #include <linux/dm-ioctl.h>
107 #include <linux/reboot.h>
108 #include "linux_loop.h"
109 #include "cpu-uname.h"
113 #if defined(CONFIG_USE_NPTL)
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 /* XXX: Hardcode the above values. */
118 #define CLONE_NPTL_FLAGS2 0
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_faccessat __NR_faccessat
185 #define __NR_sys_fchmodat __NR_fchmodat
186 #define __NR_sys_fchownat __NR_fchownat
187 #define __NR_sys_fstatat64 __NR_fstatat64
188 #define __NR_sys_futimesat __NR_futimesat
189 #define __NR_sys_getcwd1 __NR_getcwd
190 #define __NR_sys_getdents __NR_getdents
191 #define __NR_sys_getdents64 __NR_getdents64
192 #define __NR_sys_getpriority __NR_getpriority
193 #define __NR_sys_linkat __NR_linkat
194 #define __NR_sys_mkdirat __NR_mkdirat
195 #define __NR_sys_mknodat __NR_mknodat
196 #define __NR_sys_newfstatat __NR_newfstatat
197 #define __NR_sys_openat __NR_openat
198 #define __NR_sys_readlinkat __NR_readlinkat
199 #define __NR_sys_renameat __NR_renameat
200 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
201 #define __NR_sys_symlinkat __NR_symlinkat
202 #define __NR_sys_syslog __NR_syslog
203 #define __NR_sys_tgkill __NR_tgkill
204 #define __NR_sys_tkill __NR_tkill
205 #define __NR_sys_unlinkat __NR_unlinkat
206 #define __NR_sys_utimensat __NR_utimensat
207 #define __NR_sys_futex __NR_futex
208 #define __NR_sys_inotify_init __NR_inotify_init
209 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
210 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
212 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
214 #define __NR__llseek __NR_lseek
218 _syscall0(int, gettid
)
220 /* This is a replacement for the host gettid() and must return a host
222 static int gettid(void) {
226 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
227 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
228 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
230 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
231 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
232 loff_t
*, res
, uint
, wh
);
234 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
235 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
236 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
237 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
239 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
240 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
242 #ifdef __NR_exit_group
243 _syscall1(int,exit_group
,int,error_code
)
245 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
246 _syscall1(int,set_tid_address
,int *,tidptr
)
248 #if defined(CONFIG_USE_NPTL)
249 #if defined(TARGET_NR_futex) && defined(__NR_futex)
250 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
251 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
254 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
255 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
256 unsigned long *, user_mask_ptr
);
257 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
258 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
259 unsigned long *, user_mask_ptr
);
260 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
263 static bitmask_transtbl fcntl_flags_tbl
[] = {
264 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
265 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
266 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
267 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
268 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
269 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
270 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
271 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
272 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
273 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
274 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
275 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
276 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
277 #if defined(O_DIRECT)
278 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
280 #if defined(O_NOATIME)
281 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
283 #if defined(O_CLOEXEC)
284 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
287 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
289 /* Don't terminate the list prematurely on 64-bit host+guest. */
290 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
291 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Copy one utsname field, truncated to __NEW_UTS_LEN characters and
 * always NUL-terminated. */
#define COPY_UTSNAME_FIELD(dest, src)                        \
    do {                                                     \
        /* __NEW_UTS_LEN doesn't include terminating null */ \
        (void) strncpy((dest), (src), __NEW_UTS_LEN);        \
        (dest)[__NEW_UTS_LEN] = '\0';                        \
    } while (0)

/* Fill a kernel-layout struct new_utsname from the host's uname(2).
 * Returns 0 on success, -1 on failure (errno set by uname()). */
static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0) {
        return -1;
    }

    /* The libc struct utsname and the kernel's struct new_utsname may
     * differ, so translate field by field rather than copying wholesale. */
    memset(buf, 0, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return 0;
}

#undef COPY_UTSNAME_FIELD
/* getcwd helper with guest syscall semantics: on success returns the
 * path length *including* the terminating NUL; on failure returns -1
 * (errno is set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
341 * Host system seems to have atfile syscall stubs available. We
342 * now enable them one by one as specified by target syscall_nr.h.
345 #ifdef TARGET_NR_faccessat
/* Thin wrapper over faccessat(2); the guest ABI passes no AT_* flags. */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return faccessat(dirfd, pathname, mode, 0);
}
351 #ifdef TARGET_NR_fchmodat
/* Thin wrapper over fchmodat(2); the guest ABI passes no AT_* flags. */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return fchmodat(dirfd, pathname, mode, 0);
}
357 #if defined(TARGET_NR_fchownat)
/* Thin wrapper over fchownat(2); flags pass straight through. */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return fchownat(dirfd, pathname, owner, group, flags);
}
364 #ifdef __NR_fstatat64
/* Maps the guest's fstatat64 onto the host fstatat(2).
 * NOTE(review): the 'int flags' parameter line was lost in extraction
 * but is required by the call below — confirm against upstream. */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
371 #ifdef __NR_newfstatat
/* Maps the guest's newfstatat onto the host fstatat(2).
 * NOTE(review): the 'int flags' parameter line was lost in extraction
 * but is required by the call below — confirm against upstream. */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
378 #ifdef TARGET_NR_futimesat
/* Thin wrapper over futimesat(2); a NULL 'times' sets current time. */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return futimesat(dirfd, pathname, times);
}
385 #ifdef TARGET_NR_linkat
/* Thin wrapper over linkat(2); flags pass straight through. */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return linkat(olddirfd, oldpath, newdirfd, newpath, flags);
}
392 #ifdef TARGET_NR_mkdirat
/* Thin wrapper over mkdirat(2). */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return mkdirat(dirfd, pathname, mode);
}
398 #ifdef TARGET_NR_mknodat
/* Thin wrapper over mknodat(2).
 * NOTE(review): the 'dev_t dev' parameter line was lost in extraction
 * but is required by the call below — confirm against upstream. */
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return mknodat(dirfd, pathname, mode, dev);
}
405 #ifdef TARGET_NR_openat
/* Wrapper over openat(2). open(2) takes the extra 'mode' argument only
 * when O_CREAT is present, so forward it conditionally. */
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    return (flags & O_CREAT) ? openat(dirfd, pathname, flags, mode)
                             : openat(dirfd, pathname, flags);
}
418 #ifdef TARGET_NR_readlinkat
/* Thin wrapper over readlinkat(2); returns byte count, no NUL appended. */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf,
                          size_t bufsiz)
{
    return readlinkat(dirfd, pathname, buf, bufsiz);
}
424 #ifdef TARGET_NR_renameat
/* Thin wrapper over renameat(2). */
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return renameat(olddirfd, oldpath, newdirfd, newpath);
}
431 #ifdef TARGET_NR_symlinkat
/* Thin wrapper over symlinkat(2); note oldpath comes first, with no dirfd. */
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return symlinkat(oldpath, newdirfd, newpath);
}
437 #ifdef TARGET_NR_unlinkat
/* Thin wrapper over unlinkat(2); AT_REMOVEDIR in flags removes a directory. */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return unlinkat(dirfd, pathname, flags);
}
443 #else /* !CONFIG_ATFILE */
446 * Try direct syscalls instead
448 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
449 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
451 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
452 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
454 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
455 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
456 uid_t
,owner
,gid_t
,group
,int,flags
)
458 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
459 defined(__NR_fstatat64)
460 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
461 struct stat
*,buf
,int,flags
)
463 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
464 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
465 const struct timeval
*,times
)
467 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
468 defined(__NR_newfstatat)
469 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
470 struct stat
*,buf
,int,flags
)
472 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
473 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
474 int,newdirfd
,const char *,newpath
,int,flags
)
476 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
477 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
479 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
480 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
481 mode_t
,mode
,dev_t
,dev
)
483 #if defined(TARGET_NR_openat) && defined(__NR_openat)
484 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
486 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
487 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
488 char *,buf
,size_t,bufsize
)
490 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
491 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
492 int,newdirfd
,const char *,newpath
)
494 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
495 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
496 int,newdirfd
,const char *,newpath
)
498 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
499 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
502 #endif /* CONFIG_ATFILE */
504 #ifdef CONFIG_UTIMENSAT
/* Set file timestamps. With a NULL pathname, operate on the fd itself
 * via futimens() (flags are only meaningful with a pathname). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    }
    return utimensat(dirfd, pathname, times, flags);
}
514 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
515 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
516 const struct timespec
*,tsp
,int,flags
)
518 #endif /* CONFIG_UTIMENSAT */
520 #ifdef CONFIG_INOTIFY
521 #include <sys/inotify.h>
523 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over inotify_init(2). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
529 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over inotify_add_watch(2). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
535 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over inotify_rm_watch(2). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
541 #ifdef CONFIG_INOTIFY1
542 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over inotify_init1(2); flags may contain IN_NONBLOCK/IN_CLOEXEC. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
550 /* Userspace can usually survive runtime without inotify */
551 #undef TARGET_NR_inotify_init
552 #undef TARGET_NR_inotify_init1
553 #undef TARGET_NR_inotify_add_watch
554 #undef TARGET_NR_inotify_rm_watch
555 #endif /* CONFIG_INOTIFY */
557 #if defined(TARGET_NR_ppoll)
559 # define __NR_ppoll -1
561 #define __NR_sys_ppoll __NR_ppoll
562 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
563 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
567 #if defined(TARGET_NR_pselect6)
568 #ifndef __NR_pselect6
569 # define __NR_pselect6 -1
571 #define __NR_sys_pselect6 __NR_pselect6
572 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
573 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
576 #if defined(TARGET_NR_prlimit64)
577 #ifndef __NR_prlimit64
578 # define __NR_prlimit64 -1
580 #define __NR_sys_prlimit64 __NR_prlimit64
581 /* The glibc rlimit structure may not be that used by the underlying syscall */
582 struct host_rlimit64
{
586 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
587 const struct host_rlimit64
*, new_limit
,
588 struct host_rlimit64
*, old_limit
)
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#if defined(TARGET_ARM)
static inline int regpairs_aligned(void *cpu_env)
{
    /* Only the ARM EABI requires even-register alignment of 64-bit args. */
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
607 #define ERRNO_TABLE_SIZE 1200
609 /* target_to_host_errno_table[] is initialized from
610 * host_to_target_errno_table[] in syscall_init(). */
611 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
615 * This list is the union of errno values overridden in asm-<arch>/errno.h
616 * minus the errnos that are not actually generic to all archs.
618 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
619 [EIDRM
] = TARGET_EIDRM
,
620 [ECHRNG
] = TARGET_ECHRNG
,
621 [EL2NSYNC
] = TARGET_EL2NSYNC
,
622 [EL3HLT
] = TARGET_EL3HLT
,
623 [EL3RST
] = TARGET_EL3RST
,
624 [ELNRNG
] = TARGET_ELNRNG
,
625 [EUNATCH
] = TARGET_EUNATCH
,
626 [ENOCSI
] = TARGET_ENOCSI
,
627 [EL2HLT
] = TARGET_EL2HLT
,
628 [EDEADLK
] = TARGET_EDEADLK
,
629 [ENOLCK
] = TARGET_ENOLCK
,
630 [EBADE
] = TARGET_EBADE
,
631 [EBADR
] = TARGET_EBADR
,
632 [EXFULL
] = TARGET_EXFULL
,
633 [ENOANO
] = TARGET_ENOANO
,
634 [EBADRQC
] = TARGET_EBADRQC
,
635 [EBADSLT
] = TARGET_EBADSLT
,
636 [EBFONT
] = TARGET_EBFONT
,
637 [ENOSTR
] = TARGET_ENOSTR
,
638 [ENODATA
] = TARGET_ENODATA
,
639 [ETIME
] = TARGET_ETIME
,
640 [ENOSR
] = TARGET_ENOSR
,
641 [ENONET
] = TARGET_ENONET
,
642 [ENOPKG
] = TARGET_ENOPKG
,
643 [EREMOTE
] = TARGET_EREMOTE
,
644 [ENOLINK
] = TARGET_ENOLINK
,
645 [EADV
] = TARGET_EADV
,
646 [ESRMNT
] = TARGET_ESRMNT
,
647 [ECOMM
] = TARGET_ECOMM
,
648 [EPROTO
] = TARGET_EPROTO
,
649 [EDOTDOT
] = TARGET_EDOTDOT
,
650 [EMULTIHOP
] = TARGET_EMULTIHOP
,
651 [EBADMSG
] = TARGET_EBADMSG
,
652 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
653 [EOVERFLOW
] = TARGET_EOVERFLOW
,
654 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
655 [EBADFD
] = TARGET_EBADFD
,
656 [EREMCHG
] = TARGET_EREMCHG
,
657 [ELIBACC
] = TARGET_ELIBACC
,
658 [ELIBBAD
] = TARGET_ELIBBAD
,
659 [ELIBSCN
] = TARGET_ELIBSCN
,
660 [ELIBMAX
] = TARGET_ELIBMAX
,
661 [ELIBEXEC
] = TARGET_ELIBEXEC
,
662 [EILSEQ
] = TARGET_EILSEQ
,
663 [ENOSYS
] = TARGET_ENOSYS
,
664 [ELOOP
] = TARGET_ELOOP
,
665 [ERESTART
] = TARGET_ERESTART
,
666 [ESTRPIPE
] = TARGET_ESTRPIPE
,
667 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
668 [EUSERS
] = TARGET_EUSERS
,
669 [ENOTSOCK
] = TARGET_ENOTSOCK
,
670 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
671 [EMSGSIZE
] = TARGET_EMSGSIZE
,
672 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
673 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
674 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
675 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
676 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
677 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
678 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
679 [EADDRINUSE
] = TARGET_EADDRINUSE
,
680 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
681 [ENETDOWN
] = TARGET_ENETDOWN
,
682 [ENETUNREACH
] = TARGET_ENETUNREACH
,
683 [ENETRESET
] = TARGET_ENETRESET
,
684 [ECONNABORTED
] = TARGET_ECONNABORTED
,
685 [ECONNRESET
] = TARGET_ECONNRESET
,
686 [ENOBUFS
] = TARGET_ENOBUFS
,
687 [EISCONN
] = TARGET_EISCONN
,
688 [ENOTCONN
] = TARGET_ENOTCONN
,
689 [EUCLEAN
] = TARGET_EUCLEAN
,
690 [ENOTNAM
] = TARGET_ENOTNAM
,
691 [ENAVAIL
] = TARGET_ENAVAIL
,
692 [EISNAM
] = TARGET_EISNAM
,
693 [EREMOTEIO
] = TARGET_EREMOTEIO
,
694 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
695 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
696 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
697 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
698 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
699 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
700 [EALREADY
] = TARGET_EALREADY
,
701 [EINPROGRESS
] = TARGET_EINPROGRESS
,
702 [ESTALE
] = TARGET_ESTALE
,
703 [ECANCELED
] = TARGET_ECANCELED
,
704 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
705 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
707 [ENOKEY
] = TARGET_ENOKEY
,
710 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
713 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
716 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
719 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
721 #ifdef ENOTRECOVERABLE
722 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
726 static inline int host_to_target_errno(int err
)
728 if(host_to_target_errno_table
[err
])
729 return host_to_target_errno_table
[err
];
733 static inline int target_to_host_errno(int err
)
735 if (target_to_host_errno_table
[err
])
736 return target_to_host_errno_table
[err
];
740 static inline abi_long
get_errno(abi_long ret
)
743 return -host_to_target_errno(errno
);
748 static inline int is_error(abi_long ret
)
750 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
753 char *target_strerror(int err
)
755 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
758 return strerror(target_to_host_errno(err
));
761 static abi_ulong target_brk
;
762 static abi_ulong target_original_brk
;
763 static abi_ulong brk_page
;
765 void target_set_brk(abi_ulong new_brk
)
767 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
768 brk_page
= HOST_PAGE_ALIGN(target_brk
);
771 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
772 #define DEBUGF_BRK(message, args...)
774 /* do_brk() must return target values and target errnos. */
775 abi_long
do_brk(abi_ulong new_brk
)
777 abi_long mapped_addr
;
780 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
783 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
786 if (new_brk
< target_original_brk
) {
787 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
792 /* If the new brk is less than the highest page reserved to the
793 * target heap allocation, set it and we're almost done... */
794 if (new_brk
<= brk_page
) {
795 /* Heap contents are initialized to zero, as for anonymous
797 if (new_brk
> target_brk
) {
798 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
800 target_brk
= new_brk
;
801 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
805 /* We need to allocate more memory after the brk... Note that
806 * we don't use MAP_FIXED because that will map over the top of
807 * any existing mapping (like the one with the host libc or qemu
808 * itself); instead we treat "mapped but at wrong address" as
809 * a failure and unmap again.
811 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
812 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
813 PROT_READ
|PROT_WRITE
,
814 MAP_ANON
|MAP_PRIVATE
, 0, 0));
816 if (mapped_addr
== brk_page
) {
817 /* Heap contents are initialized to zero, as for anonymous
818 * mapped pages. Technically the new pages are already
819 * initialized to zero since they *are* anonymous mapped
820 * pages, however we have to take care with the contents that
821 * come from the remaining part of the previous page: it may
822 * contains garbage data due to a previous heap usage (grown
824 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
826 target_brk
= new_brk
;
827 brk_page
= HOST_PAGE_ALIGN(target_brk
);
828 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
831 } else if (mapped_addr
!= -1) {
832 /* Mapped but at wrong address, meaning there wasn't actually
833 * enough space for this brk.
835 target_munmap(mapped_addr
, new_alloc_size
);
837 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
840 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
843 #if defined(TARGET_ALPHA)
844 /* We (partially) emulate OSF/1 on Alpha, which requires we
845 return a proper errno, not an unchanged brk value. */
846 return -TARGET_ENOMEM
;
848 /* For everything else, return the previous break. */
852 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
853 abi_ulong target_fds_addr
,
857 abi_ulong b
, *target_fds
;
859 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
860 if (!(target_fds
= lock_user(VERIFY_READ
,
862 sizeof(abi_ulong
) * nw
,
864 return -TARGET_EFAULT
;
868 for (i
= 0; i
< nw
; i
++) {
869 /* grab the abi_ulong */
870 __get_user(b
, &target_fds
[i
]);
871 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
872 /* check the bit inside the abi_ulong */
879 unlock_user(target_fds
, target_fds_addr
, 0);
884 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
885 abi_ulong target_fds_addr
,
888 if (target_fds_addr
) {
889 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
890 return -TARGET_EFAULT
;
898 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
904 abi_ulong
*target_fds
;
906 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
907 if (!(target_fds
= lock_user(VERIFY_WRITE
,
909 sizeof(abi_ulong
) * nw
,
911 return -TARGET_EFAULT
;
914 for (i
= 0; i
< nw
; i
++) {
916 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
917 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
920 __put_user(v
, &target_fds
[i
]);
923 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
928 #if defined(__alpha__)
934 static inline abi_long
host_to_target_clock_t(long ticks
)
936 #if HOST_HZ == TARGET_HZ
939 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
943 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
944 const struct rusage
*rusage
)
946 struct target_rusage
*target_rusage
;
948 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
949 return -TARGET_EFAULT
;
950 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
951 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
952 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
953 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
954 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
955 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
956 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
957 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
958 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
959 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
960 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
961 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
962 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
963 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
964 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
965 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
966 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
967 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
968 unlock_user_struct(target_rusage
, target_addr
, 1);
973 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
975 abi_ulong target_rlim_swap
;
978 target_rlim_swap
= tswapal(target_rlim
);
979 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
980 return RLIM_INFINITY
;
982 result
= target_rlim_swap
;
983 if (target_rlim_swap
!= (rlim_t
)result
)
984 return RLIM_INFINITY
;
989 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
991 abi_ulong target_rlim_swap
;
994 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
995 target_rlim_swap
= TARGET_RLIM_INFINITY
;
997 target_rlim_swap
= rlim
;
998 result
= tswapal(target_rlim_swap
);
1003 static inline int target_to_host_resource(int code
)
1006 case TARGET_RLIMIT_AS
:
1008 case TARGET_RLIMIT_CORE
:
1010 case TARGET_RLIMIT_CPU
:
1012 case TARGET_RLIMIT_DATA
:
1014 case TARGET_RLIMIT_FSIZE
:
1015 return RLIMIT_FSIZE
;
1016 case TARGET_RLIMIT_LOCKS
:
1017 return RLIMIT_LOCKS
;
1018 case TARGET_RLIMIT_MEMLOCK
:
1019 return RLIMIT_MEMLOCK
;
1020 case TARGET_RLIMIT_MSGQUEUE
:
1021 return RLIMIT_MSGQUEUE
;
1022 case TARGET_RLIMIT_NICE
:
1024 case TARGET_RLIMIT_NOFILE
:
1025 return RLIMIT_NOFILE
;
1026 case TARGET_RLIMIT_NPROC
:
1027 return RLIMIT_NPROC
;
1028 case TARGET_RLIMIT_RSS
:
1030 case TARGET_RLIMIT_RTPRIO
:
1031 return RLIMIT_RTPRIO
;
1032 case TARGET_RLIMIT_SIGPENDING
:
1033 return RLIMIT_SIGPENDING
;
1034 case TARGET_RLIMIT_STACK
:
1035 return RLIMIT_STACK
;
1041 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1042 abi_ulong target_tv_addr
)
1044 struct target_timeval
*target_tv
;
1046 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1047 return -TARGET_EFAULT
;
1049 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1050 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1052 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1057 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1058 const struct timeval
*tv
)
1060 struct target_timeval
*target_tv
;
1062 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1063 return -TARGET_EFAULT
;
1065 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1066 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1068 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1073 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1076 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1077 abi_ulong target_mq_attr_addr
)
1079 struct target_mq_attr
*target_mq_attr
;
1081 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1082 target_mq_attr_addr
, 1))
1083 return -TARGET_EFAULT
;
1085 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1086 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1087 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1088 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1090 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1095 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1096 const struct mq_attr
*attr
)
1098 struct target_mq_attr
*target_mq_attr
;
1100 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1101 target_mq_attr_addr
, 0))
1102 return -TARGET_EFAULT
;
1104 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1105 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1106 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1107 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1109 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1115 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1116 /* do_select() must return target values and target errnos. */
1117 static abi_long
do_select(int n
,
1118 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1119 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1121 fd_set rfds
, wfds
, efds
;
1122 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1123 struct timeval tv
, *tv_ptr
;
1126 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1130 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1134 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1139 if (target_tv_addr
) {
1140 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1141 return -TARGET_EFAULT
;
1147 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1149 if (!is_error(ret
)) {
1150 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1151 return -TARGET_EFAULT
;
1152 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1153 return -TARGET_EFAULT
;
1154 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1155 return -TARGET_EFAULT
;
1157 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1158 return -TARGET_EFAULT
;
1165 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1168 return pipe2(host_pipe
, flags
);
1174 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1175 int flags
, int is_pipe2
)
1179 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1182 return get_errno(ret
);
1184 /* Several targets have special calling conventions for the original
1185 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1187 #if defined(TARGET_ALPHA)
1188 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1189 return host_pipe
[0];
1190 #elif defined(TARGET_MIPS)
1191 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1192 return host_pipe
[0];
1193 #elif defined(TARGET_SH4)
1194 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1195 return host_pipe
[0];
1199 if (put_user_s32(host_pipe
[0], pipedes
)
1200 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1201 return -TARGET_EFAULT
;
1202 return get_errno(ret
);
1205 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1206 abi_ulong target_addr
,
1209 struct target_ip_mreqn
*target_smreqn
;
1211 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1213 return -TARGET_EFAULT
;
1214 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1215 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1216 if (len
== sizeof(struct target_ip_mreqn
))
1217 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1218 unlock_user(target_smreqn
, target_addr
, 0);
1223 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1224 abi_ulong target_addr
,
1227 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1228 sa_family_t sa_family
;
1229 struct target_sockaddr
*target_saddr
;
1231 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1233 return -TARGET_EFAULT
;
1235 sa_family
= tswap16(target_saddr
->sa_family
);
1237 /* Oops. The caller might send a incomplete sun_path; sun_path
1238 * must be terminated by \0 (see the manual page), but
1239 * unfortunately it is quite common to specify sockaddr_un
1240 * length as "strlen(x->sun_path)" while it should be
1241 * "strlen(...) + 1". We'll fix that here if needed.
1242 * Linux kernel has a similar feature.
1245 if (sa_family
== AF_UNIX
) {
1246 if (len
< unix_maxlen
&& len
> 0) {
1247 char *cp
= (char*)target_saddr
;
1249 if ( cp
[len
-1] && !cp
[len
] )
1252 if (len
> unix_maxlen
)
1256 memcpy(addr
, target_saddr
, len
);
1257 addr
->sa_family
= sa_family
;
1258 unlock_user(target_saddr
, target_addr
, 0);
1263 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1264 struct sockaddr
*addr
,
1267 struct target_sockaddr
*target_saddr
;
1269 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1271 return -TARGET_EFAULT
;
1272 memcpy(target_saddr
, addr
, len
);
1273 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1274 unlock_user(target_saddr
, target_addr
, len
);
1279 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1280 struct target_msghdr
*target_msgh
)
1282 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1283 abi_long msg_controllen
;
1284 abi_ulong target_cmsg_addr
;
1285 struct target_cmsghdr
*target_cmsg
;
1286 socklen_t space
= 0;
1288 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1289 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1291 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1292 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1294 return -TARGET_EFAULT
;
1296 while (cmsg
&& target_cmsg
) {
1297 void *data
= CMSG_DATA(cmsg
);
1298 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1300 int len
= tswapal(target_cmsg
->cmsg_len
)
1301 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1303 space
+= CMSG_SPACE(len
);
1304 if (space
> msgh
->msg_controllen
) {
1305 space
-= CMSG_SPACE(len
);
1306 gemu_log("Host cmsg overflow\n");
1310 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1311 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1312 cmsg
->cmsg_len
= CMSG_LEN(len
);
1314 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1315 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1316 memcpy(data
, target_data
, len
);
1318 int *fd
= (int *)data
;
1319 int *target_fd
= (int *)target_data
;
1320 int i
, numfds
= len
/ sizeof(int);
1322 for (i
= 0; i
< numfds
; i
++)
1323 fd
[i
] = tswap32(target_fd
[i
]);
1326 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1327 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1329 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1331 msgh
->msg_controllen
= space
;
1335 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1336 struct msghdr
*msgh
)
1338 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1339 abi_long msg_controllen
;
1340 abi_ulong target_cmsg_addr
;
1341 struct target_cmsghdr
*target_cmsg
;
1342 socklen_t space
= 0;
1344 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1345 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1347 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1348 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1350 return -TARGET_EFAULT
;
1352 while (cmsg
&& target_cmsg
) {
1353 void *data
= CMSG_DATA(cmsg
);
1354 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1356 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1358 space
+= TARGET_CMSG_SPACE(len
);
1359 if (space
> msg_controllen
) {
1360 space
-= TARGET_CMSG_SPACE(len
);
1361 gemu_log("Target cmsg overflow\n");
1365 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1366 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1367 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1369 if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1370 (cmsg
->cmsg_type
== SCM_RIGHTS
)) {
1371 int *fd
= (int *)data
;
1372 int *target_fd
= (int *)target_data
;
1373 int i
, numfds
= len
/ sizeof(int);
1375 for (i
= 0; i
< numfds
; i
++)
1376 target_fd
[i
] = tswap32(fd
[i
]);
1377 } else if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1378 (cmsg
->cmsg_type
== SO_TIMESTAMP
) &&
1379 (len
== sizeof(struct timeval
))) {
1380 /* copy struct timeval to target */
1381 struct timeval
*tv
= (struct timeval
*)data
;
1382 struct target_timeval
*target_tv
=
1383 (struct target_timeval
*)target_data
;
1385 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1386 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1388 gemu_log("Unsupported ancillary data: %d/%d\n",
1389 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1390 memcpy(target_data
, data
, len
);
1393 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1394 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1396 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1398 target_msgh
->msg_controllen
= tswapal(space
);
1402 /* do_setsockopt() Must return target values and target errnos. */
1403 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1404 abi_ulong optval_addr
, socklen_t optlen
)
1408 struct ip_mreqn
*ip_mreq
;
1409 struct ip_mreq_source
*ip_mreq_source
;
1413 /* TCP options all take an 'int' value. */
1414 if (optlen
< sizeof(uint32_t))
1415 return -TARGET_EINVAL
;
1417 if (get_user_u32(val
, optval_addr
))
1418 return -TARGET_EFAULT
;
1419 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1426 case IP_ROUTER_ALERT
:
1430 case IP_MTU_DISCOVER
:
1436 case IP_MULTICAST_TTL
:
1437 case IP_MULTICAST_LOOP
:
1439 if (optlen
>= sizeof(uint32_t)) {
1440 if (get_user_u32(val
, optval_addr
))
1441 return -TARGET_EFAULT
;
1442 } else if (optlen
>= 1) {
1443 if (get_user_u8(val
, optval_addr
))
1444 return -TARGET_EFAULT
;
1446 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1448 case IP_ADD_MEMBERSHIP
:
1449 case IP_DROP_MEMBERSHIP
:
1450 if (optlen
< sizeof (struct target_ip_mreq
) ||
1451 optlen
> sizeof (struct target_ip_mreqn
))
1452 return -TARGET_EINVAL
;
1454 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1455 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1456 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1459 case IP_BLOCK_SOURCE
:
1460 case IP_UNBLOCK_SOURCE
:
1461 case IP_ADD_SOURCE_MEMBERSHIP
:
1462 case IP_DROP_SOURCE_MEMBERSHIP
:
1463 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1464 return -TARGET_EINVAL
;
1466 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1467 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1468 unlock_user (ip_mreq_source
, optval_addr
, 0);
1478 /* struct icmp_filter takes an u32 value */
1479 if (optlen
< sizeof(uint32_t)) {
1480 return -TARGET_EINVAL
;
1483 if (get_user_u32(val
, optval_addr
)) {
1484 return -TARGET_EFAULT
;
1486 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1487 &val
, sizeof(val
)));
1494 case TARGET_SOL_SOCKET
:
1496 case TARGET_SO_RCVTIMEO
:
1500 optname
= SO_RCVTIMEO
;
1503 if (optlen
!= sizeof(struct target_timeval
)) {
1504 return -TARGET_EINVAL
;
1507 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1508 return -TARGET_EFAULT
;
1511 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1515 case TARGET_SO_SNDTIMEO
:
1516 optname
= SO_SNDTIMEO
;
1518 /* Options with 'int' argument. */
1519 case TARGET_SO_DEBUG
:
1522 case TARGET_SO_REUSEADDR
:
1523 optname
= SO_REUSEADDR
;
1525 case TARGET_SO_TYPE
:
1528 case TARGET_SO_ERROR
:
1531 case TARGET_SO_DONTROUTE
:
1532 optname
= SO_DONTROUTE
;
1534 case TARGET_SO_BROADCAST
:
1535 optname
= SO_BROADCAST
;
1537 case TARGET_SO_SNDBUF
:
1538 optname
= SO_SNDBUF
;
1540 case TARGET_SO_RCVBUF
:
1541 optname
= SO_RCVBUF
;
1543 case TARGET_SO_KEEPALIVE
:
1544 optname
= SO_KEEPALIVE
;
1546 case TARGET_SO_OOBINLINE
:
1547 optname
= SO_OOBINLINE
;
1549 case TARGET_SO_NO_CHECK
:
1550 optname
= SO_NO_CHECK
;
1552 case TARGET_SO_PRIORITY
:
1553 optname
= SO_PRIORITY
;
1556 case TARGET_SO_BSDCOMPAT
:
1557 optname
= SO_BSDCOMPAT
;
1560 case TARGET_SO_PASSCRED
:
1561 optname
= SO_PASSCRED
;
1563 case TARGET_SO_TIMESTAMP
:
1564 optname
= SO_TIMESTAMP
;
1566 case TARGET_SO_RCVLOWAT
:
1567 optname
= SO_RCVLOWAT
;
1573 if (optlen
< sizeof(uint32_t))
1574 return -TARGET_EINVAL
;
1576 if (get_user_u32(val
, optval_addr
))
1577 return -TARGET_EFAULT
;
1578 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1582 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1583 ret
= -TARGET_ENOPROTOOPT
;
1588 /* do_getsockopt() Must return target values and target errnos. */
1589 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1590 abi_ulong optval_addr
, abi_ulong optlen
)
1597 case TARGET_SOL_SOCKET
:
1600 /* These don't just return a single integer */
1601 case TARGET_SO_LINGER
:
1602 case TARGET_SO_RCVTIMEO
:
1603 case TARGET_SO_SNDTIMEO
:
1604 case TARGET_SO_PEERNAME
:
1606 case TARGET_SO_PEERCRED
: {
1609 struct target_ucred
*tcr
;
1611 if (get_user_u32(len
, optlen
)) {
1612 return -TARGET_EFAULT
;
1615 return -TARGET_EINVAL
;
1619 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1627 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1628 return -TARGET_EFAULT
;
1630 __put_user(cr
.pid
, &tcr
->pid
);
1631 __put_user(cr
.uid
, &tcr
->uid
);
1632 __put_user(cr
.gid
, &tcr
->gid
);
1633 unlock_user_struct(tcr
, optval_addr
, 1);
1634 if (put_user_u32(len
, optlen
)) {
1635 return -TARGET_EFAULT
;
1639 /* Options with 'int' argument. */
1640 case TARGET_SO_DEBUG
:
1643 case TARGET_SO_REUSEADDR
:
1644 optname
= SO_REUSEADDR
;
1646 case TARGET_SO_TYPE
:
1649 case TARGET_SO_ERROR
:
1652 case TARGET_SO_DONTROUTE
:
1653 optname
= SO_DONTROUTE
;
1655 case TARGET_SO_BROADCAST
:
1656 optname
= SO_BROADCAST
;
1658 case TARGET_SO_SNDBUF
:
1659 optname
= SO_SNDBUF
;
1661 case TARGET_SO_RCVBUF
:
1662 optname
= SO_RCVBUF
;
1664 case TARGET_SO_KEEPALIVE
:
1665 optname
= SO_KEEPALIVE
;
1667 case TARGET_SO_OOBINLINE
:
1668 optname
= SO_OOBINLINE
;
1670 case TARGET_SO_NO_CHECK
:
1671 optname
= SO_NO_CHECK
;
1673 case TARGET_SO_PRIORITY
:
1674 optname
= SO_PRIORITY
;
1677 case TARGET_SO_BSDCOMPAT
:
1678 optname
= SO_BSDCOMPAT
;
1681 case TARGET_SO_PASSCRED
:
1682 optname
= SO_PASSCRED
;
1684 case TARGET_SO_TIMESTAMP
:
1685 optname
= SO_TIMESTAMP
;
1687 case TARGET_SO_RCVLOWAT
:
1688 optname
= SO_RCVLOWAT
;
1695 /* TCP options all take an 'int' value. */
1697 if (get_user_u32(len
, optlen
))
1698 return -TARGET_EFAULT
;
1700 return -TARGET_EINVAL
;
1702 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1708 if (put_user_u32(val
, optval_addr
))
1709 return -TARGET_EFAULT
;
1711 if (put_user_u8(val
, optval_addr
))
1712 return -TARGET_EFAULT
;
1714 if (put_user_u32(len
, optlen
))
1715 return -TARGET_EFAULT
;
1722 case IP_ROUTER_ALERT
:
1726 case IP_MTU_DISCOVER
:
1732 case IP_MULTICAST_TTL
:
1733 case IP_MULTICAST_LOOP
:
1734 if (get_user_u32(len
, optlen
))
1735 return -TARGET_EFAULT
;
1737 return -TARGET_EINVAL
;
1739 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1742 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1744 if (put_user_u32(len
, optlen
)
1745 || put_user_u8(val
, optval_addr
))
1746 return -TARGET_EFAULT
;
1748 if (len
> sizeof(int))
1750 if (put_user_u32(len
, optlen
)
1751 || put_user_u32(val
, optval_addr
))
1752 return -TARGET_EFAULT
;
1756 ret
= -TARGET_ENOPROTOOPT
;
1762 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1764 ret
= -TARGET_EOPNOTSUPP
;
1770 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1771 int count
, int copy
)
1773 struct target_iovec
*target_vec
;
1775 abi_ulong total_len
, max_len
;
1782 if (count
< 0 || count
> IOV_MAX
) {
1787 vec
= calloc(count
, sizeof(struct iovec
));
1793 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1794 count
* sizeof(struct target_iovec
), 1);
1795 if (target_vec
== NULL
) {
1800 /* ??? If host page size > target page size, this will result in a
1801 value larger than what we can actually support. */
1802 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1805 for (i
= 0; i
< count
; i
++) {
1806 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1807 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1812 } else if (len
== 0) {
1813 /* Zero length pointer is ignored. */
1814 vec
[i
].iov_base
= 0;
1816 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1817 if (!vec
[i
].iov_base
) {
1821 if (len
> max_len
- total_len
) {
1822 len
= max_len
- total_len
;
1825 vec
[i
].iov_len
= len
;
1829 unlock_user(target_vec
, target_addr
, 0);
1835 unlock_user(target_vec
, target_addr
, 0);
1839 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1840 int count
, int copy
)
1842 struct target_iovec
*target_vec
;
1845 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1846 count
* sizeof(struct target_iovec
), 1);
1848 for (i
= 0; i
< count
; i
++) {
1849 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1850 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1854 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1856 unlock_user(target_vec
, target_addr
, 0);
1862 /* do_socket() Must return target values and target errnos. */
1863 static abi_long
do_socket(int domain
, int type
, int protocol
)
1865 #if defined(TARGET_MIPS)
1867 case TARGET_SOCK_DGRAM
:
1870 case TARGET_SOCK_STREAM
:
1873 case TARGET_SOCK_RAW
:
1876 case TARGET_SOCK_RDM
:
1879 case TARGET_SOCK_SEQPACKET
:
1880 type
= SOCK_SEQPACKET
;
1882 case TARGET_SOCK_PACKET
:
1887 if (domain
== PF_NETLINK
)
1888 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1889 return get_errno(socket(domain
, type
, protocol
));
1892 /* do_bind() Must return target values and target errnos. */
1893 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1899 if ((int)addrlen
< 0) {
1900 return -TARGET_EINVAL
;
1903 addr
= alloca(addrlen
+1);
1905 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1909 return get_errno(bind(sockfd
, addr
, addrlen
));
1912 /* do_connect() Must return target values and target errnos. */
1913 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1919 if ((int)addrlen
< 0) {
1920 return -TARGET_EINVAL
;
1923 addr
= alloca(addrlen
);
1925 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1929 return get_errno(connect(sockfd
, addr
, addrlen
));
1932 /* do_sendrecvmsg() Must return target values and target errnos. */
1933 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1934 int flags
, int send
)
1937 struct target_msghdr
*msgp
;
1941 abi_ulong target_vec
;
1944 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1948 return -TARGET_EFAULT
;
1949 if (msgp
->msg_name
) {
1950 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1951 msg
.msg_name
= alloca(msg
.msg_namelen
);
1952 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1958 msg
.msg_name
= NULL
;
1959 msg
.msg_namelen
= 0;
1961 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1962 msg
.msg_control
= alloca(msg
.msg_controllen
);
1963 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1965 count
= tswapal(msgp
->msg_iovlen
);
1966 target_vec
= tswapal(msgp
->msg_iov
);
1967 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1968 target_vec
, count
, send
);
1970 ret
= -host_to_target_errno(errno
);
1973 msg
.msg_iovlen
= count
;
1977 ret
= target_to_host_cmsg(&msg
, msgp
);
1979 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1981 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1982 if (!is_error(ret
)) {
1984 ret
= host_to_target_cmsg(msgp
, &msg
);
1985 if (!is_error(ret
)) {
1986 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1987 if (msg
.msg_name
!= NULL
) {
1988 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1989 msg
.msg_name
, msg
.msg_namelen
);
2001 unlock_iovec(vec
, target_vec
, count
, !send
);
2003 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2007 /* do_accept() Must return target values and target errnos. */
2008 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
2009 abi_ulong target_addrlen_addr
)
2015 if (target_addr
== 0)
2016 return get_errno(accept(fd
, NULL
, NULL
));
2018 /* linux returns EINVAL if addrlen pointer is invalid */
2019 if (get_user_u32(addrlen
, target_addrlen_addr
))
2020 return -TARGET_EINVAL
;
2022 if ((int)addrlen
< 0) {
2023 return -TARGET_EINVAL
;
2026 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2027 return -TARGET_EINVAL
;
2029 addr
= alloca(addrlen
);
2031 ret
= get_errno(accept(fd
, addr
, &addrlen
));
2032 if (!is_error(ret
)) {
2033 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2034 if (put_user_u32(addrlen
, target_addrlen_addr
))
2035 ret
= -TARGET_EFAULT
;
2040 /* do_getpeername() Must return target values and target errnos. */
2041 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2042 abi_ulong target_addrlen_addr
)
2048 if (get_user_u32(addrlen
, target_addrlen_addr
))
2049 return -TARGET_EFAULT
;
2051 if ((int)addrlen
< 0) {
2052 return -TARGET_EINVAL
;
2055 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2056 return -TARGET_EFAULT
;
2058 addr
= alloca(addrlen
);
2060 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2061 if (!is_error(ret
)) {
2062 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2063 if (put_user_u32(addrlen
, target_addrlen_addr
))
2064 ret
= -TARGET_EFAULT
;
2069 /* do_getsockname() Must return target values and target errnos. */
2070 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2071 abi_ulong target_addrlen_addr
)
2077 if (get_user_u32(addrlen
, target_addrlen_addr
))
2078 return -TARGET_EFAULT
;
2080 if ((int)addrlen
< 0) {
2081 return -TARGET_EINVAL
;
2084 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2085 return -TARGET_EFAULT
;
2087 addr
= alloca(addrlen
);
2089 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2090 if (!is_error(ret
)) {
2091 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2092 if (put_user_u32(addrlen
, target_addrlen_addr
))
2093 ret
= -TARGET_EFAULT
;
2098 /* do_socketpair() Must return target values and target errnos. */
2099 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2100 abi_ulong target_tab_addr
)
2105 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2106 if (!is_error(ret
)) {
2107 if (put_user_s32(tab
[0], target_tab_addr
)
2108 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2109 ret
= -TARGET_EFAULT
;
2114 /* do_sendto() Must return target values and target errnos. */
2115 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2116 abi_ulong target_addr
, socklen_t addrlen
)
2122 if ((int)addrlen
< 0) {
2123 return -TARGET_EINVAL
;
2126 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2128 return -TARGET_EFAULT
;
2130 addr
= alloca(addrlen
);
2131 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2133 unlock_user(host_msg
, msg
, 0);
2136 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2138 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2140 unlock_user(host_msg
, msg
, 0);
2144 /* do_recvfrom() Must return target values and target errnos. */
2145 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2146 abi_ulong target_addr
,
2147 abi_ulong target_addrlen
)
2154 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2156 return -TARGET_EFAULT
;
2158 if (get_user_u32(addrlen
, target_addrlen
)) {
2159 ret
= -TARGET_EFAULT
;
2162 if ((int)addrlen
< 0) {
2163 ret
= -TARGET_EINVAL
;
2166 addr
= alloca(addrlen
);
2167 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2169 addr
= NULL
; /* To keep compiler quiet. */
2170 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2172 if (!is_error(ret
)) {
2174 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2175 if (put_user_u32(addrlen
, target_addrlen
)) {
2176 ret
= -TARGET_EFAULT
;
2180 unlock_user(host_msg
, msg
, len
);
2183 unlock_user(host_msg
, msg
, 0);
2188 #ifdef TARGET_NR_socketcall
2189 /* do_socketcall() Must return target values and target errnos. */
2190 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2193 const int n
= sizeof(abi_ulong
);
2198 abi_ulong domain
, type
, protocol
;
2200 if (get_user_ual(domain
, vptr
)
2201 || get_user_ual(type
, vptr
+ n
)
2202 || get_user_ual(protocol
, vptr
+ 2 * n
))
2203 return -TARGET_EFAULT
;
2205 ret
= do_socket(domain
, type
, protocol
);
2211 abi_ulong target_addr
;
2214 if (get_user_ual(sockfd
, vptr
)
2215 || get_user_ual(target_addr
, vptr
+ n
)
2216 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2217 return -TARGET_EFAULT
;
2219 ret
= do_bind(sockfd
, target_addr
, addrlen
);
2222 case SOCKOP_connect
:
2225 abi_ulong target_addr
;
2228 if (get_user_ual(sockfd
, vptr
)
2229 || get_user_ual(target_addr
, vptr
+ n
)
2230 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2231 return -TARGET_EFAULT
;
2233 ret
= do_connect(sockfd
, target_addr
, addrlen
);
2238 abi_ulong sockfd
, backlog
;
2240 if (get_user_ual(sockfd
, vptr
)
2241 || get_user_ual(backlog
, vptr
+ n
))
2242 return -TARGET_EFAULT
;
2244 ret
= get_errno(listen(sockfd
, backlog
));
2250 abi_ulong target_addr
, target_addrlen
;
2252 if (get_user_ual(sockfd
, vptr
)
2253 || get_user_ual(target_addr
, vptr
+ n
)
2254 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2255 return -TARGET_EFAULT
;
2257 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
2260 case SOCKOP_getsockname
:
2263 abi_ulong target_addr
, target_addrlen
;
2265 if (get_user_ual(sockfd
, vptr
)
2266 || get_user_ual(target_addr
, vptr
+ n
)
2267 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2268 return -TARGET_EFAULT
;
2270 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
2273 case SOCKOP_getpeername
:
2276 abi_ulong target_addr
, target_addrlen
;
2278 if (get_user_ual(sockfd
, vptr
)
2279 || get_user_ual(target_addr
, vptr
+ n
)
2280 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2281 return -TARGET_EFAULT
;
2283 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
2286 case SOCKOP_socketpair
:
2288 abi_ulong domain
, type
, protocol
;
2291 if (get_user_ual(domain
, vptr
)
2292 || get_user_ual(type
, vptr
+ n
)
2293 || get_user_ual(protocol
, vptr
+ 2 * n
)
2294 || get_user_ual(tab
, vptr
+ 3 * n
))
2295 return -TARGET_EFAULT
;
2297 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2307 if (get_user_ual(sockfd
, vptr
)
2308 || get_user_ual(msg
, vptr
+ n
)
2309 || get_user_ual(len
, vptr
+ 2 * n
)
2310 || get_user_ual(flags
, vptr
+ 3 * n
))
2311 return -TARGET_EFAULT
;
2313 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2323 if (get_user_ual(sockfd
, vptr
)
2324 || get_user_ual(msg
, vptr
+ n
)
2325 || get_user_ual(len
, vptr
+ 2 * n
)
2326 || get_user_ual(flags
, vptr
+ 3 * n
))
2327 return -TARGET_EFAULT
;
2329 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2341 if (get_user_ual(sockfd
, vptr
)
2342 || get_user_ual(msg
, vptr
+ n
)
2343 || get_user_ual(len
, vptr
+ 2 * n
)
2344 || get_user_ual(flags
, vptr
+ 3 * n
)
2345 || get_user_ual(addr
, vptr
+ 4 * n
)
2346 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2347 return -TARGET_EFAULT
;
2349 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2352 case SOCKOP_recvfrom
:
2361 if (get_user_ual(sockfd
, vptr
)
2362 || get_user_ual(msg
, vptr
+ n
)
2363 || get_user_ual(len
, vptr
+ 2 * n
)
2364 || get_user_ual(flags
, vptr
+ 3 * n
)
2365 || get_user_ual(addr
, vptr
+ 4 * n
)
2366 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2367 return -TARGET_EFAULT
;
2369 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2372 case SOCKOP_shutdown
:
2374 abi_ulong sockfd
, how
;
2376 if (get_user_ual(sockfd
, vptr
)
2377 || get_user_ual(how
, vptr
+ n
))
2378 return -TARGET_EFAULT
;
2380 ret
= get_errno(shutdown(sockfd
, how
));
2383 case SOCKOP_sendmsg
:
2384 case SOCKOP_recvmsg
:
2387 abi_ulong target_msg
;
2390 if (get_user_ual(fd
, vptr
)
2391 || get_user_ual(target_msg
, vptr
+ n
)
2392 || get_user_ual(flags
, vptr
+ 2 * n
))
2393 return -TARGET_EFAULT
;
2395 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2396 (num
== SOCKOP_sendmsg
));
2399 case SOCKOP_setsockopt
:
2407 if (get_user_ual(sockfd
, vptr
)
2408 || get_user_ual(level
, vptr
+ n
)
2409 || get_user_ual(optname
, vptr
+ 2 * n
)
2410 || get_user_ual(optval
, vptr
+ 3 * n
)
2411 || get_user_ual(optlen
, vptr
+ 4 * n
))
2412 return -TARGET_EFAULT
;
2414 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2417 case SOCKOP_getsockopt
:
2425 if (get_user_ual(sockfd
, vptr
)
2426 || get_user_ual(level
, vptr
+ n
)
2427 || get_user_ual(optname
, vptr
+ 2 * n
)
2428 || get_user_ual(optval
, vptr
+ 3 * n
)
2429 || get_user_ual(optlen
, vptr
+ 4 * n
))
2430 return -TARGET_EFAULT
;
2432 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2436 gemu_log("Unsupported socketcall: %d\n", num
);
2437 ret
= -TARGET_ENOSYS
;
2444 #define N_SHM_REGIONS 32
2446 static struct shm_region
{
2449 } shm_regions
[N_SHM_REGIONS
];
2451 struct target_ipc_perm
2458 unsigned short int mode
;
2459 unsigned short int __pad1
;
2460 unsigned short int __seq
;
2461 unsigned short int __pad2
;
2462 abi_ulong __unused1
;
2463 abi_ulong __unused2
;
2466 struct target_semid_ds
2468 struct target_ipc_perm sem_perm
;
2469 abi_ulong sem_otime
;
2470 abi_ulong __unused1
;
2471 abi_ulong sem_ctime
;
2472 abi_ulong __unused2
;
2473 abi_ulong sem_nsems
;
2474 abi_ulong __unused3
;
2475 abi_ulong __unused4
;
2478 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2479 abi_ulong target_addr
)
2481 struct target_ipc_perm
*target_ip
;
2482 struct target_semid_ds
*target_sd
;
2484 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2485 return -TARGET_EFAULT
;
2486 target_ip
= &(target_sd
->sem_perm
);
2487 host_ip
->__key
= tswapal(target_ip
->__key
);
2488 host_ip
->uid
= tswapal(target_ip
->uid
);
2489 host_ip
->gid
= tswapal(target_ip
->gid
);
2490 host_ip
->cuid
= tswapal(target_ip
->cuid
);
2491 host_ip
->cgid
= tswapal(target_ip
->cgid
);
2492 host_ip
->mode
= tswap16(target_ip
->mode
);
2493 unlock_user_struct(target_sd
, target_addr
, 0);
2497 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2498 struct ipc_perm
*host_ip
)
2500 struct target_ipc_perm
*target_ip
;
2501 struct target_semid_ds
*target_sd
;
2503 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2504 return -TARGET_EFAULT
;
2505 target_ip
= &(target_sd
->sem_perm
);
2506 target_ip
->__key
= tswapal(host_ip
->__key
);
2507 target_ip
->uid
= tswapal(host_ip
->uid
);
2508 target_ip
->gid
= tswapal(host_ip
->gid
);
2509 target_ip
->cuid
= tswapal(host_ip
->cuid
);
2510 target_ip
->cgid
= tswapal(host_ip
->cgid
);
2511 target_ip
->mode
= tswap16(host_ip
->mode
);
2512 unlock_user_struct(target_sd
, target_addr
, 1);
2516 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2517 abi_ulong target_addr
)
2519 struct target_semid_ds
*target_sd
;
2521 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2522 return -TARGET_EFAULT
;
2523 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2524 return -TARGET_EFAULT
;
2525 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2526 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2527 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2528 unlock_user_struct(target_sd
, target_addr
, 0);
2532 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2533 struct semid_ds
*host_sd
)
2535 struct target_semid_ds
*target_sd
;
2537 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2538 return -TARGET_EFAULT
;
2539 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2540 return -TARGET_EFAULT
;
2541 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2542 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2543 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2544 unlock_user_struct(target_sd
, target_addr
, 1);
2548 struct target_seminfo
{
2561 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2562 struct seminfo
*host_seminfo
)
2564 struct target_seminfo
*target_seminfo
;
2565 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2566 return -TARGET_EFAULT
;
2567 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2568 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2569 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2570 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2571 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2572 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2573 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2574 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2575 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2576 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2577 unlock_user_struct(target_seminfo
, target_addr
, 1);
2583 struct semid_ds
*buf
;
2584 unsigned short *array
;
2585 struct seminfo
*__buf
;
2588 union target_semun
{
2595 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2596 abi_ulong target_addr
)
2599 unsigned short *array
;
2601 struct semid_ds semid_ds
;
2604 semun
.buf
= &semid_ds
;
2606 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2608 return get_errno(ret
);
2610 nsems
= semid_ds
.sem_nsems
;
2612 *host_array
= malloc(nsems
*sizeof(unsigned short));
2613 array
= lock_user(VERIFY_READ
, target_addr
,
2614 nsems
*sizeof(unsigned short), 1);
2616 return -TARGET_EFAULT
;
2618 for(i
=0; i
<nsems
; i
++) {
2619 __get_user((*host_array
)[i
], &array
[i
]);
2621 unlock_user(array
, target_addr
, 0);
2626 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2627 unsigned short **host_array
)
2630 unsigned short *array
;
2632 struct semid_ds semid_ds
;
2635 semun
.buf
= &semid_ds
;
2637 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2639 return get_errno(ret
);
2641 nsems
= semid_ds
.sem_nsems
;
2643 array
= lock_user(VERIFY_WRITE
, target_addr
,
2644 nsems
*sizeof(unsigned short), 0);
2646 return -TARGET_EFAULT
;
2648 for(i
=0; i
<nsems
; i
++) {
2649 __put_user((*host_array
)[i
], &array
[i
]);
2652 unlock_user(array
, target_addr
, 1);
2657 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2658 union target_semun target_su
)
2661 struct semid_ds dsarg
;
2662 unsigned short *array
= NULL
;
2663 struct seminfo seminfo
;
2664 abi_long ret
= -TARGET_EINVAL
;
2671 arg
.val
= tswap32(target_su
.val
);
2672 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2673 target_su
.val
= tswap32(arg
.val
);
2677 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2681 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2682 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2689 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2693 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2694 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2700 arg
.__buf
= &seminfo
;
2701 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2702 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2710 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2717 struct target_sembuf
{
2718 unsigned short sem_num
;
2723 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2724 abi_ulong target_addr
,
2727 struct target_sembuf
*target_sembuf
;
2730 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2731 nsops
*sizeof(struct target_sembuf
), 1);
2733 return -TARGET_EFAULT
;
2735 for(i
=0; i
<nsops
; i
++) {
2736 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2737 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2738 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2741 unlock_user(target_sembuf
, target_addr
, 0);
2746 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2748 struct sembuf sops
[nsops
];
2750 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2751 return -TARGET_EFAULT
;
2753 return semop(semid
, sops
, nsops
);
2756 struct target_msqid_ds
2758 struct target_ipc_perm msg_perm
;
2759 abi_ulong msg_stime
;
2760 #if TARGET_ABI_BITS == 32
2761 abi_ulong __unused1
;
2763 abi_ulong msg_rtime
;
2764 #if TARGET_ABI_BITS == 32
2765 abi_ulong __unused2
;
2767 abi_ulong msg_ctime
;
2768 #if TARGET_ABI_BITS == 32
2769 abi_ulong __unused3
;
2771 abi_ulong __msg_cbytes
;
2773 abi_ulong msg_qbytes
;
2774 abi_ulong msg_lspid
;
2775 abi_ulong msg_lrpid
;
2776 abi_ulong __unused4
;
2777 abi_ulong __unused5
;
2780 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2781 abi_ulong target_addr
)
2783 struct target_msqid_ds
*target_md
;
2785 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2786 return -TARGET_EFAULT
;
2787 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2788 return -TARGET_EFAULT
;
2789 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2790 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2791 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2792 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2793 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2794 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2795 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2796 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2797 unlock_user_struct(target_md
, target_addr
, 0);
2801 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2802 struct msqid_ds
*host_md
)
2804 struct target_msqid_ds
*target_md
;
2806 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2807 return -TARGET_EFAULT
;
2808 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2809 return -TARGET_EFAULT
;
2810 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2811 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2812 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2813 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2814 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2815 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2816 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2817 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2818 unlock_user_struct(target_md
, target_addr
, 1);
2822 struct target_msginfo
{
2830 unsigned short int msgseg
;
2833 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2834 struct msginfo
*host_msginfo
)
2836 struct target_msginfo
*target_msginfo
;
2837 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2838 return -TARGET_EFAULT
;
2839 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2840 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2841 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2842 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2843 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2844 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2845 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2846 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2847 unlock_user_struct(target_msginfo
, target_addr
, 1);
2851 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2853 struct msqid_ds dsarg
;
2854 struct msginfo msginfo
;
2855 abi_long ret
= -TARGET_EINVAL
;
2863 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2864 return -TARGET_EFAULT
;
2865 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2866 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2867 return -TARGET_EFAULT
;
2870 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2874 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2875 if (host_to_target_msginfo(ptr
, &msginfo
))
2876 return -TARGET_EFAULT
;
2883 struct target_msgbuf
{
2888 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2889 unsigned int msgsz
, int msgflg
)
2891 struct target_msgbuf
*target_mb
;
2892 struct msgbuf
*host_mb
;
2895 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2896 return -TARGET_EFAULT
;
2897 host_mb
= malloc(msgsz
+sizeof(long));
2898 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2899 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2900 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2902 unlock_user_struct(target_mb
, msgp
, 0);
2907 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2908 unsigned int msgsz
, abi_long msgtyp
,
2911 struct target_msgbuf
*target_mb
;
2913 struct msgbuf
*host_mb
;
2916 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2917 return -TARGET_EFAULT
;
2919 host_mb
= g_malloc(msgsz
+sizeof(long));
2920 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
2923 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2924 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2925 if (!target_mtext
) {
2926 ret
= -TARGET_EFAULT
;
2929 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2930 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2933 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2937 unlock_user_struct(target_mb
, msgp
, 1);
2942 struct target_shmid_ds
2944 struct target_ipc_perm shm_perm
;
2945 abi_ulong shm_segsz
;
2946 abi_ulong shm_atime
;
2947 #if TARGET_ABI_BITS == 32
2948 abi_ulong __unused1
;
2950 abi_ulong shm_dtime
;
2951 #if TARGET_ABI_BITS == 32
2952 abi_ulong __unused2
;
2954 abi_ulong shm_ctime
;
2955 #if TARGET_ABI_BITS == 32
2956 abi_ulong __unused3
;
2960 abi_ulong shm_nattch
;
2961 unsigned long int __unused4
;
2962 unsigned long int __unused5
;
2965 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2966 abi_ulong target_addr
)
2968 struct target_shmid_ds
*target_sd
;
2970 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2971 return -TARGET_EFAULT
;
2972 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2973 return -TARGET_EFAULT
;
2974 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2975 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2976 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2977 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2978 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2979 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2980 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2981 unlock_user_struct(target_sd
, target_addr
, 0);
2985 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2986 struct shmid_ds
*host_sd
)
2988 struct target_shmid_ds
*target_sd
;
2990 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2991 return -TARGET_EFAULT
;
2992 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2993 return -TARGET_EFAULT
;
2994 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2995 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2996 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2997 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2998 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2999 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3000 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3001 unlock_user_struct(target_sd
, target_addr
, 1);
3005 struct target_shminfo
{
3013 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3014 struct shminfo
*host_shminfo
)
3016 struct target_shminfo
*target_shminfo
;
3017 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3018 return -TARGET_EFAULT
;
3019 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3020 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3021 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3022 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3023 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3024 unlock_user_struct(target_shminfo
, target_addr
, 1);
3028 struct target_shm_info
{
3033 abi_ulong swap_attempts
;
3034 abi_ulong swap_successes
;
3037 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3038 struct shm_info
*host_shm_info
)
3040 struct target_shm_info
*target_shm_info
;
3041 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3042 return -TARGET_EFAULT
;
3043 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3044 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3045 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3046 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3047 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3048 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3049 unlock_user_struct(target_shm_info
, target_addr
, 1);
3053 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3055 struct shmid_ds dsarg
;
3056 struct shminfo shminfo
;
3057 struct shm_info shm_info
;
3058 abi_long ret
= -TARGET_EINVAL
;
3066 if (target_to_host_shmid_ds(&dsarg
, buf
))
3067 return -TARGET_EFAULT
;
3068 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3069 if (host_to_target_shmid_ds(buf
, &dsarg
))
3070 return -TARGET_EFAULT
;
3073 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3074 if (host_to_target_shminfo(buf
, &shminfo
))
3075 return -TARGET_EFAULT
;
3078 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3079 if (host_to_target_shm_info(buf
, &shm_info
))
3080 return -TARGET_EFAULT
;
3085 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3092 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
3096 struct shmid_ds shm_info
;
3099 /* find out the length of the shared memory segment */
3100 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3101 if (is_error(ret
)) {
3102 /* can't get length, bail out */
3109 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3111 abi_ulong mmap_start
;
3113 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3115 if (mmap_start
== -1) {
3117 host_raddr
= (void *)-1;
3119 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3122 if (host_raddr
== (void *)-1) {
3124 return get_errno((long)host_raddr
);
3126 raddr
=h2g((unsigned long)host_raddr
);
3128 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3129 PAGE_VALID
| PAGE_READ
|
3130 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3132 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3133 if (shm_regions
[i
].start
== 0) {
3134 shm_regions
[i
].start
= raddr
;
3135 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3145 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3149 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3150 if (shm_regions
[i
].start
== shmaddr
) {
3151 shm_regions
[i
].start
= 0;
3152 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3157 return get_errno(shmdt(g2h(shmaddr
)));
3160 #ifdef TARGET_NR_ipc
3161 /* ??? This only works with linear mappings. */
3162 /* do_ipc() must return target values and target errnos. */
3163 static abi_long
do_ipc(unsigned int call
, int first
,
3164 int second
, int third
,
3165 abi_long ptr
, abi_long fifth
)
3170 version
= call
>> 16;
3175 ret
= do_semop(first
, ptr
, second
);
3179 ret
= get_errno(semget(first
, second
, third
));
3183 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
3187 ret
= get_errno(msgget(first
, second
));
3191 ret
= do_msgsnd(first
, ptr
, second
, third
);
3195 ret
= do_msgctl(first
, second
, ptr
);
3202 struct target_ipc_kludge
{
3207 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3208 ret
= -TARGET_EFAULT
;
3212 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
3214 unlock_user_struct(tmp
, ptr
, 0);
3218 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3227 raddr
= do_shmat(first
, ptr
, second
);
3228 if (is_error(raddr
))
3229 return get_errno(raddr
);
3230 if (put_user_ual(raddr
, third
))
3231 return -TARGET_EFAULT
;
3235 ret
= -TARGET_EINVAL
;
3240 ret
= do_shmdt(ptr
);
3244 /* IPC_* flag values are the same on all linux platforms */
3245 ret
= get_errno(shmget(first
, second
, third
));
3248 /* IPC_* and SHM_* command values are the same on all linux platforms */
3250 ret
= do_shmctl(first
, second
, third
);
3253 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3254 ret
= -TARGET_ENOSYS
;
3261 /* kernel structure types definitions */
3263 #define STRUCT(name, ...) STRUCT_ ## name,
3264 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3266 #include "syscall_types.h"
3269 #undef STRUCT_SPECIAL
3271 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3272 #define STRUCT_SPECIAL(name)
3273 #include "syscall_types.h"
3275 #undef STRUCT_SPECIAL
3277 typedef struct IOCTLEntry IOCTLEntry
;
3279 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3280 int fd
, abi_long cmd
, abi_long arg
);
3283 unsigned int target_cmd
;
3284 unsigned int host_cmd
;
3287 do_ioctl_fn
*do_ioctl
;
3288 const argtype arg_type
[5];
3291 #define IOC_R 0x0001
3292 #define IOC_W 0x0002
3293 #define IOC_RW (IOC_R | IOC_W)
3295 #define MAX_STRUCT_SIZE 4096
3297 #ifdef CONFIG_FIEMAP
3298 /* So fiemap access checks don't overflow on 32 bit systems.
3299 * This is very slightly smaller than the limit imposed by
3300 * the underlying kernel.
3302 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3303 / sizeof(struct fiemap_extent))
3305 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3306 int fd
, abi_long cmd
, abi_long arg
)
3308 /* The parameter for this ioctl is a struct fiemap followed
3309 * by an array of struct fiemap_extent whose size is set
3310 * in fiemap->fm_extent_count. The array is filled in by the
3313 int target_size_in
, target_size_out
;
3315 const argtype
*arg_type
= ie
->arg_type
;
3316 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3319 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3323 assert(arg_type
[0] == TYPE_PTR
);
3324 assert(ie
->access
== IOC_RW
);
3326 target_size_in
= thunk_type_size(arg_type
, 0);
3327 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3329 return -TARGET_EFAULT
;
3331 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3332 unlock_user(argptr
, arg
, 0);
3333 fm
= (struct fiemap
*)buf_temp
;
3334 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3335 return -TARGET_EINVAL
;
3338 outbufsz
= sizeof (*fm
) +
3339 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3341 if (outbufsz
> MAX_STRUCT_SIZE
) {
3342 /* We can't fit all the extents into the fixed size buffer.
3343 * Allocate one that is large enough and use it instead.
3345 fm
= malloc(outbufsz
);
3347 return -TARGET_ENOMEM
;
3349 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3352 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3353 if (!is_error(ret
)) {
3354 target_size_out
= target_size_in
;
3355 /* An extent_count of 0 means we were only counting the extents
3356 * so there are no structs to copy
3358 if (fm
->fm_extent_count
!= 0) {
3359 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3361 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3363 ret
= -TARGET_EFAULT
;
3365 /* Convert the struct fiemap */
3366 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3367 if (fm
->fm_extent_count
!= 0) {
3368 p
= argptr
+ target_size_in
;
3369 /* ...and then all the struct fiemap_extents */
3370 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3371 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3376 unlock_user(argptr
, arg
, target_size_out
);
3386 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3387 int fd
, abi_long cmd
, abi_long arg
)
3389 const argtype
*arg_type
= ie
->arg_type
;
3393 struct ifconf
*host_ifconf
;
3395 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3396 int target_ifreq_size
;
3401 abi_long target_ifc_buf
;
3405 assert(arg_type
[0] == TYPE_PTR
);
3406 assert(ie
->access
== IOC_RW
);
3409 target_size
= thunk_type_size(arg_type
, 0);
3411 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3413 return -TARGET_EFAULT
;
3414 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3415 unlock_user(argptr
, arg
, 0);
3417 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3418 target_ifc_len
= host_ifconf
->ifc_len
;
3419 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3421 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3422 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3423 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3425 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3426 if (outbufsz
> MAX_STRUCT_SIZE
) {
3427 /* We can't fit all the extents into the fixed size buffer.
3428 * Allocate one that is large enough and use it instead.
3430 host_ifconf
= malloc(outbufsz
);
3432 return -TARGET_ENOMEM
;
3434 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3437 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3439 host_ifconf
->ifc_len
= host_ifc_len
;
3440 host_ifconf
->ifc_buf
= host_ifc_buf
;
3442 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3443 if (!is_error(ret
)) {
3444 /* convert host ifc_len to target ifc_len */
3446 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3447 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3448 host_ifconf
->ifc_len
= target_ifc_len
;
3450 /* restore target ifc_buf */
3452 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3454 /* copy struct ifconf to target user */
3456 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3458 return -TARGET_EFAULT
;
3459 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3460 unlock_user(argptr
, arg
, target_size
);
3462 /* copy ifreq[] to target user */
3464 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3465 for (i
= 0; i
< nb_ifreq
; i
++) {
3466 thunk_convert(argptr
+ i
* target_ifreq_size
,
3467 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3468 ifreq_arg_type
, THUNK_TARGET
);
3470 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3480 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3481 abi_long cmd
, abi_long arg
)
3484 struct dm_ioctl
*host_dm
;
3485 abi_long guest_data
;
3486 uint32_t guest_data_size
;
3488 const argtype
*arg_type
= ie
->arg_type
;
3490 void *big_buf
= NULL
;
3494 target_size
= thunk_type_size(arg_type
, 0);
3495 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3497 ret
= -TARGET_EFAULT
;
3500 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3501 unlock_user(argptr
, arg
, 0);
3503 /* buf_temp is too small, so fetch things into a bigger buffer */
3504 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3505 memcpy(big_buf
, buf_temp
, target_size
);
3509 guest_data
= arg
+ host_dm
->data_start
;
3510 if ((guest_data
- arg
) < 0) {
3514 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3515 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3517 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3518 switch (ie
->host_cmd
) {
3520 case DM_LIST_DEVICES
:
3523 case DM_DEV_SUSPEND
:
3526 case DM_TABLE_STATUS
:
3527 case DM_TABLE_CLEAR
:
3529 case DM_LIST_VERSIONS
:
3533 case DM_DEV_SET_GEOMETRY
:
3534 /* data contains only strings */
3535 memcpy(host_data
, argptr
, guest_data_size
);
3538 memcpy(host_data
, argptr
, guest_data_size
);
3539 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3543 void *gspec
= argptr
;
3544 void *cur_data
= host_data
;
3545 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3546 int spec_size
= thunk_type_size(arg_type
, 0);
3549 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3550 struct dm_target_spec
*spec
= cur_data
;
3554 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3555 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3557 spec
->next
= sizeof(*spec
) + slen
;
3558 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3560 cur_data
+= spec
->next
;
3565 ret
= -TARGET_EINVAL
;
3568 unlock_user(argptr
, guest_data
, 0);
3570 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3571 if (!is_error(ret
)) {
3572 guest_data
= arg
+ host_dm
->data_start
;
3573 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3574 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3575 switch (ie
->host_cmd
) {
3580 case DM_DEV_SUSPEND
:
3583 case DM_TABLE_CLEAR
:
3585 case DM_DEV_SET_GEOMETRY
:
3586 /* no return data */
3588 case DM_LIST_DEVICES
:
3590 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3591 uint32_t remaining_data
= guest_data_size
;
3592 void *cur_data
= argptr
;
3593 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3594 int nl_size
= 12; /* can't use thunk_size due to alignment */
3597 uint32_t next
= nl
->next
;
3599 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3601 if (remaining_data
< nl
->next
) {
3602 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3605 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3606 strcpy(cur_data
+ nl_size
, nl
->name
);
3607 cur_data
+= nl
->next
;
3608 remaining_data
-= nl
->next
;
3612 nl
= (void*)nl
+ next
;
3617 case DM_TABLE_STATUS
:
3619 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3620 void *cur_data
= argptr
;
3621 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3622 int spec_size
= thunk_type_size(arg_type
, 0);
3625 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3626 uint32_t next
= spec
->next
;
3627 int slen
= strlen((char*)&spec
[1]) + 1;
3628 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3629 if (guest_data_size
< spec
->next
) {
3630 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3633 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3634 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3635 cur_data
= argptr
+ spec
->next
;
3636 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3642 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3643 int count
= *(uint32_t*)hdata
;
3644 uint64_t *hdev
= hdata
+ 8;
3645 uint64_t *gdev
= argptr
+ 8;
3648 *(uint32_t*)argptr
= tswap32(count
);
3649 for (i
= 0; i
< count
; i
++) {
3650 *gdev
= tswap64(*hdev
);
3656 case DM_LIST_VERSIONS
:
3658 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3659 uint32_t remaining_data
= guest_data_size
;
3660 void *cur_data
= argptr
;
3661 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3662 int vers_size
= thunk_type_size(arg_type
, 0);
3665 uint32_t next
= vers
->next
;
3667 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3669 if (remaining_data
< vers
->next
) {
3670 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3673 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3674 strcpy(cur_data
+ vers_size
, vers
->name
);
3675 cur_data
+= vers
->next
;
3676 remaining_data
-= vers
->next
;
3680 vers
= (void*)vers
+ next
;
3685 ret
= -TARGET_EINVAL
;
3688 unlock_user(argptr
, guest_data
, guest_data_size
);
3690 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3692 ret
= -TARGET_EFAULT
;
3695 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3696 unlock_user(argptr
, arg
, target_size
);
3703 static IOCTLEntry ioctl_entries
[] = {
3704 #define IOCTL(cmd, access, ...) \
3705 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3706 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3707 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3712 /* ??? Implement proper locking for ioctls. */
3713 /* do_ioctl() Must return target values and target errnos. */
3714 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3716 const IOCTLEntry
*ie
;
3717 const argtype
*arg_type
;
3719 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3725 if (ie
->target_cmd
== 0) {
3726 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3727 return -TARGET_ENOSYS
;
3729 if (ie
->target_cmd
== cmd
)
3733 arg_type
= ie
->arg_type
;
3735 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3738 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3741 switch(arg_type
[0]) {
3744 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3749 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3753 target_size
= thunk_type_size(arg_type
, 0);
3754 switch(ie
->access
) {
3756 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3757 if (!is_error(ret
)) {
3758 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3760 return -TARGET_EFAULT
;
3761 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3762 unlock_user(argptr
, arg
, target_size
);
3766 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3768 return -TARGET_EFAULT
;
3769 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3770 unlock_user(argptr
, arg
, 0);
3771 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3775 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3777 return -TARGET_EFAULT
;
3778 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3779 unlock_user(argptr
, arg
, 0);
3780 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3781 if (!is_error(ret
)) {
3782 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3784 return -TARGET_EFAULT
;
3785 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3786 unlock_user(argptr
, arg
, target_size
);
3792 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3793 (long)cmd
, arg_type
[0]);
3794 ret
= -TARGET_ENOSYS
;
3800 static const bitmask_transtbl iflag_tbl
[] = {
3801 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3802 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3803 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3804 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3805 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3806 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3807 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3808 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3809 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3810 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3811 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3812 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3813 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3814 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3818 static const bitmask_transtbl oflag_tbl
[] = {
3819 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3820 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3821 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3822 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3823 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3824 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3825 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3826 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3827 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3828 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3829 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3830 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3831 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3832 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3833 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3834 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3835 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3836 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3837 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3838 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3839 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3840 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3841 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3842 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3846 static const bitmask_transtbl cflag_tbl
[] = {
3847 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3848 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3849 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3850 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3851 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3852 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3853 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3854 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3855 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3856 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3857 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3858 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3859 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3860 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3861 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3862 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3863 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3864 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3865 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3866 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3867 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3868 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3869 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3870 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3871 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3872 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3873 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3874 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3875 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3876 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3877 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3881 static const bitmask_transtbl lflag_tbl
[] = {
3882 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3883 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3884 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3885 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3886 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3887 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3888 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3889 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3890 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3891 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3892 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3893 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3894 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3895 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3896 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3900 static void target_to_host_termios (void *dst
, const void *src
)
3902 struct host_termios
*host
= dst
;
3903 const struct target_termios
*target
= src
;
3906 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3908 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3910 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3912 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3913 host
->c_line
= target
->c_line
;
3915 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3916 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3917 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3918 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3919 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3920 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3921 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3922 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3923 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3924 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3925 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3926 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3927 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3928 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3929 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3930 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3931 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3932 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3935 static void host_to_target_termios (void *dst
, const void *src
)
3937 struct target_termios
*target
= dst
;
3938 const struct host_termios
*host
= src
;
3941 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3943 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3945 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3947 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3948 target
->c_line
= host
->c_line
;
3950 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3951 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3952 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3953 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3954 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3955 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3956 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3957 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3958 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3959 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3960 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3961 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3962 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3963 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3964 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3965 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3966 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3967 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3970 static const StructEntry struct_termios_def
= {
3971 .convert
= { host_to_target_termios
, target_to_host_termios
},
3972 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3973 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3976 static bitmask_transtbl mmap_flags_tbl
[] = {
3977 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3978 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3979 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3980 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3981 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3982 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3983 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3984 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3988 #if defined(TARGET_I386)
3990 /* NOTE: there is really one LDT for all the threads */
3991 static uint8_t *ldt_table
;
3993 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
4000 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
4001 if (size
> bytecount
)
4003 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
4005 return -TARGET_EFAULT
;
4006 /* ??? Should this by byteswapped? */
4007 memcpy(p
, ldt_table
, size
);
4008 unlock_user(p
, ptr
, size
);
4012 /* XXX: add locking support */
4013 static abi_long
write_ldt(CPUX86State
*env
,
4014 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
4016 struct target_modify_ldt_ldt_s ldt_info
;
4017 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4018 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4019 int seg_not_present
, useable
, lm
;
4020 uint32_t *lp
, entry_1
, entry_2
;
4022 if (bytecount
!= sizeof(ldt_info
))
4023 return -TARGET_EINVAL
;
4024 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
4025 return -TARGET_EFAULT
;
4026 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4027 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4028 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4029 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4030 unlock_user_struct(target_ldt_info
, ptr
, 0);
4032 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
4033 return -TARGET_EINVAL
;
4034 seg_32bit
= ldt_info
.flags
& 1;
4035 contents
= (ldt_info
.flags
>> 1) & 3;
4036 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4037 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4038 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4039 useable
= (ldt_info
.flags
>> 6) & 1;
4043 lm
= (ldt_info
.flags
>> 7) & 1;
4045 if (contents
== 3) {
4047 return -TARGET_EINVAL
;
4048 if (seg_not_present
== 0)
4049 return -TARGET_EINVAL
;
4051 /* allocate the LDT */
4053 env
->ldt
.base
= target_mmap(0,
4054 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
4055 PROT_READ
|PROT_WRITE
,
4056 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
4057 if (env
->ldt
.base
== -1)
4058 return -TARGET_ENOMEM
;
4059 memset(g2h(env
->ldt
.base
), 0,
4060 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
4061 env
->ldt
.limit
= 0xffff;
4062 ldt_table
= g2h(env
->ldt
.base
);
4065 /* NOTE: same code as Linux kernel */
4066 /* Allow LDTs to be cleared by the user. */
4067 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4070 read_exec_only
== 1 &&
4072 limit_in_pages
== 0 &&
4073 seg_not_present
== 1 &&
4081 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4082 (ldt_info
.limit
& 0x0ffff);
4083 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4084 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4085 (ldt_info
.limit
& 0xf0000) |
4086 ((read_exec_only
^ 1) << 9) |
4088 ((seg_not_present
^ 1) << 15) |
4090 (limit_in_pages
<< 23) |
4094 entry_2
|= (useable
<< 20);
4096 /* Install the new entry ... */
4098 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
4099 lp
[0] = tswap32(entry_1
);
4100 lp
[1] = tswap32(entry_2
);
4104 /* specific and weird i386 syscalls */
4105 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
4106 unsigned long bytecount
)
4112 ret
= read_ldt(ptr
, bytecount
);
4115 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4118 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4121 ret
= -TARGET_ENOSYS
;
4127 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4128 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4130 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4131 struct target_modify_ldt_ldt_s ldt_info
;
4132 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4133 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4134 int seg_not_present
, useable
, lm
;
4135 uint32_t *lp
, entry_1
, entry_2
;
4138 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4139 if (!target_ldt_info
)
4140 return -TARGET_EFAULT
;
4141 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4142 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4143 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4144 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4145 if (ldt_info
.entry_number
== -1) {
4146 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4147 if (gdt_table
[i
] == 0) {
4148 ldt_info
.entry_number
= i
;
4149 target_ldt_info
->entry_number
= tswap32(i
);
4154 unlock_user_struct(target_ldt_info
, ptr
, 1);
4156 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4157 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4158 return -TARGET_EINVAL
;
4159 seg_32bit
= ldt_info
.flags
& 1;
4160 contents
= (ldt_info
.flags
>> 1) & 3;
4161 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4162 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4163 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4164 useable
= (ldt_info
.flags
>> 6) & 1;
4168 lm
= (ldt_info
.flags
>> 7) & 1;
4171 if (contents
== 3) {
4172 if (seg_not_present
== 0)
4173 return -TARGET_EINVAL
;
4176 /* NOTE: same code as Linux kernel */
4177 /* Allow LDTs to be cleared by the user. */
4178 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4179 if ((contents
== 0 &&
4180 read_exec_only
== 1 &&
4182 limit_in_pages
== 0 &&
4183 seg_not_present
== 1 &&
4191 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4192 (ldt_info
.limit
& 0x0ffff);
4193 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4194 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4195 (ldt_info
.limit
& 0xf0000) |
4196 ((read_exec_only
^ 1) << 9) |
4198 ((seg_not_present
^ 1) << 15) |
4200 (limit_in_pages
<< 23) |
4205 /* Install the new entry ... */
4207 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4208 lp
[0] = tswap32(entry_1
);
4209 lp
[1] = tswap32(entry_2
);
4213 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4215 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4216 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4217 uint32_t base_addr
, limit
, flags
;
4218 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4219 int seg_not_present
, useable
, lm
;
4220 uint32_t *lp
, entry_1
, entry_2
;
4222 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4223 if (!target_ldt_info
)
4224 return -TARGET_EFAULT
;
4225 idx
= tswap32(target_ldt_info
->entry_number
);
4226 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4227 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4228 unlock_user_struct(target_ldt_info
, ptr
, 1);
4229 return -TARGET_EINVAL
;
4231 lp
= (uint32_t *)(gdt_table
+ idx
);
4232 entry_1
= tswap32(lp
[0]);
4233 entry_2
= tswap32(lp
[1]);
4235 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4236 contents
= (entry_2
>> 10) & 3;
4237 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4238 seg_32bit
= (entry_2
>> 22) & 1;
4239 limit_in_pages
= (entry_2
>> 23) & 1;
4240 useable
= (entry_2
>> 20) & 1;
4244 lm
= (entry_2
>> 21) & 1;
4246 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4247 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4248 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4249 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4250 base_addr
= (entry_1
>> 16) |
4251 (entry_2
& 0xff000000) |
4252 ((entry_2
& 0xff) << 16);
4253 target_ldt_info
->base_addr
= tswapal(base_addr
);
4254 target_ldt_info
->limit
= tswap32(limit
);
4255 target_ldt_info
->flags
= tswap32(flags
);
4256 unlock_user_struct(target_ldt_info
, ptr
, 1);
4259 #endif /* TARGET_I386 && TARGET_ABI32 */
4261 #ifndef TARGET_ABI32
4262 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4269 case TARGET_ARCH_SET_GS
:
4270 case TARGET_ARCH_SET_FS
:
4271 if (code
== TARGET_ARCH_SET_GS
)
4275 cpu_x86_load_seg(env
, idx
, 0);
4276 env
->segs
[idx
].base
= addr
;
4278 case TARGET_ARCH_GET_GS
:
4279 case TARGET_ARCH_GET_FS
:
4280 if (code
== TARGET_ARCH_GET_GS
)
4284 val
= env
->segs
[idx
].base
;
4285 if (put_user(val
, addr
, abi_ulong
))
4286 ret
= -TARGET_EFAULT
;
4289 ret
= -TARGET_EINVAL
;
4296 #endif /* defined(TARGET_I386) */
4298 #define NEW_STACK_SIZE 0x40000
4300 #if defined(CONFIG_USE_NPTL)
4302 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4305 pthread_mutex_t mutex
;
4306 pthread_cond_t cond
;
4309 abi_ulong child_tidptr
;
4310 abi_ulong parent_tidptr
;
4314 static void *clone_func(void *arg
)
4316 new_thread_info
*info
= arg
;
4322 cpu
= ENV_GET_CPU(env
);
4324 ts
= (TaskState
*)thread_env
->opaque
;
4325 info
->tid
= gettid();
4326 cpu
->host_tid
= info
->tid
;
4328 if (info
->child_tidptr
)
4329 put_user_u32(info
->tid
, info
->child_tidptr
);
4330 if (info
->parent_tidptr
)
4331 put_user_u32(info
->tid
, info
->parent_tidptr
);
4332 /* Enable signals. */
4333 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4334 /* Signal to the parent that we're ready. */
4335 pthread_mutex_lock(&info
->mutex
);
4336 pthread_cond_broadcast(&info
->cond
);
4337 pthread_mutex_unlock(&info
->mutex
);
4338 /* Wait until the parent has finshed initializing the tls state. */
4339 pthread_mutex_lock(&clone_lock
);
4340 pthread_mutex_unlock(&clone_lock
);
4347 static int clone_func(void *arg
)
4349 CPUArchState
*env
= arg
;
4356 /* do_fork() Must return host values and target errnos (unlike most
4357 do_*() functions). */
4358 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4359 abi_ulong parent_tidptr
, target_ulong newtls
,
4360 abi_ulong child_tidptr
)
4364 CPUArchState
*new_env
;
4365 #if defined(CONFIG_USE_NPTL)
4366 unsigned int nptl_flags
;
4372 /* Emulate vfork() with fork() */
4373 if (flags
& CLONE_VFORK
)
4374 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4376 if (flags
& CLONE_VM
) {
4377 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4378 #if defined(CONFIG_USE_NPTL)
4379 new_thread_info info
;
4380 pthread_attr_t attr
;
4382 ts
= g_malloc0(sizeof(TaskState
));
4383 init_task_state(ts
);
4384 /* we create a new CPU instance. */
4385 new_env
= cpu_copy(env
);
4386 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4387 cpu_reset(ENV_GET_CPU(new_env
));
4389 /* Init regs that differ from the parent. */
4390 cpu_clone_regs(new_env
, newsp
);
4391 new_env
->opaque
= ts
;
4392 ts
->bprm
= parent_ts
->bprm
;
4393 ts
->info
= parent_ts
->info
;
4394 #if defined(CONFIG_USE_NPTL)
4396 flags
&= ~CLONE_NPTL_FLAGS2
;
4398 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4399 ts
->child_tidptr
= child_tidptr
;
4402 if (nptl_flags
& CLONE_SETTLS
)
4403 cpu_set_tls (new_env
, newtls
);
4405 /* Grab a mutex so that thread setup appears atomic. */
4406 pthread_mutex_lock(&clone_lock
);
4408 memset(&info
, 0, sizeof(info
));
4409 pthread_mutex_init(&info
.mutex
, NULL
);
4410 pthread_mutex_lock(&info
.mutex
);
4411 pthread_cond_init(&info
.cond
, NULL
);
4413 if (nptl_flags
& CLONE_CHILD_SETTID
)
4414 info
.child_tidptr
= child_tidptr
;
4415 if (nptl_flags
& CLONE_PARENT_SETTID
)
4416 info
.parent_tidptr
= parent_tidptr
;
4418 ret
= pthread_attr_init(&attr
);
4419 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4420 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4421 /* It is not safe to deliver signals until the child has finished
4422 initializing, so temporarily block all signals. */
4423 sigfillset(&sigmask
);
4424 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4426 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4427 /* TODO: Free new CPU state if thread creation failed. */
4429 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4430 pthread_attr_destroy(&attr
);
4432 /* Wait for the child to initialize. */
4433 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4435 if (flags
& CLONE_PARENT_SETTID
)
4436 put_user_u32(ret
, parent_tidptr
);
4440 pthread_mutex_unlock(&info
.mutex
);
4441 pthread_cond_destroy(&info
.cond
);
4442 pthread_mutex_destroy(&info
.mutex
);
4443 pthread_mutex_unlock(&clone_lock
);
4445 if (flags
& CLONE_NPTL_FLAGS2
)
4447 /* This is probably going to die very quickly, but do it anyway. */
4448 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4450 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4452 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4456 /* if no CLONE_VM, we consider it is a fork */
4457 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4462 /* Child Process. */
4463 cpu_clone_regs(env
, newsp
);
4465 #if defined(CONFIG_USE_NPTL)
4466 /* There is a race condition here. The parent process could
4467 theoretically read the TID in the child process before the child
4468 tid is set. This would require using either ptrace
4469 (not implemented) or having *_tidptr to point at a shared memory
4470 mapping. We can't repeat the spinlock hack used above because
4471 the child process gets its own copy of the lock. */
4472 if (flags
& CLONE_CHILD_SETTID
)
4473 put_user_u32(gettid(), child_tidptr
);
4474 if (flags
& CLONE_PARENT_SETTID
)
4475 put_user_u32(gettid(), parent_tidptr
);
4476 ts
= (TaskState
*)env
->opaque
;
4477 if (flags
& CLONE_SETTLS
)
4478 cpu_set_tls (env
, newtls
);
4479 if (flags
& CLONE_CHILD_CLEARTID
)
4480 ts
->child_tidptr
= child_tidptr
;
4489 /* warning : doesn't handle linux specific flags... */
4490 static int target_to_host_fcntl_cmd(int cmd
)
4493 case TARGET_F_DUPFD
:
4494 case TARGET_F_GETFD
:
4495 case TARGET_F_SETFD
:
4496 case TARGET_F_GETFL
:
4497 case TARGET_F_SETFL
:
4499 case TARGET_F_GETLK
:
4501 case TARGET_F_SETLK
:
4503 case TARGET_F_SETLKW
:
4505 case TARGET_F_GETOWN
:
4507 case TARGET_F_SETOWN
:
4509 case TARGET_F_GETSIG
:
4511 case TARGET_F_SETSIG
:
4513 #if TARGET_ABI_BITS == 32
4514 case TARGET_F_GETLK64
:
4516 case TARGET_F_SETLK64
:
4518 case TARGET_F_SETLKW64
:
4521 case TARGET_F_SETLEASE
:
4523 case TARGET_F_GETLEASE
:
4525 #ifdef F_DUPFD_CLOEXEC
4526 case TARGET_F_DUPFD_CLOEXEC
:
4527 return F_DUPFD_CLOEXEC
;
4529 case TARGET_F_NOTIFY
:
4532 return -TARGET_EINVAL
;
4534 return -TARGET_EINVAL
;
4537 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4538 static const bitmask_transtbl flock_tbl
[] = {
4539 TRANSTBL_CONVERT(F_RDLCK
),
4540 TRANSTBL_CONVERT(F_WRLCK
),
4541 TRANSTBL_CONVERT(F_UNLCK
),
4542 TRANSTBL_CONVERT(F_EXLCK
),
4543 TRANSTBL_CONVERT(F_SHLCK
),
4547 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4550 struct target_flock
*target_fl
;
4551 struct flock64 fl64
;
4552 struct target_flock64
*target_fl64
;
4554 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4556 if (host_cmd
== -TARGET_EINVAL
)
4560 case TARGET_F_GETLK
:
4561 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4562 return -TARGET_EFAULT
;
4564 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4565 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4566 fl
.l_start
= tswapal(target_fl
->l_start
);
4567 fl
.l_len
= tswapal(target_fl
->l_len
);
4568 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4569 unlock_user_struct(target_fl
, arg
, 0);
4570 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4572 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4573 return -TARGET_EFAULT
;
4575 host_to_target_bitmask(tswap16(fl
.l_type
), flock_tbl
);
4576 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4577 target_fl
->l_start
= tswapal(fl
.l_start
);
4578 target_fl
->l_len
= tswapal(fl
.l_len
);
4579 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4580 unlock_user_struct(target_fl
, arg
, 1);
4584 case TARGET_F_SETLK
:
4585 case TARGET_F_SETLKW
:
4586 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4587 return -TARGET_EFAULT
;
4589 target_to_host_bitmask(tswap16(target_fl
->l_type
), flock_tbl
);
4590 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4591 fl
.l_start
= tswapal(target_fl
->l_start
);
4592 fl
.l_len
= tswapal(target_fl
->l_len
);
4593 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4594 unlock_user_struct(target_fl
, arg
, 0);
4595 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4598 case TARGET_F_GETLK64
:
4599 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4600 return -TARGET_EFAULT
;
4602 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4603 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4604 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4605 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4606 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4607 unlock_user_struct(target_fl64
, arg
, 0);
4608 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4610 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4611 return -TARGET_EFAULT
;
4612 target_fl64
->l_type
=
4613 host_to_target_bitmask(tswap16(fl64
.l_type
), flock_tbl
) >> 1;
4614 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4615 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4616 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4617 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4618 unlock_user_struct(target_fl64
, arg
, 1);
4621 case TARGET_F_SETLK64
:
4622 case TARGET_F_SETLKW64
:
4623 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4624 return -TARGET_EFAULT
;
4626 target_to_host_bitmask(tswap16(target_fl64
->l_type
), flock_tbl
) >> 1;
4627 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4628 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4629 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4630 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4631 unlock_user_struct(target_fl64
, arg
, 0);
4632 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4635 case TARGET_F_GETFL
:
4636 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4638 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4642 case TARGET_F_SETFL
:
4643 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4646 case TARGET_F_SETOWN
:
4647 case TARGET_F_GETOWN
:
4648 case TARGET_F_SETSIG
:
4649 case TARGET_F_GETSIG
:
4650 case TARGET_F_SETLEASE
:
4651 case TARGET_F_GETLEASE
:
4652 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4656 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4664 static inline int high2lowuid(int uid
)
4672 static inline int high2lowgid(int gid
)
4680 static inline int low2highuid(int uid
)
4682 if ((int16_t)uid
== -1)
4688 static inline int low2highgid(int gid
)
4690 if ((int16_t)gid
== -1)
4695 static inline int tswapid(int id
)
4699 #else /* !USE_UID16 */
4700 static inline int high2lowuid(int uid
)
4704 static inline int high2lowgid(int gid
)
4708 static inline int low2highuid(int uid
)
4712 static inline int low2highgid(int gid
)
4716 static inline int tswapid(int id
)
4720 #endif /* USE_UID16 */
4722 void syscall_init(void)
4725 const argtype
*arg_type
;
4729 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4730 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4731 #include "syscall_types.h"
4733 #undef STRUCT_SPECIAL
4735 /* Build target_to_host_errno_table[] table from
4736 * host_to_target_errno_table[]. */
4737 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
4738 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4741 /* we patch the ioctl size if necessary. We rely on the fact that
4742 no ioctl has all the bits at '1' in the size field */
4744 while (ie
->target_cmd
!= 0) {
4745 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4746 TARGET_IOC_SIZEMASK
) {
4747 arg_type
= ie
->arg_type
;
4748 if (arg_type
[0] != TYPE_PTR
) {
4749 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4754 size
= thunk_type_size(arg_type
, 0);
4755 ie
->target_cmd
= (ie
->target_cmd
&
4756 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4757 (size
<< TARGET_IOC_SIZESHIFT
);
4760 /* automatic consistency check if same arch */
4761 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4762 (defined(__x86_64__) && defined(TARGET_X86_64))
4763 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4764 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4765 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4772 #if TARGET_ABI_BITS == 32
4773 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4775 #ifdef TARGET_WORDS_BIGENDIAN
4776 return ((uint64_t)word0
<< 32) | word1
;
4778 return ((uint64_t)word1
<< 32) | word0
;
4781 #else /* TARGET_ABI_BITS == 32 */
4782 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4786 #endif /* TARGET_ABI_BITS != 32 */
4788 #ifdef TARGET_NR_truncate64
4789 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4794 if (regpairs_aligned(cpu_env
)) {
4798 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4802 #ifdef TARGET_NR_ftruncate64
4803 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4808 if (regpairs_aligned(cpu_env
)) {
4812 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4816 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4817 abi_ulong target_addr
)
4819 struct target_timespec
*target_ts
;
4821 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4822 return -TARGET_EFAULT
;
4823 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4824 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4825 unlock_user_struct(target_ts
, target_addr
, 0);
4829 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4830 struct timespec
*host_ts
)
4832 struct target_timespec
*target_ts
;
4834 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4835 return -TARGET_EFAULT
;
4836 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4837 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4838 unlock_user_struct(target_ts
, target_addr
, 1);
4842 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4843 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4844 abi_ulong target_addr
,
4845 struct stat
*host_st
)
4848 if (((CPUARMState
*)cpu_env
)->eabi
) {
4849 struct target_eabi_stat64
*target_st
;
4851 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4852 return -TARGET_EFAULT
;
4853 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4854 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4855 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4856 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4857 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4859 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4860 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4861 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4862 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4863 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4864 __put_user(host_st
->st_size
, &target_st
->st_size
);
4865 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4866 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4867 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4868 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4869 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4870 unlock_user_struct(target_st
, target_addr
, 1);
4874 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4875 struct target_stat
*target_st
;
4877 struct target_stat64
*target_st
;
4880 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4881 return -TARGET_EFAULT
;
4882 memset(target_st
, 0, sizeof(*target_st
));
4883 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4884 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4885 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4886 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4888 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4889 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4890 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4891 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4892 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4893 /* XXX: better use of kernel struct */
4894 __put_user(host_st
->st_size
, &target_st
->st_size
);
4895 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4896 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4897 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4898 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4899 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4900 unlock_user_struct(target_st
, target_addr
, 1);
4907 #if defined(CONFIG_USE_NPTL)
4908 /* ??? Using host futex calls even when target atomic operations
4909 are not really atomic probably breaks things. However implementing
4910 futexes locally would make futexes shared between multiple processes
4911 tricky. However they're probably useless because guest atomic
4912 operations won't work either. */
4913 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4914 target_ulong uaddr2
, int val3
)
4916 struct timespec ts
, *pts
;
4919 /* ??? We assume FUTEX_* constants are the same on both host
4921 #ifdef FUTEX_CMD_MASK
4922 base_op
= op
& FUTEX_CMD_MASK
;
4928 case FUTEX_WAIT_BITSET
:
4931 target_to_host_timespec(pts
, timeout
);
4935 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4938 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4940 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4942 case FUTEX_CMP_REQUEUE
:
4944 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4945 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4946 But the prototype takes a `struct timespec *'; insert casts
4947 to satisfy the compiler. We do not need to tswap TIMEOUT
4948 since it's not compared to guest memory. */
4949 pts
= (struct timespec
*)(uintptr_t) timeout
;
4950 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4952 (base_op
== FUTEX_CMP_REQUEUE
4956 return -TARGET_ENOSYS
;
4961 /* Map host to target signal numbers for the wait family of syscalls.
4962 Assume all other status bits are the same. */
4963 int host_to_target_waitstatus(int status
)
4965 if (WIFSIGNALED(status
)) {
4966 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
4968 if (WIFSTOPPED(status
)) {
4969 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
4975 int get_osversion(void)
4977 static int osversion
;
4978 struct new_utsname buf
;
4983 if (qemu_uname_release
&& *qemu_uname_release
) {
4984 s
= qemu_uname_release
;
4986 if (sys_uname(&buf
))
4991 for (i
= 0; i
< 3; i
++) {
4993 while (*s
>= '0' && *s
<= '9') {
4998 tmp
= (tmp
<< 8) + n
;
/* open_self_maps: synthesize a guest view of /proc/self/maps into fd.
 * Reads the host's /proc/self/maps line by line, parses each mapping
 * with sscanf, and re-emits (via dprintf) only those ranges that are
 * valid in the guest address space, with host addresses translated
 * through h2g().  On ARM/M68K/UNICORE32 a synthetic "[stack]" line is
 * appended from the TaskState image info.
 * NOTE(review): fragmentary extraction -- error-handling and loop
 * plumbing lines are missing; code left byte-identical. */
5007 static int open_self_maps(void *cpu_env
, int fd
)
5009 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5010 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5017 fp
= fopen("/proc/self/maps", "r");
/* getline() allocates/grows `line` as needed; iterate all mappings. */
5022 while ((read
= getline(&line
, &len
, fp
)) != -1) {
5023 int fields
, dev_maj
, dev_min
, inode
;
5024 uint64_t min
, max
, offset
;
5025 char flag_r
, flag_w
, flag_x
, flag_p
;
5026 char path
[512] = "";
/* Parse: start-end perms offset dev_maj:dev_min inode [path]. */
5027 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
5028 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
5029 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
/* 10 fields = anonymous mapping (no path), 11 = named; anything
 * else is a parse failure for this line. */
5031 if ((fields
< 10) || (fields
> 11)) {
/* The host [stack] line is skipped; the guest stack is emitted
 * separately below (target-specific #if). */
5034 if (!strncmp(path
, "[stack]", 7)) {
/* Only report ranges representable in the guest address space,
 * translated host->guest with h2g(). */
5037 if (h2g_valid(min
) && h2g_valid(max
)) {
5038 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
5039 " %c%c%c%c %08" PRIx64
" %02x:%02x %d%s%s\n",
5040 h2g(min
), h2g(max
), flag_r
, flag_w
,
5041 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
5042 path
[0] ? " " : "", path
);
/* Emit a fabricated guest [stack] entry from the loaded image info,
 * page-aligning the top of stack. */
5049 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5050 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5051 (unsigned long long)ts
->info
->stack_limit
,
5052 (unsigned long long)(ts
->info
->start_stack
+
5053 (TARGET_PAGE_SIZE
- 1)) & TARGET_PAGE_MASK
,
5054 (unsigned long long)0);
/* open_self_stat: synthesize a guest /proc/self/stat into fd.
 * Emits the 44 space-separated fields of the stat line; only a few are
 * filled with real values (field 0, field 1 = "(comm)" from argv[0],
 * field 27 = start of stack), the rest are written as literal "0".
 * NOTE(review): fragmentary extraction -- the field-0/field-27 value
 * computations are not visible here; code left byte-identical. */
5060 static int open_self_stat(void *cpu_env
, int fd
)
5062 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5063 abi_ulong start_stack
= ts
->info
->start_stack
;
/* /proc/<pid>/stat has 44 fields; emit them one at a time. */
5066 for (i
= 0; i
< 44; i
++) {
5074 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
/* Field 1: command name, parenthesized, taken from argv[0]. */
5075 } else if (i
== 1) {
5077 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
/* Field 27: startstack (guest stack base). */
5078 } else if (i
== 27) {
5081 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
5083 /* for the rest, there is MasterCard */
/* All remaining fields are dummies; the last one ends the line. */
5084 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
/* Short write is treated as failure. */
5088 if (write(fd
, buf
, len
) != len
) {
/* open_self_auxv: synthesize a guest /proc/self/auxv into fd.
 * The guest's auxiliary vector was saved on the target stack at load
 * time (ts->info->saved_auxv, length ts->info->auxv_len); it is locked
 * for reading, copied wholesale into fd, and the fd is rewound so the
 * caller reads from the start.
 * NOTE(review): fragmentary extraction -- error paths for lock_user()
 * and write() are not visible; code left byte-identical. */
5096 static int open_self_auxv(void *cpu_env
, int fd
)
5098 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5099 abi_ulong auxv
= ts
->info
->saved_auxv
;
5100 abi_ulong len
= ts
->info
->auxv_len
;
5104 * Auxiliary vector is stored in target process stack.
5105 * read in whole auxv vector and copy it to file
/* Map the guest auxv region into host memory for reading. */
5107 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
5111 r
= write(fd
, ptr
, len
);
/* Rewind so subsequent reads see the vector from the beginning. */
5118 lseek(fd
, 0, SEEK_SET
);
5119 unlock_user(ptr
, auxv
, len
);
/* do_open: open() with interception of special /proc/self files.
 * Paths matching an entry in `fakes` (/proc/self/maps, stat, auxv --
 * prefix match via strncmp) are redirected to a mkstemp() temp file in
 * $TMPDIR which is filled by the entry's fill() callback and rewound;
 * the temp fd is returned instead of opening the real file.  All other
 * paths fall through to the host open() via path().
 * NOTE(review): fragmentary extraction -- cleanup/unlink and fallback
 * lines are missing; code left byte-identical. */
5125 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5128 const char *filename
;
5129 int (*fill
)(void *cpu_env
, int fd
);
5131 const struct fake_open
*fake_open
;
/* Table of emulated /proc/self entries and their generators;
 * terminated by a NULL filename. */
5132 static const struct fake_open fakes
[] = {
5133 { "/proc/self/maps", open_self_maps
},
5134 { "/proc/self/stat", open_self_stat
},
5135 { "/proc/self/auxv", open_self_auxv
},
/* Prefix match: stop at the first fake whose name matches pathname. */
5139 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5140 if (!strncmp(pathname
, fake_open
->filename
,
5141 strlen(fake_open
->filename
))) {
/* Non-NULL filename here means the loop above found a match. */
5146 if (fake_open
->filename
) {
5148 char filename
[PATH_MAX
];
5151 /* create temporary file to map stat to */
5152 tmpdir
= getenv("TMPDIR");
5155 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5156 fd
= mkstemp(filename
);
/* Populate the temp file with the synthesized contents; non-zero
 * return from fill() is an error. */
5162 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
/* Rewind so the caller reads the fabricated file from the start. */
5166 lseek(fd
, 0, SEEK_SET
);
/* Default: pass through to the host, with path() remapping for
 * the guest sysroot. */
5171 return get_errno(open(path(pathname
), flags
, mode
));
5174 /* do_syscall() should always have a single exit point at the end so
5175 that actions, such as logging of syscall results, can be performed.
5176 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5177 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5178 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5179 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5188 gemu_log("syscall %d", num
);
5191 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5194 case TARGET_NR_exit
:
5195 #ifdef CONFIG_USE_NPTL
5196 /* In old applications this may be used to implement _exit(2).
5197 However in threaded applictions it is used for thread termination,
5198 and _exit_group is used for application termination.
5199 Do thread termination if we have more then one thread. */
5200 /* FIXME: This probably breaks if a signal arrives. We should probably
5201 be disabling signals. */
5202 if (first_cpu
->next_cpu
) {
5204 CPUArchState
**lastp
;
5210 while (p
&& p
!= (CPUArchState
*)cpu_env
) {
5211 lastp
= &p
->next_cpu
;
5214 /* If we didn't find the CPU for this thread then something is
5218 /* Remove the CPU from the list. */
5219 *lastp
= p
->next_cpu
;
5221 ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5222 if (ts
->child_tidptr
) {
5223 put_user_u32(0, ts
->child_tidptr
);
5224 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5228 object_unref(OBJECT(ENV_GET_CPU(cpu_env
)));
5236 gdb_exit(cpu_env
, arg1
);
5238 ret
= 0; /* avoid warning */
5240 case TARGET_NR_read
:
5244 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5246 ret
= get_errno(read(arg1
, p
, arg3
));
5247 unlock_user(p
, arg2
, ret
);
5250 case TARGET_NR_write
:
5251 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5253 ret
= get_errno(write(arg1
, p
, arg3
));
5254 unlock_user(p
, arg2
, 0);
5256 case TARGET_NR_open
:
5257 if (!(p
= lock_user_string(arg1
)))
5259 ret
= get_errno(do_open(cpu_env
, p
,
5260 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5262 unlock_user(p
, arg1
, 0);
5264 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5265 case TARGET_NR_openat
:
5266 if (!(p
= lock_user_string(arg2
)))
5268 ret
= get_errno(sys_openat(arg1
,
5270 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5272 unlock_user(p
, arg2
, 0);
5275 case TARGET_NR_close
:
5276 ret
= get_errno(close(arg1
));
5281 case TARGET_NR_fork
:
5282 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5284 #ifdef TARGET_NR_waitpid
5285 case TARGET_NR_waitpid
:
5288 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5289 if (!is_error(ret
) && arg2
&& ret
5290 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5295 #ifdef TARGET_NR_waitid
5296 case TARGET_NR_waitid
:
5300 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5301 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5302 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5304 host_to_target_siginfo(p
, &info
);
5305 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5310 #ifdef TARGET_NR_creat /* not on alpha */
5311 case TARGET_NR_creat
:
5312 if (!(p
= lock_user_string(arg1
)))
5314 ret
= get_errno(creat(p
, arg2
));
5315 unlock_user(p
, arg1
, 0);
5318 case TARGET_NR_link
:
5321 p
= lock_user_string(arg1
);
5322 p2
= lock_user_string(arg2
);
5324 ret
= -TARGET_EFAULT
;
5326 ret
= get_errno(link(p
, p2
));
5327 unlock_user(p2
, arg2
, 0);
5328 unlock_user(p
, arg1
, 0);
5331 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5332 case TARGET_NR_linkat
:
5337 p
= lock_user_string(arg2
);
5338 p2
= lock_user_string(arg4
);
5340 ret
= -TARGET_EFAULT
;
5342 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
5343 unlock_user(p
, arg2
, 0);
5344 unlock_user(p2
, arg4
, 0);
5348 case TARGET_NR_unlink
:
5349 if (!(p
= lock_user_string(arg1
)))
5351 ret
= get_errno(unlink(p
));
5352 unlock_user(p
, arg1
, 0);
5354 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5355 case TARGET_NR_unlinkat
:
5356 if (!(p
= lock_user_string(arg2
)))
5358 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
5359 unlock_user(p
, arg2
, 0);
5362 case TARGET_NR_execve
:
5364 char **argp
, **envp
;
5367 abi_ulong guest_argp
;
5368 abi_ulong guest_envp
;
5375 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5376 if (get_user_ual(addr
, gp
))
5384 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5385 if (get_user_ual(addr
, gp
))
5392 argp
= alloca((argc
+ 1) * sizeof(void *));
5393 envp
= alloca((envc
+ 1) * sizeof(void *));
5395 for (gp
= guest_argp
, q
= argp
; gp
;
5396 gp
+= sizeof(abi_ulong
), q
++) {
5397 if (get_user_ual(addr
, gp
))
5401 if (!(*q
= lock_user_string(addr
)))
5403 total_size
+= strlen(*q
) + 1;
5407 for (gp
= guest_envp
, q
= envp
; gp
;
5408 gp
+= sizeof(abi_ulong
), q
++) {
5409 if (get_user_ual(addr
, gp
))
5413 if (!(*q
= lock_user_string(addr
)))
5415 total_size
+= strlen(*q
) + 1;
5419 /* This case will not be caught by the host's execve() if its
5420 page size is bigger than the target's. */
5421 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5422 ret
= -TARGET_E2BIG
;
5425 if (!(p
= lock_user_string(arg1
)))
5427 ret
= get_errno(execve(p
, argp
, envp
));
5428 unlock_user(p
, arg1
, 0);
5433 ret
= -TARGET_EFAULT
;
5436 for (gp
= guest_argp
, q
= argp
; *q
;
5437 gp
+= sizeof(abi_ulong
), q
++) {
5438 if (get_user_ual(addr
, gp
)
5441 unlock_user(*q
, addr
, 0);
5443 for (gp
= guest_envp
, q
= envp
; *q
;
5444 gp
+= sizeof(abi_ulong
), q
++) {
5445 if (get_user_ual(addr
, gp
)
5448 unlock_user(*q
, addr
, 0);
5452 case TARGET_NR_chdir
:
5453 if (!(p
= lock_user_string(arg1
)))
5455 ret
= get_errno(chdir(p
));
5456 unlock_user(p
, arg1
, 0);
5458 #ifdef TARGET_NR_time
5459 case TARGET_NR_time
:
5462 ret
= get_errno(time(&host_time
));
5465 && put_user_sal(host_time
, arg1
))
5470 case TARGET_NR_mknod
:
5471 if (!(p
= lock_user_string(arg1
)))
5473 ret
= get_errno(mknod(p
, arg2
, arg3
));
5474 unlock_user(p
, arg1
, 0);
5476 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5477 case TARGET_NR_mknodat
:
5478 if (!(p
= lock_user_string(arg2
)))
5480 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
5481 unlock_user(p
, arg2
, 0);
5484 case TARGET_NR_chmod
:
5485 if (!(p
= lock_user_string(arg1
)))
5487 ret
= get_errno(chmod(p
, arg2
));
5488 unlock_user(p
, arg1
, 0);
5490 #ifdef TARGET_NR_break
5491 case TARGET_NR_break
:
5494 #ifdef TARGET_NR_oldstat
5495 case TARGET_NR_oldstat
:
5498 case TARGET_NR_lseek
:
5499 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5501 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5502 /* Alpha specific */
5503 case TARGET_NR_getxpid
:
5504 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5505 ret
= get_errno(getpid());
5508 #ifdef TARGET_NR_getpid
5509 case TARGET_NR_getpid
:
5510 ret
= get_errno(getpid());
5513 case TARGET_NR_mount
:
5515 /* need to look at the data field */
5517 p
= lock_user_string(arg1
);
5518 p2
= lock_user_string(arg2
);
5519 p3
= lock_user_string(arg3
);
5520 if (!p
|| !p2
|| !p3
)
5521 ret
= -TARGET_EFAULT
;
5523 /* FIXME - arg5 should be locked, but it isn't clear how to
5524 * do that since it's not guaranteed to be a NULL-terminated
5528 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5530 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5532 unlock_user(p
, arg1
, 0);
5533 unlock_user(p2
, arg2
, 0);
5534 unlock_user(p3
, arg3
, 0);
5537 #ifdef TARGET_NR_umount
5538 case TARGET_NR_umount
:
5539 if (!(p
= lock_user_string(arg1
)))
5541 ret
= get_errno(umount(p
));
5542 unlock_user(p
, arg1
, 0);
5545 #ifdef TARGET_NR_stime /* not on alpha */
5546 case TARGET_NR_stime
:
5549 if (get_user_sal(host_time
, arg1
))
5551 ret
= get_errno(stime(&host_time
));
5555 case TARGET_NR_ptrace
:
5557 #ifdef TARGET_NR_alarm /* not on alpha */
5558 case TARGET_NR_alarm
:
5562 #ifdef TARGET_NR_oldfstat
5563 case TARGET_NR_oldfstat
:
5566 #ifdef TARGET_NR_pause /* not on alpha */
5567 case TARGET_NR_pause
:
5568 ret
= get_errno(pause());
5571 #ifdef TARGET_NR_utime
5572 case TARGET_NR_utime
:
5574 struct utimbuf tbuf
, *host_tbuf
;
5575 struct target_utimbuf
*target_tbuf
;
5577 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5579 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5580 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5581 unlock_user_struct(target_tbuf
, arg2
, 0);
5586 if (!(p
= lock_user_string(arg1
)))
5588 ret
= get_errno(utime(p
, host_tbuf
));
5589 unlock_user(p
, arg1
, 0);
5593 case TARGET_NR_utimes
:
5595 struct timeval
*tvp
, tv
[2];
5597 if (copy_from_user_timeval(&tv
[0], arg2
)
5598 || copy_from_user_timeval(&tv
[1],
5599 arg2
+ sizeof(struct target_timeval
)))
5605 if (!(p
= lock_user_string(arg1
)))
5607 ret
= get_errno(utimes(p
, tvp
));
5608 unlock_user(p
, arg1
, 0);
5611 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5612 case TARGET_NR_futimesat
:
5614 struct timeval
*tvp
, tv
[2];
5616 if (copy_from_user_timeval(&tv
[0], arg3
)
5617 || copy_from_user_timeval(&tv
[1],
5618 arg3
+ sizeof(struct target_timeval
)))
5624 if (!(p
= lock_user_string(arg2
)))
5626 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5627 unlock_user(p
, arg2
, 0);
5631 #ifdef TARGET_NR_stty
5632 case TARGET_NR_stty
:
5635 #ifdef TARGET_NR_gtty
5636 case TARGET_NR_gtty
:
5639 case TARGET_NR_access
:
5640 if (!(p
= lock_user_string(arg1
)))
5642 ret
= get_errno(access(path(p
), arg2
));
5643 unlock_user(p
, arg1
, 0);
5645 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5646 case TARGET_NR_faccessat
:
5647 if (!(p
= lock_user_string(arg2
)))
5649 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5650 unlock_user(p
, arg2
, 0);
5653 #ifdef TARGET_NR_nice /* not on alpha */
5654 case TARGET_NR_nice
:
5655 ret
= get_errno(nice(arg1
));
5658 #ifdef TARGET_NR_ftime
5659 case TARGET_NR_ftime
:
5662 case TARGET_NR_sync
:
5666 case TARGET_NR_kill
:
5667 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5669 case TARGET_NR_rename
:
5672 p
= lock_user_string(arg1
);
5673 p2
= lock_user_string(arg2
);
5675 ret
= -TARGET_EFAULT
;
5677 ret
= get_errno(rename(p
, p2
));
5678 unlock_user(p2
, arg2
, 0);
5679 unlock_user(p
, arg1
, 0);
5682 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5683 case TARGET_NR_renameat
:
5686 p
= lock_user_string(arg2
);
5687 p2
= lock_user_string(arg4
);
5689 ret
= -TARGET_EFAULT
;
5691 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5692 unlock_user(p2
, arg4
, 0);
5693 unlock_user(p
, arg2
, 0);
5697 case TARGET_NR_mkdir
:
5698 if (!(p
= lock_user_string(arg1
)))
5700 ret
= get_errno(mkdir(p
, arg2
));
5701 unlock_user(p
, arg1
, 0);
5703 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5704 case TARGET_NR_mkdirat
:
5705 if (!(p
= lock_user_string(arg2
)))
5707 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5708 unlock_user(p
, arg2
, 0);
5711 case TARGET_NR_rmdir
:
5712 if (!(p
= lock_user_string(arg1
)))
5714 ret
= get_errno(rmdir(p
));
5715 unlock_user(p
, arg1
, 0);
5718 ret
= get_errno(dup(arg1
));
5720 case TARGET_NR_pipe
:
5721 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5723 #ifdef TARGET_NR_pipe2
5724 case TARGET_NR_pipe2
:
5725 ret
= do_pipe(cpu_env
, arg1
,
5726 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5729 case TARGET_NR_times
:
5731 struct target_tms
*tmsp
;
5733 ret
= get_errno(times(&tms
));
5735 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5738 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5739 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5740 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5741 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5744 ret
= host_to_target_clock_t(ret
);
5747 #ifdef TARGET_NR_prof
5748 case TARGET_NR_prof
:
5751 #ifdef TARGET_NR_signal
5752 case TARGET_NR_signal
:
5755 case TARGET_NR_acct
:
5757 ret
= get_errno(acct(NULL
));
5759 if (!(p
= lock_user_string(arg1
)))
5761 ret
= get_errno(acct(path(p
)));
5762 unlock_user(p
, arg1
, 0);
5765 #ifdef TARGET_NR_umount2 /* not on alpha */
5766 case TARGET_NR_umount2
:
5767 if (!(p
= lock_user_string(arg1
)))
5769 ret
= get_errno(umount2(p
, arg2
));
5770 unlock_user(p
, arg1
, 0);
5773 #ifdef TARGET_NR_lock
5774 case TARGET_NR_lock
:
5777 case TARGET_NR_ioctl
:
5778 ret
= do_ioctl(arg1
, arg2
, arg3
);
5780 case TARGET_NR_fcntl
:
5781 ret
= do_fcntl(arg1
, arg2
, arg3
);
5783 #ifdef TARGET_NR_mpx
5787 case TARGET_NR_setpgid
:
5788 ret
= get_errno(setpgid(arg1
, arg2
));
5790 #ifdef TARGET_NR_ulimit
5791 case TARGET_NR_ulimit
:
5794 #ifdef TARGET_NR_oldolduname
5795 case TARGET_NR_oldolduname
:
5798 case TARGET_NR_umask
:
5799 ret
= get_errno(umask(arg1
));
5801 case TARGET_NR_chroot
:
5802 if (!(p
= lock_user_string(arg1
)))
5804 ret
= get_errno(chroot(p
));
5805 unlock_user(p
, arg1
, 0);
5807 case TARGET_NR_ustat
:
5809 case TARGET_NR_dup2
:
5810 ret
= get_errno(dup2(arg1
, arg2
));
5812 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5813 case TARGET_NR_dup3
:
5814 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5817 #ifdef TARGET_NR_getppid /* not on alpha */
5818 case TARGET_NR_getppid
:
5819 ret
= get_errno(getppid());
5822 case TARGET_NR_getpgrp
:
5823 ret
= get_errno(getpgrp());
5825 case TARGET_NR_setsid
:
5826 ret
= get_errno(setsid());
5828 #ifdef TARGET_NR_sigaction
5829 case TARGET_NR_sigaction
:
5831 #if defined(TARGET_ALPHA)
5832 struct target_sigaction act
, oact
, *pact
= 0;
5833 struct target_old_sigaction
*old_act
;
5835 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5837 act
._sa_handler
= old_act
->_sa_handler
;
5838 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5839 act
.sa_flags
= old_act
->sa_flags
;
5840 act
.sa_restorer
= 0;
5841 unlock_user_struct(old_act
, arg2
, 0);
5844 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5845 if (!is_error(ret
) && arg3
) {
5846 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5848 old_act
->_sa_handler
= oact
._sa_handler
;
5849 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5850 old_act
->sa_flags
= oact
.sa_flags
;
5851 unlock_user_struct(old_act
, arg3
, 1);
5853 #elif defined(TARGET_MIPS)
5854 struct target_sigaction act
, oact
, *pact
, *old_act
;
5857 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5859 act
._sa_handler
= old_act
->_sa_handler
;
5860 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5861 act
.sa_flags
= old_act
->sa_flags
;
5862 unlock_user_struct(old_act
, arg2
, 0);
5868 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5870 if (!is_error(ret
) && arg3
) {
5871 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5873 old_act
->_sa_handler
= oact
._sa_handler
;
5874 old_act
->sa_flags
= oact
.sa_flags
;
5875 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5876 old_act
->sa_mask
.sig
[1] = 0;
5877 old_act
->sa_mask
.sig
[2] = 0;
5878 old_act
->sa_mask
.sig
[3] = 0;
5879 unlock_user_struct(old_act
, arg3
, 1);
5882 struct target_old_sigaction
*old_act
;
5883 struct target_sigaction act
, oact
, *pact
;
5885 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5887 act
._sa_handler
= old_act
->_sa_handler
;
5888 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5889 act
.sa_flags
= old_act
->sa_flags
;
5890 act
.sa_restorer
= old_act
->sa_restorer
;
5891 unlock_user_struct(old_act
, arg2
, 0);
5896 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5897 if (!is_error(ret
) && arg3
) {
5898 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5900 old_act
->_sa_handler
= oact
._sa_handler
;
5901 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5902 old_act
->sa_flags
= oact
.sa_flags
;
5903 old_act
->sa_restorer
= oact
.sa_restorer
;
5904 unlock_user_struct(old_act
, arg3
, 1);
5910 case TARGET_NR_rt_sigaction
:
5912 #if defined(TARGET_ALPHA)
5913 struct target_sigaction act
, oact
, *pact
= 0;
5914 struct target_rt_sigaction
*rt_act
;
5915 /* ??? arg4 == sizeof(sigset_t). */
5917 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5919 act
._sa_handler
= rt_act
->_sa_handler
;
5920 act
.sa_mask
= rt_act
->sa_mask
;
5921 act
.sa_flags
= rt_act
->sa_flags
;
5922 act
.sa_restorer
= arg5
;
5923 unlock_user_struct(rt_act
, arg2
, 0);
5926 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5927 if (!is_error(ret
) && arg3
) {
5928 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5930 rt_act
->_sa_handler
= oact
._sa_handler
;
5931 rt_act
->sa_mask
= oact
.sa_mask
;
5932 rt_act
->sa_flags
= oact
.sa_flags
;
5933 unlock_user_struct(rt_act
, arg3
, 1);
5936 struct target_sigaction
*act
;
5937 struct target_sigaction
*oact
;
5940 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5945 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5946 ret
= -TARGET_EFAULT
;
5947 goto rt_sigaction_fail
;
5951 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5954 unlock_user_struct(act
, arg2
, 0);
5956 unlock_user_struct(oact
, arg3
, 1);
5960 #ifdef TARGET_NR_sgetmask /* not on alpha */
5961 case TARGET_NR_sgetmask
:
5964 abi_ulong target_set
;
5965 sigprocmask(0, NULL
, &cur_set
);
5966 host_to_target_old_sigset(&target_set
, &cur_set
);
5971 #ifdef TARGET_NR_ssetmask /* not on alpha */
5972 case TARGET_NR_ssetmask
:
5974 sigset_t set
, oset
, cur_set
;
5975 abi_ulong target_set
= arg1
;
5976 sigprocmask(0, NULL
, &cur_set
);
5977 target_to_host_old_sigset(&set
, &target_set
);
5978 sigorset(&set
, &set
, &cur_set
);
5979 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5980 host_to_target_old_sigset(&target_set
, &oset
);
5985 #ifdef TARGET_NR_sigprocmask
5986 case TARGET_NR_sigprocmask
:
5988 #if defined(TARGET_ALPHA)
5989 sigset_t set
, oldset
;
5994 case TARGET_SIG_BLOCK
:
5997 case TARGET_SIG_UNBLOCK
:
6000 case TARGET_SIG_SETMASK
:
6004 ret
= -TARGET_EINVAL
;
6008 target_to_host_old_sigset(&set
, &mask
);
6010 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
6011 if (!is_error(ret
)) {
6012 host_to_target_old_sigset(&mask
, &oldset
);
6014 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
6017 sigset_t set
, oldset
, *set_ptr
;
6022 case TARGET_SIG_BLOCK
:
6025 case TARGET_SIG_UNBLOCK
:
6028 case TARGET_SIG_SETMASK
:
6032 ret
= -TARGET_EINVAL
;
6035 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6037 target_to_host_old_sigset(&set
, p
);
6038 unlock_user(p
, arg2
, 0);
6044 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6045 if (!is_error(ret
) && arg3
) {
6046 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6048 host_to_target_old_sigset(p
, &oldset
);
6049 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6055 case TARGET_NR_rt_sigprocmask
:
6058 sigset_t set
, oldset
, *set_ptr
;
6062 case TARGET_SIG_BLOCK
:
6065 case TARGET_SIG_UNBLOCK
:
6068 case TARGET_SIG_SETMASK
:
6072 ret
= -TARGET_EINVAL
;
6075 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
6077 target_to_host_sigset(&set
, p
);
6078 unlock_user(p
, arg2
, 0);
6084 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
6085 if (!is_error(ret
) && arg3
) {
6086 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
6088 host_to_target_sigset(p
, &oldset
);
6089 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
6093 #ifdef TARGET_NR_sigpending
6094 case TARGET_NR_sigpending
:
6097 ret
= get_errno(sigpending(&set
));
6098 if (!is_error(ret
)) {
6099 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6101 host_to_target_old_sigset(p
, &set
);
6102 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6107 case TARGET_NR_rt_sigpending
:
6110 ret
= get_errno(sigpending(&set
));
6111 if (!is_error(ret
)) {
6112 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
6114 host_to_target_sigset(p
, &set
);
6115 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
6119 #ifdef TARGET_NR_sigsuspend
6120 case TARGET_NR_sigsuspend
:
6123 #if defined(TARGET_ALPHA)
6124 abi_ulong mask
= arg1
;
6125 target_to_host_old_sigset(&set
, &mask
);
6127 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6129 target_to_host_old_sigset(&set
, p
);
6130 unlock_user(p
, arg1
, 0);
6132 ret
= get_errno(sigsuspend(&set
));
6136 case TARGET_NR_rt_sigsuspend
:
6139 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6141 target_to_host_sigset(&set
, p
);
6142 unlock_user(p
, arg1
, 0);
6143 ret
= get_errno(sigsuspend(&set
));
6146 case TARGET_NR_rt_sigtimedwait
:
6149 struct timespec uts
, *puts
;
6152 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6154 target_to_host_sigset(&set
, p
);
6155 unlock_user(p
, arg1
, 0);
6158 target_to_host_timespec(puts
, arg3
);
6162 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6163 if (!is_error(ret
) && arg2
) {
6164 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
6166 host_to_target_siginfo(p
, &uinfo
);
6167 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6171 case TARGET_NR_rt_sigqueueinfo
:
6174 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6176 target_to_host_siginfo(&uinfo
, p
);
6177 unlock_user(p
, arg1
, 0);
6178 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6181 #ifdef TARGET_NR_sigreturn
6182 case TARGET_NR_sigreturn
:
6183 /* NOTE: ret is eax, so not transcoding must be done */
6184 ret
= do_sigreturn(cpu_env
);
6187 case TARGET_NR_rt_sigreturn
:
6188 /* NOTE: ret is eax, so not transcoding must be done */
6189 ret
= do_rt_sigreturn(cpu_env
);
6191 case TARGET_NR_sethostname
:
6192 if (!(p
= lock_user_string(arg1
)))
6194 ret
= get_errno(sethostname(p
, arg2
));
6195 unlock_user(p
, arg1
, 0);
6197 case TARGET_NR_setrlimit
:
6199 int resource
= target_to_host_resource(arg1
);
6200 struct target_rlimit
*target_rlim
;
6202 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6204 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6205 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6206 unlock_user_struct(target_rlim
, arg2
, 0);
6207 ret
= get_errno(setrlimit(resource
, &rlim
));
6210 case TARGET_NR_getrlimit
:
6212 int resource
= target_to_host_resource(arg1
);
6213 struct target_rlimit
*target_rlim
;
6216 ret
= get_errno(getrlimit(resource
, &rlim
));
6217 if (!is_error(ret
)) {
6218 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6220 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6221 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6222 unlock_user_struct(target_rlim
, arg2
, 1);
6226 case TARGET_NR_getrusage
:
6228 struct rusage rusage
;
6229 ret
= get_errno(getrusage(arg1
, &rusage
));
6230 if (!is_error(ret
)) {
6231 host_to_target_rusage(arg2
, &rusage
);
6235 case TARGET_NR_gettimeofday
:
6238 ret
= get_errno(gettimeofday(&tv
, NULL
));
6239 if (!is_error(ret
)) {
6240 if (copy_to_user_timeval(arg1
, &tv
))
6245 case TARGET_NR_settimeofday
:
6248 if (copy_from_user_timeval(&tv
, arg1
))
6250 ret
= get_errno(settimeofday(&tv
, NULL
));
6253 #if defined(TARGET_NR_select)
6254 case TARGET_NR_select
:
6255 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6256 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6259 struct target_sel_arg_struct
*sel
;
6260 abi_ulong inp
, outp
, exp
, tvp
;
6263 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6265 nsel
= tswapal(sel
->n
);
6266 inp
= tswapal(sel
->inp
);
6267 outp
= tswapal(sel
->outp
);
6268 exp
= tswapal(sel
->exp
);
6269 tvp
= tswapal(sel
->tvp
);
6270 unlock_user_struct(sel
, arg1
, 0);
6271 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6276 #ifdef TARGET_NR_pselect6
6277 case TARGET_NR_pselect6
:
6279 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6280 fd_set rfds
, wfds
, efds
;
6281 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6282 struct timespec ts
, *ts_ptr
;
6285 * The 6th arg is actually two args smashed together,
6286 * so we cannot use the C library.
6294 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6295 target_sigset_t
*target_sigset
;
6303 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6307 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6311 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6317 * This takes a timespec, and not a timeval, so we cannot
6318 * use the do_select() helper ...
6321 if (target_to_host_timespec(&ts
, ts_addr
)) {
6329 /* Extract the two packed args for the sigset */
6332 sig
.size
= _NSIG
/ 8;
6334 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6338 arg_sigset
= tswapal(arg7
[0]);
6339 arg_sigsize
= tswapal(arg7
[1]);
6340 unlock_user(arg7
, arg6
, 0);
6344 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6345 /* Like the kernel, we enforce correct size sigsets */
6346 ret
= -TARGET_EINVAL
;
6349 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6350 sizeof(*target_sigset
), 1);
6351 if (!target_sigset
) {
6354 target_to_host_sigset(&set
, target_sigset
);
6355 unlock_user(target_sigset
, arg_sigset
, 0);
6363 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6366 if (!is_error(ret
)) {
6367 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6369 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6371 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6374 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6380 case TARGET_NR_symlink
:
6383 p
= lock_user_string(arg1
);
6384 p2
= lock_user_string(arg2
);
6386 ret
= -TARGET_EFAULT
;
6388 ret
= get_errno(symlink(p
, p2
));
6389 unlock_user(p2
, arg2
, 0);
6390 unlock_user(p
, arg1
, 0);
6393 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6394 case TARGET_NR_symlinkat
:
6397 p
= lock_user_string(arg1
);
6398 p2
= lock_user_string(arg3
);
6400 ret
= -TARGET_EFAULT
;
6402 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6403 unlock_user(p2
, arg3
, 0);
6404 unlock_user(p
, arg1
, 0);
6408 #ifdef TARGET_NR_oldlstat
6409 case TARGET_NR_oldlstat
:
6412 case TARGET_NR_readlink
:
6415 p
= lock_user_string(arg1
);
6416 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6418 ret
= -TARGET_EFAULT
;
6420 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6421 char real
[PATH_MAX
];
6422 temp
= realpath(exec_path
,real
);
6423 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6424 snprintf((char *)p2
, arg3
, "%s", real
);
6427 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6429 unlock_user(p2
, arg2
, ret
);
6430 unlock_user(p
, arg1
, 0);
6433 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6434 case TARGET_NR_readlinkat
:
6437 p
= lock_user_string(arg2
);
6438 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6440 ret
= -TARGET_EFAULT
;
6442 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6443 unlock_user(p2
, arg3
, ret
);
6444 unlock_user(p
, arg2
, 0);
6448 #ifdef TARGET_NR_uselib
6449 case TARGET_NR_uselib
:
6452 #ifdef TARGET_NR_swapon
6453 case TARGET_NR_swapon
:
6454 if (!(p
= lock_user_string(arg1
)))
6456 ret
= get_errno(swapon(p
, arg2
));
6457 unlock_user(p
, arg1
, 0);
6460 case TARGET_NR_reboot
:
6461 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
6462 /* arg4 must be ignored in all other cases */
6463 p
= lock_user_string(arg4
);
6467 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
6468 unlock_user(p
, arg4
, 0);
6470 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
6473 #ifdef TARGET_NR_readdir
6474 case TARGET_NR_readdir
:
6477 #ifdef TARGET_NR_mmap
6478 case TARGET_NR_mmap
:
6479 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6480 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6481 || defined(TARGET_S390X)
6484 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6485 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6493 unlock_user(v
, arg1
, 0);
6494 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6495 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6499 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6500 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6506 #ifdef TARGET_NR_mmap2
6507 case TARGET_NR_mmap2
:
6509 #define MMAP_SHIFT 12
6511 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6512 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6514 arg6
<< MMAP_SHIFT
));
6517 case TARGET_NR_munmap
:
6518 ret
= get_errno(target_munmap(arg1
, arg2
));
6520 case TARGET_NR_mprotect
:
6522 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6523 /* Special hack to detect libc making the stack executable. */
6524 if ((arg3
& PROT_GROWSDOWN
)
6525 && arg1
>= ts
->info
->stack_limit
6526 && arg1
<= ts
->info
->start_stack
) {
6527 arg3
&= ~PROT_GROWSDOWN
;
6528 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6529 arg1
= ts
->info
->stack_limit
;
6532 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6534 #ifdef TARGET_NR_mremap
6535 case TARGET_NR_mremap
:
6536 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6539 /* ??? msync/mlock/munlock are broken for softmmu. */
6540 #ifdef TARGET_NR_msync
6541 case TARGET_NR_msync
:
6542 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6545 #ifdef TARGET_NR_mlock
6546 case TARGET_NR_mlock
:
6547 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6550 #ifdef TARGET_NR_munlock
6551 case TARGET_NR_munlock
:
6552 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6555 #ifdef TARGET_NR_mlockall
6556 case TARGET_NR_mlockall
:
6557 ret
= get_errno(mlockall(arg1
));
6560 #ifdef TARGET_NR_munlockall
6561 case TARGET_NR_munlockall
:
6562 ret
= get_errno(munlockall());
6565 case TARGET_NR_truncate
:
6566 if (!(p
= lock_user_string(arg1
)))
6568 ret
= get_errno(truncate(p
, arg2
));
6569 unlock_user(p
, arg1
, 0);
6571 case TARGET_NR_ftruncate
:
6572 ret
= get_errno(ftruncate(arg1
, arg2
));
6574 case TARGET_NR_fchmod
:
6575 ret
= get_errno(fchmod(arg1
, arg2
));
6577 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6578 case TARGET_NR_fchmodat
:
6579 if (!(p
= lock_user_string(arg2
)))
6581 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
6582 unlock_user(p
, arg2
, 0);
6585 case TARGET_NR_getpriority
:
6586 /* Note that negative values are valid for getpriority, so we must
6587 differentiate based on errno settings. */
6589 ret
= getpriority(arg1
, arg2
);
6590 if (ret
== -1 && errno
!= 0) {
6591 ret
= -host_to_target_errno(errno
);
6595 /* Return value is the unbiased priority. Signal no error. */
6596 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6598 /* Return value is a biased priority to avoid negative numbers. */
6602 case TARGET_NR_setpriority
:
6603 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6605 #ifdef TARGET_NR_profil
6606 case TARGET_NR_profil
:
6609 case TARGET_NR_statfs
:
6610 if (!(p
= lock_user_string(arg1
)))
6612 ret
= get_errno(statfs(path(p
), &stfs
));
6613 unlock_user(p
, arg1
, 0);
6615 if (!is_error(ret
)) {
6616 struct target_statfs
*target_stfs
;
6618 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6620 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6621 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6622 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6623 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6624 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6625 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6626 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6627 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6628 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6629 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6630 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6631 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6632 unlock_user_struct(target_stfs
, arg2
, 1);
6635 case TARGET_NR_fstatfs
:
6636 ret
= get_errno(fstatfs(arg1
, &stfs
));
6637 goto convert_statfs
;
6638 #ifdef TARGET_NR_statfs64
6639 case TARGET_NR_statfs64
:
6640 if (!(p
= lock_user_string(arg1
)))
6642 ret
= get_errno(statfs(path(p
), &stfs
));
6643 unlock_user(p
, arg1
, 0);
6645 if (!is_error(ret
)) {
6646 struct target_statfs64
*target_stfs
;
6648 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6650 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6651 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6652 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6653 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6654 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6655 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6656 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6657 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6658 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6659 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6660 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6661 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6662 unlock_user_struct(target_stfs
, arg3
, 1);
6665 case TARGET_NR_fstatfs64
:
6666 ret
= get_errno(fstatfs(arg1
, &stfs
));
6667 goto convert_statfs64
;
6669 #ifdef TARGET_NR_ioperm
6670 case TARGET_NR_ioperm
:
6673 #ifdef TARGET_NR_socketcall
6674 case TARGET_NR_socketcall
:
6675 ret
= do_socketcall(arg1
, arg2
);
6678 #ifdef TARGET_NR_accept
6679 case TARGET_NR_accept
:
6680 ret
= do_accept(arg1
, arg2
, arg3
);
6683 #ifdef TARGET_NR_bind
6684 case TARGET_NR_bind
:
6685 ret
= do_bind(arg1
, arg2
, arg3
);
6688 #ifdef TARGET_NR_connect
6689 case TARGET_NR_connect
:
6690 ret
= do_connect(arg1
, arg2
, arg3
);
6693 #ifdef TARGET_NR_getpeername
6694 case TARGET_NR_getpeername
:
6695 ret
= do_getpeername(arg1
, arg2
, arg3
);
6698 #ifdef TARGET_NR_getsockname
6699 case TARGET_NR_getsockname
:
6700 ret
= do_getsockname(arg1
, arg2
, arg3
);
6703 #ifdef TARGET_NR_getsockopt
6704 case TARGET_NR_getsockopt
:
6705 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6708 #ifdef TARGET_NR_listen
6709 case TARGET_NR_listen
:
6710 ret
= get_errno(listen(arg1
, arg2
));
6713 #ifdef TARGET_NR_recv
6714 case TARGET_NR_recv
:
6715 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6718 #ifdef TARGET_NR_recvfrom
6719 case TARGET_NR_recvfrom
:
6720 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6723 #ifdef TARGET_NR_recvmsg
6724 case TARGET_NR_recvmsg
:
6725 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6728 #ifdef TARGET_NR_send
6729 case TARGET_NR_send
:
6730 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6733 #ifdef TARGET_NR_sendmsg
6734 case TARGET_NR_sendmsg
:
6735 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6738 #ifdef TARGET_NR_sendto
6739 case TARGET_NR_sendto
:
6740 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6743 #ifdef TARGET_NR_shutdown
6744 case TARGET_NR_shutdown
:
6745 ret
= get_errno(shutdown(arg1
, arg2
));
6748 #ifdef TARGET_NR_socket
6749 case TARGET_NR_socket
:
6750 ret
= do_socket(arg1
, arg2
, arg3
);
6753 #ifdef TARGET_NR_socketpair
6754 case TARGET_NR_socketpair
:
6755 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6758 #ifdef TARGET_NR_setsockopt
6759 case TARGET_NR_setsockopt
:
6760 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6764 case TARGET_NR_syslog
:
6765 if (!(p
= lock_user_string(arg2
)))
6767 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6768 unlock_user(p
, arg2
, 0);
6771 case TARGET_NR_setitimer
:
6773 struct itimerval value
, ovalue
, *pvalue
;
6777 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6778 || copy_from_user_timeval(&pvalue
->it_value
,
6779 arg2
+ sizeof(struct target_timeval
)))
6784 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6785 if (!is_error(ret
) && arg3
) {
6786 if (copy_to_user_timeval(arg3
,
6787 &ovalue
.it_interval
)
6788 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6794 case TARGET_NR_getitimer
:
6796 struct itimerval value
;
6798 ret
= get_errno(getitimer(arg1
, &value
));
6799 if (!is_error(ret
) && arg2
) {
6800 if (copy_to_user_timeval(arg2
,
6802 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6808 case TARGET_NR_stat
:
6809 if (!(p
= lock_user_string(arg1
)))
6811 ret
= get_errno(stat(path(p
), &st
));
6812 unlock_user(p
, arg1
, 0);
6814 case TARGET_NR_lstat
:
6815 if (!(p
= lock_user_string(arg1
)))
6817 ret
= get_errno(lstat(path(p
), &st
));
6818 unlock_user(p
, arg1
, 0);
6820 case TARGET_NR_fstat
:
6822 ret
= get_errno(fstat(arg1
, &st
));
6824 if (!is_error(ret
)) {
6825 struct target_stat
*target_st
;
6827 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6829 memset(target_st
, 0, sizeof(*target_st
));
6830 __put_user(st
.st_dev
, &target_st
->st_dev
);
6831 __put_user(st
.st_ino
, &target_st
->st_ino
);
6832 __put_user(st
.st_mode
, &target_st
->st_mode
);
6833 __put_user(st
.st_uid
, &target_st
->st_uid
);
6834 __put_user(st
.st_gid
, &target_st
->st_gid
);
6835 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6836 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6837 __put_user(st
.st_size
, &target_st
->st_size
);
6838 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6839 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6840 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6841 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6842 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6843 unlock_user_struct(target_st
, arg2
, 1);
6847 #ifdef TARGET_NR_olduname
6848 case TARGET_NR_olduname
:
6851 #ifdef TARGET_NR_iopl
6852 case TARGET_NR_iopl
:
6855 case TARGET_NR_vhangup
:
6856 ret
= get_errno(vhangup());
6858 #ifdef TARGET_NR_idle
6859 case TARGET_NR_idle
:
6862 #ifdef TARGET_NR_syscall
6863 case TARGET_NR_syscall
:
6864 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6865 arg6
, arg7
, arg8
, 0);
6868 case TARGET_NR_wait4
:
6871 abi_long status_ptr
= arg2
;
6872 struct rusage rusage
, *rusage_ptr
;
6873 abi_ulong target_rusage
= arg4
;
6875 rusage_ptr
= &rusage
;
6878 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6879 if (!is_error(ret
)) {
6880 if (status_ptr
&& ret
) {
6881 status
= host_to_target_waitstatus(status
);
6882 if (put_user_s32(status
, status_ptr
))
6886 host_to_target_rusage(target_rusage
, &rusage
);
6890 #ifdef TARGET_NR_swapoff
6891 case TARGET_NR_swapoff
:
6892 if (!(p
= lock_user_string(arg1
)))
6894 ret
= get_errno(swapoff(p
));
6895 unlock_user(p
, arg1
, 0);
6898 case TARGET_NR_sysinfo
:
6900 struct target_sysinfo
*target_value
;
6901 struct sysinfo value
;
6902 ret
= get_errno(sysinfo(&value
));
6903 if (!is_error(ret
) && arg1
)
6905 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6907 __put_user(value
.uptime
, &target_value
->uptime
);
6908 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6909 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6910 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6911 __put_user(value
.totalram
, &target_value
->totalram
);
6912 __put_user(value
.freeram
, &target_value
->freeram
);
6913 __put_user(value
.sharedram
, &target_value
->sharedram
);
6914 __put_user(value
.bufferram
, &target_value
->bufferram
);
6915 __put_user(value
.totalswap
, &target_value
->totalswap
);
6916 __put_user(value
.freeswap
, &target_value
->freeswap
);
6917 __put_user(value
.procs
, &target_value
->procs
);
6918 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6919 __put_user(value
.freehigh
, &target_value
->freehigh
);
6920 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6921 unlock_user_struct(target_value
, arg1
, 1);
6925 #ifdef TARGET_NR_ipc
6927 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6930 #ifdef TARGET_NR_semget
6931 case TARGET_NR_semget
:
6932 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6935 #ifdef TARGET_NR_semop
6936 case TARGET_NR_semop
:
6937 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6940 #ifdef TARGET_NR_semctl
6941 case TARGET_NR_semctl
:
6942 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6945 #ifdef TARGET_NR_msgctl
6946 case TARGET_NR_msgctl
:
6947 ret
= do_msgctl(arg1
, arg2
, arg3
);
6950 #ifdef TARGET_NR_msgget
6951 case TARGET_NR_msgget
:
6952 ret
= get_errno(msgget(arg1
, arg2
));
6955 #ifdef TARGET_NR_msgrcv
6956 case TARGET_NR_msgrcv
:
6957 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6960 #ifdef TARGET_NR_msgsnd
6961 case TARGET_NR_msgsnd
:
6962 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6965 #ifdef TARGET_NR_shmget
6966 case TARGET_NR_shmget
:
6967 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6970 #ifdef TARGET_NR_shmctl
6971 case TARGET_NR_shmctl
:
6972 ret
= do_shmctl(arg1
, arg2
, arg3
);
6975 #ifdef TARGET_NR_shmat
6976 case TARGET_NR_shmat
:
6977 ret
= do_shmat(arg1
, arg2
, arg3
);
6980 #ifdef TARGET_NR_shmdt
6981 case TARGET_NR_shmdt
:
6982 ret
= do_shmdt(arg1
);
6985 case TARGET_NR_fsync
:
6986 ret
= get_errno(fsync(arg1
));
6988 case TARGET_NR_clone
:
6989 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6990 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6991 #elif defined(TARGET_CRIS)
6992 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6993 #elif defined(TARGET_MICROBLAZE)
6994 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
6995 #elif defined(TARGET_S390X)
6996 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6998 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
7001 #ifdef __NR_exit_group
7002 /* new thread calls */
7003 case TARGET_NR_exit_group
:
7007 gdb_exit(cpu_env
, arg1
);
7008 ret
= get_errno(exit_group(arg1
));
7011 case TARGET_NR_setdomainname
:
7012 if (!(p
= lock_user_string(arg1
)))
7014 ret
= get_errno(setdomainname(p
, arg2
));
7015 unlock_user(p
, arg1
, 0);
7017 case TARGET_NR_uname
:
7018 /* no need to transcode because we use the linux syscall */
7020 struct new_utsname
* buf
;
7022 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
7024 ret
= get_errno(sys_uname(buf
));
7025 if (!is_error(ret
)) {
7026 /* Overrite the native machine name with whatever is being
7028 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
7029 /* Allow the user to override the reported release. */
7030 if (qemu_uname_release
&& *qemu_uname_release
)
7031 strcpy (buf
->release
, qemu_uname_release
);
7033 unlock_user_struct(buf
, arg1
, 1);
7037 case TARGET_NR_modify_ldt
:
7038 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
7040 #if !defined(TARGET_X86_64)
7041 case TARGET_NR_vm86old
:
7043 case TARGET_NR_vm86
:
7044 ret
= do_vm86(cpu_env
, arg1
, arg2
);
7048 case TARGET_NR_adjtimex
:
7050 #ifdef TARGET_NR_create_module
7051 case TARGET_NR_create_module
:
7053 case TARGET_NR_init_module
:
7054 case TARGET_NR_delete_module
:
7055 #ifdef TARGET_NR_get_kernel_syms
7056 case TARGET_NR_get_kernel_syms
:
7059 case TARGET_NR_quotactl
:
7061 case TARGET_NR_getpgid
:
7062 ret
= get_errno(getpgid(arg1
));
7064 case TARGET_NR_fchdir
:
7065 ret
= get_errno(fchdir(arg1
));
7067 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7068 case TARGET_NR_bdflush
:
7071 #ifdef TARGET_NR_sysfs
7072 case TARGET_NR_sysfs
:
7075 case TARGET_NR_personality
:
7076 ret
= get_errno(personality(arg1
));
7078 #ifdef TARGET_NR_afs_syscall
7079 case TARGET_NR_afs_syscall
:
7082 #ifdef TARGET_NR__llseek /* Not on alpha */
7083 case TARGET_NR__llseek
:
7086 #if !defined(__NR_llseek)
7087 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
7089 ret
= get_errno(res
);
7094 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
7096 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
7102 case TARGET_NR_getdents
:
7103 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
7105 struct target_dirent
*target_dirp
;
7106 struct linux_dirent
*dirp
;
7107 abi_long count
= arg3
;
7109 dirp
= malloc(count
);
7111 ret
= -TARGET_ENOMEM
;
7115 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7116 if (!is_error(ret
)) {
7117 struct linux_dirent
*de
;
7118 struct target_dirent
*tde
;
7120 int reclen
, treclen
;
7121 int count1
, tnamelen
;
7125 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7129 reclen
= de
->d_reclen
;
7130 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
7131 assert(tnamelen
>= 0);
7132 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
7133 assert(count1
+ treclen
<= count
);
7134 tde
->d_reclen
= tswap16(treclen
);
7135 tde
->d_ino
= tswapal(de
->d_ino
);
7136 tde
->d_off
= tswapal(de
->d_off
);
7137 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
7138 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7140 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
7144 unlock_user(target_dirp
, arg2
, ret
);
7150 struct linux_dirent
*dirp
;
7151 abi_long count
= arg3
;
7153 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7155 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7156 if (!is_error(ret
)) {
7157 struct linux_dirent
*de
;
7162 reclen
= de
->d_reclen
;
7165 de
->d_reclen
= tswap16(reclen
);
7166 tswapls(&de
->d_ino
);
7167 tswapls(&de
->d_off
);
7168 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7172 unlock_user(dirp
, arg2
, ret
);
7176 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7177 case TARGET_NR_getdents64
:
7179 struct linux_dirent64
*dirp
;
7180 abi_long count
= arg3
;
7181 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7183 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7184 if (!is_error(ret
)) {
7185 struct linux_dirent64
*de
;
7190 reclen
= de
->d_reclen
;
7193 de
->d_reclen
= tswap16(reclen
);
7194 tswap64s((uint64_t *)&de
->d_ino
);
7195 tswap64s((uint64_t *)&de
->d_off
);
7196 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7200 unlock_user(dirp
, arg2
, ret
);
7203 #endif /* TARGET_NR_getdents64 */
7204 #if defined(TARGET_NR__newselect)
7205 case TARGET_NR__newselect
:
7206 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7209 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7210 # ifdef TARGET_NR_poll
7211 case TARGET_NR_poll
:
7213 # ifdef TARGET_NR_ppoll
7214 case TARGET_NR_ppoll
:
7217 struct target_pollfd
*target_pfd
;
7218 unsigned int nfds
= arg2
;
7223 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7227 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7228 for(i
= 0; i
< nfds
; i
++) {
7229 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7230 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7233 # ifdef TARGET_NR_ppoll
7234 if (num
== TARGET_NR_ppoll
) {
7235 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7236 target_sigset_t
*target_set
;
7237 sigset_t _set
, *set
= &_set
;
7240 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7241 unlock_user(target_pfd
, arg1
, 0);
7249 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7251 unlock_user(target_pfd
, arg1
, 0);
7254 target_to_host_sigset(set
, target_set
);
7259 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7261 if (!is_error(ret
) && arg3
) {
7262 host_to_target_timespec(arg3
, timeout_ts
);
7265 unlock_user(target_set
, arg4
, 0);
7269 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7271 if (!is_error(ret
)) {
7272 for(i
= 0; i
< nfds
; i
++) {
7273 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7276 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7280 case TARGET_NR_flock
:
7281 /* NOTE: the flock constant seems to be the same for every
7283 ret
= get_errno(flock(arg1
, arg2
));
7285 case TARGET_NR_readv
:
7287 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
7289 ret
= get_errno(readv(arg1
, vec
, arg3
));
7290 unlock_iovec(vec
, arg2
, arg3
, 1);
7292 ret
= -host_to_target_errno(errno
);
7296 case TARGET_NR_writev
:
7298 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
7300 ret
= get_errno(writev(arg1
, vec
, arg3
));
7301 unlock_iovec(vec
, arg2
, arg3
, 0);
7303 ret
= -host_to_target_errno(errno
);
7307 case TARGET_NR_getsid
:
7308 ret
= get_errno(getsid(arg1
));
7310 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7311 case TARGET_NR_fdatasync
:
7312 ret
= get_errno(fdatasync(arg1
));
7315 case TARGET_NR__sysctl
:
7316 /* We don't implement this, but ENOTDIR is always a safe
7318 ret
= -TARGET_ENOTDIR
;
7320 case TARGET_NR_sched_getaffinity
:
7322 unsigned int mask_size
;
7323 unsigned long *mask
;
7326 * sched_getaffinity needs multiples of ulong, so need to take
7327 * care of mismatches between target ulong and host ulong sizes.
7329 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7330 ret
= -TARGET_EINVAL
;
7333 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7335 mask
= alloca(mask_size
);
7336 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7338 if (!is_error(ret
)) {
7339 if (copy_to_user(arg3
, mask
, ret
)) {
7345 case TARGET_NR_sched_setaffinity
:
7347 unsigned int mask_size
;
7348 unsigned long *mask
;
7351 * sched_setaffinity needs multiples of ulong, so need to take
7352 * care of mismatches between target ulong and host ulong sizes.
7354 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7355 ret
= -TARGET_EINVAL
;
7358 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7360 mask
= alloca(mask_size
);
7361 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7364 memcpy(mask
, p
, arg2
);
7365 unlock_user_struct(p
, arg2
, 0);
7367 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7370 case TARGET_NR_sched_setparam
:
7372 struct sched_param
*target_schp
;
7373 struct sched_param schp
;
7375 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7377 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7378 unlock_user_struct(target_schp
, arg2
, 0);
7379 ret
= get_errno(sched_setparam(arg1
, &schp
));
7382 case TARGET_NR_sched_getparam
:
7384 struct sched_param
*target_schp
;
7385 struct sched_param schp
;
7386 ret
= get_errno(sched_getparam(arg1
, &schp
));
7387 if (!is_error(ret
)) {
7388 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7390 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7391 unlock_user_struct(target_schp
, arg2
, 1);
7395 case TARGET_NR_sched_setscheduler
:
7397 struct sched_param
*target_schp
;
7398 struct sched_param schp
;
7399 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7401 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7402 unlock_user_struct(target_schp
, arg3
, 0);
7403 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7406 case TARGET_NR_sched_getscheduler
:
7407 ret
= get_errno(sched_getscheduler(arg1
));
7409 case TARGET_NR_sched_yield
:
7410 ret
= get_errno(sched_yield());
7412 case TARGET_NR_sched_get_priority_max
:
7413 ret
= get_errno(sched_get_priority_max(arg1
));
7415 case TARGET_NR_sched_get_priority_min
:
7416 ret
= get_errno(sched_get_priority_min(arg1
));
7418 case TARGET_NR_sched_rr_get_interval
:
7421 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7422 if (!is_error(ret
)) {
7423 host_to_target_timespec(arg2
, &ts
);
7427 case TARGET_NR_nanosleep
:
7429 struct timespec req
, rem
;
7430 target_to_host_timespec(&req
, arg1
);
7431 ret
= get_errno(nanosleep(&req
, &rem
));
7432 if (is_error(ret
) && arg2
) {
7433 host_to_target_timespec(arg2
, &rem
);
7437 #ifdef TARGET_NR_query_module
7438 case TARGET_NR_query_module
:
7441 #ifdef TARGET_NR_nfsservctl
7442 case TARGET_NR_nfsservctl
:
7445 case TARGET_NR_prctl
:
7447 case PR_GET_PDEATHSIG
:
7450 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7451 if (!is_error(ret
) && arg2
7452 && put_user_ual(deathsig
, arg2
)) {
7460 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7464 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7466 unlock_user(name
, arg2
, 16);
7471 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7475 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7477 unlock_user(name
, arg2
, 0);
7482 /* Most prctl options have no pointer arguments */
7483 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7487 #ifdef TARGET_NR_arch_prctl
7488 case TARGET_NR_arch_prctl
:
7489 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7490 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7496 #ifdef TARGET_NR_pread64
7497 case TARGET_NR_pread64
:
7498 if (regpairs_aligned(cpu_env
)) {
7502 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7504 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7505 unlock_user(p
, arg2
, ret
);
7507 case TARGET_NR_pwrite64
:
7508 if (regpairs_aligned(cpu_env
)) {
7512 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7514 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7515 unlock_user(p
, arg2
, 0);
7518 case TARGET_NR_getcwd
:
7519 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7521 ret
= get_errno(sys_getcwd1(p
, arg2
));
7522 unlock_user(p
, arg1
, ret
);
7524 case TARGET_NR_capget
:
7526 case TARGET_NR_capset
:
7528 case TARGET_NR_sigaltstack
:
7529 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7530 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7531 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7532 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7538 #ifdef CONFIG_SENDFILE
7539 case TARGET_NR_sendfile
:
7544 ret
= get_user_sal(off
, arg3
);
7545 if (is_error(ret
)) {
7550 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7551 if (!is_error(ret
) && arg3
) {
7552 abi_long ret2
= put_user_sal(off
, arg3
);
7553 if (is_error(ret2
)) {
7559 #ifdef TARGET_NR_sendfile64
7560 case TARGET_NR_sendfile64
:
7565 ret
= get_user_s64(off
, arg3
);
7566 if (is_error(ret
)) {
7571 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
7572 if (!is_error(ret
) && arg3
) {
7573 abi_long ret2
= put_user_s64(off
, arg3
);
7574 if (is_error(ret2
)) {
7582 case TARGET_NR_sendfile
:
7583 #ifdef TARGET_NR_sendfile64:
7584 case TARGET_NR_sendfile64
:
7589 #ifdef TARGET_NR_getpmsg
7590 case TARGET_NR_getpmsg
:
7593 #ifdef TARGET_NR_putpmsg
7594 case TARGET_NR_putpmsg
:
7597 #ifdef TARGET_NR_vfork
7598 case TARGET_NR_vfork
:
7599 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7603 #ifdef TARGET_NR_ugetrlimit
7604 case TARGET_NR_ugetrlimit
:
7607 int resource
= target_to_host_resource(arg1
);
7608 ret
= get_errno(getrlimit(resource
, &rlim
));
7609 if (!is_error(ret
)) {
7610 struct target_rlimit
*target_rlim
;
7611 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7613 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7614 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7615 unlock_user_struct(target_rlim
, arg2
, 1);
7620 #ifdef TARGET_NR_truncate64
7621 case TARGET_NR_truncate64
:
7622 if (!(p
= lock_user_string(arg1
)))
7624 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7625 unlock_user(p
, arg1
, 0);
7628 #ifdef TARGET_NR_ftruncate64
7629 case TARGET_NR_ftruncate64
:
7630 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7633 #ifdef TARGET_NR_stat64
7634 case TARGET_NR_stat64
:
7635 if (!(p
= lock_user_string(arg1
)))
7637 ret
= get_errno(stat(path(p
), &st
));
7638 unlock_user(p
, arg1
, 0);
7640 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7643 #ifdef TARGET_NR_lstat64
7644 case TARGET_NR_lstat64
:
7645 if (!(p
= lock_user_string(arg1
)))
7647 ret
= get_errno(lstat(path(p
), &st
));
7648 unlock_user(p
, arg1
, 0);
7650 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7653 #ifdef TARGET_NR_fstat64
7654 case TARGET_NR_fstat64
:
7655 ret
= get_errno(fstat(arg1
, &st
));
7657 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7660 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7661 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7662 #ifdef TARGET_NR_fstatat64
7663 case TARGET_NR_fstatat64
:
7665 #ifdef TARGET_NR_newfstatat
7666 case TARGET_NR_newfstatat
:
7668 if (!(p
= lock_user_string(arg2
)))
7670 #ifdef __NR_fstatat64
7671 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
7673 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
7676 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7679 case TARGET_NR_lchown
:
7680 if (!(p
= lock_user_string(arg1
)))
7682 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7683 unlock_user(p
, arg1
, 0);
7685 #ifdef TARGET_NR_getuid
7686 case TARGET_NR_getuid
:
7687 ret
= get_errno(high2lowuid(getuid()));
7690 #ifdef TARGET_NR_getgid
7691 case TARGET_NR_getgid
:
7692 ret
= get_errno(high2lowgid(getgid()));
7695 #ifdef TARGET_NR_geteuid
7696 case TARGET_NR_geteuid
:
7697 ret
= get_errno(high2lowuid(geteuid()));
7700 #ifdef TARGET_NR_getegid
7701 case TARGET_NR_getegid
:
7702 ret
= get_errno(high2lowgid(getegid()));
7705 case TARGET_NR_setreuid
:
7706 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7708 case TARGET_NR_setregid
:
7709 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7711 case TARGET_NR_getgroups
:
7713 int gidsetsize
= arg1
;
7714 target_id
*target_grouplist
;
7718 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7719 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7720 if (gidsetsize
== 0)
7722 if (!is_error(ret
)) {
7723 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
7724 if (!target_grouplist
)
7726 for(i
= 0;i
< ret
; i
++)
7727 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7728 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7732 case TARGET_NR_setgroups
:
7734 int gidsetsize
= arg1
;
7735 target_id
*target_grouplist
;
7736 gid_t
*grouplist
= NULL
;
7739 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7740 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7741 if (!target_grouplist
) {
7742 ret
= -TARGET_EFAULT
;
7745 for (i
= 0; i
< gidsetsize
; i
++) {
7746 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7748 unlock_user(target_grouplist
, arg2
, 0);
7750 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7753 case TARGET_NR_fchown
:
7754 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7756 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7757 case TARGET_NR_fchownat
:
7758 if (!(p
= lock_user_string(arg2
)))
7760 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7761 unlock_user(p
, arg2
, 0);
7764 #ifdef TARGET_NR_setresuid
7765 case TARGET_NR_setresuid
:
7766 ret
= get_errno(setresuid(low2highuid(arg1
),
7768 low2highuid(arg3
)));
7771 #ifdef TARGET_NR_getresuid
7772 case TARGET_NR_getresuid
:
7774 uid_t ruid
, euid
, suid
;
7775 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7776 if (!is_error(ret
)) {
7777 if (put_user_u16(high2lowuid(ruid
), arg1
)
7778 || put_user_u16(high2lowuid(euid
), arg2
)
7779 || put_user_u16(high2lowuid(suid
), arg3
))
7785 #ifdef TARGET_NR_getresgid
7786 case TARGET_NR_setresgid
:
7787 ret
= get_errno(setresgid(low2highgid(arg1
),
7789 low2highgid(arg3
)));
7792 #ifdef TARGET_NR_getresgid
7793 case TARGET_NR_getresgid
:
7795 gid_t rgid
, egid
, sgid
;
7796 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7797 if (!is_error(ret
)) {
7798 if (put_user_u16(high2lowgid(rgid
), arg1
)
7799 || put_user_u16(high2lowgid(egid
), arg2
)
7800 || put_user_u16(high2lowgid(sgid
), arg3
))
7806 case TARGET_NR_chown
:
7807 if (!(p
= lock_user_string(arg1
)))
7809 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7810 unlock_user(p
, arg1
, 0);
7812 case TARGET_NR_setuid
:
7813 ret
= get_errno(setuid(low2highuid(arg1
)));
7815 case TARGET_NR_setgid
:
7816 ret
= get_errno(setgid(low2highgid(arg1
)));
7818 case TARGET_NR_setfsuid
:
7819 ret
= get_errno(setfsuid(arg1
));
7821 case TARGET_NR_setfsgid
:
7822 ret
= get_errno(setfsgid(arg1
));
7825 #ifdef TARGET_NR_lchown32
7826 case TARGET_NR_lchown32
:
7827 if (!(p
= lock_user_string(arg1
)))
7829 ret
= get_errno(lchown(p
, arg2
, arg3
));
7830 unlock_user(p
, arg1
, 0);
7833 #ifdef TARGET_NR_getuid32
7834 case TARGET_NR_getuid32
:
7835 ret
= get_errno(getuid());
7839 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7840 /* Alpha specific */
7841 case TARGET_NR_getxuid
:
7845 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7847 ret
= get_errno(getuid());
7850 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7851 /* Alpha specific */
7852 case TARGET_NR_getxgid
:
7856 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7858 ret
= get_errno(getgid());
7861 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7862 /* Alpha specific */
7863 case TARGET_NR_osf_getsysinfo
:
7864 ret
= -TARGET_EOPNOTSUPP
;
7866 case TARGET_GSI_IEEE_FP_CONTROL
:
7868 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7870 /* Copied from linux ieee_fpcr_to_swcr. */
7871 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7872 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7873 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7874 | SWCR_TRAP_ENABLE_DZE
7875 | SWCR_TRAP_ENABLE_OVF
);
7876 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7877 | SWCR_TRAP_ENABLE_INE
);
7878 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7879 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7881 if (put_user_u64 (swcr
, arg2
))
7887 /* case GSI_IEEE_STATE_AT_SIGNAL:
7888 -- Not implemented in linux kernel.
7890 -- Retrieves current unaligned access state; not much used.
7892 -- Retrieves implver information; surely not used.
7894 -- Grabs a copy of the HWRPB; surely not used.
7899 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7900 /* Alpha specific */
7901 case TARGET_NR_osf_setsysinfo
:
7902 ret
= -TARGET_EOPNOTSUPP
;
7904 case TARGET_SSI_IEEE_FP_CONTROL
:
7906 uint64_t swcr
, fpcr
, orig_fpcr
;
7908 if (get_user_u64 (swcr
, arg2
)) {
7911 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7912 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7914 /* Copied from linux ieee_swcr_to_fpcr. */
7915 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7916 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7917 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7918 | SWCR_TRAP_ENABLE_DZE
7919 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7920 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7921 | SWCR_TRAP_ENABLE_INE
)) << 57;
7922 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7923 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7925 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7930 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7932 uint64_t exc
, fpcr
, orig_fpcr
;
7935 if (get_user_u64(exc
, arg2
)) {
7939 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7941 /* We only add to the exception status here. */
7942 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
7944 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7947 /* Old exceptions are not signaled. */
7948 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7950 /* If any exceptions set by this call,
7951 and are unmasked, send a signal. */
7953 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
7954 si_code
= TARGET_FPE_FLTRES
;
7956 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
7957 si_code
= TARGET_FPE_FLTUND
;
7959 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
7960 si_code
= TARGET_FPE_FLTOVF
;
7962 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
7963 si_code
= TARGET_FPE_FLTDIV
;
7965 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
7966 si_code
= TARGET_FPE_FLTINV
;
7969 target_siginfo_t info
;
7970 info
.si_signo
= SIGFPE
;
7972 info
.si_code
= si_code
;
7973 info
._sifields
._sigfault
._addr
7974 = ((CPUArchState
*)cpu_env
)->pc
;
7975 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
7980 /* case SSI_NVPAIRS:
7981 -- Used with SSIN_UACPROC to enable unaligned accesses.
7982 case SSI_IEEE_STATE_AT_SIGNAL:
7983 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7984 -- Not implemented in linux kernel
7989 #ifdef TARGET_NR_osf_sigprocmask
7990 /* Alpha specific. */
7991 case TARGET_NR_osf_sigprocmask
:
7995 sigset_t set
, oldset
;
7998 case TARGET_SIG_BLOCK
:
8001 case TARGET_SIG_UNBLOCK
:
8004 case TARGET_SIG_SETMASK
:
8008 ret
= -TARGET_EINVAL
;
8012 target_to_host_old_sigset(&set
, &mask
);
8013 sigprocmask(how
, &set
, &oldset
);
8014 host_to_target_old_sigset(&mask
, &oldset
);
8020 #ifdef TARGET_NR_getgid32
8021 case TARGET_NR_getgid32
:
8022 ret
= get_errno(getgid());
8025 #ifdef TARGET_NR_geteuid32
8026 case TARGET_NR_geteuid32
:
8027 ret
= get_errno(geteuid());
8030 #ifdef TARGET_NR_getegid32
8031 case TARGET_NR_getegid32
:
8032 ret
= get_errno(getegid());
8035 #ifdef TARGET_NR_setreuid32
8036 case TARGET_NR_setreuid32
:
8037 ret
= get_errno(setreuid(arg1
, arg2
));
8040 #ifdef TARGET_NR_setregid32
8041 case TARGET_NR_setregid32
:
8042 ret
= get_errno(setregid(arg1
, arg2
));
8045 #ifdef TARGET_NR_getgroups32
8046 case TARGET_NR_getgroups32
:
8048 int gidsetsize
= arg1
;
8049 uint32_t *target_grouplist
;
8053 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8054 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
8055 if (gidsetsize
== 0)
8057 if (!is_error(ret
)) {
8058 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
8059 if (!target_grouplist
) {
8060 ret
= -TARGET_EFAULT
;
8063 for(i
= 0;i
< ret
; i
++)
8064 target_grouplist
[i
] = tswap32(grouplist
[i
]);
8065 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
8070 #ifdef TARGET_NR_setgroups32
8071 case TARGET_NR_setgroups32
:
8073 int gidsetsize
= arg1
;
8074 uint32_t *target_grouplist
;
8078 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
8079 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
8080 if (!target_grouplist
) {
8081 ret
= -TARGET_EFAULT
;
8084 for(i
= 0;i
< gidsetsize
; i
++)
8085 grouplist
[i
] = tswap32(target_grouplist
[i
]);
8086 unlock_user(target_grouplist
, arg2
, 0);
8087 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
8091 #ifdef TARGET_NR_fchown32
8092 case TARGET_NR_fchown32
:
8093 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
8096 #ifdef TARGET_NR_setresuid32
8097 case TARGET_NR_setresuid32
:
8098 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
8101 #ifdef TARGET_NR_getresuid32
8102 case TARGET_NR_getresuid32
:
8104 uid_t ruid
, euid
, suid
;
8105 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
8106 if (!is_error(ret
)) {
8107 if (put_user_u32(ruid
, arg1
)
8108 || put_user_u32(euid
, arg2
)
8109 || put_user_u32(suid
, arg3
))
8115 #ifdef TARGET_NR_setresgid32
8116 case TARGET_NR_setresgid32
:
8117 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
8120 #ifdef TARGET_NR_getresgid32
8121 case TARGET_NR_getresgid32
:
8123 gid_t rgid
, egid
, sgid
;
8124 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
8125 if (!is_error(ret
)) {
8126 if (put_user_u32(rgid
, arg1
)
8127 || put_user_u32(egid
, arg2
)
8128 || put_user_u32(sgid
, arg3
))
8134 #ifdef TARGET_NR_chown32
8135 case TARGET_NR_chown32
:
8136 if (!(p
= lock_user_string(arg1
)))
8138 ret
= get_errno(chown(p
, arg2
, arg3
));
8139 unlock_user(p
, arg1
, 0);
8142 #ifdef TARGET_NR_setuid32
8143 case TARGET_NR_setuid32
:
8144 ret
= get_errno(setuid(arg1
));
8147 #ifdef TARGET_NR_setgid32
8148 case TARGET_NR_setgid32
:
8149 ret
= get_errno(setgid(arg1
));
8152 #ifdef TARGET_NR_setfsuid32
8153 case TARGET_NR_setfsuid32
:
8154 ret
= get_errno(setfsuid(arg1
));
8157 #ifdef TARGET_NR_setfsgid32
8158 case TARGET_NR_setfsgid32
:
8159 ret
= get_errno(setfsgid(arg1
));
8163 case TARGET_NR_pivot_root
:
8165 #ifdef TARGET_NR_mincore
8166 case TARGET_NR_mincore
:
8169 ret
= -TARGET_EFAULT
;
8170 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
8172 if (!(p
= lock_user_string(arg3
)))
8174 ret
= get_errno(mincore(a
, arg2
, p
));
8175 unlock_user(p
, arg3
, ret
);
8177 unlock_user(a
, arg1
, 0);
8181 #ifdef TARGET_NR_arm_fadvise64_64
8182 case TARGET_NR_arm_fadvise64_64
:
8185 * arm_fadvise64_64 looks like fadvise64_64 but
8186 * with different argument order
8194 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8195 #ifdef TARGET_NR_fadvise64_64
8196 case TARGET_NR_fadvise64_64
:
8198 #ifdef TARGET_NR_fadvise64
8199 case TARGET_NR_fadvise64
:
8203 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8204 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8205 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8206 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8210 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8213 #ifdef TARGET_NR_madvise
8214 case TARGET_NR_madvise
:
8215 /* A straight passthrough may not be safe because qemu sometimes
8216 turns private file-backed mappings into anonymous mappings.
8217 This will break MADV_DONTNEED.
8218 This is a hint, so ignoring and returning success is ok. */
8222 #if TARGET_ABI_BITS == 32
8223 case TARGET_NR_fcntl64
:
8227 struct target_flock64
*target_fl
;
8229 struct target_eabi_flock64
*target_efl
;
8232 cmd
= target_to_host_fcntl_cmd(arg2
);
8233 if (cmd
== -TARGET_EINVAL
) {
8239 case TARGET_F_GETLK64
:
8241 if (((CPUARMState
*)cpu_env
)->eabi
) {
8242 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8244 fl
.l_type
= tswap16(target_efl
->l_type
);
8245 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8246 fl
.l_start
= tswap64(target_efl
->l_start
);
8247 fl
.l_len
= tswap64(target_efl
->l_len
);
8248 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8249 unlock_user_struct(target_efl
, arg3
, 0);
8253 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8255 fl
.l_type
= tswap16(target_fl
->l_type
);
8256 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8257 fl
.l_start
= tswap64(target_fl
->l_start
);
8258 fl
.l_len
= tswap64(target_fl
->l_len
);
8259 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8260 unlock_user_struct(target_fl
, arg3
, 0);
8262 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8265 if (((CPUARMState
*)cpu_env
)->eabi
) {
8266 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8268 target_efl
->l_type
= tswap16(fl
.l_type
);
8269 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8270 target_efl
->l_start
= tswap64(fl
.l_start
);
8271 target_efl
->l_len
= tswap64(fl
.l_len
);
8272 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8273 unlock_user_struct(target_efl
, arg3
, 1);
8277 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8279 target_fl
->l_type
= tswap16(fl
.l_type
);
8280 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8281 target_fl
->l_start
= tswap64(fl
.l_start
);
8282 target_fl
->l_len
= tswap64(fl
.l_len
);
8283 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8284 unlock_user_struct(target_fl
, arg3
, 1);
8289 case TARGET_F_SETLK64
:
8290 case TARGET_F_SETLKW64
:
8292 if (((CPUARMState
*)cpu_env
)->eabi
) {
8293 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8295 fl
.l_type
= tswap16(target_efl
->l_type
);
8296 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8297 fl
.l_start
= tswap64(target_efl
->l_start
);
8298 fl
.l_len
= tswap64(target_efl
->l_len
);
8299 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8300 unlock_user_struct(target_efl
, arg3
, 0);
8304 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8306 fl
.l_type
= tswap16(target_fl
->l_type
);
8307 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8308 fl
.l_start
= tswap64(target_fl
->l_start
);
8309 fl
.l_len
= tswap64(target_fl
->l_len
);
8310 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8311 unlock_user_struct(target_fl
, arg3
, 0);
8313 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8316 ret
= do_fcntl(arg1
, arg2
, arg3
);
8322 #ifdef TARGET_NR_cacheflush
8323 case TARGET_NR_cacheflush
:
8324 /* self-modifying code is handled automatically, so nothing needed */
8328 #ifdef TARGET_NR_security
8329 case TARGET_NR_security
:
8332 #ifdef TARGET_NR_getpagesize
8333 case TARGET_NR_getpagesize
:
8334 ret
= TARGET_PAGE_SIZE
;
8337 case TARGET_NR_gettid
:
8338 ret
= get_errno(gettid());
8340 #ifdef TARGET_NR_readahead
8341 case TARGET_NR_readahead
:
8342 #if TARGET_ABI_BITS == 32
8343 if (regpairs_aligned(cpu_env
)) {
8348 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8350 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8355 #ifdef TARGET_NR_setxattr
8356 case TARGET_NR_listxattr
:
8357 case TARGET_NR_llistxattr
:
8361 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8363 ret
= -TARGET_EFAULT
;
8367 p
= lock_user_string(arg1
);
8369 if (num
== TARGET_NR_listxattr
) {
8370 ret
= get_errno(listxattr(p
, b
, arg3
));
8372 ret
= get_errno(llistxattr(p
, b
, arg3
));
8375 ret
= -TARGET_EFAULT
;
8377 unlock_user(p
, arg1
, 0);
8378 unlock_user(b
, arg2
, arg3
);
8381 case TARGET_NR_flistxattr
:
8385 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8387 ret
= -TARGET_EFAULT
;
8391 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8392 unlock_user(b
, arg2
, arg3
);
8395 case TARGET_NR_setxattr
:
8396 case TARGET_NR_lsetxattr
:
8398 void *p
, *n
, *v
= 0;
8400 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8402 ret
= -TARGET_EFAULT
;
8406 p
= lock_user_string(arg1
);
8407 n
= lock_user_string(arg2
);
8409 if (num
== TARGET_NR_setxattr
) {
8410 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8412 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8415 ret
= -TARGET_EFAULT
;
8417 unlock_user(p
, arg1
, 0);
8418 unlock_user(n
, arg2
, 0);
8419 unlock_user(v
, arg3
, 0);
8422 case TARGET_NR_fsetxattr
:
8426 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8428 ret
= -TARGET_EFAULT
;
8432 n
= lock_user_string(arg2
);
8434 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8436 ret
= -TARGET_EFAULT
;
8438 unlock_user(n
, arg2
, 0);
8439 unlock_user(v
, arg3
, 0);
8442 case TARGET_NR_getxattr
:
8443 case TARGET_NR_lgetxattr
:
8445 void *p
, *n
, *v
= 0;
8447 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8449 ret
= -TARGET_EFAULT
;
8453 p
= lock_user_string(arg1
);
8454 n
= lock_user_string(arg2
);
8456 if (num
== TARGET_NR_getxattr
) {
8457 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8459 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8462 ret
= -TARGET_EFAULT
;
8464 unlock_user(p
, arg1
, 0);
8465 unlock_user(n
, arg2
, 0);
8466 unlock_user(v
, arg3
, arg4
);
8469 case TARGET_NR_fgetxattr
:
8473 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8475 ret
= -TARGET_EFAULT
;
8479 n
= lock_user_string(arg2
);
8481 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8483 ret
= -TARGET_EFAULT
;
8485 unlock_user(n
, arg2
, 0);
8486 unlock_user(v
, arg3
, arg4
);
8489 case TARGET_NR_removexattr
:
8490 case TARGET_NR_lremovexattr
:
8493 p
= lock_user_string(arg1
);
8494 n
= lock_user_string(arg2
);
8496 if (num
== TARGET_NR_removexattr
) {
8497 ret
= get_errno(removexattr(p
, n
));
8499 ret
= get_errno(lremovexattr(p
, n
));
8502 ret
= -TARGET_EFAULT
;
8504 unlock_user(p
, arg1
, 0);
8505 unlock_user(n
, arg2
, 0);
8508 case TARGET_NR_fremovexattr
:
8511 n
= lock_user_string(arg2
);
8513 ret
= get_errno(fremovexattr(arg1
, n
));
8515 ret
= -TARGET_EFAULT
;
8517 unlock_user(n
, arg2
, 0);
8521 #endif /* CONFIG_ATTR */
8522 #ifdef TARGET_NR_set_thread_area
8523 case TARGET_NR_set_thread_area
:
8524 #if defined(TARGET_MIPS)
8525 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8528 #elif defined(TARGET_CRIS)
8530 ret
= -TARGET_EINVAL
;
8532 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8536 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8537 ret
= do_set_thread_area(cpu_env
, arg1
);
8540 goto unimplemented_nowarn
;
8543 #ifdef TARGET_NR_get_thread_area
8544 case TARGET_NR_get_thread_area
:
8545 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8546 ret
= do_get_thread_area(cpu_env
, arg1
);
8548 goto unimplemented_nowarn
;
8551 #ifdef TARGET_NR_getdomainname
8552 case TARGET_NR_getdomainname
:
8553 goto unimplemented_nowarn
;
8556 #ifdef TARGET_NR_clock_gettime
8557 case TARGET_NR_clock_gettime
:
8560 ret
= get_errno(clock_gettime(arg1
, &ts
));
8561 if (!is_error(ret
)) {
8562 host_to_target_timespec(arg2
, &ts
);
8567 #ifdef TARGET_NR_clock_getres
8568 case TARGET_NR_clock_getres
:
8571 ret
= get_errno(clock_getres(arg1
, &ts
));
8572 if (!is_error(ret
)) {
8573 host_to_target_timespec(arg2
, &ts
);
8578 #ifdef TARGET_NR_clock_nanosleep
8579 case TARGET_NR_clock_nanosleep
:
8582 target_to_host_timespec(&ts
, arg3
);
8583 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8585 host_to_target_timespec(arg4
, &ts
);
8590 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8591 case TARGET_NR_set_tid_address
:
8592 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8596 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8597 case TARGET_NR_tkill
:
8598 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8602 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8603 case TARGET_NR_tgkill
:
8604 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8605 target_to_host_signal(arg3
)));
8609 #ifdef TARGET_NR_set_robust_list
8610 case TARGET_NR_set_robust_list
:
8611 goto unimplemented_nowarn
;
8614 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8615 case TARGET_NR_utimensat
:
8617 struct timespec
*tsp
, ts
[2];
8621 target_to_host_timespec(ts
, arg3
);
8622 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8626 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8628 if (!(p
= lock_user_string(arg2
))) {
8629 ret
= -TARGET_EFAULT
;
8632 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8633 unlock_user(p
, arg2
, 0);
8638 #if defined(CONFIG_USE_NPTL)
8639 case TARGET_NR_futex
:
8640 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8643 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8644 case TARGET_NR_inotify_init
:
8645 ret
= get_errno(sys_inotify_init());
8648 #ifdef CONFIG_INOTIFY1
8649 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8650 case TARGET_NR_inotify_init1
:
8651 ret
= get_errno(sys_inotify_init1(arg1
));
8655 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8656 case TARGET_NR_inotify_add_watch
:
8657 p
= lock_user_string(arg2
);
8658 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8659 unlock_user(p
, arg2
, 0);
8662 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8663 case TARGET_NR_inotify_rm_watch
:
8664 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8668 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8669 case TARGET_NR_mq_open
:
8671 struct mq_attr posix_mq_attr
;
8673 p
= lock_user_string(arg1
- 1);
8675 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8676 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8677 unlock_user (p
, arg1
, 0);
8681 case TARGET_NR_mq_unlink
:
8682 p
= lock_user_string(arg1
- 1);
8683 ret
= get_errno(mq_unlink(p
));
8684 unlock_user (p
, arg1
, 0);
8687 case TARGET_NR_mq_timedsend
:
8691 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8693 target_to_host_timespec(&ts
, arg5
);
8694 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8695 host_to_target_timespec(arg5
, &ts
);
8698 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8699 unlock_user (p
, arg2
, arg3
);
8703 case TARGET_NR_mq_timedreceive
:
8708 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8710 target_to_host_timespec(&ts
, arg5
);
8711 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8712 host_to_target_timespec(arg5
, &ts
);
8715 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8716 unlock_user (p
, arg2
, arg3
);
8718 put_user_u32(prio
, arg4
);
8722 /* Not implemented for now... */
8723 /* case TARGET_NR_mq_notify: */
8726 case TARGET_NR_mq_getsetattr
:
8728 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8731 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8732 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8735 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8736 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8743 #ifdef CONFIG_SPLICE
8744 #ifdef TARGET_NR_tee
8747 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8751 #ifdef TARGET_NR_splice
8752 case TARGET_NR_splice
:
8754 loff_t loff_in
, loff_out
;
8755 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8757 get_user_u64(loff_in
, arg2
);
8758 ploff_in
= &loff_in
;
8761 get_user_u64(loff_out
, arg2
);
8762 ploff_out
= &loff_out
;
8764 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8768 #ifdef TARGET_NR_vmsplice
8769 case TARGET_NR_vmsplice
:
8771 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
8773 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
8774 unlock_iovec(vec
, arg2
, arg3
, 0);
8776 ret
= -host_to_target_errno(errno
);
8781 #endif /* CONFIG_SPLICE */
8782 #ifdef CONFIG_EVENTFD
8783 #if defined(TARGET_NR_eventfd)
8784 case TARGET_NR_eventfd
:
8785 ret
= get_errno(eventfd(arg1
, 0));
8788 #if defined(TARGET_NR_eventfd2)
8789 case TARGET_NR_eventfd2
:
8790 ret
= get_errno(eventfd(arg1
, arg2
));
8793 #endif /* CONFIG_EVENTFD */
8794 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8795 case TARGET_NR_fallocate
:
8796 #if TARGET_ABI_BITS == 32
8797 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8798 target_offset64(arg5
, arg6
)));
8800 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
8804 #if defined(CONFIG_SYNC_FILE_RANGE)
8805 #if defined(TARGET_NR_sync_file_range)
8806 case TARGET_NR_sync_file_range
:
8807 #if TARGET_ABI_BITS == 32
8808 #if defined(TARGET_MIPS)
8809 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8810 target_offset64(arg5
, arg6
), arg7
));
8812 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
8813 target_offset64(arg4
, arg5
), arg6
));
8814 #endif /* !TARGET_MIPS */
8816 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
8820 #if defined(TARGET_NR_sync_file_range2)
8821 case TARGET_NR_sync_file_range2
:
8822 /* This is like sync_file_range but the arguments are reordered */
8823 #if TARGET_ABI_BITS == 32
8824 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8825 target_offset64(arg5
, arg6
), arg2
));
8827 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
8832 #if defined(CONFIG_EPOLL)
8833 #if defined(TARGET_NR_epoll_create)
8834 case TARGET_NR_epoll_create
:
8835 ret
= get_errno(epoll_create(arg1
));
8838 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8839 case TARGET_NR_epoll_create1
:
8840 ret
= get_errno(epoll_create1(arg1
));
8843 #if defined(TARGET_NR_epoll_ctl)
8844 case TARGET_NR_epoll_ctl
:
8846 struct epoll_event ep
;
8847 struct epoll_event
*epp
= 0;
8849 struct target_epoll_event
*target_ep
;
8850 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
8853 ep
.events
= tswap32(target_ep
->events
);
8854 /* The epoll_data_t union is just opaque data to the kernel,
8855 * so we transfer all 64 bits across and need not worry what
8856 * actual data type it is.
8858 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
8859 unlock_user_struct(target_ep
, arg4
, 0);
8862 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
8867 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8868 #define IMPLEMENT_EPOLL_PWAIT
8870 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8871 #if defined(TARGET_NR_epoll_wait)
8872 case TARGET_NR_epoll_wait
:
8874 #if defined(IMPLEMENT_EPOLL_PWAIT)
8875 case TARGET_NR_epoll_pwait
:
8878 struct target_epoll_event
*target_ep
;
8879 struct epoll_event
*ep
;
8881 int maxevents
= arg3
;
8884 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8885 maxevents
* sizeof(struct target_epoll_event
), 1);
8890 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8893 #if defined(IMPLEMENT_EPOLL_PWAIT)
8894 case TARGET_NR_epoll_pwait
:
8896 target_sigset_t
*target_set
;
8897 sigset_t _set
, *set
= &_set
;
8900 target_set
= lock_user(VERIFY_READ
, arg5
,
8901 sizeof(target_sigset_t
), 1);
8903 unlock_user(target_ep
, arg2
, 0);
8906 target_to_host_sigset(set
, target_set
);
8907 unlock_user(target_set
, arg5
, 0);
8912 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8916 #if defined(TARGET_NR_epoll_wait)
8917 case TARGET_NR_epoll_wait
:
8918 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8922 ret
= -TARGET_ENOSYS
;
8924 if (!is_error(ret
)) {
8926 for (i
= 0; i
< ret
; i
++) {
8927 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8928 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8931 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8936 #ifdef TARGET_NR_prlimit64
8937 case TARGET_NR_prlimit64
:
8939 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8940 struct target_rlimit64
*target_rnew
, *target_rold
;
8941 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
8943 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
8946 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
8947 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
8948 unlock_user_struct(target_rnew
, arg3
, 0);
8952 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
8953 if (!is_error(ret
) && arg4
) {
8954 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
8957 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
8958 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
8959 unlock_user_struct(target_rold
, arg4
, 1);
8964 #ifdef TARGET_NR_gethostname
8965 case TARGET_NR_gethostname
:
8967 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8969 ret
= get_errno(gethostname(name
, arg2
));
8970 unlock_user(name
, arg1
, arg2
);
8972 ret
= -TARGET_EFAULT
;
8979 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8980 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8981 unimplemented_nowarn
:
8983 ret
= -TARGET_ENOSYS
;
8988 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8991 print_syscall_ret(num
, ret
);
8994 ret
= -TARGET_EFAULT
;