4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
31 #include <sys/types.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
45 int __clone2(int (*fn
)(void *), void *child_stack_base
,
46 size_t stack_size
, int flags
, void *arg
, ...);
48 #include <sys/socket.h>
52 #include <sys/times.h>
55 #include <sys/statfs.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #include "qemu-xattr.h"
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include "linux_loop.h"
100 #include "cpu-uname.h"
104 #if defined(CONFIG_USE_NPTL)
105 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
106 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
108 /* XXX: Hardcode the above values. */
109 #define CLONE_NPTL_FLAGS2 0
114 //#include <linux/msdos_fs.h>
115 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
116 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generate a tiny static wrapper around a raw syscall(2) invocation.
 *
 * _syscallN(type, name, type1, arg1, ...) expands to
 *     static type name(type1 arg1, ...) { return syscall(__NR_<name>, ...); }
 *
 * Used for host syscalls that the host libc does not (reliably) provide a
 * wrapper for.  The return/errno convention is that of syscall(2): -1 on
 * failure with errno set. */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
174 #define __NR_sys_uname __NR_uname
175 #define __NR_sys_faccessat __NR_faccessat
176 #define __NR_sys_fchmodat __NR_fchmodat
177 #define __NR_sys_fchownat __NR_fchownat
178 #define __NR_sys_fstatat64 __NR_fstatat64
179 #define __NR_sys_futimesat __NR_futimesat
180 #define __NR_sys_getcwd1 __NR_getcwd
181 #define __NR_sys_getdents __NR_getdents
182 #define __NR_sys_getdents64 __NR_getdents64
183 #define __NR_sys_getpriority __NR_getpriority
184 #define __NR_sys_linkat __NR_linkat
185 #define __NR_sys_mkdirat __NR_mkdirat
186 #define __NR_sys_mknodat __NR_mknodat
187 #define __NR_sys_newfstatat __NR_newfstatat
188 #define __NR_sys_openat __NR_openat
189 #define __NR_sys_readlinkat __NR_readlinkat
190 #define __NR_sys_renameat __NR_renameat
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_symlinkat __NR_symlinkat
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_tgkill __NR_tgkill
195 #define __NR_sys_tkill __NR_tkill
196 #define __NR_sys_unlinkat __NR_unlinkat
197 #define __NR_sys_utimensat __NR_utimensat
198 #define __NR_sys_futex __NR_futex
199 #define __NR_sys_inotify_init __NR_inotify_init
200 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
201 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
203 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
205 #define __NR__llseek __NR_lseek
209 _syscall0(int, gettid
)
211 /* This is a replacement for the host gettid() and must return a host
213 static int gettid(void) {
217 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
218 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 _syscall2(int, sys_getpriority
, int, which
, int, who
);
222 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
223 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
224 loff_t
*, res
, uint
, wh
);
226 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
227 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
228 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
229 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
231 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
232 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
234 #ifdef __NR_exit_group
235 _syscall1(int,exit_group
,int,error_code
)
237 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
238 _syscall1(int,set_tid_address
,int *,tidptr
)
240 #if defined(CONFIG_USE_NPTL)
241 #if defined(TARGET_NR_futex) && defined(__NR_futex)
242 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
243 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
248 unsigned long *, user_mask_ptr
);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
251 unsigned long *, user_mask_ptr
);
252 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
255 static bitmask_transtbl fcntl_flags_tbl
[] = {
256 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
257 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
258 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
259 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
260 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
261 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
262 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
263 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
264 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
265 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
266 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
267 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
268 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
269 #if defined(O_DIRECT)
270 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
272 #if defined(O_NOATIME)
273 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
275 #if defined(O_CLOEXEC)
276 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
279 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
281 /* Don't terminate the list prematurely on 64-bit host+guest. */
282 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
283 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* Copy one field of the host struct utsname into the target-facing
 * struct new_utsname, truncating at __NEW_UTS_LEN and always writing a
 * terminating NUL (the destination buffers are __NEW_UTS_LEN + 1 bytes). */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* uname(2) replacement: fill a kernel-style struct new_utsname from the
 * host's uname(2) result.  Returns 0 on success, -1 (with errno from
 * uname) on failure. */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  /* domainname is a GNU extension to struct utsname */
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);
}

#undef COPY_UTSNAME_FIELD
/* getcwd replacement matching the kernel getcwd syscall convention:
 * on success returns the number of bytes placed in buf *including* the
 * terminating NUL; on failure returns -1 with errno set by getcwd(3). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
333 * Host system seems to have atfile syscall stubs available. We
334 * now enable them one by one as specified by target syscall_nr.h.
337 #ifdef TARGET_NR_faccessat
338 static int sys_faccessat(int dirfd
, const char *pathname
, int mode
)
340 return (faccessat(dirfd
, pathname
, mode
, 0));
343 #ifdef TARGET_NR_fchmodat
344 static int sys_fchmodat(int dirfd
, const char *pathname
, mode_t mode
)
346 return (fchmodat(dirfd
, pathname
, mode
, 0));
349 #if defined(TARGET_NR_fchownat)
350 static int sys_fchownat(int dirfd
, const char *pathname
, uid_t owner
,
351 gid_t group
, int flags
)
353 return (fchownat(dirfd
, pathname
, owner
, group
, flags
));
356 #ifdef __NR_fstatat64
357 static int sys_fstatat64(int dirfd
, const char *pathname
, struct stat
*buf
,
360 return (fstatat(dirfd
, pathname
, buf
, flags
));
363 #ifdef __NR_newfstatat
364 static int sys_newfstatat(int dirfd
, const char *pathname
, struct stat
*buf
,
367 return (fstatat(dirfd
, pathname
, buf
, flags
));
370 #ifdef TARGET_NR_futimesat
371 static int sys_futimesat(int dirfd
, const char *pathname
,
372 const struct timeval times
[2])
374 return (futimesat(dirfd
, pathname
, times
));
377 #ifdef TARGET_NR_linkat
378 static int sys_linkat(int olddirfd
, const char *oldpath
,
379 int newdirfd
, const char *newpath
, int flags
)
381 return (linkat(olddirfd
, oldpath
, newdirfd
, newpath
, flags
));
384 #ifdef TARGET_NR_mkdirat
385 static int sys_mkdirat(int dirfd
, const char *pathname
, mode_t mode
)
387 return (mkdirat(dirfd
, pathname
, mode
));
390 #ifdef TARGET_NR_mknodat
391 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
394 return (mknodat(dirfd
, pathname
, mode
, dev
));
397 #ifdef TARGET_NR_openat
398 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
401 * open(2) has extra parameter 'mode' when called with
404 if ((flags
& O_CREAT
) != 0) {
405 return (openat(dirfd
, pathname
, flags
, mode
));
407 return (openat(dirfd
, pathname
, flags
));
410 #ifdef TARGET_NR_readlinkat
411 static int sys_readlinkat(int dirfd
, const char *pathname
, char *buf
, size_t bufsiz
)
413 return (readlinkat(dirfd
, pathname
, buf
, bufsiz
));
416 #ifdef TARGET_NR_renameat
417 static int sys_renameat(int olddirfd
, const char *oldpath
,
418 int newdirfd
, const char *newpath
)
420 return (renameat(olddirfd
, oldpath
, newdirfd
, newpath
));
423 #ifdef TARGET_NR_symlinkat
424 static int sys_symlinkat(const char *oldpath
, int newdirfd
, const char *newpath
)
426 return (symlinkat(oldpath
, newdirfd
, newpath
));
429 #ifdef TARGET_NR_unlinkat
430 static int sys_unlinkat(int dirfd
, const char *pathname
, int flags
)
432 return (unlinkat(dirfd
, pathname
, flags
));
435 #else /* !CONFIG_ATFILE */
438 * Try direct syscalls instead
440 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
441 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
443 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
444 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
446 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
447 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
448 uid_t
,owner
,gid_t
,group
,int,flags
)
450 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
451 defined(__NR_fstatat64)
452 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
453 struct stat
*,buf
,int,flags
)
455 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
456 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
457 const struct timeval
*,times
)
459 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
460 defined(__NR_newfstatat)
461 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
462 struct stat
*,buf
,int,flags
)
464 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
465 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
466 int,newdirfd
,const char *,newpath
,int,flags
)
468 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
469 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
471 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
472 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
473 mode_t
,mode
,dev_t
,dev
)
475 #if defined(TARGET_NR_openat) && defined(__NR_openat)
476 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
478 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
479 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
480 char *,buf
,size_t,bufsize
)
482 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
483 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
484 int,newdirfd
,const char *,newpath
)
486 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
487 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
488 int,newdirfd
,const char *,newpath
)
490 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
491 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
494 #endif /* CONFIG_ATFILE */
496 #ifdef CONFIG_UTIMENSAT
497 static int sys_utimensat(int dirfd
, const char *pathname
,
498 const struct timespec times
[2], int flags
)
500 if (pathname
== NULL
)
501 return futimens(dirfd
, times
);
503 return utimensat(dirfd
, pathname
, times
, flags
);
506 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
507 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
508 const struct timespec
*,tsp
,int,flags
)
510 #endif /* CONFIG_UTIMENSAT */
512 #ifdef CONFIG_INOTIFY
513 #include <sys/inotify.h>
515 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin host wrapper for inotify_init(2).  Returns a new inotify file
 * descriptor on success, -1 with errno set on failure. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
521 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin host wrapper for inotify_add_watch(2).  Returns a watch
 * descriptor on success, -1 with errno set on failure. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
527 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin host wrapper for inotify_rm_watch(2).  Returns 0 on success,
 * -1 with errno set on failure. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
533 #ifdef CONFIG_INOTIFY1
534 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin host wrapper for inotify_init1(2); flags may include
 * IN_NONBLOCK / IN_CLOEXEC.  Returns a new inotify file descriptor on
 * success, -1 with errno set on failure. */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
542 /* Userspace can usually survive runtime without inotify */
543 #undef TARGET_NR_inotify_init
544 #undef TARGET_NR_inotify_init1
545 #undef TARGET_NR_inotify_add_watch
546 #undef TARGET_NR_inotify_rm_watch
547 #endif /* CONFIG_INOTIFY */
549 #if defined(TARGET_NR_ppoll)
551 # define __NR_ppoll -1
553 #define __NR_sys_ppoll __NR_ppoll
554 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
555 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
559 #if defined(TARGET_NR_pselect6)
560 #ifndef __NR_pselect6
561 # define __NR_pselect6 -1
563 #define __NR_sys_pselect6 __NR_pselect6
564 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
565 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
568 #if defined(TARGET_NR_prlimit64)
569 #ifndef __NR_prlimit64
570 # define __NR_prlimit64 -1
572 #define __NR_sys_prlimit64 __NR_prlimit64
573 /* The glibc rlimit structure may not be that used by the underlying syscall */
574 struct host_rlimit64
{
578 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
579 const struct host_rlimit64
*, new_limit
,
580 struct host_rlimit64
*, old_limit
)
583 extern int personality(int);
584 extern int flock(int, int);
585 extern int setfsuid(int);
586 extern int setfsgid(int);
587 extern int setgroups(int, gid_t
*);
589 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
591 static inline int regpairs_aligned(void *cpu_env
) {
592 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
594 #elif defined(TARGET_MIPS)
595 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
597 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
600 #define ERRNO_TABLE_SIZE 1200
602 /* target_to_host_errno_table[] is initialized from
603 * host_to_target_errno_table[] in syscall_init(). */
604 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
608 * This list is the union of errno values overridden in asm-<arch>/errno.h
609 * minus the errnos that are not actually generic to all archs.
611 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
612 [EIDRM
] = TARGET_EIDRM
,
613 [ECHRNG
] = TARGET_ECHRNG
,
614 [EL2NSYNC
] = TARGET_EL2NSYNC
,
615 [EL3HLT
] = TARGET_EL3HLT
,
616 [EL3RST
] = TARGET_EL3RST
,
617 [ELNRNG
] = TARGET_ELNRNG
,
618 [EUNATCH
] = TARGET_EUNATCH
,
619 [ENOCSI
] = TARGET_ENOCSI
,
620 [EL2HLT
] = TARGET_EL2HLT
,
621 [EDEADLK
] = TARGET_EDEADLK
,
622 [ENOLCK
] = TARGET_ENOLCK
,
623 [EBADE
] = TARGET_EBADE
,
624 [EBADR
] = TARGET_EBADR
,
625 [EXFULL
] = TARGET_EXFULL
,
626 [ENOANO
] = TARGET_ENOANO
,
627 [EBADRQC
] = TARGET_EBADRQC
,
628 [EBADSLT
] = TARGET_EBADSLT
,
629 [EBFONT
] = TARGET_EBFONT
,
630 [ENOSTR
] = TARGET_ENOSTR
,
631 [ENODATA
] = TARGET_ENODATA
,
632 [ETIME
] = TARGET_ETIME
,
633 [ENOSR
] = TARGET_ENOSR
,
634 [ENONET
] = TARGET_ENONET
,
635 [ENOPKG
] = TARGET_ENOPKG
,
636 [EREMOTE
] = TARGET_EREMOTE
,
637 [ENOLINK
] = TARGET_ENOLINK
,
638 [EADV
] = TARGET_EADV
,
639 [ESRMNT
] = TARGET_ESRMNT
,
640 [ECOMM
] = TARGET_ECOMM
,
641 [EPROTO
] = TARGET_EPROTO
,
642 [EDOTDOT
] = TARGET_EDOTDOT
,
643 [EMULTIHOP
] = TARGET_EMULTIHOP
,
644 [EBADMSG
] = TARGET_EBADMSG
,
645 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
646 [EOVERFLOW
] = TARGET_EOVERFLOW
,
647 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
648 [EBADFD
] = TARGET_EBADFD
,
649 [EREMCHG
] = TARGET_EREMCHG
,
650 [ELIBACC
] = TARGET_ELIBACC
,
651 [ELIBBAD
] = TARGET_ELIBBAD
,
652 [ELIBSCN
] = TARGET_ELIBSCN
,
653 [ELIBMAX
] = TARGET_ELIBMAX
,
654 [ELIBEXEC
] = TARGET_ELIBEXEC
,
655 [EILSEQ
] = TARGET_EILSEQ
,
656 [ENOSYS
] = TARGET_ENOSYS
,
657 [ELOOP
] = TARGET_ELOOP
,
658 [ERESTART
] = TARGET_ERESTART
,
659 [ESTRPIPE
] = TARGET_ESTRPIPE
,
660 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
661 [EUSERS
] = TARGET_EUSERS
,
662 [ENOTSOCK
] = TARGET_ENOTSOCK
,
663 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
664 [EMSGSIZE
] = TARGET_EMSGSIZE
,
665 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
666 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
667 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
668 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
669 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
670 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
671 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
672 [EADDRINUSE
] = TARGET_EADDRINUSE
,
673 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
674 [ENETDOWN
] = TARGET_ENETDOWN
,
675 [ENETUNREACH
] = TARGET_ENETUNREACH
,
676 [ENETRESET
] = TARGET_ENETRESET
,
677 [ECONNABORTED
] = TARGET_ECONNABORTED
,
678 [ECONNRESET
] = TARGET_ECONNRESET
,
679 [ENOBUFS
] = TARGET_ENOBUFS
,
680 [EISCONN
] = TARGET_EISCONN
,
681 [ENOTCONN
] = TARGET_ENOTCONN
,
682 [EUCLEAN
] = TARGET_EUCLEAN
,
683 [ENOTNAM
] = TARGET_ENOTNAM
,
684 [ENAVAIL
] = TARGET_ENAVAIL
,
685 [EISNAM
] = TARGET_EISNAM
,
686 [EREMOTEIO
] = TARGET_EREMOTEIO
,
687 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
688 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
689 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
690 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
691 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
692 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
693 [EALREADY
] = TARGET_EALREADY
,
694 [EINPROGRESS
] = TARGET_EINPROGRESS
,
695 [ESTALE
] = TARGET_ESTALE
,
696 [ECANCELED
] = TARGET_ECANCELED
,
697 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
698 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
700 [ENOKEY
] = TARGET_ENOKEY
,
703 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
706 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
709 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
712 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
714 #ifdef ENOTRECOVERABLE
715 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
719 static inline int host_to_target_errno(int err
)
721 if(host_to_target_errno_table
[err
])
722 return host_to_target_errno_table
[err
];
726 static inline int target_to_host_errno(int err
)
728 if (target_to_host_errno_table
[err
])
729 return target_to_host_errno_table
[err
];
733 static inline abi_long
get_errno(abi_long ret
)
736 return -host_to_target_errno(errno
);
741 static inline int is_error(abi_long ret
)
743 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
746 char *target_strerror(int err
)
748 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
751 return strerror(target_to_host_errno(err
));
754 static abi_ulong target_brk
;
755 static abi_ulong target_original_brk
;
756 static abi_ulong brk_page
;
758 void target_set_brk(abi_ulong new_brk
)
760 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
761 brk_page
= HOST_PAGE_ALIGN(target_brk
);
764 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
765 #define DEBUGF_BRK(message, args...)
767 /* do_brk() must return target values and target errnos. */
768 abi_long
do_brk(abi_ulong new_brk
)
770 abi_long mapped_addr
;
773 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
776 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
779 if (new_brk
< target_original_brk
) {
780 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
785 /* If the new brk is less than the highest page reserved to the
786 * target heap allocation, set it and we're almost done... */
787 if (new_brk
<= brk_page
) {
788 /* Heap contents are initialized to zero, as for anonymous
790 if (new_brk
> target_brk
) {
791 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
793 target_brk
= new_brk
;
794 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
798 /* We need to allocate more memory after the brk... Note that
799 * we don't use MAP_FIXED because that will map over the top of
800 * any existing mapping (like the one with the host libc or qemu
801 * itself); instead we treat "mapped but at wrong address" as
802 * a failure and unmap again.
804 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
805 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
806 PROT_READ
|PROT_WRITE
,
807 MAP_ANON
|MAP_PRIVATE
, 0, 0));
809 if (mapped_addr
== brk_page
) {
810 /* Heap contents are initialized to zero, as for anonymous
811 * mapped pages. Technically the new pages are already
812 * initialized to zero since they *are* anonymous mapped
813 * pages, however we have to take care with the contents that
814 * come from the remaining part of the previous page: it may
815 * contains garbage data due to a previous heap usage (grown
817 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
819 target_brk
= new_brk
;
820 brk_page
= HOST_PAGE_ALIGN(target_brk
);
821 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
824 } else if (mapped_addr
!= -1) {
825 /* Mapped but at wrong address, meaning there wasn't actually
826 * enough space for this brk.
828 target_munmap(mapped_addr
, new_alloc_size
);
830 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
833 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
836 #if defined(TARGET_ALPHA)
837 /* We (partially) emulate OSF/1 on Alpha, which requires we
838 return a proper errno, not an unchanged brk value. */
839 return -TARGET_ENOMEM
;
841 /* For everything else, return the previous break. */
845 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
846 abi_ulong target_fds_addr
,
850 abi_ulong b
, *target_fds
;
852 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
853 if (!(target_fds
= lock_user(VERIFY_READ
,
855 sizeof(abi_ulong
) * nw
,
857 return -TARGET_EFAULT
;
861 for (i
= 0; i
< nw
; i
++) {
862 /* grab the abi_ulong */
863 __get_user(b
, &target_fds
[i
]);
864 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
865 /* check the bit inside the abi_ulong */
872 unlock_user(target_fds
, target_fds_addr
, 0);
877 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
878 abi_ulong target_fds_addr
,
881 if (target_fds_addr
) {
882 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
883 return -TARGET_EFAULT
;
891 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
897 abi_ulong
*target_fds
;
899 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
900 if (!(target_fds
= lock_user(VERIFY_WRITE
,
902 sizeof(abi_ulong
) * nw
,
904 return -TARGET_EFAULT
;
907 for (i
= 0; i
< nw
; i
++) {
909 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
910 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
913 __put_user(v
, &target_fds
[i
]);
916 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
921 #if defined(__alpha__)
927 static inline abi_long
host_to_target_clock_t(long ticks
)
929 #if HOST_HZ == TARGET_HZ
932 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
936 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
937 const struct rusage
*rusage
)
939 struct target_rusage
*target_rusage
;
941 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
942 return -TARGET_EFAULT
;
943 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
944 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
945 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
946 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
947 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
948 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
949 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
950 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
951 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
952 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
953 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
954 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
955 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
956 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
957 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
958 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
959 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
960 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
961 unlock_user_struct(target_rusage
, target_addr
, 1);
966 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
968 abi_ulong target_rlim_swap
;
971 target_rlim_swap
= tswapal(target_rlim
);
972 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
973 return RLIM_INFINITY
;
975 result
= target_rlim_swap
;
976 if (target_rlim_swap
!= (rlim_t
)result
)
977 return RLIM_INFINITY
;
982 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
984 abi_ulong target_rlim_swap
;
987 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
988 target_rlim_swap
= TARGET_RLIM_INFINITY
;
990 target_rlim_swap
= rlim
;
991 result
= tswapal(target_rlim_swap
);
996 static inline int target_to_host_resource(int code
)
999 case TARGET_RLIMIT_AS
:
1001 case TARGET_RLIMIT_CORE
:
1003 case TARGET_RLIMIT_CPU
:
1005 case TARGET_RLIMIT_DATA
:
1007 case TARGET_RLIMIT_FSIZE
:
1008 return RLIMIT_FSIZE
;
1009 case TARGET_RLIMIT_LOCKS
:
1010 return RLIMIT_LOCKS
;
1011 case TARGET_RLIMIT_MEMLOCK
:
1012 return RLIMIT_MEMLOCK
;
1013 case TARGET_RLIMIT_MSGQUEUE
:
1014 return RLIMIT_MSGQUEUE
;
1015 case TARGET_RLIMIT_NICE
:
1017 case TARGET_RLIMIT_NOFILE
:
1018 return RLIMIT_NOFILE
;
1019 case TARGET_RLIMIT_NPROC
:
1020 return RLIMIT_NPROC
;
1021 case TARGET_RLIMIT_RSS
:
1023 case TARGET_RLIMIT_RTPRIO
:
1024 return RLIMIT_RTPRIO
;
1025 case TARGET_RLIMIT_SIGPENDING
:
1026 return RLIMIT_SIGPENDING
;
1027 case TARGET_RLIMIT_STACK
:
1028 return RLIMIT_STACK
;
1034 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1035 abi_ulong target_tv_addr
)
1037 struct target_timeval
*target_tv
;
1039 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1040 return -TARGET_EFAULT
;
1042 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1043 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1045 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1050 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1051 const struct timeval
*tv
)
1053 struct target_timeval
*target_tv
;
1055 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1056 return -TARGET_EFAULT
;
1058 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1059 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1061 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1066 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1069 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1070 abi_ulong target_mq_attr_addr
)
1072 struct target_mq_attr
*target_mq_attr
;
1074 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1075 target_mq_attr_addr
, 1))
1076 return -TARGET_EFAULT
;
1078 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1079 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1080 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1081 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1083 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1088 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1089 const struct mq_attr
*attr
)
1091 struct target_mq_attr
*target_mq_attr
;
1093 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1094 target_mq_attr_addr
, 0))
1095 return -TARGET_EFAULT
;
1097 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1098 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1099 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1100 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1102 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1108 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1109 /* do_select() must return target values and target errnos. */
1110 static abi_long
do_select(int n
,
1111 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1112 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1114 fd_set rfds
, wfds
, efds
;
1115 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1116 struct timeval tv
, *tv_ptr
;
1119 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1123 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1127 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1132 if (target_tv_addr
) {
1133 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1134 return -TARGET_EFAULT
;
1140 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1142 if (!is_error(ret
)) {
1143 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1144 return -TARGET_EFAULT
;
1145 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1146 return -TARGET_EFAULT
;
1147 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1148 return -TARGET_EFAULT
;
1150 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1151 return -TARGET_EFAULT
;
1158 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1161 return pipe2(host_pipe
, flags
);
1167 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1168 int flags
, int is_pipe2
)
1172 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1175 return get_errno(ret
);
1177 /* Several targets have special calling conventions for the original
1178 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1180 #if defined(TARGET_ALPHA)
1181 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1182 return host_pipe
[0];
1183 #elif defined(TARGET_MIPS)
1184 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1185 return host_pipe
[0];
1186 #elif defined(TARGET_SH4)
1187 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1188 return host_pipe
[0];
1192 if (put_user_s32(host_pipe
[0], pipedes
)
1193 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1194 return -TARGET_EFAULT
;
1195 return get_errno(ret
);
1198 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1199 abi_ulong target_addr
,
1202 struct target_ip_mreqn
*target_smreqn
;
1204 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1206 return -TARGET_EFAULT
;
1207 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1208 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1209 if (len
== sizeof(struct target_ip_mreqn
))
1210 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1211 unlock_user(target_smreqn
, target_addr
, 0);
1216 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1217 abi_ulong target_addr
,
1220 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1221 sa_family_t sa_family
;
1222 struct target_sockaddr
*target_saddr
;
1224 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1226 return -TARGET_EFAULT
;
1228 sa_family
= tswap16(target_saddr
->sa_family
);
1230 /* Oops. The caller might send a incomplete sun_path; sun_path
1231 * must be terminated by \0 (see the manual page), but
1232 * unfortunately it is quite common to specify sockaddr_un
1233 * length as "strlen(x->sun_path)" while it should be
1234 * "strlen(...) + 1". We'll fix that here if needed.
1235 * Linux kernel has a similar feature.
1238 if (sa_family
== AF_UNIX
) {
1239 if (len
< unix_maxlen
&& len
> 0) {
1240 char *cp
= (char*)target_saddr
;
1242 if ( cp
[len
-1] && !cp
[len
] )
1245 if (len
> unix_maxlen
)
1249 memcpy(addr
, target_saddr
, len
);
1250 addr
->sa_family
= sa_family
;
1251 unlock_user(target_saddr
, target_addr
, 0);
1256 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1257 struct sockaddr
*addr
,
1260 struct target_sockaddr
*target_saddr
;
1262 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1264 return -TARGET_EFAULT
;
1265 memcpy(target_saddr
, addr
, len
);
1266 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1267 unlock_user(target_saddr
, target_addr
, len
);
1272 /* ??? Should this also swap msgh->name? */
1273 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1274 struct target_msghdr
*target_msgh
)
1276 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1277 abi_long msg_controllen
;
1278 abi_ulong target_cmsg_addr
;
1279 struct target_cmsghdr
*target_cmsg
;
1280 socklen_t space
= 0;
1282 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1283 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1285 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1286 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1288 return -TARGET_EFAULT
;
1290 while (cmsg
&& target_cmsg
) {
1291 void *data
= CMSG_DATA(cmsg
);
1292 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1294 int len
= tswapal(target_cmsg
->cmsg_len
)
1295 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1297 space
+= CMSG_SPACE(len
);
1298 if (space
> msgh
->msg_controllen
) {
1299 space
-= CMSG_SPACE(len
);
1300 gemu_log("Host cmsg overflow\n");
1304 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1305 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1306 cmsg
->cmsg_len
= CMSG_LEN(len
);
1308 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1309 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1310 memcpy(data
, target_data
, len
);
1312 int *fd
= (int *)data
;
1313 int *target_fd
= (int *)target_data
;
1314 int i
, numfds
= len
/ sizeof(int);
1316 for (i
= 0; i
< numfds
; i
++)
1317 fd
[i
] = tswap32(target_fd
[i
]);
1320 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1321 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1323 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1325 msgh
->msg_controllen
= space
;
1329 /* ??? Should this also swap msgh->name? */
1330 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1331 struct msghdr
*msgh
)
1333 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1334 abi_long msg_controllen
;
1335 abi_ulong target_cmsg_addr
;
1336 struct target_cmsghdr
*target_cmsg
;
1337 socklen_t space
= 0;
1339 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1340 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1342 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1343 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1345 return -TARGET_EFAULT
;
1347 while (cmsg
&& target_cmsg
) {
1348 void *data
= CMSG_DATA(cmsg
);
1349 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1351 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1353 space
+= TARGET_CMSG_SPACE(len
);
1354 if (space
> msg_controllen
) {
1355 space
-= TARGET_CMSG_SPACE(len
);
1356 gemu_log("Target cmsg overflow\n");
1360 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1361 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1362 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1364 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1365 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1366 memcpy(target_data
, data
, len
);
1368 int *fd
= (int *)data
;
1369 int *target_fd
= (int *)target_data
;
1370 int i
, numfds
= len
/ sizeof(int);
1372 for (i
= 0; i
< numfds
; i
++)
1373 target_fd
[i
] = tswap32(fd
[i
]);
1376 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1377 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1379 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1381 target_msgh
->msg_controllen
= tswapal(space
);
1385 /* do_setsockopt() Must return target values and target errnos. */
1386 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1387 abi_ulong optval_addr
, socklen_t optlen
)
1391 struct ip_mreqn
*ip_mreq
;
1392 struct ip_mreq_source
*ip_mreq_source
;
1396 /* TCP options all take an 'int' value. */
1397 if (optlen
< sizeof(uint32_t))
1398 return -TARGET_EINVAL
;
1400 if (get_user_u32(val
, optval_addr
))
1401 return -TARGET_EFAULT
;
1402 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1409 case IP_ROUTER_ALERT
:
1413 case IP_MTU_DISCOVER
:
1419 case IP_MULTICAST_TTL
:
1420 case IP_MULTICAST_LOOP
:
1422 if (optlen
>= sizeof(uint32_t)) {
1423 if (get_user_u32(val
, optval_addr
))
1424 return -TARGET_EFAULT
;
1425 } else if (optlen
>= 1) {
1426 if (get_user_u8(val
, optval_addr
))
1427 return -TARGET_EFAULT
;
1429 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1431 case IP_ADD_MEMBERSHIP
:
1432 case IP_DROP_MEMBERSHIP
:
1433 if (optlen
< sizeof (struct target_ip_mreq
) ||
1434 optlen
> sizeof (struct target_ip_mreqn
))
1435 return -TARGET_EINVAL
;
1437 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1438 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1439 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1442 case IP_BLOCK_SOURCE
:
1443 case IP_UNBLOCK_SOURCE
:
1444 case IP_ADD_SOURCE_MEMBERSHIP
:
1445 case IP_DROP_SOURCE_MEMBERSHIP
:
1446 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1447 return -TARGET_EINVAL
;
1449 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1450 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1451 unlock_user (ip_mreq_source
, optval_addr
, 0);
1458 case TARGET_SOL_SOCKET
:
1460 /* Options with 'int' argument. */
1461 case TARGET_SO_DEBUG
:
1464 case TARGET_SO_REUSEADDR
:
1465 optname
= SO_REUSEADDR
;
1467 case TARGET_SO_TYPE
:
1470 case TARGET_SO_ERROR
:
1473 case TARGET_SO_DONTROUTE
:
1474 optname
= SO_DONTROUTE
;
1476 case TARGET_SO_BROADCAST
:
1477 optname
= SO_BROADCAST
;
1479 case TARGET_SO_SNDBUF
:
1480 optname
= SO_SNDBUF
;
1482 case TARGET_SO_RCVBUF
:
1483 optname
= SO_RCVBUF
;
1485 case TARGET_SO_KEEPALIVE
:
1486 optname
= SO_KEEPALIVE
;
1488 case TARGET_SO_OOBINLINE
:
1489 optname
= SO_OOBINLINE
;
1491 case TARGET_SO_NO_CHECK
:
1492 optname
= SO_NO_CHECK
;
1494 case TARGET_SO_PRIORITY
:
1495 optname
= SO_PRIORITY
;
1498 case TARGET_SO_BSDCOMPAT
:
1499 optname
= SO_BSDCOMPAT
;
1502 case TARGET_SO_PASSCRED
:
1503 optname
= SO_PASSCRED
;
1505 case TARGET_SO_TIMESTAMP
:
1506 optname
= SO_TIMESTAMP
;
1508 case TARGET_SO_RCVLOWAT
:
1509 optname
= SO_RCVLOWAT
;
1511 case TARGET_SO_RCVTIMEO
:
1512 optname
= SO_RCVTIMEO
;
1514 case TARGET_SO_SNDTIMEO
:
1515 optname
= SO_SNDTIMEO
;
1521 if (optlen
< sizeof(uint32_t))
1522 return -TARGET_EINVAL
;
1524 if (get_user_u32(val
, optval_addr
))
1525 return -TARGET_EFAULT
;
1526 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1530 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1531 ret
= -TARGET_ENOPROTOOPT
;
1536 /* do_getsockopt() Must return target values and target errnos. */
1537 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1538 abi_ulong optval_addr
, abi_ulong optlen
)
1545 case TARGET_SOL_SOCKET
:
1548 /* These don't just return a single integer */
1549 case TARGET_SO_LINGER
:
1550 case TARGET_SO_RCVTIMEO
:
1551 case TARGET_SO_SNDTIMEO
:
1552 case TARGET_SO_PEERNAME
:
1554 case TARGET_SO_PEERCRED
: {
1557 struct target_ucred
*tcr
;
1559 if (get_user_u32(len
, optlen
)) {
1560 return -TARGET_EFAULT
;
1563 return -TARGET_EINVAL
;
1567 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1575 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1576 return -TARGET_EFAULT
;
1578 __put_user(cr
.pid
, &tcr
->pid
);
1579 __put_user(cr
.uid
, &tcr
->uid
);
1580 __put_user(cr
.gid
, &tcr
->gid
);
1581 unlock_user_struct(tcr
, optval_addr
, 1);
1582 if (put_user_u32(len
, optlen
)) {
1583 return -TARGET_EFAULT
;
1587 /* Options with 'int' argument. */
1588 case TARGET_SO_DEBUG
:
1591 case TARGET_SO_REUSEADDR
:
1592 optname
= SO_REUSEADDR
;
1594 case TARGET_SO_TYPE
:
1597 case TARGET_SO_ERROR
:
1600 case TARGET_SO_DONTROUTE
:
1601 optname
= SO_DONTROUTE
;
1603 case TARGET_SO_BROADCAST
:
1604 optname
= SO_BROADCAST
;
1606 case TARGET_SO_SNDBUF
:
1607 optname
= SO_SNDBUF
;
1609 case TARGET_SO_RCVBUF
:
1610 optname
= SO_RCVBUF
;
1612 case TARGET_SO_KEEPALIVE
:
1613 optname
= SO_KEEPALIVE
;
1615 case TARGET_SO_OOBINLINE
:
1616 optname
= SO_OOBINLINE
;
1618 case TARGET_SO_NO_CHECK
:
1619 optname
= SO_NO_CHECK
;
1621 case TARGET_SO_PRIORITY
:
1622 optname
= SO_PRIORITY
;
1625 case TARGET_SO_BSDCOMPAT
:
1626 optname
= SO_BSDCOMPAT
;
1629 case TARGET_SO_PASSCRED
:
1630 optname
= SO_PASSCRED
;
1632 case TARGET_SO_TIMESTAMP
:
1633 optname
= SO_TIMESTAMP
;
1635 case TARGET_SO_RCVLOWAT
:
1636 optname
= SO_RCVLOWAT
;
1643 /* TCP options all take an 'int' value. */
1645 if (get_user_u32(len
, optlen
))
1646 return -TARGET_EFAULT
;
1648 return -TARGET_EINVAL
;
1650 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1656 if (put_user_u32(val
, optval_addr
))
1657 return -TARGET_EFAULT
;
1659 if (put_user_u8(val
, optval_addr
))
1660 return -TARGET_EFAULT
;
1662 if (put_user_u32(len
, optlen
))
1663 return -TARGET_EFAULT
;
1670 case IP_ROUTER_ALERT
:
1674 case IP_MTU_DISCOVER
:
1680 case IP_MULTICAST_TTL
:
1681 case IP_MULTICAST_LOOP
:
1682 if (get_user_u32(len
, optlen
))
1683 return -TARGET_EFAULT
;
1685 return -TARGET_EINVAL
;
1687 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1690 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1692 if (put_user_u32(len
, optlen
)
1693 || put_user_u8(val
, optval_addr
))
1694 return -TARGET_EFAULT
;
1696 if (len
> sizeof(int))
1698 if (put_user_u32(len
, optlen
)
1699 || put_user_u32(val
, optval_addr
))
1700 return -TARGET_EFAULT
;
1704 ret
= -TARGET_ENOPROTOOPT
;
1710 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1712 ret
= -TARGET_EOPNOTSUPP
;
1719 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1720 * other lock functions have a return code of 0 for failure.
1722 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1723 int count
, int copy
)
1725 struct target_iovec
*target_vec
;
1729 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1731 return -TARGET_EFAULT
;
1732 for(i
= 0;i
< count
; i
++) {
1733 base
= tswapal(target_vec
[i
].iov_base
);
1734 vec
[i
].iov_len
= tswapal(target_vec
[i
].iov_len
);
1735 if (vec
[i
].iov_len
!= 0) {
1736 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1737 /* Don't check lock_user return value. We must call writev even
1738 if a element has invalid base address. */
1740 /* zero length pointer is ignored */
1741 vec
[i
].iov_base
= NULL
;
1744 unlock_user (target_vec
, target_addr
, 0);
1748 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1749 int count
, int copy
)
1751 struct target_iovec
*target_vec
;
1755 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1757 return -TARGET_EFAULT
;
1758 for(i
= 0;i
< count
; i
++) {
1759 if (target_vec
[i
].iov_base
) {
1760 base
= tswapal(target_vec
[i
].iov_base
);
1761 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1764 unlock_user (target_vec
, target_addr
, 0);
1769 /* do_socket() Must return target values and target errnos. */
1770 static abi_long
do_socket(int domain
, int type
, int protocol
)
1772 #if defined(TARGET_MIPS)
1774 case TARGET_SOCK_DGRAM
:
1777 case TARGET_SOCK_STREAM
:
1780 case TARGET_SOCK_RAW
:
1783 case TARGET_SOCK_RDM
:
1786 case TARGET_SOCK_SEQPACKET
:
1787 type
= SOCK_SEQPACKET
;
1789 case TARGET_SOCK_PACKET
:
1794 if (domain
== PF_NETLINK
)
1795 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1796 return get_errno(socket(domain
, type
, protocol
));
1799 /* do_bind() Must return target values and target errnos. */
1800 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1806 if ((int)addrlen
< 0) {
1807 return -TARGET_EINVAL
;
1810 addr
= alloca(addrlen
+1);
1812 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1816 return get_errno(bind(sockfd
, addr
, addrlen
));
1819 /* do_connect() Must return target values and target errnos. */
1820 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1826 if ((int)addrlen
< 0) {
1827 return -TARGET_EINVAL
;
1830 addr
= alloca(addrlen
);
1832 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1836 return get_errno(connect(sockfd
, addr
, addrlen
));
1839 /* do_sendrecvmsg() Must return target values and target errnos. */
1840 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1841 int flags
, int send
)
1844 struct target_msghdr
*msgp
;
1848 abi_ulong target_vec
;
1851 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1855 return -TARGET_EFAULT
;
1856 if (msgp
->msg_name
) {
1857 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1858 msg
.msg_name
= alloca(msg
.msg_namelen
);
1859 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1862 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1866 msg
.msg_name
= NULL
;
1867 msg
.msg_namelen
= 0;
1869 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1870 msg
.msg_control
= alloca(msg
.msg_controllen
);
1871 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1873 count
= tswapal(msgp
->msg_iovlen
);
1874 vec
= alloca(count
* sizeof(struct iovec
));
1875 target_vec
= tswapal(msgp
->msg_iov
);
1876 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1877 msg
.msg_iovlen
= count
;
1881 ret
= target_to_host_cmsg(&msg
, msgp
);
1883 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1885 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1886 if (!is_error(ret
)) {
1888 ret
= host_to_target_cmsg(msgp
, &msg
);
1893 unlock_iovec(vec
, target_vec
, count
, !send
);
1894 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1898 /* do_accept() Must return target values and target errnos. */
1899 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1900 abi_ulong target_addrlen_addr
)
1906 if (target_addr
== 0)
1907 return get_errno(accept(fd
, NULL
, NULL
));
1909 /* linux returns EINVAL if addrlen pointer is invalid */
1910 if (get_user_u32(addrlen
, target_addrlen_addr
))
1911 return -TARGET_EINVAL
;
1913 if ((int)addrlen
< 0) {
1914 return -TARGET_EINVAL
;
1917 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1918 return -TARGET_EINVAL
;
1920 addr
= alloca(addrlen
);
1922 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1923 if (!is_error(ret
)) {
1924 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1925 if (put_user_u32(addrlen
, target_addrlen_addr
))
1926 ret
= -TARGET_EFAULT
;
1931 /* do_getpeername() Must return target values and target errnos. */
1932 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1933 abi_ulong target_addrlen_addr
)
1939 if (get_user_u32(addrlen
, target_addrlen_addr
))
1940 return -TARGET_EFAULT
;
1942 if ((int)addrlen
< 0) {
1943 return -TARGET_EINVAL
;
1946 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1947 return -TARGET_EFAULT
;
1949 addr
= alloca(addrlen
);
1951 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1952 if (!is_error(ret
)) {
1953 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1954 if (put_user_u32(addrlen
, target_addrlen_addr
))
1955 ret
= -TARGET_EFAULT
;
1960 /* do_getsockname() Must return target values and target errnos. */
1961 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1962 abi_ulong target_addrlen_addr
)
1968 if (get_user_u32(addrlen
, target_addrlen_addr
))
1969 return -TARGET_EFAULT
;
1971 if ((int)addrlen
< 0) {
1972 return -TARGET_EINVAL
;
1975 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1976 return -TARGET_EFAULT
;
1978 addr
= alloca(addrlen
);
1980 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1981 if (!is_error(ret
)) {
1982 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1983 if (put_user_u32(addrlen
, target_addrlen_addr
))
1984 ret
= -TARGET_EFAULT
;
1989 /* do_socketpair() Must return target values and target errnos. */
1990 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1991 abi_ulong target_tab_addr
)
1996 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1997 if (!is_error(ret
)) {
1998 if (put_user_s32(tab
[0], target_tab_addr
)
1999 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2000 ret
= -TARGET_EFAULT
;
2005 /* do_sendto() Must return target values and target errnos. */
2006 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2007 abi_ulong target_addr
, socklen_t addrlen
)
2013 if ((int)addrlen
< 0) {
2014 return -TARGET_EINVAL
;
2017 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2019 return -TARGET_EFAULT
;
2021 addr
= alloca(addrlen
);
2022 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2024 unlock_user(host_msg
, msg
, 0);
2027 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2029 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2031 unlock_user(host_msg
, msg
, 0);
2035 /* do_recvfrom() Must return target values and target errnos. */
2036 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2037 abi_ulong target_addr
,
2038 abi_ulong target_addrlen
)
2045 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2047 return -TARGET_EFAULT
;
2049 if (get_user_u32(addrlen
, target_addrlen
)) {
2050 ret
= -TARGET_EFAULT
;
2053 if ((int)addrlen
< 0) {
2054 ret
= -TARGET_EINVAL
;
2057 addr
= alloca(addrlen
);
2058 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2060 addr
= NULL
; /* To keep compiler quiet. */
2061 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2063 if (!is_error(ret
)) {
2065 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2066 if (put_user_u32(addrlen
, target_addrlen
)) {
2067 ret
= -TARGET_EFAULT
;
2071 unlock_user(host_msg
, msg
, len
);
2074 unlock_user(host_msg
, msg
, 0);
2079 #ifdef TARGET_NR_socketcall
2080 /* do_socketcall() Must return target values and target errnos. */
2081 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2084 const int n
= sizeof(abi_ulong
);
2089 abi_ulong domain
, type
, protocol
;
2091 if (get_user_ual(domain
, vptr
)
2092 || get_user_ual(type
, vptr
+ n
)
2093 || get_user_ual(protocol
, vptr
+ 2 * n
))
2094 return -TARGET_EFAULT
;
2096 ret
= do_socket(domain
, type
, protocol
);
2102 abi_ulong target_addr
;
2105 if (get_user_ual(sockfd
, vptr
)
2106 || get_user_ual(target_addr
, vptr
+ n
)
2107 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2108 return -TARGET_EFAULT
;
2110 ret
= do_bind(sockfd
, target_addr
, addrlen
);
2113 case SOCKOP_connect
:
2116 abi_ulong target_addr
;
2119 if (get_user_ual(sockfd
, vptr
)
2120 || get_user_ual(target_addr
, vptr
+ n
)
2121 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2122 return -TARGET_EFAULT
;
2124 ret
= do_connect(sockfd
, target_addr
, addrlen
);
2129 abi_ulong sockfd
, backlog
;
2131 if (get_user_ual(sockfd
, vptr
)
2132 || get_user_ual(backlog
, vptr
+ n
))
2133 return -TARGET_EFAULT
;
2135 ret
= get_errno(listen(sockfd
, backlog
));
2141 abi_ulong target_addr
, target_addrlen
;
2143 if (get_user_ual(sockfd
, vptr
)
2144 || get_user_ual(target_addr
, vptr
+ n
)
2145 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2146 return -TARGET_EFAULT
;
2148 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
2151 case SOCKOP_getsockname
:
2154 abi_ulong target_addr
, target_addrlen
;
2156 if (get_user_ual(sockfd
, vptr
)
2157 || get_user_ual(target_addr
, vptr
+ n
)
2158 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2159 return -TARGET_EFAULT
;
2161 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
2164 case SOCKOP_getpeername
:
2167 abi_ulong target_addr
, target_addrlen
;
2169 if (get_user_ual(sockfd
, vptr
)
2170 || get_user_ual(target_addr
, vptr
+ n
)
2171 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
2172 return -TARGET_EFAULT
;
2174 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
2177 case SOCKOP_socketpair
:
2179 abi_ulong domain
, type
, protocol
;
2182 if (get_user_ual(domain
, vptr
)
2183 || get_user_ual(type
, vptr
+ n
)
2184 || get_user_ual(protocol
, vptr
+ 2 * n
)
2185 || get_user_ual(tab
, vptr
+ 3 * n
))
2186 return -TARGET_EFAULT
;
2188 ret
= do_socketpair(domain
, type
, protocol
, tab
);
2198 if (get_user_ual(sockfd
, vptr
)
2199 || get_user_ual(msg
, vptr
+ n
)
2200 || get_user_ual(len
, vptr
+ 2 * n
)
2201 || get_user_ual(flags
, vptr
+ 3 * n
))
2202 return -TARGET_EFAULT
;
2204 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
2214 if (get_user_ual(sockfd
, vptr
)
2215 || get_user_ual(msg
, vptr
+ n
)
2216 || get_user_ual(len
, vptr
+ 2 * n
)
2217 || get_user_ual(flags
, vptr
+ 3 * n
))
2218 return -TARGET_EFAULT
;
2220 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
2232 if (get_user_ual(sockfd
, vptr
)
2233 || get_user_ual(msg
, vptr
+ n
)
2234 || get_user_ual(len
, vptr
+ 2 * n
)
2235 || get_user_ual(flags
, vptr
+ 3 * n
)
2236 || get_user_ual(addr
, vptr
+ 4 * n
)
2237 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2238 return -TARGET_EFAULT
;
2240 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2243 case SOCKOP_recvfrom
:
2252 if (get_user_ual(sockfd
, vptr
)
2253 || get_user_ual(msg
, vptr
+ n
)
2254 || get_user_ual(len
, vptr
+ 2 * n
)
2255 || get_user_ual(flags
, vptr
+ 3 * n
)
2256 || get_user_ual(addr
, vptr
+ 4 * n
)
2257 || get_user_ual(addrlen
, vptr
+ 5 * n
))
2258 return -TARGET_EFAULT
;
2260 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
2263 case SOCKOP_shutdown
:
2265 abi_ulong sockfd
, how
;
2267 if (get_user_ual(sockfd
, vptr
)
2268 || get_user_ual(how
, vptr
+ n
))
2269 return -TARGET_EFAULT
;
2271 ret
= get_errno(shutdown(sockfd
, how
));
2274 case SOCKOP_sendmsg
:
2275 case SOCKOP_recvmsg
:
2278 abi_ulong target_msg
;
2281 if (get_user_ual(fd
, vptr
)
2282 || get_user_ual(target_msg
, vptr
+ n
)
2283 || get_user_ual(flags
, vptr
+ 2 * n
))
2284 return -TARGET_EFAULT
;
2286 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
2287 (num
== SOCKOP_sendmsg
));
2290 case SOCKOP_setsockopt
:
2298 if (get_user_ual(sockfd
, vptr
)
2299 || get_user_ual(level
, vptr
+ n
)
2300 || get_user_ual(optname
, vptr
+ 2 * n
)
2301 || get_user_ual(optval
, vptr
+ 3 * n
)
2302 || get_user_ual(optlen
, vptr
+ 4 * n
))
2303 return -TARGET_EFAULT
;
2305 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2308 case SOCKOP_getsockopt
:
2316 if (get_user_ual(sockfd
, vptr
)
2317 || get_user_ual(level
, vptr
+ n
)
2318 || get_user_ual(optname
, vptr
+ 2 * n
)
2319 || get_user_ual(optval
, vptr
+ 3 * n
)
2320 || get_user_ual(optlen
, vptr
+ 4 * n
))
2321 return -TARGET_EFAULT
;
2323 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2327 gemu_log("Unsupported socketcall: %d\n", num
);
2328 ret
= -TARGET_ENOSYS
;
2335 #define N_SHM_REGIONS 32
2337 static struct shm_region
{
2340 } shm_regions
[N_SHM_REGIONS
];
2342 struct target_ipc_perm
2349 unsigned short int mode
;
2350 unsigned short int __pad1
;
2351 unsigned short int __seq
;
2352 unsigned short int __pad2
;
2353 abi_ulong __unused1
;
2354 abi_ulong __unused2
;
2357 struct target_semid_ds
2359 struct target_ipc_perm sem_perm
;
2360 abi_ulong sem_otime
;
2361 abi_ulong __unused1
;
2362 abi_ulong sem_ctime
;
2363 abi_ulong __unused2
;
2364 abi_ulong sem_nsems
;
2365 abi_ulong __unused3
;
2366 abi_ulong __unused4
;
2369 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2370 abi_ulong target_addr
)
2372 struct target_ipc_perm
*target_ip
;
2373 struct target_semid_ds
*target_sd
;
2375 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2376 return -TARGET_EFAULT
;
2377 target_ip
= &(target_sd
->sem_perm
);
2378 host_ip
->__key
= tswapal(target_ip
->__key
);
2379 host_ip
->uid
= tswapal(target_ip
->uid
);
2380 host_ip
->gid
= tswapal(target_ip
->gid
);
2381 host_ip
->cuid
= tswapal(target_ip
->cuid
);
2382 host_ip
->cgid
= tswapal(target_ip
->cgid
);
2383 host_ip
->mode
= tswap16(target_ip
->mode
);
2384 unlock_user_struct(target_sd
, target_addr
, 0);
2388 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2389 struct ipc_perm
*host_ip
)
2391 struct target_ipc_perm
*target_ip
;
2392 struct target_semid_ds
*target_sd
;
2394 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2395 return -TARGET_EFAULT
;
2396 target_ip
= &(target_sd
->sem_perm
);
2397 target_ip
->__key
= tswapal(host_ip
->__key
);
2398 target_ip
->uid
= tswapal(host_ip
->uid
);
2399 target_ip
->gid
= tswapal(host_ip
->gid
);
2400 target_ip
->cuid
= tswapal(host_ip
->cuid
);
2401 target_ip
->cgid
= tswapal(host_ip
->cgid
);
2402 target_ip
->mode
= tswap16(host_ip
->mode
);
2403 unlock_user_struct(target_sd
, target_addr
, 1);
2407 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2408 abi_ulong target_addr
)
2410 struct target_semid_ds
*target_sd
;
2412 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2413 return -TARGET_EFAULT
;
2414 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2415 return -TARGET_EFAULT
;
2416 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
2417 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
2418 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
2419 unlock_user_struct(target_sd
, target_addr
, 0);
2423 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2424 struct semid_ds
*host_sd
)
2426 struct target_semid_ds
*target_sd
;
2428 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2429 return -TARGET_EFAULT
;
2430 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2431 return -TARGET_EFAULT
;
2432 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
2433 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
2434 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
2435 unlock_user_struct(target_sd
, target_addr
, 1);
2439 struct target_seminfo
{
2452 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2453 struct seminfo
*host_seminfo
)
2455 struct target_seminfo
*target_seminfo
;
2456 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2457 return -TARGET_EFAULT
;
2458 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2459 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2460 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2461 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2462 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2463 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2464 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2465 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2466 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2467 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2468 unlock_user_struct(target_seminfo
, target_addr
, 1);
2474 struct semid_ds
*buf
;
2475 unsigned short *array
;
2476 struct seminfo
*__buf
;
2479 union target_semun
{
2486 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2487 abi_ulong target_addr
)
2490 unsigned short *array
;
2492 struct semid_ds semid_ds
;
2495 semun
.buf
= &semid_ds
;
2497 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2499 return get_errno(ret
);
2501 nsems
= semid_ds
.sem_nsems
;
2503 *host_array
= malloc(nsems
*sizeof(unsigned short));
2504 array
= lock_user(VERIFY_READ
, target_addr
,
2505 nsems
*sizeof(unsigned short), 1);
2507 return -TARGET_EFAULT
;
2509 for(i
=0; i
<nsems
; i
++) {
2510 __get_user((*host_array
)[i
], &array
[i
]);
2512 unlock_user(array
, target_addr
, 0);
2517 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2518 unsigned short **host_array
)
2521 unsigned short *array
;
2523 struct semid_ds semid_ds
;
2526 semun
.buf
= &semid_ds
;
2528 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2530 return get_errno(ret
);
2532 nsems
= semid_ds
.sem_nsems
;
2534 array
= lock_user(VERIFY_WRITE
, target_addr
,
2535 nsems
*sizeof(unsigned short), 0);
2537 return -TARGET_EFAULT
;
2539 for(i
=0; i
<nsems
; i
++) {
2540 __put_user((*host_array
)[i
], &array
[i
]);
2543 unlock_user(array
, target_addr
, 1);
2548 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2549 union target_semun target_su
)
2552 struct semid_ds dsarg
;
2553 unsigned short *array
= NULL
;
2554 struct seminfo seminfo
;
2555 abi_long ret
= -TARGET_EINVAL
;
2562 arg
.val
= tswap32(target_su
.val
);
2563 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2564 target_su
.val
= tswap32(arg
.val
);
2568 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2572 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2573 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2580 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2584 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2585 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2591 arg
.__buf
= &seminfo
;
2592 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2593 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2601 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2608 struct target_sembuf
{
2609 unsigned short sem_num
;
2614 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2615 abi_ulong target_addr
,
2618 struct target_sembuf
*target_sembuf
;
2621 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2622 nsops
*sizeof(struct target_sembuf
), 1);
2624 return -TARGET_EFAULT
;
2626 for(i
=0; i
<nsops
; i
++) {
2627 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2628 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2629 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2632 unlock_user(target_sembuf
, target_addr
, 0);
2637 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2639 struct sembuf sops
[nsops
];
2641 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2642 return -TARGET_EFAULT
;
2644 return semop(semid
, sops
, nsops
);
2647 struct target_msqid_ds
2649 struct target_ipc_perm msg_perm
;
2650 abi_ulong msg_stime
;
2651 #if TARGET_ABI_BITS == 32
2652 abi_ulong __unused1
;
2654 abi_ulong msg_rtime
;
2655 #if TARGET_ABI_BITS == 32
2656 abi_ulong __unused2
;
2658 abi_ulong msg_ctime
;
2659 #if TARGET_ABI_BITS == 32
2660 abi_ulong __unused3
;
2662 abi_ulong __msg_cbytes
;
2664 abi_ulong msg_qbytes
;
2665 abi_ulong msg_lspid
;
2666 abi_ulong msg_lrpid
;
2667 abi_ulong __unused4
;
2668 abi_ulong __unused5
;
2671 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2672 abi_ulong target_addr
)
2674 struct target_msqid_ds
*target_md
;
2676 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2677 return -TARGET_EFAULT
;
2678 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2679 return -TARGET_EFAULT
;
2680 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
2681 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
2682 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
2683 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
2684 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
2685 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
2686 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
2687 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
2688 unlock_user_struct(target_md
, target_addr
, 0);
2692 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2693 struct msqid_ds
*host_md
)
2695 struct target_msqid_ds
*target_md
;
2697 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2698 return -TARGET_EFAULT
;
2699 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2700 return -TARGET_EFAULT
;
2701 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
2702 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
2703 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
2704 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
2705 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
2706 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
2707 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
2708 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
2709 unlock_user_struct(target_md
, target_addr
, 1);
2713 struct target_msginfo
{
2721 unsigned short int msgseg
;
2724 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2725 struct msginfo
*host_msginfo
)
2727 struct target_msginfo
*target_msginfo
;
2728 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2729 return -TARGET_EFAULT
;
2730 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2731 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2732 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2733 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2734 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2735 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2736 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2737 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2738 unlock_user_struct(target_msginfo
, target_addr
, 1);
2742 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2744 struct msqid_ds dsarg
;
2745 struct msginfo msginfo
;
2746 abi_long ret
= -TARGET_EINVAL
;
2754 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2755 return -TARGET_EFAULT
;
2756 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2757 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2758 return -TARGET_EFAULT
;
2761 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2765 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2766 if (host_to_target_msginfo(ptr
, &msginfo
))
2767 return -TARGET_EFAULT
;
2774 struct target_msgbuf
{
2779 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2780 unsigned int msgsz
, int msgflg
)
2782 struct target_msgbuf
*target_mb
;
2783 struct msgbuf
*host_mb
;
2786 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2787 return -TARGET_EFAULT
;
2788 host_mb
= malloc(msgsz
+sizeof(long));
2789 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
2790 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2791 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2793 unlock_user_struct(target_mb
, msgp
, 0);
2798 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2799 unsigned int msgsz
, abi_long msgtyp
,
2802 struct target_msgbuf
*target_mb
;
2804 struct msgbuf
*host_mb
;
2807 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2808 return -TARGET_EFAULT
;
2810 host_mb
= malloc(msgsz
+sizeof(long));
2811 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapal(msgtyp
), msgflg
));
2814 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2815 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2816 if (!target_mtext
) {
2817 ret
= -TARGET_EFAULT
;
2820 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2821 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2824 target_mb
->mtype
= tswapal(host_mb
->mtype
);
2829 unlock_user_struct(target_mb
, msgp
, 1);
2833 struct target_shmid_ds
2835 struct target_ipc_perm shm_perm
;
2836 abi_ulong shm_segsz
;
2837 abi_ulong shm_atime
;
2838 #if TARGET_ABI_BITS == 32
2839 abi_ulong __unused1
;
2841 abi_ulong shm_dtime
;
2842 #if TARGET_ABI_BITS == 32
2843 abi_ulong __unused2
;
2845 abi_ulong shm_ctime
;
2846 #if TARGET_ABI_BITS == 32
2847 abi_ulong __unused3
;
2851 abi_ulong shm_nattch
;
2852 unsigned long int __unused4
;
2853 unsigned long int __unused5
;
2856 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2857 abi_ulong target_addr
)
2859 struct target_shmid_ds
*target_sd
;
2861 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2862 return -TARGET_EFAULT
;
2863 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2864 return -TARGET_EFAULT
;
2865 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2866 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2867 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2868 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2869 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2870 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2871 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2872 unlock_user_struct(target_sd
, target_addr
, 0);
2876 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2877 struct shmid_ds
*host_sd
)
2879 struct target_shmid_ds
*target_sd
;
2881 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2882 return -TARGET_EFAULT
;
2883 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2884 return -TARGET_EFAULT
;
2885 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2886 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2887 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2888 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2889 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2890 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2891 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2892 unlock_user_struct(target_sd
, target_addr
, 1);
2896 struct target_shminfo
{
2904 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2905 struct shminfo
*host_shminfo
)
2907 struct target_shminfo
*target_shminfo
;
2908 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2909 return -TARGET_EFAULT
;
2910 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2911 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2912 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2913 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2914 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2915 unlock_user_struct(target_shminfo
, target_addr
, 1);
2919 struct target_shm_info
{
2924 abi_ulong swap_attempts
;
2925 abi_ulong swap_successes
;
2928 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2929 struct shm_info
*host_shm_info
)
2931 struct target_shm_info
*target_shm_info
;
2932 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2933 return -TARGET_EFAULT
;
2934 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2935 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2936 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2937 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2938 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2939 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2940 unlock_user_struct(target_shm_info
, target_addr
, 1);
2944 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2946 struct shmid_ds dsarg
;
2947 struct shminfo shminfo
;
2948 struct shm_info shm_info
;
2949 abi_long ret
= -TARGET_EINVAL
;
2957 if (target_to_host_shmid_ds(&dsarg
, buf
))
2958 return -TARGET_EFAULT
;
2959 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2960 if (host_to_target_shmid_ds(buf
, &dsarg
))
2961 return -TARGET_EFAULT
;
2964 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2965 if (host_to_target_shminfo(buf
, &shminfo
))
2966 return -TARGET_EFAULT
;
2969 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2970 if (host_to_target_shm_info(buf
, &shm_info
))
2971 return -TARGET_EFAULT
;
2976 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2983 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2987 struct shmid_ds shm_info
;
2990 /* find out the length of the shared memory segment */
2991 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2992 if (is_error(ret
)) {
2993 /* can't get length, bail out */
3000 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3002 abi_ulong mmap_start
;
3004 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
3006 if (mmap_start
== -1) {
3008 host_raddr
= (void *)-1;
3010 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3013 if (host_raddr
== (void *)-1) {
3015 return get_errno((long)host_raddr
);
3017 raddr
=h2g((unsigned long)host_raddr
);
3019 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3020 PAGE_VALID
| PAGE_READ
|
3021 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3023 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3024 if (shm_regions
[i
].start
== 0) {
3025 shm_regions
[i
].start
= raddr
;
3026 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3036 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3040 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3041 if (shm_regions
[i
].start
== shmaddr
) {
3042 shm_regions
[i
].start
= 0;
3043 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3048 return get_errno(shmdt(g2h(shmaddr
)));
3051 #ifdef TARGET_NR_ipc
3052 /* ??? This only works with linear mappings. */
3053 /* do_ipc() must return target values and target errnos. */
3054 static abi_long
do_ipc(unsigned int call
, int first
,
3055 int second
, int third
,
3056 abi_long ptr
, abi_long fifth
)
3061 version
= call
>> 16;
3066 ret
= do_semop(first
, ptr
, second
);
3070 ret
= get_errno(semget(first
, second
, third
));
3074 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
3078 ret
= get_errno(msgget(first
, second
));
3082 ret
= do_msgsnd(first
, ptr
, second
, third
);
3086 ret
= do_msgctl(first
, second
, ptr
);
3093 struct target_ipc_kludge
{
3098 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3099 ret
= -TARGET_EFAULT
;
3103 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
3105 unlock_user_struct(tmp
, ptr
, 0);
3109 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3118 raddr
= do_shmat(first
, ptr
, second
);
3119 if (is_error(raddr
))
3120 return get_errno(raddr
);
3121 if (put_user_ual(raddr
, third
))
3122 return -TARGET_EFAULT
;
3126 ret
= -TARGET_EINVAL
;
3131 ret
= do_shmdt(ptr
);
3135 /* IPC_* flag values are the same on all linux platforms */
3136 ret
= get_errno(shmget(first
, second
, third
));
3139 /* IPC_* and SHM_* command values are the same on all linux platforms */
3141 ret
= do_shmctl(first
, second
, third
);
3144 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3145 ret
= -TARGET_ENOSYS
;
3152 /* kernel structure types definitions */
3154 #define STRUCT(name, ...) STRUCT_ ## name,
3155 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3157 #include "syscall_types.h"
3160 #undef STRUCT_SPECIAL
3162 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3163 #define STRUCT_SPECIAL(name)
3164 #include "syscall_types.h"
3166 #undef STRUCT_SPECIAL
3168 typedef struct IOCTLEntry IOCTLEntry
;
3170 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3171 int fd
, abi_long cmd
, abi_long arg
);
3174 unsigned int target_cmd
;
3175 unsigned int host_cmd
;
3178 do_ioctl_fn
*do_ioctl
;
3179 const argtype arg_type
[5];
3182 #define IOC_R 0x0001
3183 #define IOC_W 0x0002
3184 #define IOC_RW (IOC_R | IOC_W)
3186 #define MAX_STRUCT_SIZE 4096
3188 #ifdef CONFIG_FIEMAP
3189 /* So fiemap access checks don't overflow on 32 bit systems.
3190 * This is very slightly smaller than the limit imposed by
3191 * the underlying kernel.
3193 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3194 / sizeof(struct fiemap_extent))
3196 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3197 int fd
, abi_long cmd
, abi_long arg
)
3199 /* The parameter for this ioctl is a struct fiemap followed
3200 * by an array of struct fiemap_extent whose size is set
3201 * in fiemap->fm_extent_count. The array is filled in by the
3204 int target_size_in
, target_size_out
;
3206 const argtype
*arg_type
= ie
->arg_type
;
3207 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3210 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3214 assert(arg_type
[0] == TYPE_PTR
);
3215 assert(ie
->access
== IOC_RW
);
3217 target_size_in
= thunk_type_size(arg_type
, 0);
3218 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3220 return -TARGET_EFAULT
;
3222 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3223 unlock_user(argptr
, arg
, 0);
3224 fm
= (struct fiemap
*)buf_temp
;
3225 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3226 return -TARGET_EINVAL
;
3229 outbufsz
= sizeof (*fm
) +
3230 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3232 if (outbufsz
> MAX_STRUCT_SIZE
) {
3233 /* We can't fit all the extents into the fixed size buffer.
3234 * Allocate one that is large enough and use it instead.
3236 fm
= malloc(outbufsz
);
3238 return -TARGET_ENOMEM
;
3240 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3243 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3244 if (!is_error(ret
)) {
3245 target_size_out
= target_size_in
;
3246 /* An extent_count of 0 means we were only counting the extents
3247 * so there are no structs to copy
3249 if (fm
->fm_extent_count
!= 0) {
3250 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3252 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3254 ret
= -TARGET_EFAULT
;
3256 /* Convert the struct fiemap */
3257 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3258 if (fm
->fm_extent_count
!= 0) {
3259 p
= argptr
+ target_size_in
;
3260 /* ...and then all the struct fiemap_extents */
3261 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3262 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3267 unlock_user(argptr
, arg
, target_size_out
);
3277 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3278 int fd
, abi_long cmd
, abi_long arg
)
3280 const argtype
*arg_type
= ie
->arg_type
;
3284 struct ifconf
*host_ifconf
;
3286 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3287 int target_ifreq_size
;
3292 abi_long target_ifc_buf
;
3296 assert(arg_type
[0] == TYPE_PTR
);
3297 assert(ie
->access
== IOC_RW
);
3300 target_size
= thunk_type_size(arg_type
, 0);
3302 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3304 return -TARGET_EFAULT
;
3305 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3306 unlock_user(argptr
, arg
, 0);
3308 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3309 target_ifc_len
= host_ifconf
->ifc_len
;
3310 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3312 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3313 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3314 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3316 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3317 if (outbufsz
> MAX_STRUCT_SIZE
) {
3318 /* We can't fit all the extents into the fixed size buffer.
3319 * Allocate one that is large enough and use it instead.
3321 host_ifconf
= malloc(outbufsz
);
3323 return -TARGET_ENOMEM
;
3325 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3328 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3330 host_ifconf
->ifc_len
= host_ifc_len
;
3331 host_ifconf
->ifc_buf
= host_ifc_buf
;
3333 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3334 if (!is_error(ret
)) {
3335 /* convert host ifc_len to target ifc_len */
3337 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3338 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3339 host_ifconf
->ifc_len
= target_ifc_len
;
3341 /* restore target ifc_buf */
3343 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3345 /* copy struct ifconf to target user */
3347 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3349 return -TARGET_EFAULT
;
3350 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3351 unlock_user(argptr
, arg
, target_size
);
3353 /* copy ifreq[] to target user */
3355 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3356 for (i
= 0; i
< nb_ifreq
; i
++) {
3357 thunk_convert(argptr
+ i
* target_ifreq_size
,
3358 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3359 ifreq_arg_type
, THUNK_TARGET
);
3361 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3371 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
3372 abi_long cmd
, abi_long arg
)
3375 struct dm_ioctl
*host_dm
;
3376 abi_long guest_data
;
3377 uint32_t guest_data_size
;
3379 const argtype
*arg_type
= ie
->arg_type
;
3381 void *big_buf
= NULL
;
3385 target_size
= thunk_type_size(arg_type
, 0);
3386 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3388 ret
= -TARGET_EFAULT
;
3391 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3392 unlock_user(argptr
, arg
, 0);
3394 /* buf_temp is too small, so fetch things into a bigger buffer */
3395 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
3396 memcpy(big_buf
, buf_temp
, target_size
);
3400 guest_data
= arg
+ host_dm
->data_start
;
3401 if ((guest_data
- arg
) < 0) {
3405 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3406 host_data
= (char*)host_dm
+ host_dm
->data_start
;
3408 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
3409 switch (ie
->host_cmd
) {
3411 case DM_LIST_DEVICES
:
3414 case DM_DEV_SUSPEND
:
3417 case DM_TABLE_STATUS
:
3418 case DM_TABLE_CLEAR
:
3420 case DM_LIST_VERSIONS
:
3424 case DM_DEV_SET_GEOMETRY
:
3425 /* data contains only strings */
3426 memcpy(host_data
, argptr
, guest_data_size
);
3429 memcpy(host_data
, argptr
, guest_data_size
);
3430 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
3434 void *gspec
= argptr
;
3435 void *cur_data
= host_data
;
3436 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3437 int spec_size
= thunk_type_size(arg_type
, 0);
3440 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3441 struct dm_target_spec
*spec
= cur_data
;
3445 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
3446 slen
= strlen((char*)gspec
+ spec_size
) + 1;
3448 spec
->next
= sizeof(*spec
) + slen
;
3449 strcpy((char*)&spec
[1], gspec
+ spec_size
);
3451 cur_data
+= spec
->next
;
3456 ret
= -TARGET_EINVAL
;
3459 unlock_user(argptr
, guest_data
, 0);
3461 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3462 if (!is_error(ret
)) {
3463 guest_data
= arg
+ host_dm
->data_start
;
3464 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
3465 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
3466 switch (ie
->host_cmd
) {
3471 case DM_DEV_SUSPEND
:
3474 case DM_TABLE_CLEAR
:
3476 case DM_DEV_SET_GEOMETRY
:
3477 /* no return data */
3479 case DM_LIST_DEVICES
:
3481 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
3482 uint32_t remaining_data
= guest_data_size
;
3483 void *cur_data
= argptr
;
3484 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
3485 int nl_size
= 12; /* can't use thunk_size due to alignment */
3488 uint32_t next
= nl
->next
;
3490 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
3492 if (remaining_data
< nl
->next
) {
3493 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3496 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
3497 strcpy(cur_data
+ nl_size
, nl
->name
);
3498 cur_data
+= nl
->next
;
3499 remaining_data
-= nl
->next
;
3503 nl
= (void*)nl
+ next
;
3508 case DM_TABLE_STATUS
:
3510 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
3511 void *cur_data
= argptr
;
3512 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
3513 int spec_size
= thunk_type_size(arg_type
, 0);
3516 for (i
= 0; i
< host_dm
->target_count
; i
++) {
3517 uint32_t next
= spec
->next
;
3518 int slen
= strlen((char*)&spec
[1]) + 1;
3519 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
3520 if (guest_data_size
< spec
->next
) {
3521 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3524 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
3525 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
3526 cur_data
= argptr
+ spec
->next
;
3527 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
3533 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
3534 int count
= *(uint32_t*)hdata
;
3535 uint64_t *hdev
= hdata
+ 8;
3536 uint64_t *gdev
= argptr
+ 8;
3539 *(uint32_t*)argptr
= tswap32(count
);
3540 for (i
= 0; i
< count
; i
++) {
3541 *gdev
= tswap64(*hdev
);
3547 case DM_LIST_VERSIONS
:
3549 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
3550 uint32_t remaining_data
= guest_data_size
;
3551 void *cur_data
= argptr
;
3552 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
3553 int vers_size
= thunk_type_size(arg_type
, 0);
3556 uint32_t next
= vers
->next
;
3558 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
3560 if (remaining_data
< vers
->next
) {
3561 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
3564 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
3565 strcpy(cur_data
+ vers_size
, vers
->name
);
3566 cur_data
+= vers
->next
;
3567 remaining_data
-= vers
->next
;
3571 vers
= (void*)vers
+ next
;
3576 ret
= -TARGET_EINVAL
;
3579 unlock_user(argptr
, guest_data
, guest_data_size
);
3581 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3583 ret
= -TARGET_EFAULT
;
3586 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3587 unlock_user(argptr
, arg
, target_size
);
3596 static IOCTLEntry ioctl_entries
[] = {
3597 #define IOCTL(cmd, access, ...) \
3598 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3599 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3600 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3605 /* ??? Implement proper locking for ioctls. */
3606 /* do_ioctl() Must return target values and target errnos. */
3607 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3609 const IOCTLEntry
*ie
;
3610 const argtype
*arg_type
;
3612 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3618 if (ie
->target_cmd
== 0) {
3619 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3620 return -TARGET_ENOSYS
;
3622 if (ie
->target_cmd
== cmd
)
3626 arg_type
= ie
->arg_type
;
3628 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3631 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3634 switch(arg_type
[0]) {
3637 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3642 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3646 target_size
= thunk_type_size(arg_type
, 0);
3647 switch(ie
->access
) {
3649 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3650 if (!is_error(ret
)) {
3651 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3653 return -TARGET_EFAULT
;
3654 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3655 unlock_user(argptr
, arg
, target_size
);
3659 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3661 return -TARGET_EFAULT
;
3662 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3663 unlock_user(argptr
, arg
, 0);
3664 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3668 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3670 return -TARGET_EFAULT
;
3671 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3672 unlock_user(argptr
, arg
, 0);
3673 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3674 if (!is_error(ret
)) {
3675 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3677 return -TARGET_EFAULT
;
3678 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3679 unlock_user(argptr
, arg
, target_size
);
3685 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3686 (long)cmd
, arg_type
[0]);
3687 ret
= -TARGET_ENOSYS
;
3693 static const bitmask_transtbl iflag_tbl
[] = {
3694 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
3695 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
3696 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
3697 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
3698 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
3699 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
3700 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
3701 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
3702 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
3703 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
3704 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
3705 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
3706 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
3707 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
3711 static const bitmask_transtbl oflag_tbl
[] = {
3712 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
3713 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
3714 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
3715 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
3716 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
3717 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
3718 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
3719 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
3720 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
3721 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
3722 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
3723 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
3724 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
3725 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
3726 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3727 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3728 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3729 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3730 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3731 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3732 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3733 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3734 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3735 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3739 static const bitmask_transtbl cflag_tbl
[] = {
3740 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3741 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3742 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3743 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3744 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3745 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3746 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3747 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3748 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3749 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3750 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3751 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3752 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3753 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3754 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3755 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3756 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3757 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3758 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3759 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3760 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3761 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3762 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3763 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3764 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3765 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3766 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3767 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3768 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3769 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3770 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3774 static const bitmask_transtbl lflag_tbl
[] = {
3775 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3776 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3777 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3778 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3779 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3780 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3781 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3782 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3783 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3784 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3785 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3786 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3787 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3788 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3789 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3793 static void target_to_host_termios (void *dst
, const void *src
)
3795 struct host_termios
*host
= dst
;
3796 const struct target_termios
*target
= src
;
3799 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3801 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3803 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3805 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3806 host
->c_line
= target
->c_line
;
3808 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3809 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3810 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3811 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3812 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3813 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3814 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3815 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3816 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3817 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3818 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3819 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3820 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3821 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3822 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3823 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3824 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3825 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3828 static void host_to_target_termios (void *dst
, const void *src
)
3830 struct target_termios
*target
= dst
;
3831 const struct host_termios
*host
= src
;
3834 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3836 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3838 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3840 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3841 target
->c_line
= host
->c_line
;
3843 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3844 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3845 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3846 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3847 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3848 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3849 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3850 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3851 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3852 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3853 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3854 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3855 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3856 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3857 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3858 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3859 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3860 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3863 static const StructEntry struct_termios_def
= {
3864 .convert
= { host_to_target_termios
, target_to_host_termios
},
3865 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3866 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3869 static bitmask_transtbl mmap_flags_tbl
[] = {
3870 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3871 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3872 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3873 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3874 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3875 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3876 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3877 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3881 #if defined(TARGET_I386)
3883 /* NOTE: there is really one LDT for all the threads */
3884 static uint8_t *ldt_table
;
3886 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3893 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3894 if (size
> bytecount
)
3896 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3898 return -TARGET_EFAULT
;
3899 /* ??? Should this by byteswapped? */
3900 memcpy(p
, ldt_table
, size
);
3901 unlock_user(p
, ptr
, size
);
3905 /* XXX: add locking support */
3906 static abi_long
write_ldt(CPUX86State
*env
,
3907 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3909 struct target_modify_ldt_ldt_s ldt_info
;
3910 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3911 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3912 int seg_not_present
, useable
, lm
;
3913 uint32_t *lp
, entry_1
, entry_2
;
3915 if (bytecount
!= sizeof(ldt_info
))
3916 return -TARGET_EINVAL
;
3917 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3918 return -TARGET_EFAULT
;
3919 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3920 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3921 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3922 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3923 unlock_user_struct(target_ldt_info
, ptr
, 0);
3925 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3926 return -TARGET_EINVAL
;
3927 seg_32bit
= ldt_info
.flags
& 1;
3928 contents
= (ldt_info
.flags
>> 1) & 3;
3929 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3930 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3931 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3932 useable
= (ldt_info
.flags
>> 6) & 1;
3936 lm
= (ldt_info
.flags
>> 7) & 1;
3938 if (contents
== 3) {
3940 return -TARGET_EINVAL
;
3941 if (seg_not_present
== 0)
3942 return -TARGET_EINVAL
;
3944 /* allocate the LDT */
3946 env
->ldt
.base
= target_mmap(0,
3947 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3948 PROT_READ
|PROT_WRITE
,
3949 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3950 if (env
->ldt
.base
== -1)
3951 return -TARGET_ENOMEM
;
3952 memset(g2h(env
->ldt
.base
), 0,
3953 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3954 env
->ldt
.limit
= 0xffff;
3955 ldt_table
= g2h(env
->ldt
.base
);
3958 /* NOTE: same code as Linux kernel */
3959 /* Allow LDTs to be cleared by the user. */
3960 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3963 read_exec_only
== 1 &&
3965 limit_in_pages
== 0 &&
3966 seg_not_present
== 1 &&
3974 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3975 (ldt_info
.limit
& 0x0ffff);
3976 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3977 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3978 (ldt_info
.limit
& 0xf0000) |
3979 ((read_exec_only
^ 1) << 9) |
3981 ((seg_not_present
^ 1) << 15) |
3983 (limit_in_pages
<< 23) |
3987 entry_2
|= (useable
<< 20);
3989 /* Install the new entry ... */
3991 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3992 lp
[0] = tswap32(entry_1
);
3993 lp
[1] = tswap32(entry_2
);
3997 /* specific and weird i386 syscalls */
3998 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3999 unsigned long bytecount
)
4005 ret
= read_ldt(ptr
, bytecount
);
4008 ret
= write_ldt(env
, ptr
, bytecount
, 1);
4011 ret
= write_ldt(env
, ptr
, bytecount
, 0);
4014 ret
= -TARGET_ENOSYS
;
4020 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4021 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4023 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4024 struct target_modify_ldt_ldt_s ldt_info
;
4025 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4026 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
4027 int seg_not_present
, useable
, lm
;
4028 uint32_t *lp
, entry_1
, entry_2
;
4031 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4032 if (!target_ldt_info
)
4033 return -TARGET_EFAULT
;
4034 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
4035 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
4036 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
4037 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
4038 if (ldt_info
.entry_number
== -1) {
4039 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
4040 if (gdt_table
[i
] == 0) {
4041 ldt_info
.entry_number
= i
;
4042 target_ldt_info
->entry_number
= tswap32(i
);
4047 unlock_user_struct(target_ldt_info
, ptr
, 1);
4049 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
4050 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
4051 return -TARGET_EINVAL
;
4052 seg_32bit
= ldt_info
.flags
& 1;
4053 contents
= (ldt_info
.flags
>> 1) & 3;
4054 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
4055 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
4056 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
4057 useable
= (ldt_info
.flags
>> 6) & 1;
4061 lm
= (ldt_info
.flags
>> 7) & 1;
4064 if (contents
== 3) {
4065 if (seg_not_present
== 0)
4066 return -TARGET_EINVAL
;
4069 /* NOTE: same code as Linux kernel */
4070 /* Allow LDTs to be cleared by the user. */
4071 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
4072 if ((contents
== 0 &&
4073 read_exec_only
== 1 &&
4075 limit_in_pages
== 0 &&
4076 seg_not_present
== 1 &&
4084 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
4085 (ldt_info
.limit
& 0x0ffff);
4086 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
4087 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
4088 (ldt_info
.limit
& 0xf0000) |
4089 ((read_exec_only
^ 1) << 9) |
4091 ((seg_not_present
^ 1) << 15) |
4093 (limit_in_pages
<< 23) |
4098 /* Install the new entry ... */
4100 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
4101 lp
[0] = tswap32(entry_1
);
4102 lp
[1] = tswap32(entry_2
);
4106 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
4108 struct target_modify_ldt_ldt_s
*target_ldt_info
;
4109 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
4110 uint32_t base_addr
, limit
, flags
;
4111 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
4112 int seg_not_present
, useable
, lm
;
4113 uint32_t *lp
, entry_1
, entry_2
;
4115 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
4116 if (!target_ldt_info
)
4117 return -TARGET_EFAULT
;
4118 idx
= tswap32(target_ldt_info
->entry_number
);
4119 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
4120 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
4121 unlock_user_struct(target_ldt_info
, ptr
, 1);
4122 return -TARGET_EINVAL
;
4124 lp
= (uint32_t *)(gdt_table
+ idx
);
4125 entry_1
= tswap32(lp
[0]);
4126 entry_2
= tswap32(lp
[1]);
4128 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
4129 contents
= (entry_2
>> 10) & 3;
4130 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
4131 seg_32bit
= (entry_2
>> 22) & 1;
4132 limit_in_pages
= (entry_2
>> 23) & 1;
4133 useable
= (entry_2
>> 20) & 1;
4137 lm
= (entry_2
>> 21) & 1;
4139 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
4140 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
4141 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
4142 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
4143 base_addr
= (entry_1
>> 16) |
4144 (entry_2
& 0xff000000) |
4145 ((entry_2
& 0xff) << 16);
4146 target_ldt_info
->base_addr
= tswapal(base_addr
);
4147 target_ldt_info
->limit
= tswap32(limit
);
4148 target_ldt_info
->flags
= tswap32(flags
);
4149 unlock_user_struct(target_ldt_info
, ptr
, 1);
4152 #endif /* TARGET_I386 && TARGET_ABI32 */
4154 #ifndef TARGET_ABI32
4155 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4162 case TARGET_ARCH_SET_GS
:
4163 case TARGET_ARCH_SET_FS
:
4164 if (code
== TARGET_ARCH_SET_GS
)
4168 cpu_x86_load_seg(env
, idx
, 0);
4169 env
->segs
[idx
].base
= addr
;
4171 case TARGET_ARCH_GET_GS
:
4172 case TARGET_ARCH_GET_FS
:
4173 if (code
== TARGET_ARCH_GET_GS
)
4177 val
= env
->segs
[idx
].base
;
4178 if (put_user(val
, addr
, abi_ulong
))
4179 ret
= -TARGET_EFAULT
;
4182 ret
= -TARGET_EINVAL
;
4189 #endif /* defined(TARGET_I386) */
4191 #define NEW_STACK_SIZE 0x40000
4193 #if defined(CONFIG_USE_NPTL)
4195 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4198 pthread_mutex_t mutex
;
4199 pthread_cond_t cond
;
4202 abi_ulong child_tidptr
;
4203 abi_ulong parent_tidptr
;
4207 static void *clone_func(void *arg
)
4209 new_thread_info
*info
= arg
;
4215 ts
= (TaskState
*)thread_env
->opaque
;
4216 info
->tid
= gettid();
4217 env
->host_tid
= info
->tid
;
4219 if (info
->child_tidptr
)
4220 put_user_u32(info
->tid
, info
->child_tidptr
);
4221 if (info
->parent_tidptr
)
4222 put_user_u32(info
->tid
, info
->parent_tidptr
);
4223 /* Enable signals. */
4224 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4225 /* Signal to the parent that we're ready. */
4226 pthread_mutex_lock(&info
->mutex
);
4227 pthread_cond_broadcast(&info
->cond
);
4228 pthread_mutex_unlock(&info
->mutex
);
4229 /* Wait until the parent has finshed initializing the tls state. */
4230 pthread_mutex_lock(&clone_lock
);
4231 pthread_mutex_unlock(&clone_lock
);
4238 static int clone_func(void *arg
)
4240 CPUArchState
*env
= arg
;
4247 /* do_fork() Must return host values and target errnos (unlike most
4248 do_*() functions). */
4249 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
4250 abi_ulong parent_tidptr
, target_ulong newtls
,
4251 abi_ulong child_tidptr
)
4255 CPUArchState
*new_env
;
4256 #if defined(CONFIG_USE_NPTL)
4257 unsigned int nptl_flags
;
4263 /* Emulate vfork() with fork() */
4264 if (flags
& CLONE_VFORK
)
4265 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4267 if (flags
& CLONE_VM
) {
4268 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4269 #if defined(CONFIG_USE_NPTL)
4270 new_thread_info info
;
4271 pthread_attr_t attr
;
4273 ts
= g_malloc0(sizeof(TaskState
));
4274 init_task_state(ts
);
4275 /* we create a new CPU instance. */
4276 new_env
= cpu_copy(env
);
4277 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4278 cpu_reset(ENV_GET_CPU(new_env
));
4280 /* Init regs that differ from the parent. */
4281 cpu_clone_regs(new_env
, newsp
);
4282 new_env
->opaque
= ts
;
4283 ts
->bprm
= parent_ts
->bprm
;
4284 ts
->info
= parent_ts
->info
;
4285 #if defined(CONFIG_USE_NPTL)
4287 flags
&= ~CLONE_NPTL_FLAGS2
;
4289 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4290 ts
->child_tidptr
= child_tidptr
;
4293 if (nptl_flags
& CLONE_SETTLS
)
4294 cpu_set_tls (new_env
, newtls
);
4296 /* Grab a mutex so that thread setup appears atomic. */
4297 pthread_mutex_lock(&clone_lock
);
4299 memset(&info
, 0, sizeof(info
));
4300 pthread_mutex_init(&info
.mutex
, NULL
);
4301 pthread_mutex_lock(&info
.mutex
);
4302 pthread_cond_init(&info
.cond
, NULL
);
4304 if (nptl_flags
& CLONE_CHILD_SETTID
)
4305 info
.child_tidptr
= child_tidptr
;
4306 if (nptl_flags
& CLONE_PARENT_SETTID
)
4307 info
.parent_tidptr
= parent_tidptr
;
4309 ret
= pthread_attr_init(&attr
);
4310 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4311 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4312 /* It is not safe to deliver signals until the child has finished
4313 initializing, so temporarily block all signals. */
4314 sigfillset(&sigmask
);
4315 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4317 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4318 /* TODO: Free new CPU state if thread creation failed. */
4320 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4321 pthread_attr_destroy(&attr
);
4323 /* Wait for the child to initialize. */
4324 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4326 if (flags
& CLONE_PARENT_SETTID
)
4327 put_user_u32(ret
, parent_tidptr
);
4331 pthread_mutex_unlock(&info
.mutex
);
4332 pthread_cond_destroy(&info
.cond
);
4333 pthread_mutex_destroy(&info
.mutex
);
4334 pthread_mutex_unlock(&clone_lock
);
4336 if (flags
& CLONE_NPTL_FLAGS2
)
4338 /* This is probably going to die very quickly, but do it anyway. */
4339 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4341 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4343 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4347 /* if no CLONE_VM, we consider it is a fork */
4348 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4353 /* Child Process. */
4354 cpu_clone_regs(env
, newsp
);
4356 #if defined(CONFIG_USE_NPTL)
4357 /* There is a race condition here. The parent process could
4358 theoretically read the TID in the child process before the child
4359 tid is set. This would require using either ptrace
4360 (not implemented) or having *_tidptr to point at a shared memory
4361 mapping. We can't repeat the spinlock hack used above because
4362 the child process gets its own copy of the lock. */
4363 if (flags
& CLONE_CHILD_SETTID
)
4364 put_user_u32(gettid(), child_tidptr
);
4365 if (flags
& CLONE_PARENT_SETTID
)
4366 put_user_u32(gettid(), parent_tidptr
);
4367 ts
= (TaskState
*)env
->opaque
;
4368 if (flags
& CLONE_SETTLS
)
4369 cpu_set_tls (env
, newtls
);
4370 if (flags
& CLONE_CHILD_CLEARTID
)
4371 ts
->child_tidptr
= child_tidptr
;
4380 /* warning : doesn't handle linux specific flags... */
4381 static int target_to_host_fcntl_cmd(int cmd
)
4384 case TARGET_F_DUPFD
:
4385 case TARGET_F_GETFD
:
4386 case TARGET_F_SETFD
:
4387 case TARGET_F_GETFL
:
4388 case TARGET_F_SETFL
:
4390 case TARGET_F_GETLK
:
4392 case TARGET_F_SETLK
:
4394 case TARGET_F_SETLKW
:
4396 case TARGET_F_GETOWN
:
4398 case TARGET_F_SETOWN
:
4400 case TARGET_F_GETSIG
:
4402 case TARGET_F_SETSIG
:
4404 #if TARGET_ABI_BITS == 32
4405 case TARGET_F_GETLK64
:
4407 case TARGET_F_SETLK64
:
4409 case TARGET_F_SETLKW64
:
4412 case TARGET_F_SETLEASE
:
4414 case TARGET_F_GETLEASE
:
4416 #ifdef F_DUPFD_CLOEXEC
4417 case TARGET_F_DUPFD_CLOEXEC
:
4418 return F_DUPFD_CLOEXEC
;
4420 case TARGET_F_NOTIFY
:
4423 return -TARGET_EINVAL
;
4425 return -TARGET_EINVAL
;
4428 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4431 struct target_flock
*target_fl
;
4432 struct flock64 fl64
;
4433 struct target_flock64
*target_fl64
;
4435 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4437 if (host_cmd
== -TARGET_EINVAL
)
4441 case TARGET_F_GETLK
:
4442 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4443 return -TARGET_EFAULT
;
4444 fl
.l_type
= tswap16(target_fl
->l_type
);
4445 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4446 fl
.l_start
= tswapal(target_fl
->l_start
);
4447 fl
.l_len
= tswapal(target_fl
->l_len
);
4448 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4449 unlock_user_struct(target_fl
, arg
, 0);
4450 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4452 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4453 return -TARGET_EFAULT
;
4454 target_fl
->l_type
= tswap16(fl
.l_type
);
4455 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4456 target_fl
->l_start
= tswapal(fl
.l_start
);
4457 target_fl
->l_len
= tswapal(fl
.l_len
);
4458 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4459 unlock_user_struct(target_fl
, arg
, 1);
4463 case TARGET_F_SETLK
:
4464 case TARGET_F_SETLKW
:
4465 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4466 return -TARGET_EFAULT
;
4467 fl
.l_type
= tswap16(target_fl
->l_type
);
4468 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4469 fl
.l_start
= tswapal(target_fl
->l_start
);
4470 fl
.l_len
= tswapal(target_fl
->l_len
);
4471 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4472 unlock_user_struct(target_fl
, arg
, 0);
4473 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4476 case TARGET_F_GETLK64
:
4477 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4478 return -TARGET_EFAULT
;
4479 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4480 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4481 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4482 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4483 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4484 unlock_user_struct(target_fl64
, arg
, 0);
4485 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4487 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4488 return -TARGET_EFAULT
;
4489 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4490 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4491 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4492 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4493 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4494 unlock_user_struct(target_fl64
, arg
, 1);
4497 case TARGET_F_SETLK64
:
4498 case TARGET_F_SETLKW64
:
4499 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4500 return -TARGET_EFAULT
;
4501 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4502 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4503 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4504 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4505 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4506 unlock_user_struct(target_fl64
, arg
, 0);
4507 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4510 case TARGET_F_GETFL
:
4511 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4513 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4517 case TARGET_F_SETFL
:
4518 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4521 case TARGET_F_SETOWN
:
4522 case TARGET_F_GETOWN
:
4523 case TARGET_F_SETSIG
:
4524 case TARGET_F_GETSIG
:
4525 case TARGET_F_SETLEASE
:
4526 case TARGET_F_GETLEASE
:
4527 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4531 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4539 static inline int high2lowuid(int uid
)
4547 static inline int high2lowgid(int gid
)
4555 static inline int low2highuid(int uid
)
4557 if ((int16_t)uid
== -1)
4563 static inline int low2highgid(int gid
)
4565 if ((int16_t)gid
== -1)
4570 static inline int tswapid(int id
)
4574 #else /* !USE_UID16 */
4575 static inline int high2lowuid(int uid
)
4579 static inline int high2lowgid(int gid
)
4583 static inline int low2highuid(int uid
)
4587 static inline int low2highgid(int gid
)
4591 static inline int tswapid(int id
)
4595 #endif /* USE_UID16 */
4597 void syscall_init(void)
4600 const argtype
*arg_type
;
4604 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4605 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4606 #include "syscall_types.h"
4608 #undef STRUCT_SPECIAL
4610 /* we patch the ioctl size if necessary. We rely on the fact that
4611 no ioctl has all the bits at '1' in the size field */
4613 while (ie
->target_cmd
!= 0) {
4614 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4615 TARGET_IOC_SIZEMASK
) {
4616 arg_type
= ie
->arg_type
;
4617 if (arg_type
[0] != TYPE_PTR
) {
4618 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4623 size
= thunk_type_size(arg_type
, 0);
4624 ie
->target_cmd
= (ie
->target_cmd
&
4625 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4626 (size
<< TARGET_IOC_SIZESHIFT
);
4629 /* Build target_to_host_errno_table[] table from
4630 * host_to_target_errno_table[]. */
4631 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4632 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4634 /* automatic consistency check if same arch */
4635 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4636 (defined(__x86_64__) && defined(TARGET_X86_64))
4637 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4638 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4639 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4646 #if TARGET_ABI_BITS == 32
4647 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4649 #ifdef TARGET_WORDS_BIGENDIAN
4650 return ((uint64_t)word0
<< 32) | word1
;
4652 return ((uint64_t)word1
<< 32) | word0
;
4655 #else /* TARGET_ABI_BITS == 32 */
4656 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4660 #endif /* TARGET_ABI_BITS != 32 */
4662 #ifdef TARGET_NR_truncate64
4663 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4668 if (regpairs_aligned(cpu_env
)) {
4672 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4676 #ifdef TARGET_NR_ftruncate64
4677 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4682 if (regpairs_aligned(cpu_env
)) {
4686 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4690 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4691 abi_ulong target_addr
)
4693 struct target_timespec
*target_ts
;
4695 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4696 return -TARGET_EFAULT
;
4697 host_ts
->tv_sec
= tswapal(target_ts
->tv_sec
);
4698 host_ts
->tv_nsec
= tswapal(target_ts
->tv_nsec
);
4699 unlock_user_struct(target_ts
, target_addr
, 0);
4703 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4704 struct timespec
*host_ts
)
4706 struct target_timespec
*target_ts
;
4708 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4709 return -TARGET_EFAULT
;
4710 target_ts
->tv_sec
= tswapal(host_ts
->tv_sec
);
4711 target_ts
->tv_nsec
= tswapal(host_ts
->tv_nsec
);
4712 unlock_user_struct(target_ts
, target_addr
, 1);
4716 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4717 static inline abi_long
host_to_target_stat64(void *cpu_env
,
4718 abi_ulong target_addr
,
4719 struct stat
*host_st
)
4722 if (((CPUARMState
*)cpu_env
)->eabi
) {
4723 struct target_eabi_stat64
*target_st
;
4725 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4726 return -TARGET_EFAULT
;
4727 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
4728 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4729 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4730 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4731 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4733 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4734 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4735 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4736 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4737 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4738 __put_user(host_st
->st_size
, &target_st
->st_size
);
4739 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4740 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4741 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4742 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4743 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4744 unlock_user_struct(target_st
, target_addr
, 1);
4748 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4749 struct target_stat
*target_st
;
4751 struct target_stat64
*target_st
;
4754 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
4755 return -TARGET_EFAULT
;
4756 memset(target_st
, 0, sizeof(*target_st
));
4757 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
4758 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
4759 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4760 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4762 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4763 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4764 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4765 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4766 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4767 /* XXX: better use of kernel struct */
4768 __put_user(host_st
->st_size
, &target_st
->st_size
);
4769 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4770 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4771 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4772 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4773 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4774 unlock_user_struct(target_st
, target_addr
, 1);
4781 #if defined(CONFIG_USE_NPTL)
4782 /* ??? Using host futex calls even when target atomic operations
4783 are not really atomic probably breaks things. However implementing
4784 futexes locally would make futexes shared between multiple processes
4785 tricky. However they're probably useless because guest atomic
4786 operations won't work either. */
4787 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4788 target_ulong uaddr2
, int val3
)
4790 struct timespec ts
, *pts
;
4793 /* ??? We assume FUTEX_* constants are the same on both host
4795 #ifdef FUTEX_CMD_MASK
4796 base_op
= op
& FUTEX_CMD_MASK
;
4804 target_to_host_timespec(pts
, timeout
);
4808 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4811 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4813 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4815 case FUTEX_CMP_REQUEUE
:
4817 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4818 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4819 But the prototype takes a `struct timespec *'; insert casts
4820 to satisfy the compiler. We do not need to tswap TIMEOUT
4821 since it's not compared to guest memory. */
4822 pts
= (struct timespec
*)(uintptr_t) timeout
;
4823 return get_errno(sys_futex(g2h(uaddr
), op
, val
, pts
,
4825 (base_op
== FUTEX_CMP_REQUEUE
4829 return -TARGET_ENOSYS
;
4834 /* Map host to target signal numbers for the wait family of syscalls.
4835 Assume all other status bits are the same. */
4836 static int host_to_target_waitstatus(int status
)
4838 if (WIFSIGNALED(status
)) {
4839 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
4841 if (WIFSTOPPED(status
)) {
4842 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
4848 int get_osversion(void)
4850 static int osversion
;
4851 struct new_utsname buf
;
4856 if (qemu_uname_release
&& *qemu_uname_release
) {
4857 s
= qemu_uname_release
;
4859 if (sys_uname(&buf
))
4864 for (i
= 0; i
< 3; i
++) {
4866 while (*s
>= '0' && *s
<= '9') {
4871 tmp
= (tmp
<< 8) + n
;
4880 static int open_self_maps(void *cpu_env
, int fd
)
4882 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4883 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4890 fp
= fopen("/proc/self/maps", "r");
4895 while ((read
= getline(&line
, &len
, fp
)) != -1) {
4896 int fields
, dev_maj
, dev_min
, inode
;
4897 uint64_t min
, max
, offset
;
4898 char flag_r
, flag_w
, flag_x
, flag_p
;
4899 char path
[512] = "";
4900 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
4901 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
4902 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
4904 if ((fields
< 10) || (fields
> 11)) {
4907 if (!strncmp(path
, "[stack]", 7)) {
4910 if (h2g_valid(min
) && h2g_valid(max
)) {
4911 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
4912 " %c%c%c%c %08" PRIx64
" %02x:%02x %d%s%s\n",
4913 h2g(min
), h2g(max
), flag_r
, flag_w
,
4914 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
4915 path
[0] ? " " : "", path
);
4922 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4923 dprintf(fd
, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
4924 (unsigned long long)ts
->info
->stack_limit
,
4925 (unsigned long long)(ts
->stack_base
+ (TARGET_PAGE_SIZE
- 1))
4927 (unsigned long long)0);
4933 static int open_self_stat(void *cpu_env
, int fd
)
4935 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4936 abi_ulong start_stack
= ts
->info
->start_stack
;
4939 for (i
= 0; i
< 44; i
++) {
4947 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
4948 } else if (i
== 1) {
4950 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
4951 } else if (i
== 27) {
4954 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
4956 /* for the rest, there is MasterCard */
4957 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
4961 if (write(fd
, buf
, len
) != len
) {
4969 static int open_self_auxv(void *cpu_env
, int fd
)
4971 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
4972 abi_ulong auxv
= ts
->info
->saved_auxv
;
4973 abi_ulong len
= ts
->info
->auxv_len
;
4977 * Auxiliary vector is stored in target process stack.
4978 * read in whole auxv vector and copy it to file
4980 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
4984 r
= write(fd
, ptr
, len
);
4991 lseek(fd
, 0, SEEK_SET
);
4992 unlock_user(ptr
, auxv
, len
);
4998 static int do_open(void *cpu_env
, const char *pathname
, int flags
, mode_t mode
)
5001 const char *filename
;
5002 int (*fill
)(void *cpu_env
, int fd
);
5004 const struct fake_open
*fake_open
;
5005 static const struct fake_open fakes
[] = {
5006 { "/proc/self/maps", open_self_maps
},
5007 { "/proc/self/stat", open_self_stat
},
5008 { "/proc/self/auxv", open_self_auxv
},
5012 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
5013 if (!strncmp(pathname
, fake_open
->filename
,
5014 strlen(fake_open
->filename
))) {
5019 if (fake_open
->filename
) {
5021 char filename
[PATH_MAX
];
5024 /* create temporary file to map stat to */
5025 tmpdir
= getenv("TMPDIR");
5028 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
5029 fd
= mkstemp(filename
);
5035 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
5039 lseek(fd
, 0, SEEK_SET
);
5044 return get_errno(open(path(pathname
), flags
, mode
));
5047 /* do_syscall() should always have a single exit point at the end so
5048 that actions, such as logging of syscall results, can be performed.
5049 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5050 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
5051 abi_long arg2
, abi_long arg3
, abi_long arg4
,
5052 abi_long arg5
, abi_long arg6
, abi_long arg7
,
5061 gemu_log("syscall %d", num
);
5064 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5067 case TARGET_NR_exit
:
5068 #ifdef CONFIG_USE_NPTL
5069 /* In old applications this may be used to implement _exit(2).
5070 However in threaded applictions it is used for thread termination,
5071 and _exit_group is used for application termination.
5072 Do thread termination if we have more then one thread. */
5073 /* FIXME: This probably breaks if a signal arrives. We should probably
5074 be disabling signals. */
5075 if (first_cpu
->next_cpu
) {
5077 CPUArchState
**lastp
;
5083 while (p
&& p
!= (CPUArchState
*)cpu_env
) {
5084 lastp
= &p
->next_cpu
;
5087 /* If we didn't find the CPU for this thread then something is
5091 /* Remove the CPU from the list. */
5092 *lastp
= p
->next_cpu
;
5094 ts
= ((CPUArchState
*)cpu_env
)->opaque
;
5095 if (ts
->child_tidptr
) {
5096 put_user_u32(0, ts
->child_tidptr
);
5097 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
5101 object_delete(OBJECT(ENV_GET_CPU(cpu_env
)));
5109 gdb_exit(cpu_env
, arg1
);
5111 ret
= 0; /* avoid warning */
5113 case TARGET_NR_read
:
5117 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
5119 ret
= get_errno(read(arg1
, p
, arg3
));
5120 unlock_user(p
, arg2
, ret
);
5123 case TARGET_NR_write
:
5124 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
5126 ret
= get_errno(write(arg1
, p
, arg3
));
5127 unlock_user(p
, arg2
, 0);
5129 case TARGET_NR_open
:
5130 if (!(p
= lock_user_string(arg1
)))
5132 ret
= get_errno(do_open(cpu_env
, p
,
5133 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
5135 unlock_user(p
, arg1
, 0);
5137 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5138 case TARGET_NR_openat
:
5139 if (!(p
= lock_user_string(arg2
)))
5141 ret
= get_errno(sys_openat(arg1
,
5143 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
5145 unlock_user(p
, arg2
, 0);
5148 case TARGET_NR_close
:
5149 ret
= get_errno(close(arg1
));
5154 case TARGET_NR_fork
:
5155 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
5157 #ifdef TARGET_NR_waitpid
5158 case TARGET_NR_waitpid
:
5161 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
5162 if (!is_error(ret
) && arg2
&& ret
5163 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
5168 #ifdef TARGET_NR_waitid
5169 case TARGET_NR_waitid
:
5173 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
5174 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
5175 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
5177 host_to_target_siginfo(p
, &info
);
5178 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
5183 #ifdef TARGET_NR_creat /* not on alpha */
5184 case TARGET_NR_creat
:
5185 if (!(p
= lock_user_string(arg1
)))
5187 ret
= get_errno(creat(p
, arg2
));
5188 unlock_user(p
, arg1
, 0);
5191 case TARGET_NR_link
:
5194 p
= lock_user_string(arg1
);
5195 p2
= lock_user_string(arg2
);
5197 ret
= -TARGET_EFAULT
;
5199 ret
= get_errno(link(p
, p2
));
5200 unlock_user(p2
, arg2
, 0);
5201 unlock_user(p
, arg1
, 0);
5204 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
5205 case TARGET_NR_linkat
:
5210 p
= lock_user_string(arg2
);
5211 p2
= lock_user_string(arg4
);
5213 ret
= -TARGET_EFAULT
;
5215 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
5216 unlock_user(p
, arg2
, 0);
5217 unlock_user(p2
, arg4
, 0);
5221 case TARGET_NR_unlink
:
5222 if (!(p
= lock_user_string(arg1
)))
5224 ret
= get_errno(unlink(p
));
5225 unlock_user(p
, arg1
, 0);
5227 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
5228 case TARGET_NR_unlinkat
:
5229 if (!(p
= lock_user_string(arg2
)))
5231 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
5232 unlock_user(p
, arg2
, 0);
5235 case TARGET_NR_execve
:
5237 char **argp
, **envp
;
5240 abi_ulong guest_argp
;
5241 abi_ulong guest_envp
;
5248 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
5249 if (get_user_ual(addr
, gp
))
5257 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
5258 if (get_user_ual(addr
, gp
))
5265 argp
= alloca((argc
+ 1) * sizeof(void *));
5266 envp
= alloca((envc
+ 1) * sizeof(void *));
5268 for (gp
= guest_argp
, q
= argp
; gp
;
5269 gp
+= sizeof(abi_ulong
), q
++) {
5270 if (get_user_ual(addr
, gp
))
5274 if (!(*q
= lock_user_string(addr
)))
5276 total_size
+= strlen(*q
) + 1;
5280 for (gp
= guest_envp
, q
= envp
; gp
;
5281 gp
+= sizeof(abi_ulong
), q
++) {
5282 if (get_user_ual(addr
, gp
))
5286 if (!(*q
= lock_user_string(addr
)))
5288 total_size
+= strlen(*q
) + 1;
5292 /* This case will not be caught by the host's execve() if its
5293 page size is bigger than the target's. */
5294 if (total_size
> MAX_ARG_PAGES
* TARGET_PAGE_SIZE
) {
5295 ret
= -TARGET_E2BIG
;
5298 if (!(p
= lock_user_string(arg1
)))
5300 ret
= get_errno(execve(p
, argp
, envp
));
5301 unlock_user(p
, arg1
, 0);
5306 ret
= -TARGET_EFAULT
;
5309 for (gp
= guest_argp
, q
= argp
; *q
;
5310 gp
+= sizeof(abi_ulong
), q
++) {
5311 if (get_user_ual(addr
, gp
)
5314 unlock_user(*q
, addr
, 0);
5316 for (gp
= guest_envp
, q
= envp
; *q
;
5317 gp
+= sizeof(abi_ulong
), q
++) {
5318 if (get_user_ual(addr
, gp
)
5321 unlock_user(*q
, addr
, 0);
5325 case TARGET_NR_chdir
:
5326 if (!(p
= lock_user_string(arg1
)))
5328 ret
= get_errno(chdir(p
));
5329 unlock_user(p
, arg1
, 0);
5331 #ifdef TARGET_NR_time
5332 case TARGET_NR_time
:
5335 ret
= get_errno(time(&host_time
));
5338 && put_user_sal(host_time
, arg1
))
5343 case TARGET_NR_mknod
:
5344 if (!(p
= lock_user_string(arg1
)))
5346 ret
= get_errno(mknod(p
, arg2
, arg3
));
5347 unlock_user(p
, arg1
, 0);
5349 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5350 case TARGET_NR_mknodat
:
5351 if (!(p
= lock_user_string(arg2
)))
5353 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
5354 unlock_user(p
, arg2
, 0);
5357 case TARGET_NR_chmod
:
5358 if (!(p
= lock_user_string(arg1
)))
5360 ret
= get_errno(chmod(p
, arg2
));
5361 unlock_user(p
, arg1
, 0);
5363 #ifdef TARGET_NR_break
5364 case TARGET_NR_break
:
5367 #ifdef TARGET_NR_oldstat
5368 case TARGET_NR_oldstat
:
5371 case TARGET_NR_lseek
:
5372 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5374 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5375 /* Alpha specific */
5376 case TARGET_NR_getxpid
:
5377 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5378 ret
= get_errno(getpid());
5381 #ifdef TARGET_NR_getpid
5382 case TARGET_NR_getpid
:
5383 ret
= get_errno(getpid());
5386 case TARGET_NR_mount
:
5388 /* need to look at the data field */
5390 p
= lock_user_string(arg1
);
5391 p2
= lock_user_string(arg2
);
5392 p3
= lock_user_string(arg3
);
5393 if (!p
|| !p2
|| !p3
)
5394 ret
= -TARGET_EFAULT
;
5396 /* FIXME - arg5 should be locked, but it isn't clear how to
5397 * do that since it's not guaranteed to be a NULL-terminated
5401 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5403 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5405 unlock_user(p
, arg1
, 0);
5406 unlock_user(p2
, arg2
, 0);
5407 unlock_user(p3
, arg3
, 0);
5410 #ifdef TARGET_NR_umount
5411 case TARGET_NR_umount
:
5412 if (!(p
= lock_user_string(arg1
)))
5414 ret
= get_errno(umount(p
));
5415 unlock_user(p
, arg1
, 0);
5418 #ifdef TARGET_NR_stime /* not on alpha */
5419 case TARGET_NR_stime
:
5422 if (get_user_sal(host_time
, arg1
))
5424 ret
= get_errno(stime(&host_time
));
5428 case TARGET_NR_ptrace
:
5430 #ifdef TARGET_NR_alarm /* not on alpha */
5431 case TARGET_NR_alarm
:
5435 #ifdef TARGET_NR_oldfstat
5436 case TARGET_NR_oldfstat
:
5439 #ifdef TARGET_NR_pause /* not on alpha */
5440 case TARGET_NR_pause
:
5441 ret
= get_errno(pause());
5444 #ifdef TARGET_NR_utime
5445 case TARGET_NR_utime
:
5447 struct utimbuf tbuf
, *host_tbuf
;
5448 struct target_utimbuf
*target_tbuf
;
5450 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5452 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5453 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5454 unlock_user_struct(target_tbuf
, arg2
, 0);
5459 if (!(p
= lock_user_string(arg1
)))
5461 ret
= get_errno(utime(p
, host_tbuf
));
5462 unlock_user(p
, arg1
, 0);
5466 case TARGET_NR_utimes
:
5468 struct timeval
*tvp
, tv
[2];
5470 if (copy_from_user_timeval(&tv
[0], arg2
)
5471 || copy_from_user_timeval(&tv
[1],
5472 arg2
+ sizeof(struct target_timeval
)))
5478 if (!(p
= lock_user_string(arg1
)))
5480 ret
= get_errno(utimes(p
, tvp
));
5481 unlock_user(p
, arg1
, 0);
5484 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5485 case TARGET_NR_futimesat
:
5487 struct timeval
*tvp
, tv
[2];
5489 if (copy_from_user_timeval(&tv
[0], arg3
)
5490 || copy_from_user_timeval(&tv
[1],
5491 arg3
+ sizeof(struct target_timeval
)))
5497 if (!(p
= lock_user_string(arg2
)))
5499 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5500 unlock_user(p
, arg2
, 0);
5504 #ifdef TARGET_NR_stty
5505 case TARGET_NR_stty
:
5508 #ifdef TARGET_NR_gtty
5509 case TARGET_NR_gtty
:
5512 case TARGET_NR_access
:
5513 if (!(p
= lock_user_string(arg1
)))
5515 ret
= get_errno(access(path(p
), arg2
));
5516 unlock_user(p
, arg1
, 0);
5518 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5519 case TARGET_NR_faccessat
:
5520 if (!(p
= lock_user_string(arg2
)))
5522 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5523 unlock_user(p
, arg2
, 0);
5526 #ifdef TARGET_NR_nice /* not on alpha */
5527 case TARGET_NR_nice
:
5528 ret
= get_errno(nice(arg1
));
5531 #ifdef TARGET_NR_ftime
5532 case TARGET_NR_ftime
:
5535 case TARGET_NR_sync
:
5539 case TARGET_NR_kill
:
5540 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5542 case TARGET_NR_rename
:
5545 p
= lock_user_string(arg1
);
5546 p2
= lock_user_string(arg2
);
5548 ret
= -TARGET_EFAULT
;
5550 ret
= get_errno(rename(p
, p2
));
5551 unlock_user(p2
, arg2
, 0);
5552 unlock_user(p
, arg1
, 0);
5555 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5556 case TARGET_NR_renameat
:
5559 p
= lock_user_string(arg2
);
5560 p2
= lock_user_string(arg4
);
5562 ret
= -TARGET_EFAULT
;
5564 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5565 unlock_user(p2
, arg4
, 0);
5566 unlock_user(p
, arg2
, 0);
5570 case TARGET_NR_mkdir
:
5571 if (!(p
= lock_user_string(arg1
)))
5573 ret
= get_errno(mkdir(p
, arg2
));
5574 unlock_user(p
, arg1
, 0);
5576 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5577 case TARGET_NR_mkdirat
:
5578 if (!(p
= lock_user_string(arg2
)))
5580 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5581 unlock_user(p
, arg2
, 0);
5584 case TARGET_NR_rmdir
:
5585 if (!(p
= lock_user_string(arg1
)))
5587 ret
= get_errno(rmdir(p
));
5588 unlock_user(p
, arg1
, 0);
5591 ret
= get_errno(dup(arg1
));
5593 case TARGET_NR_pipe
:
5594 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5596 #ifdef TARGET_NR_pipe2
5597 case TARGET_NR_pipe2
:
5598 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5601 case TARGET_NR_times
:
5603 struct target_tms
*tmsp
;
5605 ret
= get_errno(times(&tms
));
5607 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5610 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5611 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5612 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5613 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5616 ret
= host_to_target_clock_t(ret
);
5619 #ifdef TARGET_NR_prof
5620 case TARGET_NR_prof
:
5623 #ifdef TARGET_NR_signal
5624 case TARGET_NR_signal
:
5627 case TARGET_NR_acct
:
5629 ret
= get_errno(acct(NULL
));
5631 if (!(p
= lock_user_string(arg1
)))
5633 ret
= get_errno(acct(path(p
)));
5634 unlock_user(p
, arg1
, 0);
5637 #ifdef TARGET_NR_umount2 /* not on alpha */
5638 case TARGET_NR_umount2
:
5639 if (!(p
= lock_user_string(arg1
)))
5641 ret
= get_errno(umount2(p
, arg2
));
5642 unlock_user(p
, arg1
, 0);
5645 #ifdef TARGET_NR_lock
5646 case TARGET_NR_lock
:
5649 case TARGET_NR_ioctl
:
5650 ret
= do_ioctl(arg1
, arg2
, arg3
);
5652 case TARGET_NR_fcntl
:
5653 ret
= do_fcntl(arg1
, arg2
, arg3
);
5655 #ifdef TARGET_NR_mpx
5659 case TARGET_NR_setpgid
:
5660 ret
= get_errno(setpgid(arg1
, arg2
));
5662 #ifdef TARGET_NR_ulimit
5663 case TARGET_NR_ulimit
:
5666 #ifdef TARGET_NR_oldolduname
5667 case TARGET_NR_oldolduname
:
5670 case TARGET_NR_umask
:
5671 ret
= get_errno(umask(arg1
));
5673 case TARGET_NR_chroot
:
5674 if (!(p
= lock_user_string(arg1
)))
5676 ret
= get_errno(chroot(p
));
5677 unlock_user(p
, arg1
, 0);
5679 case TARGET_NR_ustat
:
5681 case TARGET_NR_dup2
:
5682 ret
= get_errno(dup2(arg1
, arg2
));
5684 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5685 case TARGET_NR_dup3
:
5686 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5689 #ifdef TARGET_NR_getppid /* not on alpha */
5690 case TARGET_NR_getppid
:
5691 ret
= get_errno(getppid());
5694 case TARGET_NR_getpgrp
:
5695 ret
= get_errno(getpgrp());
5697 case TARGET_NR_setsid
:
5698 ret
= get_errno(setsid());
5700 #ifdef TARGET_NR_sigaction
5701 case TARGET_NR_sigaction
:
5703 #if defined(TARGET_ALPHA)
5704 struct target_sigaction act
, oact
, *pact
= 0;
5705 struct target_old_sigaction
*old_act
;
5707 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5709 act
._sa_handler
= old_act
->_sa_handler
;
5710 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5711 act
.sa_flags
= old_act
->sa_flags
;
5712 act
.sa_restorer
= 0;
5713 unlock_user_struct(old_act
, arg2
, 0);
5716 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5717 if (!is_error(ret
) && arg3
) {
5718 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5720 old_act
->_sa_handler
= oact
._sa_handler
;
5721 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5722 old_act
->sa_flags
= oact
.sa_flags
;
5723 unlock_user_struct(old_act
, arg3
, 1);
5725 #elif defined(TARGET_MIPS)
5726 struct target_sigaction act
, oact
, *pact
, *old_act
;
5729 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5731 act
._sa_handler
= old_act
->_sa_handler
;
5732 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5733 act
.sa_flags
= old_act
->sa_flags
;
5734 unlock_user_struct(old_act
, arg2
, 0);
5740 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5742 if (!is_error(ret
) && arg3
) {
5743 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5745 old_act
->_sa_handler
= oact
._sa_handler
;
5746 old_act
->sa_flags
= oact
.sa_flags
;
5747 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5748 old_act
->sa_mask
.sig
[1] = 0;
5749 old_act
->sa_mask
.sig
[2] = 0;
5750 old_act
->sa_mask
.sig
[3] = 0;
5751 unlock_user_struct(old_act
, arg3
, 1);
5754 struct target_old_sigaction
*old_act
;
5755 struct target_sigaction act
, oact
, *pact
;
5757 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5759 act
._sa_handler
= old_act
->_sa_handler
;
5760 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5761 act
.sa_flags
= old_act
->sa_flags
;
5762 act
.sa_restorer
= old_act
->sa_restorer
;
5763 unlock_user_struct(old_act
, arg2
, 0);
5768 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5769 if (!is_error(ret
) && arg3
) {
5770 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5772 old_act
->_sa_handler
= oact
._sa_handler
;
5773 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5774 old_act
->sa_flags
= oact
.sa_flags
;
5775 old_act
->sa_restorer
= oact
.sa_restorer
;
5776 unlock_user_struct(old_act
, arg3
, 1);
5782 case TARGET_NR_rt_sigaction
:
5784 #if defined(TARGET_ALPHA)
5785 struct target_sigaction act
, oact
, *pact
= 0;
5786 struct target_rt_sigaction
*rt_act
;
5787 /* ??? arg4 == sizeof(sigset_t). */
5789 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5791 act
._sa_handler
= rt_act
->_sa_handler
;
5792 act
.sa_mask
= rt_act
->sa_mask
;
5793 act
.sa_flags
= rt_act
->sa_flags
;
5794 act
.sa_restorer
= arg5
;
5795 unlock_user_struct(rt_act
, arg2
, 0);
5798 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5799 if (!is_error(ret
) && arg3
) {
5800 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5802 rt_act
->_sa_handler
= oact
._sa_handler
;
5803 rt_act
->sa_mask
= oact
.sa_mask
;
5804 rt_act
->sa_flags
= oact
.sa_flags
;
5805 unlock_user_struct(rt_act
, arg3
, 1);
5808 struct target_sigaction
*act
;
5809 struct target_sigaction
*oact
;
5812 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5817 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5818 ret
= -TARGET_EFAULT
;
5819 goto rt_sigaction_fail
;
5823 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5826 unlock_user_struct(act
, arg2
, 0);
5828 unlock_user_struct(oact
, arg3
, 1);
5832 #ifdef TARGET_NR_sgetmask /* not on alpha */
5833 case TARGET_NR_sgetmask
:
5836 abi_ulong target_set
;
5837 sigprocmask(0, NULL
, &cur_set
);
5838 host_to_target_old_sigset(&target_set
, &cur_set
);
5843 #ifdef TARGET_NR_ssetmask /* not on alpha */
5844 case TARGET_NR_ssetmask
:
5846 sigset_t set
, oset
, cur_set
;
5847 abi_ulong target_set
= arg1
;
5848 sigprocmask(0, NULL
, &cur_set
);
5849 target_to_host_old_sigset(&set
, &target_set
);
5850 sigorset(&set
, &set
, &cur_set
);
5851 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5852 host_to_target_old_sigset(&target_set
, &oset
);
5857 #ifdef TARGET_NR_sigprocmask
5858 case TARGET_NR_sigprocmask
:
5860 #if defined(TARGET_ALPHA)
5861 sigset_t set
, oldset
;
5866 case TARGET_SIG_BLOCK
:
5869 case TARGET_SIG_UNBLOCK
:
5872 case TARGET_SIG_SETMASK
:
5876 ret
= -TARGET_EINVAL
;
5880 target_to_host_old_sigset(&set
, &mask
);
5882 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5884 if (!is_error(ret
)) {
5885 host_to_target_old_sigset(&mask
, &oldset
);
5887 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5890 sigset_t set
, oldset
, *set_ptr
;
5895 case TARGET_SIG_BLOCK
:
5898 case TARGET_SIG_UNBLOCK
:
5901 case TARGET_SIG_SETMASK
:
5905 ret
= -TARGET_EINVAL
;
5908 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5910 target_to_host_old_sigset(&set
, p
);
5911 unlock_user(p
, arg2
, 0);
5917 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5918 if (!is_error(ret
) && arg3
) {
5919 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5921 host_to_target_old_sigset(p
, &oldset
);
5922 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5928 case TARGET_NR_rt_sigprocmask
:
5931 sigset_t set
, oldset
, *set_ptr
;
5935 case TARGET_SIG_BLOCK
:
5938 case TARGET_SIG_UNBLOCK
:
5941 case TARGET_SIG_SETMASK
:
5945 ret
= -TARGET_EINVAL
;
5948 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5950 target_to_host_sigset(&set
, p
);
5951 unlock_user(p
, arg2
, 0);
5957 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5958 if (!is_error(ret
) && arg3
) {
5959 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5961 host_to_target_sigset(p
, &oldset
);
5962 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5966 #ifdef TARGET_NR_sigpending
5967 case TARGET_NR_sigpending
:
5970 ret
= get_errno(sigpending(&set
));
5971 if (!is_error(ret
)) {
5972 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5974 host_to_target_old_sigset(p
, &set
);
5975 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5980 case TARGET_NR_rt_sigpending
:
5983 ret
= get_errno(sigpending(&set
));
5984 if (!is_error(ret
)) {
5985 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5987 host_to_target_sigset(p
, &set
);
5988 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5992 #ifdef TARGET_NR_sigsuspend
5993 case TARGET_NR_sigsuspend
:
5996 #if defined(TARGET_ALPHA)
5997 abi_ulong mask
= arg1
;
5998 target_to_host_old_sigset(&set
, &mask
);
6000 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6002 target_to_host_old_sigset(&set
, p
);
6003 unlock_user(p
, arg1
, 0);
6005 ret
= get_errno(sigsuspend(&set
));
6009 case TARGET_NR_rt_sigsuspend
:
6012 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6014 target_to_host_sigset(&set
, p
);
6015 unlock_user(p
, arg1
, 0);
6016 ret
= get_errno(sigsuspend(&set
));
6019 case TARGET_NR_rt_sigtimedwait
:
6022 struct timespec uts
, *puts
;
6025 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
6027 target_to_host_sigset(&set
, p
);
6028 unlock_user(p
, arg1
, 0);
6031 target_to_host_timespec(puts
, arg3
);
6035 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
6036 if (!is_error(ret
) && arg2
) {
6037 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
6039 host_to_target_siginfo(p
, &uinfo
);
6040 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
6044 case TARGET_NR_rt_sigqueueinfo
:
6047 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
6049 target_to_host_siginfo(&uinfo
, p
);
6050 unlock_user(p
, arg1
, 0);
6051 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
6054 #ifdef TARGET_NR_sigreturn
6055 case TARGET_NR_sigreturn
:
6056 /* NOTE: ret is eax, so not transcoding must be done */
6057 ret
= do_sigreturn(cpu_env
);
6060 case TARGET_NR_rt_sigreturn
:
6061 /* NOTE: ret is eax, so not transcoding must be done */
6062 ret
= do_rt_sigreturn(cpu_env
);
6064 case TARGET_NR_sethostname
:
6065 if (!(p
= lock_user_string(arg1
)))
6067 ret
= get_errno(sethostname(p
, arg2
));
6068 unlock_user(p
, arg1
, 0);
6070 case TARGET_NR_setrlimit
:
6072 int resource
= target_to_host_resource(arg1
);
6073 struct target_rlimit
*target_rlim
;
6075 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
6077 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
6078 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
6079 unlock_user_struct(target_rlim
, arg2
, 0);
6080 ret
= get_errno(setrlimit(resource
, &rlim
));
6083 case TARGET_NR_getrlimit
:
6085 int resource
= target_to_host_resource(arg1
);
6086 struct target_rlimit
*target_rlim
;
6089 ret
= get_errno(getrlimit(resource
, &rlim
));
6090 if (!is_error(ret
)) {
6091 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6093 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6094 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6095 unlock_user_struct(target_rlim
, arg2
, 1);
6099 case TARGET_NR_getrusage
:
6101 struct rusage rusage
;
6102 ret
= get_errno(getrusage(arg1
, &rusage
));
6103 if (!is_error(ret
)) {
6104 host_to_target_rusage(arg2
, &rusage
);
6108 case TARGET_NR_gettimeofday
:
6111 ret
= get_errno(gettimeofday(&tv
, NULL
));
6112 if (!is_error(ret
)) {
6113 if (copy_to_user_timeval(arg1
, &tv
))
6118 case TARGET_NR_settimeofday
:
6121 if (copy_from_user_timeval(&tv
, arg1
))
6123 ret
= get_errno(settimeofday(&tv
, NULL
));
6126 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
6127 case TARGET_NR_select
:
6129 struct target_sel_arg_struct
*sel
;
6130 abi_ulong inp
, outp
, exp
, tvp
;
6133 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
6135 nsel
= tswapal(sel
->n
);
6136 inp
= tswapal(sel
->inp
);
6137 outp
= tswapal(sel
->outp
);
6138 exp
= tswapal(sel
->exp
);
6139 tvp
= tswapal(sel
->tvp
);
6140 unlock_user_struct(sel
, arg1
, 0);
6141 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
6145 #ifdef TARGET_NR_pselect6
6146 case TARGET_NR_pselect6
:
6148 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
6149 fd_set rfds
, wfds
, efds
;
6150 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
6151 struct timespec ts
, *ts_ptr
;
6154 * The 6th arg is actually two args smashed together,
6155 * so we cannot use the C library.
6163 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
6164 target_sigset_t
*target_sigset
;
6172 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
6176 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
6180 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
6186 * This takes a timespec, and not a timeval, so we cannot
6187 * use the do_select() helper ...
6190 if (target_to_host_timespec(&ts
, ts_addr
)) {
6198 /* Extract the two packed args for the sigset */
6201 sig
.size
= _NSIG
/ 8;
6203 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
6207 arg_sigset
= tswapal(arg7
[0]);
6208 arg_sigsize
= tswapal(arg7
[1]);
6209 unlock_user(arg7
, arg6
, 0);
6213 if (arg_sigsize
!= sizeof(*target_sigset
)) {
6214 /* Like the kernel, we enforce correct size sigsets */
6215 ret
= -TARGET_EINVAL
;
6218 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
6219 sizeof(*target_sigset
), 1);
6220 if (!target_sigset
) {
6223 target_to_host_sigset(&set
, target_sigset
);
6224 unlock_user(target_sigset
, arg_sigset
, 0);
6232 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
6235 if (!is_error(ret
)) {
6236 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
6238 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
6240 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
6243 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
6249 case TARGET_NR_symlink
:
6252 p
= lock_user_string(arg1
);
6253 p2
= lock_user_string(arg2
);
6255 ret
= -TARGET_EFAULT
;
6257 ret
= get_errno(symlink(p
, p2
));
6258 unlock_user(p2
, arg2
, 0);
6259 unlock_user(p
, arg1
, 0);
6262 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6263 case TARGET_NR_symlinkat
:
6266 p
= lock_user_string(arg1
);
6267 p2
= lock_user_string(arg3
);
6269 ret
= -TARGET_EFAULT
;
6271 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6272 unlock_user(p2
, arg3
, 0);
6273 unlock_user(p
, arg1
, 0);
6277 #ifdef TARGET_NR_oldlstat
6278 case TARGET_NR_oldlstat
:
6281 case TARGET_NR_readlink
:
6284 p
= lock_user_string(arg1
);
6285 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6287 ret
= -TARGET_EFAULT
;
6289 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6290 char real
[PATH_MAX
];
6291 temp
= realpath(exec_path
,real
);
6292 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6293 snprintf((char *)p2
, arg3
, "%s", real
);
6296 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6298 unlock_user(p2
, arg2
, ret
);
6299 unlock_user(p
, arg1
, 0);
6302 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6303 case TARGET_NR_readlinkat
:
6306 p
= lock_user_string(arg2
);
6307 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6309 ret
= -TARGET_EFAULT
;
6311 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6312 unlock_user(p2
, arg3
, ret
);
6313 unlock_user(p
, arg2
, 0);
6317 #ifdef TARGET_NR_uselib
6318 case TARGET_NR_uselib
:
6321 #ifdef TARGET_NR_swapon
6322 case TARGET_NR_swapon
:
6323 if (!(p
= lock_user_string(arg1
)))
6325 ret
= get_errno(swapon(p
, arg2
));
6326 unlock_user(p
, arg1
, 0);
6329 case TARGET_NR_reboot
:
6330 if (!(p
= lock_user_string(arg4
)))
6332 ret
= reboot(arg1
, arg2
, arg3
, p
);
6333 unlock_user(p
, arg4
, 0);
6335 #ifdef TARGET_NR_readdir
6336 case TARGET_NR_readdir
:
6339 #ifdef TARGET_NR_mmap
6340 case TARGET_NR_mmap
:
6341 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6342 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6343 || defined(TARGET_S390X)
6346 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6347 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6355 unlock_user(v
, arg1
, 0);
6356 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6357 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6361 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6362 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6368 #ifdef TARGET_NR_mmap2
6369 case TARGET_NR_mmap2
:
6371 #define MMAP_SHIFT 12
6373 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6374 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6376 arg6
<< MMAP_SHIFT
));
6379 case TARGET_NR_munmap
:
6380 ret
= get_errno(target_munmap(arg1
, arg2
));
6382 case TARGET_NR_mprotect
:
6384 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6385 /* Special hack to detect libc making the stack executable. */
6386 if ((arg3
& PROT_GROWSDOWN
)
6387 && arg1
>= ts
->info
->stack_limit
6388 && arg1
<= ts
->info
->start_stack
) {
6389 arg3
&= ~PROT_GROWSDOWN
;
6390 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6391 arg1
= ts
->info
->stack_limit
;
6394 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6396 #ifdef TARGET_NR_mremap
6397 case TARGET_NR_mremap
:
6398 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6401 /* ??? msync/mlock/munlock are broken for softmmu. */
6402 #ifdef TARGET_NR_msync
6403 case TARGET_NR_msync
:
6404 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6407 #ifdef TARGET_NR_mlock
6408 case TARGET_NR_mlock
:
6409 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6412 #ifdef TARGET_NR_munlock
6413 case TARGET_NR_munlock
:
6414 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6417 #ifdef TARGET_NR_mlockall
6418 case TARGET_NR_mlockall
:
6419 ret
= get_errno(mlockall(arg1
));
6422 #ifdef TARGET_NR_munlockall
6423 case TARGET_NR_munlockall
:
6424 ret
= get_errno(munlockall());
6427 case TARGET_NR_truncate
:
6428 if (!(p
= lock_user_string(arg1
)))
6430 ret
= get_errno(truncate(p
, arg2
));
6431 unlock_user(p
, arg1
, 0);
6433 case TARGET_NR_ftruncate
:
6434 ret
= get_errno(ftruncate(arg1
, arg2
));
6436 case TARGET_NR_fchmod
:
6437 ret
= get_errno(fchmod(arg1
, arg2
));
6439 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6440 case TARGET_NR_fchmodat
:
6441 if (!(p
= lock_user_string(arg2
)))
6443 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
6444 unlock_user(p
, arg2
, 0);
6447 case TARGET_NR_getpriority
:
6448 /* libc does special remapping of the return value of
6449 * sys_getpriority() so it's just easiest to call
6450 * sys_getpriority() directly rather than through libc. */
6451 ret
= get_errno(sys_getpriority(arg1
, arg2
));
6453 case TARGET_NR_setpriority
:
6454 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6456 #ifdef TARGET_NR_profil
6457 case TARGET_NR_profil
:
6460 case TARGET_NR_statfs
:
6461 if (!(p
= lock_user_string(arg1
)))
6463 ret
= get_errno(statfs(path(p
), &stfs
));
6464 unlock_user(p
, arg1
, 0);
6466 if (!is_error(ret
)) {
6467 struct target_statfs
*target_stfs
;
6469 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6471 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6472 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6473 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6474 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6475 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6476 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6477 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6478 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6479 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6480 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6481 unlock_user_struct(target_stfs
, arg2
, 1);
6484 case TARGET_NR_fstatfs
:
6485 ret
= get_errno(fstatfs(arg1
, &stfs
));
6486 goto convert_statfs
;
6487 #ifdef TARGET_NR_statfs64
6488 case TARGET_NR_statfs64
:
6489 if (!(p
= lock_user_string(arg1
)))
6491 ret
= get_errno(statfs(path(p
), &stfs
));
6492 unlock_user(p
, arg1
, 0);
6494 if (!is_error(ret
)) {
6495 struct target_statfs64
*target_stfs
;
6497 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6499 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6500 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6501 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6502 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6503 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6504 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6505 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6506 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6507 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6508 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6509 unlock_user_struct(target_stfs
, arg3
, 1);
6512 case TARGET_NR_fstatfs64
:
6513 ret
= get_errno(fstatfs(arg1
, &stfs
));
6514 goto convert_statfs64
;
6516 #ifdef TARGET_NR_ioperm
6517 case TARGET_NR_ioperm
:
6520 #ifdef TARGET_NR_socketcall
6521 case TARGET_NR_socketcall
:
6522 ret
= do_socketcall(arg1
, arg2
);
6525 #ifdef TARGET_NR_accept
6526 case TARGET_NR_accept
:
6527 ret
= do_accept(arg1
, arg2
, arg3
);
6530 #ifdef TARGET_NR_bind
6531 case TARGET_NR_bind
:
6532 ret
= do_bind(arg1
, arg2
, arg3
);
6535 #ifdef TARGET_NR_connect
6536 case TARGET_NR_connect
:
6537 ret
= do_connect(arg1
, arg2
, arg3
);
6540 #ifdef TARGET_NR_getpeername
6541 case TARGET_NR_getpeername
:
6542 ret
= do_getpeername(arg1
, arg2
, arg3
);
6545 #ifdef TARGET_NR_getsockname
6546 case TARGET_NR_getsockname
:
6547 ret
= do_getsockname(arg1
, arg2
, arg3
);
6550 #ifdef TARGET_NR_getsockopt
6551 case TARGET_NR_getsockopt
:
6552 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6555 #ifdef TARGET_NR_listen
6556 case TARGET_NR_listen
:
6557 ret
= get_errno(listen(arg1
, arg2
));
6560 #ifdef TARGET_NR_recv
6561 case TARGET_NR_recv
:
6562 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6565 #ifdef TARGET_NR_recvfrom
6566 case TARGET_NR_recvfrom
:
6567 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6570 #ifdef TARGET_NR_recvmsg
6571 case TARGET_NR_recvmsg
:
6572 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6575 #ifdef TARGET_NR_send
6576 case TARGET_NR_send
:
6577 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6580 #ifdef TARGET_NR_sendmsg
6581 case TARGET_NR_sendmsg
:
6582 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6585 #ifdef TARGET_NR_sendto
6586 case TARGET_NR_sendto
:
6587 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6590 #ifdef TARGET_NR_shutdown
6591 case TARGET_NR_shutdown
:
6592 ret
= get_errno(shutdown(arg1
, arg2
));
6595 #ifdef TARGET_NR_socket
6596 case TARGET_NR_socket
:
6597 ret
= do_socket(arg1
, arg2
, arg3
);
6600 #ifdef TARGET_NR_socketpair
6601 case TARGET_NR_socketpair
:
6602 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6605 #ifdef TARGET_NR_setsockopt
6606 case TARGET_NR_setsockopt
:
6607 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6611 case TARGET_NR_syslog
:
6612 if (!(p
= lock_user_string(arg2
)))
6614 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6615 unlock_user(p
, arg2
, 0);
6618 case TARGET_NR_setitimer
:
6620 struct itimerval value
, ovalue
, *pvalue
;
6624 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6625 || copy_from_user_timeval(&pvalue
->it_value
,
6626 arg2
+ sizeof(struct target_timeval
)))
6631 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6632 if (!is_error(ret
) && arg3
) {
6633 if (copy_to_user_timeval(arg3
,
6634 &ovalue
.it_interval
)
6635 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6641 case TARGET_NR_getitimer
:
6643 struct itimerval value
;
6645 ret
= get_errno(getitimer(arg1
, &value
));
6646 if (!is_error(ret
) && arg2
) {
6647 if (copy_to_user_timeval(arg2
,
6649 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6655 case TARGET_NR_stat
:
6656 if (!(p
= lock_user_string(arg1
)))
6658 ret
= get_errno(stat(path(p
), &st
));
6659 unlock_user(p
, arg1
, 0);
6661 case TARGET_NR_lstat
:
6662 if (!(p
= lock_user_string(arg1
)))
6664 ret
= get_errno(lstat(path(p
), &st
));
6665 unlock_user(p
, arg1
, 0);
6667 case TARGET_NR_fstat
:
6669 ret
= get_errno(fstat(arg1
, &st
));
6671 if (!is_error(ret
)) {
6672 struct target_stat
*target_st
;
6674 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6676 memset(target_st
, 0, sizeof(*target_st
));
6677 __put_user(st
.st_dev
, &target_st
->st_dev
);
6678 __put_user(st
.st_ino
, &target_st
->st_ino
);
6679 __put_user(st
.st_mode
, &target_st
->st_mode
);
6680 __put_user(st
.st_uid
, &target_st
->st_uid
);
6681 __put_user(st
.st_gid
, &target_st
->st_gid
);
6682 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6683 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6684 __put_user(st
.st_size
, &target_st
->st_size
);
6685 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6686 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6687 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6688 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6689 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6690 unlock_user_struct(target_st
, arg2
, 1);
6694 #ifdef TARGET_NR_olduname
6695 case TARGET_NR_olduname
:
6698 #ifdef TARGET_NR_iopl
6699 case TARGET_NR_iopl
:
6702 case TARGET_NR_vhangup
:
6703 ret
= get_errno(vhangup());
6705 #ifdef TARGET_NR_idle
6706 case TARGET_NR_idle
:
6709 #ifdef TARGET_NR_syscall
6710 case TARGET_NR_syscall
:
6711 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6712 arg6
, arg7
, arg8
, 0);
6715 case TARGET_NR_wait4
:
6718 abi_long status_ptr
= arg2
;
6719 struct rusage rusage
, *rusage_ptr
;
6720 abi_ulong target_rusage
= arg4
;
6722 rusage_ptr
= &rusage
;
6725 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6726 if (!is_error(ret
)) {
6727 if (status_ptr
&& ret
) {
6728 status
= host_to_target_waitstatus(status
);
6729 if (put_user_s32(status
, status_ptr
))
6733 host_to_target_rusage(target_rusage
, &rusage
);
6737 #ifdef TARGET_NR_swapoff
6738 case TARGET_NR_swapoff
:
6739 if (!(p
= lock_user_string(arg1
)))
6741 ret
= get_errno(swapoff(p
));
6742 unlock_user(p
, arg1
, 0);
6745 case TARGET_NR_sysinfo
:
6747 struct target_sysinfo
*target_value
;
6748 struct sysinfo value
;
6749 ret
= get_errno(sysinfo(&value
));
6750 if (!is_error(ret
) && arg1
)
6752 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6754 __put_user(value
.uptime
, &target_value
->uptime
);
6755 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6756 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6757 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6758 __put_user(value
.totalram
, &target_value
->totalram
);
6759 __put_user(value
.freeram
, &target_value
->freeram
);
6760 __put_user(value
.sharedram
, &target_value
->sharedram
);
6761 __put_user(value
.bufferram
, &target_value
->bufferram
);
6762 __put_user(value
.totalswap
, &target_value
->totalswap
);
6763 __put_user(value
.freeswap
, &target_value
->freeswap
);
6764 __put_user(value
.procs
, &target_value
->procs
);
6765 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6766 __put_user(value
.freehigh
, &target_value
->freehigh
);
6767 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6768 unlock_user_struct(target_value
, arg1
, 1);
6772 #ifdef TARGET_NR_ipc
6774 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6777 #ifdef TARGET_NR_semget
6778 case TARGET_NR_semget
:
6779 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6782 #ifdef TARGET_NR_semop
6783 case TARGET_NR_semop
:
6784 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6787 #ifdef TARGET_NR_semctl
6788 case TARGET_NR_semctl
:
6789 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6792 #ifdef TARGET_NR_msgctl
6793 case TARGET_NR_msgctl
:
6794 ret
= do_msgctl(arg1
, arg2
, arg3
);
6797 #ifdef TARGET_NR_msgget
6798 case TARGET_NR_msgget
:
6799 ret
= get_errno(msgget(arg1
, arg2
));
6802 #ifdef TARGET_NR_msgrcv
6803 case TARGET_NR_msgrcv
:
6804 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6807 #ifdef TARGET_NR_msgsnd
6808 case TARGET_NR_msgsnd
:
6809 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6812 #ifdef TARGET_NR_shmget
6813 case TARGET_NR_shmget
:
6814 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6817 #ifdef TARGET_NR_shmctl
6818 case TARGET_NR_shmctl
:
6819 ret
= do_shmctl(arg1
, arg2
, arg3
);
6822 #ifdef TARGET_NR_shmat
6823 case TARGET_NR_shmat
:
6824 ret
= do_shmat(arg1
, arg2
, arg3
);
6827 #ifdef TARGET_NR_shmdt
6828 case TARGET_NR_shmdt
:
6829 ret
= do_shmdt(arg1
);
6832 case TARGET_NR_fsync
:
6833 ret
= get_errno(fsync(arg1
));
6835 case TARGET_NR_clone
:
6836 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6837 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6838 #elif defined(TARGET_CRIS)
6839 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6840 #elif defined(TARGET_S390X)
6841 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6843 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6846 #ifdef __NR_exit_group
6847 /* new thread calls */
6848 case TARGET_NR_exit_group
:
6852 gdb_exit(cpu_env
, arg1
);
6853 ret
= get_errno(exit_group(arg1
));
6856 case TARGET_NR_setdomainname
:
6857 if (!(p
= lock_user_string(arg1
)))
6859 ret
= get_errno(setdomainname(p
, arg2
));
6860 unlock_user(p
, arg1
, 0);
6862 case TARGET_NR_uname
:
6863 /* no need to transcode because we use the linux syscall */
6865 struct new_utsname
* buf
;
6867 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6869 ret
= get_errno(sys_uname(buf
));
6870 if (!is_error(ret
)) {
6871 /* Overwrite the native machine name with whatever is being
6873 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6874 /* Allow the user to override the reported release. */
6875 if (qemu_uname_release
&& *qemu_uname_release
)
6876 strcpy (buf
->release
, qemu_uname_release
);
6878 unlock_user_struct(buf
, arg1
, 1);
6882 case TARGET_NR_modify_ldt
:
6883 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6885 #if !defined(TARGET_X86_64)
6886 case TARGET_NR_vm86old
:
6888 case TARGET_NR_vm86
:
6889 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6893 case TARGET_NR_adjtimex
:
6895 #ifdef TARGET_NR_create_module
6896 case TARGET_NR_create_module
:
6898 case TARGET_NR_init_module
:
6899 case TARGET_NR_delete_module
:
6900 #ifdef TARGET_NR_get_kernel_syms
6901 case TARGET_NR_get_kernel_syms
:
6904 case TARGET_NR_quotactl
:
6906 case TARGET_NR_getpgid
:
6907 ret
= get_errno(getpgid(arg1
));
6909 case TARGET_NR_fchdir
:
6910 ret
= get_errno(fchdir(arg1
));
6912 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6913 case TARGET_NR_bdflush
:
6916 #ifdef TARGET_NR_sysfs
6917 case TARGET_NR_sysfs
:
6920 case TARGET_NR_personality
:
6921 ret
= get_errno(personality(arg1
));
6923 #ifdef TARGET_NR_afs_syscall
6924 case TARGET_NR_afs_syscall
:
6927 #ifdef TARGET_NR__llseek /* Not on alpha */
6928 case TARGET_NR__llseek
:
6931 #if !defined(__NR_llseek)
6932 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6934 ret
= get_errno(res
);
6939 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6941 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6947 case TARGET_NR_getdents
:
6948 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6950 struct target_dirent
*target_dirp
;
6951 struct linux_dirent
*dirp
;
6952 abi_long count
= arg3
;
6954 dirp
= malloc(count
);
6956 ret
= -TARGET_ENOMEM
;
6960 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6961 if (!is_error(ret
)) {
6962 struct linux_dirent
*de
;
6963 struct target_dirent
*tde
;
6965 int reclen
, treclen
;
6966 int count1
, tnamelen
;
6970 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6974 reclen
= de
->d_reclen
;
6975 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6976 tde
->d_reclen
= tswap16(treclen
);
6977 tde
->d_ino
= tswapal(de
->d_ino
);
6978 tde
->d_off
= tswapal(de
->d_off
);
6979 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6982 /* XXX: may not be correct */
6983 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6984 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6986 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6990 unlock_user(target_dirp
, arg2
, ret
);
6996 struct linux_dirent
*dirp
;
6997 abi_long count
= arg3
;
6999 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7001 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
7002 if (!is_error(ret
)) {
7003 struct linux_dirent
*de
;
7008 reclen
= de
->d_reclen
;
7011 de
->d_reclen
= tswap16(reclen
);
7012 tswapls(&de
->d_ino
);
7013 tswapls(&de
->d_off
);
7014 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
7018 unlock_user(dirp
, arg2
, ret
);
7022 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7023 case TARGET_NR_getdents64
:
7025 struct linux_dirent64
*dirp
;
7026 abi_long count
= arg3
;
7027 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
7029 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
7030 if (!is_error(ret
)) {
7031 struct linux_dirent64
*de
;
7036 reclen
= de
->d_reclen
;
7039 de
->d_reclen
= tswap16(reclen
);
7040 tswap64s((uint64_t *)&de
->d_ino
);
7041 tswap64s((uint64_t *)&de
->d_off
);
7042 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
7046 unlock_user(dirp
, arg2
, ret
);
7049 #endif /* TARGET_NR_getdents64 */
7050 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
7052 case TARGET_NR_select
:
7054 case TARGET_NR__newselect
:
7056 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
7059 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7060 # ifdef TARGET_NR_poll
7061 case TARGET_NR_poll
:
7063 # ifdef TARGET_NR_ppoll
7064 case TARGET_NR_ppoll
:
7067 struct target_pollfd
*target_pfd
;
7068 unsigned int nfds
= arg2
;
7073 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
7077 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
7078 for(i
= 0; i
< nfds
; i
++) {
7079 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
7080 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
7083 # ifdef TARGET_NR_ppoll
7084 if (num
== TARGET_NR_ppoll
) {
7085 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
7086 target_sigset_t
*target_set
;
7087 sigset_t _set
, *set
= &_set
;
7090 if (target_to_host_timespec(timeout_ts
, arg3
)) {
7091 unlock_user(target_pfd
, arg1
, 0);
7099 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
7101 unlock_user(target_pfd
, arg1
, 0);
7104 target_to_host_sigset(set
, target_set
);
7109 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
7111 if (!is_error(ret
) && arg3
) {
7112 host_to_target_timespec(arg3
, timeout_ts
);
7115 unlock_user(target_set
, arg4
, 0);
7119 ret
= get_errno(poll(pfd
, nfds
, timeout
));
7121 if (!is_error(ret
)) {
7122 for(i
= 0; i
< nfds
; i
++) {
7123 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
7126 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
7130 case TARGET_NR_flock
:
7131 /* NOTE: the flock constant seems to be the same for every
7133 ret
= get_errno(flock(arg1
, arg2
));
7135 case TARGET_NR_readv
:
7140 vec
= alloca(count
* sizeof(struct iovec
));
7141 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
7143 ret
= get_errno(readv(arg1
, vec
, count
));
7144 unlock_iovec(vec
, arg2
, count
, 1);
7147 case TARGET_NR_writev
:
7152 vec
= alloca(count
* sizeof(struct iovec
));
7153 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
7155 ret
= get_errno(writev(arg1
, vec
, count
));
7156 unlock_iovec(vec
, arg2
, count
, 0);
7159 case TARGET_NR_getsid
:
7160 ret
= get_errno(getsid(arg1
));
7162 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7163 case TARGET_NR_fdatasync
:
7164 ret
= get_errno(fdatasync(arg1
));
7167 case TARGET_NR__sysctl
:
7168 /* We don't implement this, but ENOTDIR is always a safe
7170 ret
= -TARGET_ENOTDIR
;
7172 case TARGET_NR_sched_getaffinity
:
7174 unsigned int mask_size
;
7175 unsigned long *mask
;
7178 * sched_getaffinity needs multiples of ulong, so need to take
7179 * care of mismatches between target ulong and host ulong sizes.
7181 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7182 ret
= -TARGET_EINVAL
;
7185 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7187 mask
= alloca(mask_size
);
7188 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
7190 if (!is_error(ret
)) {
7191 if (copy_to_user(arg3
, mask
, ret
)) {
7197 case TARGET_NR_sched_setaffinity
:
7199 unsigned int mask_size
;
7200 unsigned long *mask
;
7203 * sched_setaffinity needs multiples of ulong, so need to take
7204 * care of mismatches between target ulong and host ulong sizes.
7206 if (arg2
& (sizeof(abi_ulong
) - 1)) {
7207 ret
= -TARGET_EINVAL
;
7210 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
7212 mask
= alloca(mask_size
);
7213 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
7216 memcpy(mask
, p
, arg2
);
7217 unlock_user_struct(p
, arg2
, 0);
7219 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
7222 case TARGET_NR_sched_setparam
:
7224 struct sched_param
*target_schp
;
7225 struct sched_param schp
;
7227 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
7229 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7230 unlock_user_struct(target_schp
, arg2
, 0);
7231 ret
= get_errno(sched_setparam(arg1
, &schp
));
7234 case TARGET_NR_sched_getparam
:
7236 struct sched_param
*target_schp
;
7237 struct sched_param schp
;
7238 ret
= get_errno(sched_getparam(arg1
, &schp
));
7239 if (!is_error(ret
)) {
7240 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
7242 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
7243 unlock_user_struct(target_schp
, arg2
, 1);
7247 case TARGET_NR_sched_setscheduler
:
7249 struct sched_param
*target_schp
;
7250 struct sched_param schp
;
7251 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
7253 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
7254 unlock_user_struct(target_schp
, arg3
, 0);
7255 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
7258 case TARGET_NR_sched_getscheduler
:
7259 ret
= get_errno(sched_getscheduler(arg1
));
7261 case TARGET_NR_sched_yield
:
7262 ret
= get_errno(sched_yield());
7264 case TARGET_NR_sched_get_priority_max
:
7265 ret
= get_errno(sched_get_priority_max(arg1
));
7267 case TARGET_NR_sched_get_priority_min
:
7268 ret
= get_errno(sched_get_priority_min(arg1
));
7270 case TARGET_NR_sched_rr_get_interval
:
7273 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
7274 if (!is_error(ret
)) {
7275 host_to_target_timespec(arg2
, &ts
);
7279 case TARGET_NR_nanosleep
:
7281 struct timespec req
, rem
;
7282 target_to_host_timespec(&req
, arg1
);
7283 ret
= get_errno(nanosleep(&req
, &rem
));
7284 if (is_error(ret
) && arg2
) {
7285 host_to_target_timespec(arg2
, &rem
);
7289 #ifdef TARGET_NR_query_module
7290 case TARGET_NR_query_module
:
7293 #ifdef TARGET_NR_nfsservctl
7294 case TARGET_NR_nfsservctl
:
7297 case TARGET_NR_prctl
:
7299 case PR_GET_PDEATHSIG
:
7302 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
7303 if (!is_error(ret
) && arg2
7304 && put_user_ual(deathsig
, arg2
)) {
7312 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
7316 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7318 unlock_user(name
, arg2
, 16);
7323 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
7327 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
7329 unlock_user(name
, arg2
, 0);
7334 /* Most prctl options have no pointer arguments */
7335 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
7339 #ifdef TARGET_NR_arch_prctl
7340 case TARGET_NR_arch_prctl
:
7341 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7342 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
7348 #ifdef TARGET_NR_pread
7349 case TARGET_NR_pread
:
7350 if (regpairs_aligned(cpu_env
))
7352 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7354 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
7355 unlock_user(p
, arg2
, ret
);
7357 case TARGET_NR_pwrite
:
7358 if (regpairs_aligned(cpu_env
))
7360 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7362 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
7363 unlock_user(p
, arg2
, 0);
7366 #ifdef TARGET_NR_pread64
7367 case TARGET_NR_pread64
:
7368 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7370 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7371 unlock_user(p
, arg2
, ret
);
7373 case TARGET_NR_pwrite64
:
7374 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7376 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
7377 unlock_user(p
, arg2
, 0);
7380 case TARGET_NR_getcwd
:
7381 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
7383 ret
= get_errno(sys_getcwd1(p
, arg2
));
7384 unlock_user(p
, arg1
, ret
);
7386 case TARGET_NR_capget
:
7388 case TARGET_NR_capset
:
7390 case TARGET_NR_sigaltstack
:
7391 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7392 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7393 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7394 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
7399 case TARGET_NR_sendfile
:
7401 #ifdef TARGET_NR_getpmsg
7402 case TARGET_NR_getpmsg
:
7405 #ifdef TARGET_NR_putpmsg
7406 case TARGET_NR_putpmsg
:
7409 #ifdef TARGET_NR_vfork
7410 case TARGET_NR_vfork
:
7411 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
7415 #ifdef TARGET_NR_ugetrlimit
7416 case TARGET_NR_ugetrlimit
:
7419 int resource
= target_to_host_resource(arg1
);
7420 ret
= get_errno(getrlimit(resource
, &rlim
));
7421 if (!is_error(ret
)) {
7422 struct target_rlimit
*target_rlim
;
7423 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
7425 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
7426 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
7427 unlock_user_struct(target_rlim
, arg2
, 1);
7432 #ifdef TARGET_NR_truncate64
7433 case TARGET_NR_truncate64
:
7434 if (!(p
= lock_user_string(arg1
)))
7436 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
7437 unlock_user(p
, arg1
, 0);
7440 #ifdef TARGET_NR_ftruncate64
7441 case TARGET_NR_ftruncate64
:
7442 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
7445 #ifdef TARGET_NR_stat64
7446 case TARGET_NR_stat64
:
7447 if (!(p
= lock_user_string(arg1
)))
7449 ret
= get_errno(stat(path(p
), &st
));
7450 unlock_user(p
, arg1
, 0);
7452 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7455 #ifdef TARGET_NR_lstat64
7456 case TARGET_NR_lstat64
:
7457 if (!(p
= lock_user_string(arg1
)))
7459 ret
= get_errno(lstat(path(p
), &st
));
7460 unlock_user(p
, arg1
, 0);
7462 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7465 #ifdef TARGET_NR_fstat64
7466 case TARGET_NR_fstat64
:
7467 ret
= get_errno(fstat(arg1
, &st
));
7469 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
7472 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
7473 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7474 #ifdef TARGET_NR_fstatat64
7475 case TARGET_NR_fstatat64
:
7477 #ifdef TARGET_NR_newfstatat
7478 case TARGET_NR_newfstatat
:
7480 if (!(p
= lock_user_string(arg2
)))
7482 #ifdef __NR_fstatat64
7483 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
7485 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
7488 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
7491 case TARGET_NR_lchown
:
7492 if (!(p
= lock_user_string(arg1
)))
7494 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7495 unlock_user(p
, arg1
, 0);
7497 #ifdef TARGET_NR_getuid
7498 case TARGET_NR_getuid
:
7499 ret
= get_errno(high2lowuid(getuid()));
7502 #ifdef TARGET_NR_getgid
7503 case TARGET_NR_getgid
:
7504 ret
= get_errno(high2lowgid(getgid()));
7507 #ifdef TARGET_NR_geteuid
7508 case TARGET_NR_geteuid
:
7509 ret
= get_errno(high2lowuid(geteuid()));
7512 #ifdef TARGET_NR_getegid
7513 case TARGET_NR_getegid
:
7514 ret
= get_errno(high2lowgid(getegid()));
7517 case TARGET_NR_setreuid
:
7518 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
7520 case TARGET_NR_setregid
:
7521 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
7523 case TARGET_NR_getgroups
:
7525 int gidsetsize
= arg1
;
7526 target_id
*target_grouplist
;
7530 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7531 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7532 if (gidsetsize
== 0)
7534 if (!is_error(ret
)) {
7535 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
7536 if (!target_grouplist
)
7538 for(i
= 0;i
< ret
; i
++)
7539 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
7540 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
7544 case TARGET_NR_setgroups
:
7546 int gidsetsize
= arg1
;
7547 target_id
*target_grouplist
;
7551 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7552 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
7553 if (!target_grouplist
) {
7554 ret
= -TARGET_EFAULT
;
7557 for(i
= 0;i
< gidsetsize
; i
++)
7558 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
7559 unlock_user(target_grouplist
, arg2
, 0);
7560 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7563 case TARGET_NR_fchown
:
7564 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
7566 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7567 case TARGET_NR_fchownat
:
7568 if (!(p
= lock_user_string(arg2
)))
7570 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
7571 unlock_user(p
, arg2
, 0);
7574 #ifdef TARGET_NR_setresuid
7575 case TARGET_NR_setresuid
:
7576 ret
= get_errno(setresuid(low2highuid(arg1
),
7578 low2highuid(arg3
)));
7581 #ifdef TARGET_NR_getresuid
7582 case TARGET_NR_getresuid
:
7584 uid_t ruid
, euid
, suid
;
7585 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7586 if (!is_error(ret
)) {
7587 if (put_user_u16(high2lowuid(ruid
), arg1
)
7588 || put_user_u16(high2lowuid(euid
), arg2
)
7589 || put_user_u16(high2lowuid(suid
), arg3
))
7595 #ifdef TARGET_NR_getresgid
7596 case TARGET_NR_setresgid
:
7597 ret
= get_errno(setresgid(low2highgid(arg1
),
7599 low2highgid(arg3
)));
7602 #ifdef TARGET_NR_getresgid
7603 case TARGET_NR_getresgid
:
7605 gid_t rgid
, egid
, sgid
;
7606 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7607 if (!is_error(ret
)) {
7608 if (put_user_u16(high2lowgid(rgid
), arg1
)
7609 || put_user_u16(high2lowgid(egid
), arg2
)
7610 || put_user_u16(high2lowgid(sgid
), arg3
))
7616 case TARGET_NR_chown
:
7617 if (!(p
= lock_user_string(arg1
)))
7619 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
7620 unlock_user(p
, arg1
, 0);
7622 case TARGET_NR_setuid
:
7623 ret
= get_errno(setuid(low2highuid(arg1
)));
7625 case TARGET_NR_setgid
:
7626 ret
= get_errno(setgid(low2highgid(arg1
)));
7628 case TARGET_NR_setfsuid
:
7629 ret
= get_errno(setfsuid(arg1
));
7631 case TARGET_NR_setfsgid
:
7632 ret
= get_errno(setfsgid(arg1
));
7635 #ifdef TARGET_NR_lchown32
7636 case TARGET_NR_lchown32
:
7637 if (!(p
= lock_user_string(arg1
)))
7639 ret
= get_errno(lchown(p
, arg2
, arg3
));
7640 unlock_user(p
, arg1
, 0);
7643 #ifdef TARGET_NR_getuid32
7644 case TARGET_NR_getuid32
:
7645 ret
= get_errno(getuid());
7649 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7650 /* Alpha specific */
7651 case TARGET_NR_getxuid
:
7655 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
7657 ret
= get_errno(getuid());
7660 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7661 /* Alpha specific */
7662 case TARGET_NR_getxgid
:
7666 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
7668 ret
= get_errno(getgid());
7671 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7672 /* Alpha specific */
7673 case TARGET_NR_osf_getsysinfo
:
7674 ret
= -TARGET_EOPNOTSUPP
;
7676 case TARGET_GSI_IEEE_FP_CONTROL
:
7678 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
7680 /* Copied from linux ieee_fpcr_to_swcr. */
7681 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
7682 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
7683 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
7684 | SWCR_TRAP_ENABLE_DZE
7685 | SWCR_TRAP_ENABLE_OVF
);
7686 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
7687 | SWCR_TRAP_ENABLE_INE
);
7688 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
7689 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
7691 if (put_user_u64 (swcr
, arg2
))
7697 /* case GSI_IEEE_STATE_AT_SIGNAL:
7698 -- Not implemented in linux kernel.
7700 -- Retrieves current unaligned access state; not much used.
7702 -- Retrieves implver information; surely not used.
7704 -- Grabs a copy of the HWRPB; surely not used.
7709 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7710 /* Alpha specific */
7711 case TARGET_NR_osf_setsysinfo
:
7712 ret
= -TARGET_EOPNOTSUPP
;
7714 case TARGET_SSI_IEEE_FP_CONTROL
:
7716 uint64_t swcr
, fpcr
, orig_fpcr
;
7718 if (get_user_u64 (swcr
, arg2
)) {
7721 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7722 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
7724 /* Copied from linux ieee_swcr_to_fpcr. */
7725 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
7726 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
7727 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
7728 | SWCR_TRAP_ENABLE_DZE
7729 | SWCR_TRAP_ENABLE_OVF
)) << 48;
7730 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
7731 | SWCR_TRAP_ENABLE_INE
)) << 57;
7732 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
7733 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
7735 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7740 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
7742 uint64_t exc
, fpcr
, orig_fpcr
;
7745 if (get_user_u64(exc
, arg2
)) {
7749 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
7751 /* We only add to the exception status here. */
7752 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
7754 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
7757 /* Old exceptions are not signaled. */
7758 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
7760 /* If any exceptions set by this call,
7761 and are unmasked, send a signal. */
7763 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
7764 si_code
= TARGET_FPE_FLTRES
;
7766 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
7767 si_code
= TARGET_FPE_FLTUND
;
7769 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
7770 si_code
= TARGET_FPE_FLTOVF
;
7772 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
7773 si_code
= TARGET_FPE_FLTDIV
;
7775 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
7776 si_code
= TARGET_FPE_FLTINV
;
7779 target_siginfo_t info
;
7780 info
.si_signo
= SIGFPE
;
7782 info
.si_code
= si_code
;
7783 info
._sifields
._sigfault
._addr
7784 = ((CPUArchState
*)cpu_env
)->pc
;
7785 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
7790 /* case SSI_NVPAIRS:
7791 -- Used with SSIN_UACPROC to enable unaligned accesses.
7792 case SSI_IEEE_STATE_AT_SIGNAL:
7793 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7794 -- Not implemented in linux kernel
7799 #ifdef TARGET_NR_osf_sigprocmask
7800 /* Alpha specific. */
7801 case TARGET_NR_osf_sigprocmask
:
7805 sigset_t set
, oldset
;
7808 case TARGET_SIG_BLOCK
:
7811 case TARGET_SIG_UNBLOCK
:
7814 case TARGET_SIG_SETMASK
:
7818 ret
= -TARGET_EINVAL
;
7822 target_to_host_old_sigset(&set
, &mask
);
7823 sigprocmask(how
, &set
, &oldset
);
7824 host_to_target_old_sigset(&mask
, &oldset
);
7830 #ifdef TARGET_NR_getgid32
7831 case TARGET_NR_getgid32
:
7832 ret
= get_errno(getgid());
7835 #ifdef TARGET_NR_geteuid32
7836 case TARGET_NR_geteuid32
:
7837 ret
= get_errno(geteuid());
7840 #ifdef TARGET_NR_getegid32
7841 case TARGET_NR_getegid32
:
7842 ret
= get_errno(getegid());
7845 #ifdef TARGET_NR_setreuid32
7846 case TARGET_NR_setreuid32
:
7847 ret
= get_errno(setreuid(arg1
, arg2
));
7850 #ifdef TARGET_NR_setregid32
7851 case TARGET_NR_setregid32
:
7852 ret
= get_errno(setregid(arg1
, arg2
));
7855 #ifdef TARGET_NR_getgroups32
7856 case TARGET_NR_getgroups32
:
7858 int gidsetsize
= arg1
;
7859 uint32_t *target_grouplist
;
7863 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7864 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
7865 if (gidsetsize
== 0)
7867 if (!is_error(ret
)) {
7868 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
7869 if (!target_grouplist
) {
7870 ret
= -TARGET_EFAULT
;
7873 for(i
= 0;i
< ret
; i
++)
7874 target_grouplist
[i
] = tswap32(grouplist
[i
]);
7875 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
7880 #ifdef TARGET_NR_setgroups32
7881 case TARGET_NR_setgroups32
:
7883 int gidsetsize
= arg1
;
7884 uint32_t *target_grouplist
;
7888 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
7889 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
7890 if (!target_grouplist
) {
7891 ret
= -TARGET_EFAULT
;
7894 for(i
= 0;i
< gidsetsize
; i
++)
7895 grouplist
[i
] = tswap32(target_grouplist
[i
]);
7896 unlock_user(target_grouplist
, arg2
, 0);
7897 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
7901 #ifdef TARGET_NR_fchown32
7902 case TARGET_NR_fchown32
:
7903 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
7906 #ifdef TARGET_NR_setresuid32
7907 case TARGET_NR_setresuid32
:
7908 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
7911 #ifdef TARGET_NR_getresuid32
7912 case TARGET_NR_getresuid32
:
7914 uid_t ruid
, euid
, suid
;
7915 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
7916 if (!is_error(ret
)) {
7917 if (put_user_u32(ruid
, arg1
)
7918 || put_user_u32(euid
, arg2
)
7919 || put_user_u32(suid
, arg3
))
7925 #ifdef TARGET_NR_setresgid32
7926 case TARGET_NR_setresgid32
:
7927 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
7930 #ifdef TARGET_NR_getresgid32
7931 case TARGET_NR_getresgid32
:
7933 gid_t rgid
, egid
, sgid
;
7934 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
7935 if (!is_error(ret
)) {
7936 if (put_user_u32(rgid
, arg1
)
7937 || put_user_u32(egid
, arg2
)
7938 || put_user_u32(sgid
, arg3
))
7944 #ifdef TARGET_NR_chown32
7945 case TARGET_NR_chown32
:
7946 if (!(p
= lock_user_string(arg1
)))
7948 ret
= get_errno(chown(p
, arg2
, arg3
));
7949 unlock_user(p
, arg1
, 0);
7952 #ifdef TARGET_NR_setuid32
7953 case TARGET_NR_setuid32
:
7954 ret
= get_errno(setuid(arg1
));
7957 #ifdef TARGET_NR_setgid32
7958 case TARGET_NR_setgid32
:
7959 ret
= get_errno(setgid(arg1
));
7962 #ifdef TARGET_NR_setfsuid32
7963 case TARGET_NR_setfsuid32
:
7964 ret
= get_errno(setfsuid(arg1
));
7967 #ifdef TARGET_NR_setfsgid32
7968 case TARGET_NR_setfsgid32
:
7969 ret
= get_errno(setfsgid(arg1
));
7973 case TARGET_NR_pivot_root
:
7975 #ifdef TARGET_NR_mincore
7976 case TARGET_NR_mincore
:
7979 ret
= -TARGET_EFAULT
;
7980 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
7982 if (!(p
= lock_user_string(arg3
)))
7984 ret
= get_errno(mincore(a
, arg2
, p
));
7985 unlock_user(p
, arg3
, ret
);
7987 unlock_user(a
, arg1
, 0);
7991 #ifdef TARGET_NR_arm_fadvise64_64
7992 case TARGET_NR_arm_fadvise64_64
:
7995 * arm_fadvise64_64 looks like fadvise64_64 but
7996 * with different argument order
8004 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8005 #ifdef TARGET_NR_fadvise64_64
8006 case TARGET_NR_fadvise64_64
:
8008 #ifdef TARGET_NR_fadvise64
8009 case TARGET_NR_fadvise64
:
8013 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
8014 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
8015 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
8016 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
8020 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
8023 #ifdef TARGET_NR_madvise
8024 case TARGET_NR_madvise
:
8025 /* A straight passthrough may not be safe because qemu sometimes
8026 turns private file-backed mappings into anonymous mappings.
8027 This will break MADV_DONTNEED.
8028 This is a hint, so ignoring and returning success is ok. */
8032 #if TARGET_ABI_BITS == 32
8033 case TARGET_NR_fcntl64
:
8037 struct target_flock64
*target_fl
;
8039 struct target_eabi_flock64
*target_efl
;
8042 cmd
= target_to_host_fcntl_cmd(arg2
);
8043 if (cmd
== -TARGET_EINVAL
) {
8049 case TARGET_F_GETLK64
:
8051 if (((CPUARMState
*)cpu_env
)->eabi
) {
8052 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8054 fl
.l_type
= tswap16(target_efl
->l_type
);
8055 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8056 fl
.l_start
= tswap64(target_efl
->l_start
);
8057 fl
.l_len
= tswap64(target_efl
->l_len
);
8058 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8059 unlock_user_struct(target_efl
, arg3
, 0);
8063 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8065 fl
.l_type
= tswap16(target_fl
->l_type
);
8066 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8067 fl
.l_start
= tswap64(target_fl
->l_start
);
8068 fl
.l_len
= tswap64(target_fl
->l_len
);
8069 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8070 unlock_user_struct(target_fl
, arg3
, 0);
8072 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8075 if (((CPUARMState
*)cpu_env
)->eabi
) {
8076 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
8078 target_efl
->l_type
= tswap16(fl
.l_type
);
8079 target_efl
->l_whence
= tswap16(fl
.l_whence
);
8080 target_efl
->l_start
= tswap64(fl
.l_start
);
8081 target_efl
->l_len
= tswap64(fl
.l_len
);
8082 target_efl
->l_pid
= tswap32(fl
.l_pid
);
8083 unlock_user_struct(target_efl
, arg3
, 1);
8087 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
8089 target_fl
->l_type
= tswap16(fl
.l_type
);
8090 target_fl
->l_whence
= tswap16(fl
.l_whence
);
8091 target_fl
->l_start
= tswap64(fl
.l_start
);
8092 target_fl
->l_len
= tswap64(fl
.l_len
);
8093 target_fl
->l_pid
= tswap32(fl
.l_pid
);
8094 unlock_user_struct(target_fl
, arg3
, 1);
8099 case TARGET_F_SETLK64
:
8100 case TARGET_F_SETLKW64
:
8102 if (((CPUARMState
*)cpu_env
)->eabi
) {
8103 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
8105 fl
.l_type
= tswap16(target_efl
->l_type
);
8106 fl
.l_whence
= tswap16(target_efl
->l_whence
);
8107 fl
.l_start
= tswap64(target_efl
->l_start
);
8108 fl
.l_len
= tswap64(target_efl
->l_len
);
8109 fl
.l_pid
= tswap32(target_efl
->l_pid
);
8110 unlock_user_struct(target_efl
, arg3
, 0);
8114 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
8116 fl
.l_type
= tswap16(target_fl
->l_type
);
8117 fl
.l_whence
= tswap16(target_fl
->l_whence
);
8118 fl
.l_start
= tswap64(target_fl
->l_start
);
8119 fl
.l_len
= tswap64(target_fl
->l_len
);
8120 fl
.l_pid
= tswap32(target_fl
->l_pid
);
8121 unlock_user_struct(target_fl
, arg3
, 0);
8123 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
8126 ret
= do_fcntl(arg1
, arg2
, arg3
);
8132 #ifdef TARGET_NR_cacheflush
8133 case TARGET_NR_cacheflush
:
8134 /* self-modifying code is handled automatically, so nothing needed */
8138 #ifdef TARGET_NR_security
8139 case TARGET_NR_security
:
8142 #ifdef TARGET_NR_getpagesize
8143 case TARGET_NR_getpagesize
:
8144 ret
= TARGET_PAGE_SIZE
;
8147 case TARGET_NR_gettid
:
8148 ret
= get_errno(gettid());
8150 #ifdef TARGET_NR_readahead
8151 case TARGET_NR_readahead
:
8152 #if TARGET_ABI_BITS == 32
8153 if (regpairs_aligned(cpu_env
)) {
8158 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
8160 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
8165 #ifdef TARGET_NR_setxattr
8166 case TARGET_NR_listxattr
:
8167 case TARGET_NR_llistxattr
:
8171 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8173 ret
= -TARGET_EFAULT
;
8177 p
= lock_user_string(arg1
);
8179 if (num
== TARGET_NR_listxattr
) {
8180 ret
= get_errno(listxattr(p
, b
, arg3
));
8182 ret
= get_errno(llistxattr(p
, b
, arg3
));
8185 ret
= -TARGET_EFAULT
;
8187 unlock_user(p
, arg1
, 0);
8188 unlock_user(b
, arg2
, arg3
);
8191 case TARGET_NR_flistxattr
:
8195 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8197 ret
= -TARGET_EFAULT
;
8201 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
8202 unlock_user(b
, arg2
, arg3
);
8205 case TARGET_NR_setxattr
:
8206 case TARGET_NR_lsetxattr
:
8208 void *p
, *n
, *v
= 0;
8210 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8212 ret
= -TARGET_EFAULT
;
8216 p
= lock_user_string(arg1
);
8217 n
= lock_user_string(arg2
);
8219 if (num
== TARGET_NR_setxattr
) {
8220 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
8222 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
8225 ret
= -TARGET_EFAULT
;
8227 unlock_user(p
, arg1
, 0);
8228 unlock_user(n
, arg2
, 0);
8229 unlock_user(v
, arg3
, 0);
8232 case TARGET_NR_fsetxattr
:
8236 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
8238 ret
= -TARGET_EFAULT
;
8242 n
= lock_user_string(arg2
);
8244 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
8246 ret
= -TARGET_EFAULT
;
8248 unlock_user(n
, arg2
, 0);
8249 unlock_user(v
, arg3
, 0);
8252 case TARGET_NR_getxattr
:
8253 case TARGET_NR_lgetxattr
:
8255 void *p
, *n
, *v
= 0;
8257 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8259 ret
= -TARGET_EFAULT
;
8263 p
= lock_user_string(arg1
);
8264 n
= lock_user_string(arg2
);
8266 if (num
== TARGET_NR_getxattr
) {
8267 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
8269 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
8272 ret
= -TARGET_EFAULT
;
8274 unlock_user(p
, arg1
, 0);
8275 unlock_user(n
, arg2
, 0);
8276 unlock_user(v
, arg3
, arg4
);
8279 case TARGET_NR_fgetxattr
:
8283 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8285 ret
= -TARGET_EFAULT
;
8289 n
= lock_user_string(arg2
);
8291 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
8293 ret
= -TARGET_EFAULT
;
8295 unlock_user(n
, arg2
, 0);
8296 unlock_user(v
, arg3
, arg4
);
8299 case TARGET_NR_removexattr
:
8300 case TARGET_NR_lremovexattr
:
8303 p
= lock_user_string(arg1
);
8304 n
= lock_user_string(arg2
);
8306 if (num
== TARGET_NR_removexattr
) {
8307 ret
= get_errno(removexattr(p
, n
));
8309 ret
= get_errno(lremovexattr(p
, n
));
8312 ret
= -TARGET_EFAULT
;
8314 unlock_user(p
, arg1
, 0);
8315 unlock_user(n
, arg2
, 0);
8318 case TARGET_NR_fremovexattr
:
8321 n
= lock_user_string(arg2
);
8323 ret
= get_errno(fremovexattr(arg1
, n
));
8325 ret
= -TARGET_EFAULT
;
8327 unlock_user(n
, arg2
, 0);
8331 #endif /* CONFIG_ATTR */
8332 #ifdef TARGET_NR_set_thread_area
8333 case TARGET_NR_set_thread_area
:
8334 #if defined(TARGET_MIPS)
8335 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
8338 #elif defined(TARGET_CRIS)
8340 ret
= -TARGET_EINVAL
;
8342 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
8346 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8347 ret
= do_set_thread_area(cpu_env
, arg1
);
8350 goto unimplemented_nowarn
;
8353 #ifdef TARGET_NR_get_thread_area
8354 case TARGET_NR_get_thread_area
:
8355 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8356 ret
= do_get_thread_area(cpu_env
, arg1
);
8358 goto unimplemented_nowarn
;
8361 #ifdef TARGET_NR_getdomainname
8362 case TARGET_NR_getdomainname
:
8363 goto unimplemented_nowarn
;
8366 #ifdef TARGET_NR_clock_gettime
8367 case TARGET_NR_clock_gettime
:
8370 ret
= get_errno(clock_gettime(arg1
, &ts
));
8371 if (!is_error(ret
)) {
8372 host_to_target_timespec(arg2
, &ts
);
8377 #ifdef TARGET_NR_clock_getres
8378 case TARGET_NR_clock_getres
:
8381 ret
= get_errno(clock_getres(arg1
, &ts
));
8382 if (!is_error(ret
)) {
8383 host_to_target_timespec(arg2
, &ts
);
8388 #ifdef TARGET_NR_clock_nanosleep
8389 case TARGET_NR_clock_nanosleep
:
8392 target_to_host_timespec(&ts
, arg3
);
8393 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
8395 host_to_target_timespec(arg4
, &ts
);
8400 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8401 case TARGET_NR_set_tid_address
:
8402 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
8406 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8407 case TARGET_NR_tkill
:
8408 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
8412 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8413 case TARGET_NR_tgkill
:
8414 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
8415 target_to_host_signal(arg3
)));
8419 #ifdef TARGET_NR_set_robust_list
8420 case TARGET_NR_set_robust_list
:
8421 goto unimplemented_nowarn
;
8424 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
8425 case TARGET_NR_utimensat
:
8427 struct timespec
*tsp
, ts
[2];
8431 target_to_host_timespec(ts
, arg3
);
8432 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
8436 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
8438 if (!(p
= lock_user_string(arg2
))) {
8439 ret
= -TARGET_EFAULT
;
8442 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
8443 unlock_user(p
, arg2
, 0);
8448 #if defined(CONFIG_USE_NPTL)
8449 case TARGET_NR_futex
:
8450 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8453 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8454 case TARGET_NR_inotify_init
:
8455 ret
= get_errno(sys_inotify_init());
8458 #ifdef CONFIG_INOTIFY1
8459 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8460 case TARGET_NR_inotify_init1
:
8461 ret
= get_errno(sys_inotify_init1(arg1
));
8465 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8466 case TARGET_NR_inotify_add_watch
:
8467 p
= lock_user_string(arg2
);
8468 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
8469 unlock_user(p
, arg2
, 0);
8472 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8473 case TARGET_NR_inotify_rm_watch
:
8474 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
8478 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8479 case TARGET_NR_mq_open
:
8481 struct mq_attr posix_mq_attr
;
8483 p
= lock_user_string(arg1
- 1);
8485 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
8486 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
8487 unlock_user (p
, arg1
, 0);
8491 case TARGET_NR_mq_unlink
:
8492 p
= lock_user_string(arg1
- 1);
8493 ret
= get_errno(mq_unlink(p
));
8494 unlock_user (p
, arg1
, 0);
8497 case TARGET_NR_mq_timedsend
:
8501 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8503 target_to_host_timespec(&ts
, arg5
);
8504 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
8505 host_to_target_timespec(arg5
, &ts
);
8508 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
8509 unlock_user (p
, arg2
, arg3
);
8513 case TARGET_NR_mq_timedreceive
:
8518 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
8520 target_to_host_timespec(&ts
, arg5
);
8521 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
8522 host_to_target_timespec(arg5
, &ts
);
8525 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
8526 unlock_user (p
, arg2
, arg3
);
8528 put_user_u32(prio
, arg4
);
8532 /* Not implemented for now... */
8533 /* case TARGET_NR_mq_notify: */
8536 case TARGET_NR_mq_getsetattr
:
8538 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
8541 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
8542 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
8545 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
8546 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
8553 #ifdef CONFIG_SPLICE
8554 #ifdef TARGET_NR_tee
8557 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
8561 #ifdef TARGET_NR_splice
8562 case TARGET_NR_splice
:
8564 loff_t loff_in
, loff_out
;
8565 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
8567 get_user_u64(loff_in
, arg2
);
8568 ploff_in
= &loff_in
;
8571 get_user_u64(loff_out
, arg2
);
8572 ploff_out
= &loff_out
;
8574 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
8578 #ifdef TARGET_NR_vmsplice
8579 case TARGET_NR_vmsplice
:
8584 vec
= alloca(count
* sizeof(struct iovec
));
8585 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
8587 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
8588 unlock_iovec(vec
, arg2
, count
, 0);
8592 #endif /* CONFIG_SPLICE */
8593 #ifdef CONFIG_EVENTFD
8594 #if defined(TARGET_NR_eventfd)
8595 case TARGET_NR_eventfd
:
8596 ret
= get_errno(eventfd(arg1
, 0));
8599 #if defined(TARGET_NR_eventfd2)
8600 case TARGET_NR_eventfd2
:
8601 ret
= get_errno(eventfd(arg1
, arg2
));
8604 #endif /* CONFIG_EVENTFD */
8605 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
8606 case TARGET_NR_fallocate
:
8607 #if TARGET_ABI_BITS == 32
8608 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
8609 target_offset64(arg5
, arg6
)));
8611 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
8615 #if defined(CONFIG_SYNC_FILE_RANGE)
8616 #if defined(TARGET_NR_sync_file_range)
8617 case TARGET_NR_sync_file_range
:
8618 #if TARGET_ABI_BITS == 32
8619 #if defined(TARGET_MIPS)
8620 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8621 target_offset64(arg5
, arg6
), arg7
));
8623 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
8624 target_offset64(arg4
, arg5
), arg6
));
8625 #endif /* !TARGET_MIPS */
8627 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
8631 #if defined(TARGET_NR_sync_file_range2)
8632 case TARGET_NR_sync_file_range2
:
8633 /* This is like sync_file_range but the arguments are reordered */
8634 #if TARGET_ABI_BITS == 32
8635 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
8636 target_offset64(arg5
, arg6
), arg2
));
8638 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
8643 #if defined(CONFIG_EPOLL)
8644 #if defined(TARGET_NR_epoll_create)
8645 case TARGET_NR_epoll_create
:
8646 ret
= get_errno(epoll_create(arg1
));
8649 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8650 case TARGET_NR_epoll_create1
:
8651 ret
= get_errno(epoll_create1(arg1
));
8654 #if defined(TARGET_NR_epoll_ctl)
8655 case TARGET_NR_epoll_ctl
:
8657 struct epoll_event ep
;
8658 struct epoll_event
*epp
= 0;
8660 struct target_epoll_event
*target_ep
;
8661 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
8664 ep
.events
= tswap32(target_ep
->events
);
8665 /* The epoll_data_t union is just opaque data to the kernel,
8666 * so we transfer all 64 bits across and need not worry what
8667 * actual data type it is.
8669 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
8670 unlock_user_struct(target_ep
, arg4
, 0);
8673 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
8678 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8679 #define IMPLEMENT_EPOLL_PWAIT
8681 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8682 #if defined(TARGET_NR_epoll_wait)
8683 case TARGET_NR_epoll_wait
:
8685 #if defined(IMPLEMENT_EPOLL_PWAIT)
8686 case TARGET_NR_epoll_pwait
:
8689 struct target_epoll_event
*target_ep
;
8690 struct epoll_event
*ep
;
8692 int maxevents
= arg3
;
8695 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
8696 maxevents
* sizeof(struct target_epoll_event
), 1);
8701 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
8704 #if defined(IMPLEMENT_EPOLL_PWAIT)
8705 case TARGET_NR_epoll_pwait
:
8707 target_sigset_t
*target_set
;
8708 sigset_t _set
, *set
= &_set
;
8711 target_set
= lock_user(VERIFY_READ
, arg5
,
8712 sizeof(target_sigset_t
), 1);
8714 unlock_user(target_ep
, arg2
, 0);
8717 target_to_host_sigset(set
, target_set
);
8718 unlock_user(target_set
, arg5
, 0);
8723 ret
= get_errno(epoll_pwait(epfd
, ep
, maxevents
, timeout
, set
));
8727 #if defined(TARGET_NR_epoll_wait)
8728 case TARGET_NR_epoll_wait
:
8729 ret
= get_errno(epoll_wait(epfd
, ep
, maxevents
, timeout
));
8733 ret
= -TARGET_ENOSYS
;
8735 if (!is_error(ret
)) {
8737 for (i
= 0; i
< ret
; i
++) {
8738 target_ep
[i
].events
= tswap32(ep
[i
].events
);
8739 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
8742 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
8747 #ifdef TARGET_NR_prlimit64
8748 case TARGET_NR_prlimit64
:
8750 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8751 struct target_rlimit64
*target_rnew
, *target_rold
;
8752 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
8754 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
8757 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
8758 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
8759 unlock_user_struct(target_rnew
, arg3
, 0);
8763 ret
= get_errno(sys_prlimit64(arg1
, arg2
, rnewp
, arg4
? &rold
: 0));
8764 if (!is_error(ret
) && arg4
) {
8765 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
8768 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
8769 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
8770 unlock_user_struct(target_rold
, arg4
, 1);
8777 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8778 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8779 unimplemented_nowarn
:
8781 ret
= -TARGET_ENOSYS
;
8786 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8789 print_syscall_ret(num
, ret
);
8792 ret
= -TARGET_EFAULT
;