4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
45 #include <sys/socket.h>
49 #include <sys/times.h>
52 #include <sys/statfs.h>
54 #include <sys/sysinfo.h>
55 #include <sys/utsname.h>
56 //#include <sys/user.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <qemu-common.h>
64 #include <sys/eventfd.h>
67 #define termios host_termios
68 #define winsize host_winsize
69 #define termio host_termio
70 #define sgttyb host_sgttyb /* same as target */
71 #define tchars host_tchars /* same as target */
72 #define ltchars host_ltchars /* same as target */
74 #include <linux/termios.h>
75 #include <linux/unistd.h>
76 #include <linux/utsname.h>
77 #include <linux/cdrom.h>
78 #include <linux/hdreg.h>
79 #include <linux/soundcard.h>
81 #include <linux/mtio.h>
83 #include "linux_loop.h"
86 #include "qemu-common.h"
88 #if defined(CONFIG_USE_NPTL)
89 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
90 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
92 /* XXX: Hardcode the above values. */
93 #define CLONE_NPTL_FLAGS2 0
98 //#include <linux/msdos_fs.h>
99 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
100 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
111 #define _syscall0(type,name) \
112 static type name (void) \
114 return syscall(__NR_##name); \
117 #define _syscall1(type,name,type1,arg1) \
118 static type name (type1 arg1) \
120 return syscall(__NR_##name, arg1); \
123 #define _syscall2(type,name,type1,arg1,type2,arg2) \
124 static type name (type1 arg1,type2 arg2) \
126 return syscall(__NR_##name, arg1, arg2); \
129 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
130 static type name (type1 arg1,type2 arg2,type3 arg3) \
132 return syscall(__NR_##name, arg1, arg2, arg3); \
135 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
136 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
138 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
141 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
143 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
145 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
149 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
150 type5,arg5,type6,arg6) \
151 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
154 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
158 #define __NR_sys_uname __NR_uname
159 #define __NR_sys_faccessat __NR_faccessat
160 #define __NR_sys_fchmodat __NR_fchmodat
161 #define __NR_sys_fchownat __NR_fchownat
162 #define __NR_sys_fstatat64 __NR_fstatat64
163 #define __NR_sys_futimesat __NR_futimesat
164 #define __NR_sys_getcwd1 __NR_getcwd
165 #define __NR_sys_getdents __NR_getdents
166 #define __NR_sys_getdents64 __NR_getdents64
167 #define __NR_sys_getpriority __NR_getpriority
168 #define __NR_sys_linkat __NR_linkat
169 #define __NR_sys_mkdirat __NR_mkdirat
170 #define __NR_sys_mknodat __NR_mknodat
171 #define __NR_sys_newfstatat __NR_newfstatat
172 #define __NR_sys_openat __NR_openat
173 #define __NR_sys_readlinkat __NR_readlinkat
174 #define __NR_sys_renameat __NR_renameat
175 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
176 #define __NR_sys_symlinkat __NR_symlinkat
177 #define __NR_sys_syslog __NR_syslog
178 #define __NR_sys_tgkill __NR_tgkill
179 #define __NR_sys_tkill __NR_tkill
180 #define __NR_sys_unlinkat __NR_unlinkat
181 #define __NR_sys_utimensat __NR_utimensat
182 #define __NR_sys_futex __NR_futex
183 #define __NR_sys_inotify_init __NR_inotify_init
184 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
185 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
187 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
188 #define __NR__llseek __NR_lseek
192 _syscall0(int, gettid
)
194 /* This is a replacement for the host gettid() and must return a host
196 static int gettid(void) {
200 #if TARGET_ABI_BITS == 32
201 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
203 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
204 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
206 _syscall2(int, sys_getpriority
, int, which
, int, who
);
207 #if defined(TARGET_NR__llseek) && !defined (__x86_64__)
208 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
209 loff_t
*, res
, uint
, wh
);
211 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
212 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
213 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
214 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
216 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
217 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
219 #ifdef __NR_exit_group
220 _syscall1(int,exit_group
,int,error_code
)
222 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
223 _syscall1(int,set_tid_address
,int *,tidptr
)
225 #if defined(CONFIG_USE_NPTL)
226 #if defined(TARGET_NR_futex) && defined(__NR_futex)
227 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
228 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
232 static bitmask_transtbl fcntl_flags_tbl
[] = {
233 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
234 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
235 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
236 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
237 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
238 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
239 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
240 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
241 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
242 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
243 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
244 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
245 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
246 #if defined(O_DIRECT)
247 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
/* Copy one utsname field, truncating to __NEW_UTS_LEN and always
 * NUL-terminating (__NEW_UTS_LEN doesn't include the terminating null). */
#define COPY_UTSNAME_FIELD(dest, src) \
    do { \
        (void) strncpy((dest), (src), __NEW_UTS_LEN); \
        (dest)[__NEW_UTS_LEN] = '\0'; \
    } while (0)

/*
 * uname(2) replacement: fill a Linux 'struct new_utsname' from the
 * host's 'struct utsname'.  Returns 0 on success, -1 on error (uname
 * sets errno).
 */
static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct linux kernel uses).
     */

    bzero(buf, sizeof(*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
    /* GNU/Linux 'struct utsname' also carries the NIS domain name. */
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
    return (0);
}

#undef COPY_UTSNAME_FIELD
/*
 * getcwd(2)-style helper: on success the current directory is written
 * into BUF and the byte count including the terminating NUL is
 * returned; on failure -1 is returned.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
297 * Host system seems to have atfile syscall stubs available. We
298 * now enable them one by one as specified by target syscall_nr.h.
301 #ifdef TARGET_NR_faccessat
/* Thin shim over faccessat(2); the AT flags argument is fixed to 0. */
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return faccessat(dirfd, pathname, mode, 0);
}
307 #ifdef TARGET_NR_fchmodat
/* Thin shim over fchmodat(2); the AT flags argument is fixed to 0. */
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return fchmodat(dirfd, pathname, mode, 0);
}
313 #if defined(TARGET_NR_fchownat) && defined(USE_UID16)
/* Thin shim over fchownat(2), passing all five arguments straight
 * through (needed because the target side uses 16-bit uid/gid). */
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return fchownat(dirfd, pathname, owner, group, flags);
}
320 #ifdef __NR_fstatat64
/* fstatat64 shim: forwards to the host fstatat(2).  The 'flags'
 * parameter is reconstructed from the _syscall4 fallback declaration
 * later in this file — TODO confirm against the original tree. */
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
327 #ifdef __NR_newfstatat
/* newfstatat shim: same host call as sys_fstatat64, kept separate so
 * each target syscall number maps to its own stub.  The 'flags'
 * parameter is reconstructed from the _syscall4 fallback declaration
 * later in this file — TODO confirm against the original tree. */
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return fstatat(dirfd, pathname, buf, flags);
}
334 #ifdef TARGET_NR_futimesat
/* Thin shim over futimesat(2). */
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return futimesat(dirfd, pathname, times);
}
341 #ifdef TARGET_NR_linkat
/* Thin shim over linkat(2), forwarding all five arguments. */
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return linkat(olddirfd, oldpath, newdirfd, newpath, flags);
}
348 #ifdef TARGET_NR_mkdirat
/* Thin shim over mkdirat(2). */
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return mkdirat(dirfd, pathname, mode);
}
354 #ifdef TARGET_NR_mknodat
/* Thin shim over mknodat(2).  The 'dev' parameter is reconstructed
 * from the _syscall4 fallback declaration later in this file — TODO
 * confirm against the original tree. */
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return mknodat(dirfd, pathname, mode, dev);
}
361 #ifdef TARGET_NR_openat
362 static int sys_openat(int dirfd
, const char *pathname
, int flags
, ...)
365 * open(2) has extra parameter 'mode' when called with
368 if ((flags
& O_CREAT
) != 0) {
373 * Get the 'mode' parameter and translate it to
377 mode
= va_arg(ap
, mode_t
);
378 mode
= target_to_host_bitmask(mode
, fcntl_flags_tbl
);
381 return (openat(dirfd
, pathname
, flags
, mode
));
383 return (openat(dirfd
, pathname
, flags
));
386 #ifdef TARGET_NR_readlinkat
/* Thin shim over readlinkat(2). */
static int sys_readlinkat(int dirfd, const char *pathname, char *buf,
                          size_t bufsiz)
{
    return readlinkat(dirfd, pathname, buf, bufsiz);
}
392 #ifdef TARGET_NR_renameat
/* Thin shim over renameat(2). */
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return renameat(olddirfd, oldpath, newdirfd, newpath);
}
399 #ifdef TARGET_NR_symlinkat
/* Thin shim over symlinkat(2). */
static int sys_symlinkat(const char *oldpath, int newdirfd,
                         const char *newpath)
{
    return symlinkat(oldpath, newdirfd, newpath);
}
405 #ifdef TARGET_NR_unlinkat
/* Thin shim over unlinkat(2); flags selects file vs AT_REMOVEDIR. */
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return unlinkat(dirfd, pathname, flags);
}
411 #else /* !CONFIG_ATFILE */
414 * Try direct syscalls instead
416 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
417 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
419 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
420 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
422 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
423 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
424 uid_t
,owner
,gid_t
,group
,int,flags
)
426 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
427 defined(__NR_fstatat64)
428 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
429 struct stat
*,buf
,int,flags
)
431 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
432 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
433 const struct timeval
*,times
)
435 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
436 defined(__NR_newfstatat)
437 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
438 struct stat
*,buf
,int,flags
)
440 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
441 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
442 int,newdirfd
,const char *,newpath
,int,flags
)
444 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
445 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
447 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
448 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
449 mode_t
,mode
,dev_t
,dev
)
451 #if defined(TARGET_NR_openat) && defined(__NR_openat)
452 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
454 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
455 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
456 char *,buf
,size_t,bufsize
)
458 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
459 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
460 int,newdirfd
,const char *,newpath
)
462 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
463 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
464 int,newdirfd
,const char *,newpath
)
466 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
467 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
470 #endif /* CONFIG_ATFILE */
472 #ifdef CONFIG_UTIMENSAT
/*
 * utimensat(2) shim.  A NULL pathname selects the futimens(3) form,
 * which operates on the descriptor itself; otherwise the path is
 * resolved relative to dirfd with the given AT flags.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname != NULL) {
        return utimensat(dirfd, pathname, times, flags);
    }
    return futimens(dirfd, times);
}
482 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
483 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
484 const struct timespec
*,tsp
,int,flags
)
486 #endif /* CONFIG_UTIMENSAT */
488 #ifdef CONFIG_INOTIFY
489 #include <sys/inotify.h>
491 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin shim over inotify_init(2); returns a new inotify fd or -1. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
497 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin shim over inotify_add_watch(2); returns a watch descriptor
 * or -1. */
static int sys_inotify_add_watch(int fd, const char *pathname,
                                 int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
503 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin shim over inotify_rm_watch(2). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
510 /* Userspace can usually survive runtime without inotify */
511 #undef TARGET_NR_inotify_init
512 #undef TARGET_NR_inotify_add_watch
513 #undef TARGET_NR_inotify_rm_watch
514 #endif /* CONFIG_INOTIFY */
517 extern int personality(int);
518 extern int flock(int, int);
519 extern int setfsuid(int);
520 extern int setfsgid(int);
521 extern int setgroups(int, gid_t
*);
523 #define ERRNO_TABLE_SIZE 1200
525 /* target_to_host_errno_table[] is initialized from
526 * host_to_target_errno_table[] in syscall_init(). */
527 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
531 * This list is the union of errno values overridden in asm-<arch>/errno.h
532 * minus the errnos that are not actually generic to all archs.
534 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
535 [EIDRM
] = TARGET_EIDRM
,
536 [ECHRNG
] = TARGET_ECHRNG
,
537 [EL2NSYNC
] = TARGET_EL2NSYNC
,
538 [EL3HLT
] = TARGET_EL3HLT
,
539 [EL3RST
] = TARGET_EL3RST
,
540 [ELNRNG
] = TARGET_ELNRNG
,
541 [EUNATCH
] = TARGET_EUNATCH
,
542 [ENOCSI
] = TARGET_ENOCSI
,
543 [EL2HLT
] = TARGET_EL2HLT
,
544 [EDEADLK
] = TARGET_EDEADLK
,
545 [ENOLCK
] = TARGET_ENOLCK
,
546 [EBADE
] = TARGET_EBADE
,
547 [EBADR
] = TARGET_EBADR
,
548 [EXFULL
] = TARGET_EXFULL
,
549 [ENOANO
] = TARGET_ENOANO
,
550 [EBADRQC
] = TARGET_EBADRQC
,
551 [EBADSLT
] = TARGET_EBADSLT
,
552 [EBFONT
] = TARGET_EBFONT
,
553 [ENOSTR
] = TARGET_ENOSTR
,
554 [ENODATA
] = TARGET_ENODATA
,
555 [ETIME
] = TARGET_ETIME
,
556 [ENOSR
] = TARGET_ENOSR
,
557 [ENONET
] = TARGET_ENONET
,
558 [ENOPKG
] = TARGET_ENOPKG
,
559 [EREMOTE
] = TARGET_EREMOTE
,
560 [ENOLINK
] = TARGET_ENOLINK
,
561 [EADV
] = TARGET_EADV
,
562 [ESRMNT
] = TARGET_ESRMNT
,
563 [ECOMM
] = TARGET_ECOMM
,
564 [EPROTO
] = TARGET_EPROTO
,
565 [EDOTDOT
] = TARGET_EDOTDOT
,
566 [EMULTIHOP
] = TARGET_EMULTIHOP
,
567 [EBADMSG
] = TARGET_EBADMSG
,
568 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
569 [EOVERFLOW
] = TARGET_EOVERFLOW
,
570 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
571 [EBADFD
] = TARGET_EBADFD
,
572 [EREMCHG
] = TARGET_EREMCHG
,
573 [ELIBACC
] = TARGET_ELIBACC
,
574 [ELIBBAD
] = TARGET_ELIBBAD
,
575 [ELIBSCN
] = TARGET_ELIBSCN
,
576 [ELIBMAX
] = TARGET_ELIBMAX
,
577 [ELIBEXEC
] = TARGET_ELIBEXEC
,
578 [EILSEQ
] = TARGET_EILSEQ
,
579 [ENOSYS
] = TARGET_ENOSYS
,
580 [ELOOP
] = TARGET_ELOOP
,
581 [ERESTART
] = TARGET_ERESTART
,
582 [ESTRPIPE
] = TARGET_ESTRPIPE
,
583 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
584 [EUSERS
] = TARGET_EUSERS
,
585 [ENOTSOCK
] = TARGET_ENOTSOCK
,
586 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
587 [EMSGSIZE
] = TARGET_EMSGSIZE
,
588 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
589 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
590 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
591 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
592 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
593 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
594 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
595 [EADDRINUSE
] = TARGET_EADDRINUSE
,
596 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
597 [ENETDOWN
] = TARGET_ENETDOWN
,
598 [ENETUNREACH
] = TARGET_ENETUNREACH
,
599 [ENETRESET
] = TARGET_ENETRESET
,
600 [ECONNABORTED
] = TARGET_ECONNABORTED
,
601 [ECONNRESET
] = TARGET_ECONNRESET
,
602 [ENOBUFS
] = TARGET_ENOBUFS
,
603 [EISCONN
] = TARGET_EISCONN
,
604 [ENOTCONN
] = TARGET_ENOTCONN
,
605 [EUCLEAN
] = TARGET_EUCLEAN
,
606 [ENOTNAM
] = TARGET_ENOTNAM
,
607 [ENAVAIL
] = TARGET_ENAVAIL
,
608 [EISNAM
] = TARGET_EISNAM
,
609 [EREMOTEIO
] = TARGET_EREMOTEIO
,
610 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
611 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
612 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
613 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
614 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
615 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
616 [EALREADY
] = TARGET_EALREADY
,
617 [EINPROGRESS
] = TARGET_EINPROGRESS
,
618 [ESTALE
] = TARGET_ESTALE
,
619 [ECANCELED
] = TARGET_ECANCELED
,
620 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
621 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
623 [ENOKEY
] = TARGET_ENOKEY
,
626 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
629 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
632 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
635 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
637 #ifdef ENOTRECOVERABLE
638 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
642 static inline int host_to_target_errno(int err
)
644 if(host_to_target_errno_table
[err
])
645 return host_to_target_errno_table
[err
];
649 static inline int target_to_host_errno(int err
)
651 if (target_to_host_errno_table
[err
])
652 return target_to_host_errno_table
[err
];
656 static inline abi_long
get_errno(abi_long ret
)
659 return -host_to_target_errno(errno
);
664 static inline int is_error(abi_long ret
)
666 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* Return the host strerror(3) text for a target errno value. */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
674 static abi_ulong target_brk
;
675 static abi_ulong target_original_brk
;
677 void target_set_brk(abi_ulong new_brk
)
679 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
682 /* do_brk() must return target values and target errnos. */
683 abi_long
do_brk(abi_ulong new_brk
)
686 abi_long mapped_addr
;
691 if (new_brk
< target_original_brk
)
694 brk_page
= HOST_PAGE_ALIGN(target_brk
);
696 /* If the new brk is less than this, set it and we're done... */
697 if (new_brk
< brk_page
) {
698 target_brk
= new_brk
;
702 /* We need to allocate more memory after the brk... */
703 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
+ 1);
704 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
705 PROT_READ
|PROT_WRITE
,
706 MAP_ANON
|MAP_FIXED
|MAP_PRIVATE
, 0, 0));
708 if (!is_error(mapped_addr
))
709 target_brk
= new_brk
;
714 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
715 abi_ulong target_fds_addr
,
719 abi_ulong b
, *target_fds
;
721 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
722 if (!(target_fds
= lock_user(VERIFY_READ
,
724 sizeof(abi_ulong
) * nw
,
726 return -TARGET_EFAULT
;
730 for (i
= 0; i
< nw
; i
++) {
731 /* grab the abi_ulong */
732 __get_user(b
, &target_fds
[i
]);
733 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
734 /* check the bit inside the abi_ulong */
741 unlock_user(target_fds
, target_fds_addr
, 0);
746 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
752 abi_ulong
*target_fds
;
754 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
755 if (!(target_fds
= lock_user(VERIFY_WRITE
,
757 sizeof(abi_ulong
) * nw
,
759 return -TARGET_EFAULT
;
762 for (i
= 0; i
< nw
; i
++) {
764 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
765 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
768 __put_user(v
, &target_fds
[i
]);
771 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
776 #if defined(__alpha__)
782 static inline abi_long
host_to_target_clock_t(long ticks
)
784 #if HOST_HZ == TARGET_HZ
787 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
791 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
792 const struct rusage
*rusage
)
794 struct target_rusage
*target_rusage
;
796 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
797 return -TARGET_EFAULT
;
798 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
799 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
800 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
801 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
802 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
803 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
804 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
805 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
806 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
807 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
808 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
809 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
810 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
811 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
812 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
813 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
814 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
815 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
816 unlock_user_struct(target_rusage
, target_addr
, 1);
821 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
822 abi_ulong target_tv_addr
)
824 struct target_timeval
*target_tv
;
826 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
827 return -TARGET_EFAULT
;
829 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
830 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
832 unlock_user_struct(target_tv
, target_tv_addr
, 0);
837 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
838 const struct timeval
*tv
)
840 struct target_timeval
*target_tv
;
842 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
843 return -TARGET_EFAULT
;
845 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
846 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
848 unlock_user_struct(target_tv
, target_tv_addr
, 1);
853 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
854 abi_ulong target_mq_attr_addr
)
856 struct target_mq_attr
*target_mq_attr
;
858 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
859 target_mq_attr_addr
, 1))
860 return -TARGET_EFAULT
;
862 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
863 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
864 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
865 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
867 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
872 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
873 const struct mq_attr
*attr
)
875 struct target_mq_attr
*target_mq_attr
;
877 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
878 target_mq_attr_addr
, 0))
879 return -TARGET_EFAULT
;
881 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
882 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
883 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
884 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
886 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
891 /* do_select() must return target values and target errnos. */
892 static abi_long
do_select(int n
,
893 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
894 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
896 fd_set rfds
, wfds
, efds
;
897 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
898 struct timeval tv
, *tv_ptr
;
902 if (copy_from_user_fdset(&rfds
, rfd_addr
, n
))
903 return -TARGET_EFAULT
;
909 if (copy_from_user_fdset(&wfds
, wfd_addr
, n
))
910 return -TARGET_EFAULT
;
916 if (copy_from_user_fdset(&efds
, efd_addr
, n
))
917 return -TARGET_EFAULT
;
923 if (target_tv_addr
) {
924 if (copy_from_user_timeval(&tv
, target_tv_addr
))
925 return -TARGET_EFAULT
;
931 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
933 if (!is_error(ret
)) {
934 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
935 return -TARGET_EFAULT
;
936 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
937 return -TARGET_EFAULT
;
938 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
939 return -TARGET_EFAULT
;
941 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
942 return -TARGET_EFAULT
;
948 static abi_long
do_pipe2(int host_pipe
[], int flags
)
951 return pipe2(host_pipe
, flags
);
957 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
, int flags
)
961 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
964 return get_errno(ret
);
965 #if defined(TARGET_MIPS)
966 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
968 #elif defined(TARGET_SH4)
969 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
972 if (put_user_s32(host_pipe
[0], pipedes
)
973 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
974 return -TARGET_EFAULT
;
976 return get_errno(ret
);
979 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
980 abi_ulong target_addr
,
983 struct target_ip_mreqn
*target_smreqn
;
985 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
987 return -TARGET_EFAULT
;
988 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
989 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
990 if (len
== sizeof(struct target_ip_mreqn
))
991 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
992 unlock_user(target_smreqn
, target_addr
, 0);
997 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
998 abi_ulong target_addr
,
1001 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1002 sa_family_t sa_family
;
1003 struct target_sockaddr
*target_saddr
;
1005 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1007 return -TARGET_EFAULT
;
1009 sa_family
= tswap16(target_saddr
->sa_family
);
1011 /* Oops. The caller might send a incomplete sun_path; sun_path
1012 * must be terminated by \0 (see the manual page), but
1013 * unfortunately it is quite common to specify sockaddr_un
1014 * length as "strlen(x->sun_path)" while it should be
1015 * "strlen(...) + 1". We'll fix that here if needed.
1016 * Linux kernel has a similar feature.
1019 if (sa_family
== AF_UNIX
) {
1020 if (len
< unix_maxlen
&& len
> 0) {
1021 char *cp
= (char*)target_saddr
;
1023 if ( cp
[len
-1] && !cp
[len
] )
1026 if (len
> unix_maxlen
)
1030 memcpy(addr
, target_saddr
, len
);
1031 addr
->sa_family
= sa_family
;
1032 unlock_user(target_saddr
, target_addr
, 0);
1037 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1038 struct sockaddr
*addr
,
1041 struct target_sockaddr
*target_saddr
;
1043 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1045 return -TARGET_EFAULT
;
1046 memcpy(target_saddr
, addr
, len
);
1047 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1048 unlock_user(target_saddr
, target_addr
, len
);
1053 /* ??? Should this also swap msgh->name? */
1054 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1055 struct target_msghdr
*target_msgh
)
1057 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1058 abi_long msg_controllen
;
1059 abi_ulong target_cmsg_addr
;
1060 struct target_cmsghdr
*target_cmsg
;
1061 socklen_t space
= 0;
1063 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1064 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1066 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1067 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1069 return -TARGET_EFAULT
;
1071 while (cmsg
&& target_cmsg
) {
1072 void *data
= CMSG_DATA(cmsg
);
1073 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1075 int len
= tswapl(target_cmsg
->cmsg_len
)
1076 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1078 space
+= CMSG_SPACE(len
);
1079 if (space
> msgh
->msg_controllen
) {
1080 space
-= CMSG_SPACE(len
);
1081 gemu_log("Host cmsg overflow\n");
1085 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1086 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1087 cmsg
->cmsg_len
= CMSG_LEN(len
);
1089 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1090 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1091 memcpy(data
, target_data
, len
);
1093 int *fd
= (int *)data
;
1094 int *target_fd
= (int *)target_data
;
1095 int i
, numfds
= len
/ sizeof(int);
1097 for (i
= 0; i
< numfds
; i
++)
1098 fd
[i
] = tswap32(target_fd
[i
]);
1101 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1102 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1104 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1106 msgh
->msg_controllen
= space
;
1110 /* ??? Should this also swap msgh->name? */
1111 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1112 struct msghdr
*msgh
)
1114 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1115 abi_long msg_controllen
;
1116 abi_ulong target_cmsg_addr
;
1117 struct target_cmsghdr
*target_cmsg
;
1118 socklen_t space
= 0;
1120 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1121 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1123 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1124 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1126 return -TARGET_EFAULT
;
1128 while (cmsg
&& target_cmsg
) {
1129 void *data
= CMSG_DATA(cmsg
);
1130 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1132 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1134 space
+= TARGET_CMSG_SPACE(len
);
1135 if (space
> msg_controllen
) {
1136 space
-= TARGET_CMSG_SPACE(len
);
1137 gemu_log("Target cmsg overflow\n");
1141 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1142 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1143 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1145 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1146 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1147 memcpy(target_data
, data
, len
);
1149 int *fd
= (int *)data
;
1150 int *target_fd
= (int *)target_data
;
1151 int i
, numfds
= len
/ sizeof(int);
1153 for (i
= 0; i
< numfds
; i
++)
1154 target_fd
[i
] = tswap32(fd
[i
]);
1157 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1158 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1160 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1162 target_msgh
->msg_controllen
= tswapl(space
);
1166 /* do_setsockopt() Must return target values and target errnos. */
1167 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1168 abi_ulong optval_addr
, socklen_t optlen
)
1172 struct ip_mreqn
*ip_mreq
;
1173 struct ip_mreq_source
*ip_mreq_source
;
1177 /* TCP options all take an 'int' value. */
1178 if (optlen
< sizeof(uint32_t))
1179 return -TARGET_EINVAL
;
1181 if (get_user_u32(val
, optval_addr
))
1182 return -TARGET_EFAULT
;
1183 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1190 case IP_ROUTER_ALERT
:
1194 case IP_MTU_DISCOVER
:
1200 case IP_MULTICAST_TTL
:
1201 case IP_MULTICAST_LOOP
:
1203 if (optlen
>= sizeof(uint32_t)) {
1204 if (get_user_u32(val
, optval_addr
))
1205 return -TARGET_EFAULT
;
1206 } else if (optlen
>= 1) {
1207 if (get_user_u8(val
, optval_addr
))
1208 return -TARGET_EFAULT
;
1210 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1212 case IP_ADD_MEMBERSHIP
:
1213 case IP_DROP_MEMBERSHIP
:
1214 if (optlen
< sizeof (struct target_ip_mreq
) ||
1215 optlen
> sizeof (struct target_ip_mreqn
))
1216 return -TARGET_EINVAL
;
1218 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1219 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1220 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1223 case IP_BLOCK_SOURCE
:
1224 case IP_UNBLOCK_SOURCE
:
1225 case IP_ADD_SOURCE_MEMBERSHIP
:
1226 case IP_DROP_SOURCE_MEMBERSHIP
:
1227 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1228 return -TARGET_EINVAL
;
1230 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1231 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1232 unlock_user (ip_mreq_source
, optval_addr
, 0);
1239 case TARGET_SOL_SOCKET
:
1241 /* Options with 'int' argument. */
1242 case TARGET_SO_DEBUG
:
1245 case TARGET_SO_REUSEADDR
:
1246 optname
= SO_REUSEADDR
;
1248 case TARGET_SO_TYPE
:
1251 case TARGET_SO_ERROR
:
1254 case TARGET_SO_DONTROUTE
:
1255 optname
= SO_DONTROUTE
;
1257 case TARGET_SO_BROADCAST
:
1258 optname
= SO_BROADCAST
;
1260 case TARGET_SO_SNDBUF
:
1261 optname
= SO_SNDBUF
;
1263 case TARGET_SO_RCVBUF
:
1264 optname
= SO_RCVBUF
;
1266 case TARGET_SO_KEEPALIVE
:
1267 optname
= SO_KEEPALIVE
;
1269 case TARGET_SO_OOBINLINE
:
1270 optname
= SO_OOBINLINE
;
1272 case TARGET_SO_NO_CHECK
:
1273 optname
= SO_NO_CHECK
;
1275 case TARGET_SO_PRIORITY
:
1276 optname
= SO_PRIORITY
;
1279 case TARGET_SO_BSDCOMPAT
:
1280 optname
= SO_BSDCOMPAT
;
1283 case TARGET_SO_PASSCRED
:
1284 optname
= SO_PASSCRED
;
1286 case TARGET_SO_TIMESTAMP
:
1287 optname
= SO_TIMESTAMP
;
1289 case TARGET_SO_RCVLOWAT
:
1290 optname
= SO_RCVLOWAT
;
1292 case TARGET_SO_RCVTIMEO
:
1293 optname
= SO_RCVTIMEO
;
1295 case TARGET_SO_SNDTIMEO
:
1296 optname
= SO_SNDTIMEO
;
1302 if (optlen
< sizeof(uint32_t))
1303 return -TARGET_EINVAL
;
1305 if (get_user_u32(val
, optval_addr
))
1306 return -TARGET_EFAULT
;
1307 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1311 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1312 ret
= -TARGET_ENOPROTOOPT
;
1317 /* do_getsockopt() Must return target values and target errnos. */
1318 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1319 abi_ulong optval_addr
, abi_ulong optlen
)
1326 case TARGET_SOL_SOCKET
:
1329 case TARGET_SO_LINGER
:
1330 case TARGET_SO_RCVTIMEO
:
1331 case TARGET_SO_SNDTIMEO
:
1332 case TARGET_SO_PEERCRED
:
1333 case TARGET_SO_PEERNAME
:
1334 /* These don't just return a single integer */
1341 /* TCP options all take an 'int' value. */
1343 if (get_user_u32(len
, optlen
))
1344 return -TARGET_EFAULT
;
1346 return -TARGET_EINVAL
;
1348 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1354 if (put_user_u32(val
, optval_addr
))
1355 return -TARGET_EFAULT
;
1357 if (put_user_u8(val
, optval_addr
))
1358 return -TARGET_EFAULT
;
1360 if (put_user_u32(len
, optlen
))
1361 return -TARGET_EFAULT
;
1368 case IP_ROUTER_ALERT
:
1372 case IP_MTU_DISCOVER
:
1378 case IP_MULTICAST_TTL
:
1379 case IP_MULTICAST_LOOP
:
1380 if (get_user_u32(len
, optlen
))
1381 return -TARGET_EFAULT
;
1383 return -TARGET_EINVAL
;
1385 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1388 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1390 if (put_user_u32(len
, optlen
)
1391 || put_user_u8(val
, optval_addr
))
1392 return -TARGET_EFAULT
;
1394 if (len
> sizeof(int))
1396 if (put_user_u32(len
, optlen
)
1397 || put_user_u32(val
, optval_addr
))
1398 return -TARGET_EFAULT
;
1402 ret
= -TARGET_ENOPROTOOPT
;
1408 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1410 ret
= -TARGET_EOPNOTSUPP
;
1417 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1418 * other lock functions have a return code of 0 for failure.
1420 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1421 int count
, int copy
)
1423 struct target_iovec
*target_vec
;
1427 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1429 return -TARGET_EFAULT
;
1430 for(i
= 0;i
< count
; i
++) {
1431 base
= tswapl(target_vec
[i
].iov_base
);
1432 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1433 if (vec
[i
].iov_len
!= 0) {
1434 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1435 /* Don't check lock_user return value. We must call writev even
1436 if a element has invalid base address. */
1438 /* zero length pointer is ignored */
1439 vec
[i
].iov_base
= NULL
;
1442 unlock_user (target_vec
, target_addr
, 0);
1446 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1447 int count
, int copy
)
1449 struct target_iovec
*target_vec
;
1453 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1455 return -TARGET_EFAULT
;
1456 for(i
= 0;i
< count
; i
++) {
1457 if (target_vec
[i
].iov_base
) {
1458 base
= tswapl(target_vec
[i
].iov_base
);
1459 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1462 unlock_user (target_vec
, target_addr
, 0);
1467 /* do_socket() Must return target values and target errnos. */
1468 static abi_long
do_socket(int domain
, int type
, int protocol
)
1470 #if defined(TARGET_MIPS)
1472 case TARGET_SOCK_DGRAM
:
1475 case TARGET_SOCK_STREAM
:
1478 case TARGET_SOCK_RAW
:
1481 case TARGET_SOCK_RDM
:
1484 case TARGET_SOCK_SEQPACKET
:
1485 type
= SOCK_SEQPACKET
;
1487 case TARGET_SOCK_PACKET
:
1492 if (domain
== PF_NETLINK
)
1493 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1494 return get_errno(socket(domain
, type
, protocol
));
1497 /* do_bind() Must return target values and target errnos. */
1498 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1505 return -TARGET_EINVAL
;
1507 addr
= alloca(addrlen
+1);
1509 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1513 return get_errno(bind(sockfd
, addr
, addrlen
));
1516 /* do_connect() Must return target values and target errnos. */
1517 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1524 return -TARGET_EINVAL
;
1526 addr
= alloca(addrlen
);
1528 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1532 return get_errno(connect(sockfd
, addr
, addrlen
));
1535 /* do_sendrecvmsg() Must return target values and target errnos. */
1536 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1537 int flags
, int send
)
1540 struct target_msghdr
*msgp
;
1544 abi_ulong target_vec
;
1547 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1551 return -TARGET_EFAULT
;
1552 if (msgp
->msg_name
) {
1553 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1554 msg
.msg_name
= alloca(msg
.msg_namelen
);
1555 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1558 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1562 msg
.msg_name
= NULL
;
1563 msg
.msg_namelen
= 0;
1565 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1566 msg
.msg_control
= alloca(msg
.msg_controllen
);
1567 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1569 count
= tswapl(msgp
->msg_iovlen
);
1570 vec
= alloca(count
* sizeof(struct iovec
));
1571 target_vec
= tswapl(msgp
->msg_iov
);
1572 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1573 msg
.msg_iovlen
= count
;
1577 ret
= target_to_host_cmsg(&msg
, msgp
);
1579 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1581 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1582 if (!is_error(ret
)) {
1584 ret
= host_to_target_cmsg(msgp
, &msg
);
1589 unlock_iovec(vec
, target_vec
, count
, !send
);
1590 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1594 /* do_accept() Must return target values and target errnos. */
1595 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1596 abi_ulong target_addrlen_addr
)
1602 if (target_addr
== 0)
1603 return get_errno(accept(fd
, NULL
, NULL
));
1605 /* linux returns EINVAL if addrlen pointer is invalid */
1606 if (get_user_u32(addrlen
, target_addrlen_addr
))
1607 return -TARGET_EINVAL
;
1610 return -TARGET_EINVAL
;
1612 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1613 return -TARGET_EINVAL
;
1615 addr
= alloca(addrlen
);
1617 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1618 if (!is_error(ret
)) {
1619 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1620 if (put_user_u32(addrlen
, target_addrlen_addr
))
1621 ret
= -TARGET_EFAULT
;
1626 /* do_getpeername() Must return target values and target errnos. */
1627 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1628 abi_ulong target_addrlen_addr
)
1634 if (get_user_u32(addrlen
, target_addrlen_addr
))
1635 return -TARGET_EFAULT
;
1638 return -TARGET_EINVAL
;
1640 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1641 return -TARGET_EFAULT
;
1643 addr
= alloca(addrlen
);
1645 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1646 if (!is_error(ret
)) {
1647 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1648 if (put_user_u32(addrlen
, target_addrlen_addr
))
1649 ret
= -TARGET_EFAULT
;
1654 /* do_getsockname() Must return target values and target errnos. */
1655 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1656 abi_ulong target_addrlen_addr
)
1662 if (get_user_u32(addrlen
, target_addrlen_addr
))
1663 return -TARGET_EFAULT
;
1666 return -TARGET_EINVAL
;
1668 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
1669 return -TARGET_EFAULT
;
1671 addr
= alloca(addrlen
);
1673 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1674 if (!is_error(ret
)) {
1675 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1676 if (put_user_u32(addrlen
, target_addrlen_addr
))
1677 ret
= -TARGET_EFAULT
;
1682 /* do_socketpair() Must return target values and target errnos. */
1683 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1684 abi_ulong target_tab_addr
)
1689 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1690 if (!is_error(ret
)) {
1691 if (put_user_s32(tab
[0], target_tab_addr
)
1692 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1693 ret
= -TARGET_EFAULT
;
1698 /* do_sendto() Must return target values and target errnos. */
1699 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1700 abi_ulong target_addr
, socklen_t addrlen
)
1707 return -TARGET_EINVAL
;
1709 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1711 return -TARGET_EFAULT
;
1713 addr
= alloca(addrlen
);
1714 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1716 unlock_user(host_msg
, msg
, 0);
1719 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1721 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1723 unlock_user(host_msg
, msg
, 0);
1727 /* do_recvfrom() Must return target values and target errnos. */
1728 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1729 abi_ulong target_addr
,
1730 abi_ulong target_addrlen
)
1737 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1739 return -TARGET_EFAULT
;
1741 if (get_user_u32(addrlen
, target_addrlen
)) {
1742 ret
= -TARGET_EFAULT
;
1746 ret
= -TARGET_EINVAL
;
1749 addr
= alloca(addrlen
);
1750 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1752 addr
= NULL
; /* To keep compiler quiet. */
1753 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1755 if (!is_error(ret
)) {
1757 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1758 if (put_user_u32(addrlen
, target_addrlen
)) {
1759 ret
= -TARGET_EFAULT
;
1763 unlock_user(host_msg
, msg
, len
);
1766 unlock_user(host_msg
, msg
, 0);
1771 #ifdef TARGET_NR_socketcall
1772 /* do_socketcall() Must return target values and target errnos. */
1773 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1776 const int n
= sizeof(abi_ulong
);
1781 abi_ulong domain
, type
, protocol
;
1783 if (get_user_ual(domain
, vptr
)
1784 || get_user_ual(type
, vptr
+ n
)
1785 || get_user_ual(protocol
, vptr
+ 2 * n
))
1786 return -TARGET_EFAULT
;
1788 ret
= do_socket(domain
, type
, protocol
);
1794 abi_ulong target_addr
;
1797 if (get_user_ual(sockfd
, vptr
)
1798 || get_user_ual(target_addr
, vptr
+ n
)
1799 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1800 return -TARGET_EFAULT
;
1802 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1805 case SOCKOP_connect
:
1808 abi_ulong target_addr
;
1811 if (get_user_ual(sockfd
, vptr
)
1812 || get_user_ual(target_addr
, vptr
+ n
)
1813 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1814 return -TARGET_EFAULT
;
1816 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1821 abi_ulong sockfd
, backlog
;
1823 if (get_user_ual(sockfd
, vptr
)
1824 || get_user_ual(backlog
, vptr
+ n
))
1825 return -TARGET_EFAULT
;
1827 ret
= get_errno(listen(sockfd
, backlog
));
1833 abi_ulong target_addr
, target_addrlen
;
1835 if (get_user_ual(sockfd
, vptr
)
1836 || get_user_ual(target_addr
, vptr
+ n
)
1837 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1838 return -TARGET_EFAULT
;
1840 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1843 case SOCKOP_getsockname
:
1846 abi_ulong target_addr
, target_addrlen
;
1848 if (get_user_ual(sockfd
, vptr
)
1849 || get_user_ual(target_addr
, vptr
+ n
)
1850 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1851 return -TARGET_EFAULT
;
1853 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1856 case SOCKOP_getpeername
:
1859 abi_ulong target_addr
, target_addrlen
;
1861 if (get_user_ual(sockfd
, vptr
)
1862 || get_user_ual(target_addr
, vptr
+ n
)
1863 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1864 return -TARGET_EFAULT
;
1866 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1869 case SOCKOP_socketpair
:
1871 abi_ulong domain
, type
, protocol
;
1874 if (get_user_ual(domain
, vptr
)
1875 || get_user_ual(type
, vptr
+ n
)
1876 || get_user_ual(protocol
, vptr
+ 2 * n
)
1877 || get_user_ual(tab
, vptr
+ 3 * n
))
1878 return -TARGET_EFAULT
;
1880 ret
= do_socketpair(domain
, type
, protocol
, tab
);
1890 if (get_user_ual(sockfd
, vptr
)
1891 || get_user_ual(msg
, vptr
+ n
)
1892 || get_user_ual(len
, vptr
+ 2 * n
)
1893 || get_user_ual(flags
, vptr
+ 3 * n
))
1894 return -TARGET_EFAULT
;
1896 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
1906 if (get_user_ual(sockfd
, vptr
)
1907 || get_user_ual(msg
, vptr
+ n
)
1908 || get_user_ual(len
, vptr
+ 2 * n
)
1909 || get_user_ual(flags
, vptr
+ 3 * n
))
1910 return -TARGET_EFAULT
;
1912 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
1924 if (get_user_ual(sockfd
, vptr
)
1925 || get_user_ual(msg
, vptr
+ n
)
1926 || get_user_ual(len
, vptr
+ 2 * n
)
1927 || get_user_ual(flags
, vptr
+ 3 * n
)
1928 || get_user_ual(addr
, vptr
+ 4 * n
)
1929 || get_user_ual(addrlen
, vptr
+ 5 * n
))
1930 return -TARGET_EFAULT
;
1932 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1935 case SOCKOP_recvfrom
:
1944 if (get_user_ual(sockfd
, vptr
)
1945 || get_user_ual(msg
, vptr
+ n
)
1946 || get_user_ual(len
, vptr
+ 2 * n
)
1947 || get_user_ual(flags
, vptr
+ 3 * n
)
1948 || get_user_ual(addr
, vptr
+ 4 * n
)
1949 || get_user_ual(addrlen
, vptr
+ 5 * n
))
1950 return -TARGET_EFAULT
;
1952 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1955 case SOCKOP_shutdown
:
1957 abi_ulong sockfd
, how
;
1959 if (get_user_ual(sockfd
, vptr
)
1960 || get_user_ual(how
, vptr
+ n
))
1961 return -TARGET_EFAULT
;
1963 ret
= get_errno(shutdown(sockfd
, how
));
1966 case SOCKOP_sendmsg
:
1967 case SOCKOP_recvmsg
:
1970 abi_ulong target_msg
;
1973 if (get_user_ual(fd
, vptr
)
1974 || get_user_ual(target_msg
, vptr
+ n
)
1975 || get_user_ual(flags
, vptr
+ 2 * n
))
1976 return -TARGET_EFAULT
;
1978 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
1979 (num
== SOCKOP_sendmsg
));
1982 case SOCKOP_setsockopt
:
1990 if (get_user_ual(sockfd
, vptr
)
1991 || get_user_ual(level
, vptr
+ n
)
1992 || get_user_ual(optname
, vptr
+ 2 * n
)
1993 || get_user_ual(optval
, vptr
+ 3 * n
)
1994 || get_user_ual(optlen
, vptr
+ 4 * n
))
1995 return -TARGET_EFAULT
;
1997 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
2000 case SOCKOP_getsockopt
:
2008 if (get_user_ual(sockfd
, vptr
)
2009 || get_user_ual(level
, vptr
+ n
)
2010 || get_user_ual(optname
, vptr
+ 2 * n
)
2011 || get_user_ual(optval
, vptr
+ 3 * n
)
2012 || get_user_ual(optlen
, vptr
+ 4 * n
))
2013 return -TARGET_EFAULT
;
2015 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2019 gemu_log("Unsupported socketcall: %d\n", num
);
2020 ret
= -TARGET_ENOSYS
;
2027 #define N_SHM_REGIONS 32
2029 static struct shm_region
{
2032 } shm_regions
[N_SHM_REGIONS
];
2034 struct target_ipc_perm
2041 unsigned short int mode
;
2042 unsigned short int __pad1
;
2043 unsigned short int __seq
;
2044 unsigned short int __pad2
;
2045 abi_ulong __unused1
;
2046 abi_ulong __unused2
;
2049 struct target_semid_ds
2051 struct target_ipc_perm sem_perm
;
2052 abi_ulong sem_otime
;
2053 abi_ulong __unused1
;
2054 abi_ulong sem_ctime
;
2055 abi_ulong __unused2
;
2056 abi_ulong sem_nsems
;
2057 abi_ulong __unused3
;
2058 abi_ulong __unused4
;
2061 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2062 abi_ulong target_addr
)
2064 struct target_ipc_perm
*target_ip
;
2065 struct target_semid_ds
*target_sd
;
2067 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2068 return -TARGET_EFAULT
;
2069 target_ip
=&(target_sd
->sem_perm
);
2070 host_ip
->__key
= tswapl(target_ip
->__key
);
2071 host_ip
->uid
= tswapl(target_ip
->uid
);
2072 host_ip
->gid
= tswapl(target_ip
->gid
);
2073 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2074 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2075 host_ip
->mode
= tswapl(target_ip
->mode
);
2076 unlock_user_struct(target_sd
, target_addr
, 0);
2080 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2081 struct ipc_perm
*host_ip
)
2083 struct target_ipc_perm
*target_ip
;
2084 struct target_semid_ds
*target_sd
;
2086 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2087 return -TARGET_EFAULT
;
2088 target_ip
= &(target_sd
->sem_perm
);
2089 target_ip
->__key
= tswapl(host_ip
->__key
);
2090 target_ip
->uid
= tswapl(host_ip
->uid
);
2091 target_ip
->gid
= tswapl(host_ip
->gid
);
2092 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2093 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2094 target_ip
->mode
= tswapl(host_ip
->mode
);
2095 unlock_user_struct(target_sd
, target_addr
, 1);
2099 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2100 abi_ulong target_addr
)
2102 struct target_semid_ds
*target_sd
;
2104 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2105 return -TARGET_EFAULT
;
2106 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2107 return -TARGET_EFAULT
;
2108 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2109 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2110 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2111 unlock_user_struct(target_sd
, target_addr
, 0);
2115 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2116 struct semid_ds
*host_sd
)
2118 struct target_semid_ds
*target_sd
;
2120 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2121 return -TARGET_EFAULT
;
2122 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2123 return -TARGET_EFAULT
;;
2124 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2125 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2126 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2127 unlock_user_struct(target_sd
, target_addr
, 1);
2131 struct target_seminfo
{
2144 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2145 struct seminfo
*host_seminfo
)
2147 struct target_seminfo
*target_seminfo
;
2148 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2149 return -TARGET_EFAULT
;
2150 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2151 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2152 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2153 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2154 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2155 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2156 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2157 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2158 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2159 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2160 unlock_user_struct(target_seminfo
, target_addr
, 1);
2166 struct semid_ds
*buf
;
2167 unsigned short *array
;
2168 struct seminfo
*__buf
;
2171 union target_semun
{
2178 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2179 abi_ulong target_addr
)
2182 unsigned short *array
;
2184 struct semid_ds semid_ds
;
2187 semun
.buf
= &semid_ds
;
2189 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2191 return get_errno(ret
);
2193 nsems
= semid_ds
.sem_nsems
;
2195 *host_array
= malloc(nsems
*sizeof(unsigned short));
2196 array
= lock_user(VERIFY_READ
, target_addr
,
2197 nsems
*sizeof(unsigned short), 1);
2199 return -TARGET_EFAULT
;
2201 for(i
=0; i
<nsems
; i
++) {
2202 __get_user((*host_array
)[i
], &array
[i
]);
2204 unlock_user(array
, target_addr
, 0);
2209 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2210 unsigned short **host_array
)
2213 unsigned short *array
;
2215 struct semid_ds semid_ds
;
2218 semun
.buf
= &semid_ds
;
2220 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2222 return get_errno(ret
);
2224 nsems
= semid_ds
.sem_nsems
;
2226 array
= lock_user(VERIFY_WRITE
, target_addr
,
2227 nsems
*sizeof(unsigned short), 0);
2229 return -TARGET_EFAULT
;
2231 for(i
=0; i
<nsems
; i
++) {
2232 __put_user((*host_array
)[i
], &array
[i
]);
2235 unlock_user(array
, target_addr
, 1);
2240 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2241 union target_semun target_su
)
2244 struct semid_ds dsarg
;
2245 unsigned short *array
= NULL
;
2246 struct seminfo seminfo
;
2247 abi_long ret
= -TARGET_EINVAL
;
2254 arg
.val
= tswapl(target_su
.val
);
2255 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2256 target_su
.val
= tswapl(arg
.val
);
2260 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2264 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2265 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2272 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2276 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2277 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2283 arg
.__buf
= &seminfo
;
2284 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2285 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2293 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2300 struct target_sembuf
{
2301 unsigned short sem_num
;
2306 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2307 abi_ulong target_addr
,
2310 struct target_sembuf
*target_sembuf
;
2313 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2314 nsops
*sizeof(struct target_sembuf
), 1);
2316 return -TARGET_EFAULT
;
2318 for(i
=0; i
<nsops
; i
++) {
2319 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2320 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2321 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2324 unlock_user(target_sembuf
, target_addr
, 0);
2329 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2331 struct sembuf sops
[nsops
];
2333 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2334 return -TARGET_EFAULT
;
2336 return semop(semid
, sops
, nsops
);
2339 struct target_msqid_ds
2341 struct target_ipc_perm msg_perm
;
2342 abi_ulong msg_stime
;
2343 #if TARGET_ABI_BITS == 32
2344 abi_ulong __unused1
;
2346 abi_ulong msg_rtime
;
2347 #if TARGET_ABI_BITS == 32
2348 abi_ulong __unused2
;
2350 abi_ulong msg_ctime
;
2351 #if TARGET_ABI_BITS == 32
2352 abi_ulong __unused3
;
2354 abi_ulong __msg_cbytes
;
2356 abi_ulong msg_qbytes
;
2357 abi_ulong msg_lspid
;
2358 abi_ulong msg_lrpid
;
2359 abi_ulong __unused4
;
2360 abi_ulong __unused5
;
2363 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2364 abi_ulong target_addr
)
2366 struct target_msqid_ds
*target_md
;
2368 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2369 return -TARGET_EFAULT
;
2370 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2371 return -TARGET_EFAULT
;
2372 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2373 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2374 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2375 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2376 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2377 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2378 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2379 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2380 unlock_user_struct(target_md
, target_addr
, 0);
2384 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2385 struct msqid_ds
*host_md
)
2387 struct target_msqid_ds
*target_md
;
2389 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2390 return -TARGET_EFAULT
;
2391 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2392 return -TARGET_EFAULT
;
2393 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2394 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2395 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2396 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2397 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2398 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2399 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2400 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2401 unlock_user_struct(target_md
, target_addr
, 1);
2405 struct target_msginfo
{
2413 unsigned short int msgseg
;
2416 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2417 struct msginfo
*host_msginfo
)
2419 struct target_msginfo
*target_msginfo
;
2420 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2421 return -TARGET_EFAULT
;
2422 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2423 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2424 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2425 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2426 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2427 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2428 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2429 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2430 unlock_user_struct(target_msginfo
, target_addr
, 1);
2434 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2436 struct msqid_ds dsarg
;
2437 struct msginfo msginfo
;
2438 abi_long ret
= -TARGET_EINVAL
;
2446 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2447 return -TARGET_EFAULT
;
2448 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2449 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2450 return -TARGET_EFAULT
;
2453 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2457 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2458 if (host_to_target_msginfo(ptr
, &msginfo
))
2459 return -TARGET_EFAULT
;
2466 struct target_msgbuf
{
2471 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2472 unsigned int msgsz
, int msgflg
)
2474 struct target_msgbuf
*target_mb
;
2475 struct msgbuf
*host_mb
;
2478 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2479 return -TARGET_EFAULT
;
2480 host_mb
= malloc(msgsz
+sizeof(long));
2481 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2482 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2483 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2485 unlock_user_struct(target_mb
, msgp
, 0);
2490 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2491 unsigned int msgsz
, abi_long msgtyp
,
2494 struct target_msgbuf
*target_mb
;
2496 struct msgbuf
*host_mb
;
2499 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2500 return -TARGET_EFAULT
;
2502 host_mb
= malloc(msgsz
+sizeof(long));
2503 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2506 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2507 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2508 if (!target_mtext
) {
2509 ret
= -TARGET_EFAULT
;
2512 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2513 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2516 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2521 unlock_user_struct(target_mb
, msgp
, 1);
2525 struct target_shmid_ds
2527 struct target_ipc_perm shm_perm
;
2528 abi_ulong shm_segsz
;
2529 abi_ulong shm_atime
;
2530 #if TARGET_ABI_BITS == 32
2531 abi_ulong __unused1
;
2533 abi_ulong shm_dtime
;
2534 #if TARGET_ABI_BITS == 32
2535 abi_ulong __unused2
;
2537 abi_ulong shm_ctime
;
2538 #if TARGET_ABI_BITS == 32
2539 abi_ulong __unused3
;
2543 abi_ulong shm_nattch
;
2544 unsigned long int __unused4
;
2545 unsigned long int __unused5
;
2548 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2549 abi_ulong target_addr
)
2551 struct target_shmid_ds
*target_sd
;
2553 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2554 return -TARGET_EFAULT
;
2555 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2556 return -TARGET_EFAULT
;
2557 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2558 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2559 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2560 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2561 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2562 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2563 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2564 unlock_user_struct(target_sd
, target_addr
, 0);
2568 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2569 struct shmid_ds
*host_sd
)
2571 struct target_shmid_ds
*target_sd
;
2573 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2574 return -TARGET_EFAULT
;
2575 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2576 return -TARGET_EFAULT
;
2577 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2578 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2579 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2580 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2581 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2582 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2583 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2584 unlock_user_struct(target_sd
, target_addr
, 1);
2588 struct target_shminfo
{
2596 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2597 struct shminfo
*host_shminfo
)
2599 struct target_shminfo
*target_shminfo
;
2600 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2601 return -TARGET_EFAULT
;
2602 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2603 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2604 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2605 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2606 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2607 unlock_user_struct(target_shminfo
, target_addr
, 1);
2611 struct target_shm_info
{
2616 abi_ulong swap_attempts
;
2617 abi_ulong swap_successes
;
2620 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2621 struct shm_info
*host_shm_info
)
2623 struct target_shm_info
*target_shm_info
;
2624 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2625 return -TARGET_EFAULT
;
2626 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2627 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2628 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2629 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2630 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2631 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2632 unlock_user_struct(target_shm_info
, target_addr
, 1);
2636 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2638 struct shmid_ds dsarg
;
2639 struct shminfo shminfo
;
2640 struct shm_info shm_info
;
2641 abi_long ret
= -TARGET_EINVAL
;
2649 if (target_to_host_shmid_ds(&dsarg
, buf
))
2650 return -TARGET_EFAULT
;
2651 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2652 if (host_to_target_shmid_ds(buf
, &dsarg
))
2653 return -TARGET_EFAULT
;
2656 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2657 if (host_to_target_shminfo(buf
, &shminfo
))
2658 return -TARGET_EFAULT
;
2661 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2662 if (host_to_target_shm_info(buf
, &shm_info
))
2663 return -TARGET_EFAULT
;
2668 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2675 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2679 struct shmid_ds shm_info
;
2682 /* find out the length of the shared memory segment */
2683 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2684 if (is_error(ret
)) {
2685 /* can't get length, bail out */
2692 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2694 abi_ulong mmap_start
;
2696 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2698 if (mmap_start
== -1) {
2700 host_raddr
= (void *)-1;
2702 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2705 if (host_raddr
== (void *)-1) {
2707 return get_errno((long)host_raddr
);
2709 raddr
=h2g((unsigned long)host_raddr
);
2711 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2712 PAGE_VALID
| PAGE_READ
|
2713 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2715 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2716 if (shm_regions
[i
].start
== 0) {
2717 shm_regions
[i
].start
= raddr
;
2718 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2728 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
2732 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2733 if (shm_regions
[i
].start
== shmaddr
) {
2734 shm_regions
[i
].start
= 0;
2735 page_set_flags(shmaddr
, shm_regions
[i
].size
, 0);
2740 return get_errno(shmdt(g2h(shmaddr
)));
2743 #ifdef TARGET_NR_ipc
2744 /* ??? This only works with linear mappings. */
2745 /* do_ipc() must return target values and target errnos. */
2746 static abi_long
do_ipc(unsigned int call
, int first
,
2747 int second
, int third
,
2748 abi_long ptr
, abi_long fifth
)
2753 version
= call
>> 16;
2758 ret
= do_semop(first
, ptr
, second
);
2762 ret
= get_errno(semget(first
, second
, third
));
2766 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
2770 ret
= get_errno(msgget(first
, second
));
2774 ret
= do_msgsnd(first
, ptr
, second
, third
);
2778 ret
= do_msgctl(first
, second
, ptr
);
2785 struct target_ipc_kludge
{
2790 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
2791 ret
= -TARGET_EFAULT
;
2795 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
2797 unlock_user_struct(tmp
, ptr
, 0);
2801 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
2810 raddr
= do_shmat(first
, ptr
, second
);
2811 if (is_error(raddr
))
2812 return get_errno(raddr
);
2813 if (put_user_ual(raddr
, third
))
2814 return -TARGET_EFAULT
;
2818 ret
= -TARGET_EINVAL
;
2823 ret
= do_shmdt(ptr
);
2827 /* IPC_* flag values are the same on all linux platforms */
2828 ret
= get_errno(shmget(first
, second
, third
));
2831 /* IPC_* and SHM_* command values are the same on all linux platforms */
2833 ret
= do_shmctl(first
, second
, third
);
2836 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
2837 ret
= -TARGET_ENOSYS
;
2844 /* kernel structure types definitions */
2847 #define STRUCT(name, ...) STRUCT_ ## name,
2848 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2850 #include "syscall_types.h"
2853 #undef STRUCT_SPECIAL
2855 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2856 #define STRUCT_SPECIAL(name)
2857 #include "syscall_types.h"
2859 #undef STRUCT_SPECIAL
2861 typedef struct IOCTLEntry
{
2862 unsigned int target_cmd
;
2863 unsigned int host_cmd
;
2866 const argtype arg_type
[5];
2869 #define IOC_R 0x0001
2870 #define IOC_W 0x0002
2871 #define IOC_RW (IOC_R | IOC_W)
2873 #define MAX_STRUCT_SIZE 4096
2875 static IOCTLEntry ioctl_entries
[] = {
2876 #define IOCTL(cmd, access, ...) \
2877 { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
2882 /* ??? Implement proper locking for ioctls. */
2883 /* do_ioctl() Must return target values and target errnos. */
2884 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
2886 const IOCTLEntry
*ie
;
2887 const argtype
*arg_type
;
2889 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
2895 if (ie
->target_cmd
== 0) {
2896 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
2897 return -TARGET_ENOSYS
;
2899 if (ie
->target_cmd
== cmd
)
2903 arg_type
= ie
->arg_type
;
2905 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
2907 switch(arg_type
[0]) {
2910 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
2915 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
2919 target_size
= thunk_type_size(arg_type
, 0);
2920 switch(ie
->access
) {
2922 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2923 if (!is_error(ret
)) {
2924 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
2926 return -TARGET_EFAULT
;
2927 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
2928 unlock_user(argptr
, arg
, target_size
);
2932 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
2934 return -TARGET_EFAULT
;
2935 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
2936 unlock_user(argptr
, arg
, 0);
2937 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2941 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
2943 return -TARGET_EFAULT
;
2944 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
2945 unlock_user(argptr
, arg
, 0);
2946 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2947 if (!is_error(ret
)) {
2948 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
2950 return -TARGET_EFAULT
;
2951 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
2952 unlock_user(argptr
, arg
, target_size
);
2958 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2959 (long)cmd
, arg_type
[0]);
2960 ret
= -TARGET_ENOSYS
;
2966 static const bitmask_transtbl iflag_tbl
[] = {
2967 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
2968 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
2969 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
2970 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
2971 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
2972 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
2973 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
2974 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
2975 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
2976 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
2977 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
2978 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
2979 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
2980 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
2984 static const bitmask_transtbl oflag_tbl
[] = {
2985 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
2986 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
2987 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
2988 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
2989 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
2990 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
2991 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
2992 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
2993 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
2994 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
2995 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
2996 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
2997 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
2998 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
2999 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
3000 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
3001 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
3002 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
3003 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
3004 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
3005 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
3006 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
3007 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
3008 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
3012 static const bitmask_transtbl cflag_tbl
[] = {
3013 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
3014 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
3015 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
3016 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
3017 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
3018 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
3019 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
3020 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
3021 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
3022 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
3023 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
3024 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
3025 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
3026 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3027 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3028 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3029 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3030 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3031 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3032 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3033 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3034 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3035 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3036 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3037 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3038 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3039 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3040 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3041 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3042 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3043 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3047 static const bitmask_transtbl lflag_tbl
[] = {
3048 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3049 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3050 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3051 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3052 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3053 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3054 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3055 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3056 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3057 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3058 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3059 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3060 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3061 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3062 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3066 static void target_to_host_termios (void *dst
, const void *src
)
3068 struct host_termios
*host
= dst
;
3069 const struct target_termios
*target
= src
;
3072 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3074 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3076 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3078 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3079 host
->c_line
= target
->c_line
;
3081 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3082 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3083 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3084 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3085 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3086 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3087 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3088 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3089 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3090 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3091 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3092 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3093 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3094 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3095 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3096 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3097 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3098 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3101 static void host_to_target_termios (void *dst
, const void *src
)
3103 struct target_termios
*target
= dst
;
3104 const struct host_termios
*host
= src
;
3107 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3109 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3111 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3113 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3114 target
->c_line
= host
->c_line
;
3116 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3117 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3118 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3119 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3120 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3121 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3122 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3123 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3124 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3125 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3126 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3127 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3128 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3129 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3130 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3131 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3132 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3133 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3136 static const StructEntry struct_termios_def
= {
3137 .convert
= { host_to_target_termios
, target_to_host_termios
},
3138 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3139 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3142 static bitmask_transtbl mmap_flags_tbl
[] = {
3143 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3144 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3145 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3146 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3147 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3148 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3149 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3150 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3154 #if defined(TARGET_I386)
3156 /* NOTE: there is really one LDT for all the threads */
3157 static uint8_t *ldt_table
;
3159 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3166 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3167 if (size
> bytecount
)
3169 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3171 return -TARGET_EFAULT
;
3172 /* ??? Should this by byteswapped? */
3173 memcpy(p
, ldt_table
, size
);
3174 unlock_user(p
, ptr
, size
);
3178 /* XXX: add locking support */
3179 static abi_long
write_ldt(CPUX86State
*env
,
3180 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3182 struct target_modify_ldt_ldt_s ldt_info
;
3183 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3184 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3185 int seg_not_present
, useable
, lm
;
3186 uint32_t *lp
, entry_1
, entry_2
;
3188 if (bytecount
!= sizeof(ldt_info
))
3189 return -TARGET_EINVAL
;
3190 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3191 return -TARGET_EFAULT
;
3192 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3193 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3194 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3195 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3196 unlock_user_struct(target_ldt_info
, ptr
, 0);
3198 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3199 return -TARGET_EINVAL
;
3200 seg_32bit
= ldt_info
.flags
& 1;
3201 contents
= (ldt_info
.flags
>> 1) & 3;
3202 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3203 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3204 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3205 useable
= (ldt_info
.flags
>> 6) & 1;
3209 lm
= (ldt_info
.flags
>> 7) & 1;
3211 if (contents
== 3) {
3213 return -TARGET_EINVAL
;
3214 if (seg_not_present
== 0)
3215 return -TARGET_EINVAL
;
3217 /* allocate the LDT */
3219 env
->ldt
.base
= target_mmap(0,
3220 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3221 PROT_READ
|PROT_WRITE
,
3222 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3223 if (env
->ldt
.base
== -1)
3224 return -TARGET_ENOMEM
;
3225 memset(g2h(env
->ldt
.base
), 0,
3226 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3227 env
->ldt
.limit
= 0xffff;
3228 ldt_table
= g2h(env
->ldt
.base
);
3231 /* NOTE: same code as Linux kernel */
3232 /* Allow LDTs to be cleared by the user. */
3233 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3236 read_exec_only
== 1 &&
3238 limit_in_pages
== 0 &&
3239 seg_not_present
== 1 &&
3247 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3248 (ldt_info
.limit
& 0x0ffff);
3249 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3250 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3251 (ldt_info
.limit
& 0xf0000) |
3252 ((read_exec_only
^ 1) << 9) |
3254 ((seg_not_present
^ 1) << 15) |
3256 (limit_in_pages
<< 23) |
3260 entry_2
|= (useable
<< 20);
3262 /* Install the new entry ... */
3264 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3265 lp
[0] = tswap32(entry_1
);
3266 lp
[1] = tswap32(entry_2
);
3270 /* specific and weird i386 syscalls */
3271 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3272 unsigned long bytecount
)
3278 ret
= read_ldt(ptr
, bytecount
);
3281 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3284 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3287 ret
= -TARGET_ENOSYS
;
3293 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3294 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3296 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3297 struct target_modify_ldt_ldt_s ldt_info
;
3298 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3299 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3300 int seg_not_present
, useable
, lm
;
3301 uint32_t *lp
, entry_1
, entry_2
;
3304 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3305 if (!target_ldt_info
)
3306 return -TARGET_EFAULT
;
3307 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3308 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3309 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3310 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3311 if (ldt_info
.entry_number
== -1) {
3312 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3313 if (gdt_table
[i
] == 0) {
3314 ldt_info
.entry_number
= i
;
3315 target_ldt_info
->entry_number
= tswap32(i
);
3320 unlock_user_struct(target_ldt_info
, ptr
, 1);
3322 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3323 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3324 return -TARGET_EINVAL
;
3325 seg_32bit
= ldt_info
.flags
& 1;
3326 contents
= (ldt_info
.flags
>> 1) & 3;
3327 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3328 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3329 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3330 useable
= (ldt_info
.flags
>> 6) & 1;
3334 lm
= (ldt_info
.flags
>> 7) & 1;
3337 if (contents
== 3) {
3338 if (seg_not_present
== 0)
3339 return -TARGET_EINVAL
;
3342 /* NOTE: same code as Linux kernel */
3343 /* Allow LDTs to be cleared by the user. */
3344 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3345 if ((contents
== 0 &&
3346 read_exec_only
== 1 &&
3348 limit_in_pages
== 0 &&
3349 seg_not_present
== 1 &&
3357 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3358 (ldt_info
.limit
& 0x0ffff);
3359 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3360 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3361 (ldt_info
.limit
& 0xf0000) |
3362 ((read_exec_only
^ 1) << 9) |
3364 ((seg_not_present
^ 1) << 15) |
3366 (limit_in_pages
<< 23) |
3371 /* Install the new entry ... */
3373 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3374 lp
[0] = tswap32(entry_1
);
3375 lp
[1] = tswap32(entry_2
);
3379 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3381 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3382 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3383 uint32_t base_addr
, limit
, flags
;
3384 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3385 int seg_not_present
, useable
, lm
;
3386 uint32_t *lp
, entry_1
, entry_2
;
3388 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3389 if (!target_ldt_info
)
3390 return -TARGET_EFAULT
;
3391 idx
= tswap32(target_ldt_info
->entry_number
);
3392 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3393 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3394 unlock_user_struct(target_ldt_info
, ptr
, 1);
3395 return -TARGET_EINVAL
;
3397 lp
= (uint32_t *)(gdt_table
+ idx
);
3398 entry_1
= tswap32(lp
[0]);
3399 entry_2
= tswap32(lp
[1]);
3401 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3402 contents
= (entry_2
>> 10) & 3;
3403 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3404 seg_32bit
= (entry_2
>> 22) & 1;
3405 limit_in_pages
= (entry_2
>> 23) & 1;
3406 useable
= (entry_2
>> 20) & 1;
3410 lm
= (entry_2
>> 21) & 1;
3412 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3413 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3414 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3415 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3416 base_addr
= (entry_1
>> 16) |
3417 (entry_2
& 0xff000000) |
3418 ((entry_2
& 0xff) << 16);
3419 target_ldt_info
->base_addr
= tswapl(base_addr
);
3420 target_ldt_info
->limit
= tswap32(limit
);
3421 target_ldt_info
->flags
= tswap32(flags
);
3422 unlock_user_struct(target_ldt_info
, ptr
, 1);
3425 #endif /* TARGET_I386 && TARGET_ABI32 */
3427 #ifndef TARGET_ABI32
3428 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3435 case TARGET_ARCH_SET_GS
:
3436 case TARGET_ARCH_SET_FS
:
3437 if (code
== TARGET_ARCH_SET_GS
)
3441 cpu_x86_load_seg(env
, idx
, 0);
3442 env
->segs
[idx
].base
= addr
;
3444 case TARGET_ARCH_GET_GS
:
3445 case TARGET_ARCH_GET_FS
:
3446 if (code
== TARGET_ARCH_GET_GS
)
3450 val
= env
->segs
[idx
].base
;
3451 if (put_user(val
, addr
, abi_ulong
))
3452 return -TARGET_EFAULT
;
3455 ret
= -TARGET_EINVAL
;
3462 #endif /* defined(TARGET_I386) */
3464 #if defined(CONFIG_USE_NPTL)
3466 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3468 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3471 pthread_mutex_t mutex
;
3472 pthread_cond_t cond
;
3475 abi_ulong child_tidptr
;
3476 abi_ulong parent_tidptr
;
3480 static void *clone_func(void *arg
)
3482 new_thread_info
*info
= arg
;
3488 ts
= (TaskState
*)thread_env
->opaque
;
3489 info
->tid
= gettid();
3490 env
->host_tid
= info
->tid
;
3492 if (info
->child_tidptr
)
3493 put_user_u32(info
->tid
, info
->child_tidptr
);
3494 if (info
->parent_tidptr
)
3495 put_user_u32(info
->tid
, info
->parent_tidptr
);
3496 /* Enable signals. */
3497 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3498 /* Signal to the parent that we're ready. */
3499 pthread_mutex_lock(&info
->mutex
);
3500 pthread_cond_broadcast(&info
->cond
);
3501 pthread_mutex_unlock(&info
->mutex
);
3502 /* Wait until the parent has finshed initializing the tls state. */
3503 pthread_mutex_lock(&clone_lock
);
3504 pthread_mutex_unlock(&clone_lock
);
3510 /* this stack is the equivalent of the kernel stack associated with a
3512 #define NEW_STACK_SIZE 8192
3514 static int clone_func(void *arg
)
3516 CPUState
*env
= arg
;
3523 /* do_fork() Must return host values and target errnos (unlike most
3524 do_*() functions). */
3525 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3526 abi_ulong parent_tidptr
, target_ulong newtls
,
3527 abi_ulong child_tidptr
)
3533 #if defined(CONFIG_USE_NPTL)
3534 unsigned int nptl_flags
;
3538 /* Emulate vfork() with fork() */
3539 if (flags
& CLONE_VFORK
)
3540 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3542 if (flags
& CLONE_VM
) {
3543 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3544 #if defined(CONFIG_USE_NPTL)
3545 new_thread_info info
;
3546 pthread_attr_t attr
;
3548 ts
= qemu_mallocz(sizeof(TaskState
) + NEW_STACK_SIZE
);
3549 init_task_state(ts
);
3550 new_stack
= ts
->stack
;
3551 /* we create a new CPU instance. */
3552 new_env
= cpu_copy(env
);
3553 /* Init regs that differ from the parent. */
3554 cpu_clone_regs(new_env
, newsp
);
3555 new_env
->opaque
= ts
;
3556 ts
->bprm
= parent_ts
->bprm
;
3557 ts
->info
= parent_ts
->info
;
3558 #if defined(CONFIG_USE_NPTL)
3560 flags
&= ~CLONE_NPTL_FLAGS2
;
3562 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3563 ts
->child_tidptr
= child_tidptr
;
3566 if (nptl_flags
& CLONE_SETTLS
)
3567 cpu_set_tls (new_env
, newtls
);
3569 /* Grab a mutex so that thread setup appears atomic. */
3570 pthread_mutex_lock(&clone_lock
);
3572 memset(&info
, 0, sizeof(info
));
3573 pthread_mutex_init(&info
.mutex
, NULL
);
3574 pthread_mutex_lock(&info
.mutex
);
3575 pthread_cond_init(&info
.cond
, NULL
);
3577 if (nptl_flags
& CLONE_CHILD_SETTID
)
3578 info
.child_tidptr
= child_tidptr
;
3579 if (nptl_flags
& CLONE_PARENT_SETTID
)
3580 info
.parent_tidptr
= parent_tidptr
;
3582 ret
= pthread_attr_init(&attr
);
3583 ret
= pthread_attr_setstack(&attr
, new_stack
, NEW_STACK_SIZE
);
3584 /* It is not safe to deliver signals until the child has finished
3585 initializing, so temporarily block all signals. */
3586 sigfillset(&sigmask
);
3587 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3589 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3590 /* TODO: Free new CPU state if thread creation failed. */
3592 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3593 pthread_attr_destroy(&attr
);
3595 /* Wait for the child to initialize. */
3596 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3598 if (flags
& CLONE_PARENT_SETTID
)
3599 put_user_u32(ret
, parent_tidptr
);
3603 pthread_mutex_unlock(&info
.mutex
);
3604 pthread_cond_destroy(&info
.cond
);
3605 pthread_mutex_destroy(&info
.mutex
);
3606 pthread_mutex_unlock(&clone_lock
);
3608 if (flags
& CLONE_NPTL_FLAGS2
)
3610 /* This is probably going to die very quickly, but do it anyway. */
3612 ret
= __clone2(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3614 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3618 /* if no CLONE_VM, we consider it is a fork */
3619 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3624 /* Child Process. */
3625 cpu_clone_regs(env
, newsp
);
3627 #if defined(CONFIG_USE_NPTL)
3628 /* There is a race condition here. The parent process could
3629 theoretically read the TID in the child process before the child
3630 tid is set. This would require using either ptrace
3631 (not implemented) or having *_tidptr to point at a shared memory
3632 mapping. We can't repeat the spinlock hack used above because
3633 the child process gets its own copy of the lock. */
3634 if (flags
& CLONE_CHILD_SETTID
)
3635 put_user_u32(gettid(), child_tidptr
);
3636 if (flags
& CLONE_PARENT_SETTID
)
3637 put_user_u32(gettid(), parent_tidptr
);
3638 ts
= (TaskState
*)env
->opaque
;
3639 if (flags
& CLONE_SETTLS
)
3640 cpu_set_tls (env
, newtls
);
3641 if (flags
& CLONE_CHILD_CLEARTID
)
3642 ts
->child_tidptr
= child_tidptr
;
3651 /* warning : doesn't handle linux specific flags... */
3652 static int target_to_host_fcntl_cmd(int cmd
)
3655 case TARGET_F_DUPFD
:
3656 case TARGET_F_GETFD
:
3657 case TARGET_F_SETFD
:
3658 case TARGET_F_GETFL
:
3659 case TARGET_F_SETFL
:
3661 case TARGET_F_GETLK
:
3663 case TARGET_F_SETLK
:
3665 case TARGET_F_SETLKW
:
3667 case TARGET_F_GETOWN
:
3669 case TARGET_F_SETOWN
:
3671 case TARGET_F_GETSIG
:
3673 case TARGET_F_SETSIG
:
3675 #if TARGET_ABI_BITS == 32
3676 case TARGET_F_GETLK64
:
3678 case TARGET_F_SETLK64
:
3680 case TARGET_F_SETLKW64
:
3684 return -TARGET_EINVAL
;
3686 return -TARGET_EINVAL
;
3689 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3692 struct target_flock
*target_fl
;
3693 struct flock64 fl64
;
3694 struct target_flock64
*target_fl64
;
3696 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
3698 if (host_cmd
== -TARGET_EINVAL
)
3702 case TARGET_F_GETLK
:
3703 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3704 return -TARGET_EFAULT
;
3705 fl
.l_type
= tswap16(target_fl
->l_type
);
3706 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3707 fl
.l_start
= tswapl(target_fl
->l_start
);
3708 fl
.l_len
= tswapl(target_fl
->l_len
);
3709 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3710 unlock_user_struct(target_fl
, arg
, 0);
3711 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3713 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3714 return -TARGET_EFAULT
;
3715 target_fl
->l_type
= tswap16(fl
.l_type
);
3716 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3717 target_fl
->l_start
= tswapl(fl
.l_start
);
3718 target_fl
->l_len
= tswapl(fl
.l_len
);
3719 target_fl
->l_pid
= tswapl(fl
.l_pid
);
3720 unlock_user_struct(target_fl
, arg
, 1);
3724 case TARGET_F_SETLK
:
3725 case TARGET_F_SETLKW
:
3726 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3727 return -TARGET_EFAULT
;
3728 fl
.l_type
= tswap16(target_fl
->l_type
);
3729 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3730 fl
.l_start
= tswapl(target_fl
->l_start
);
3731 fl
.l_len
= tswapl(target_fl
->l_len
);
3732 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3733 unlock_user_struct(target_fl
, arg
, 0);
3734 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3737 case TARGET_F_GETLK64
:
3738 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3739 return -TARGET_EFAULT
;
3740 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3741 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3742 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3743 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3744 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3745 unlock_user_struct(target_fl64
, arg
, 0);
3746 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3748 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3749 return -TARGET_EFAULT
;
3750 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3751 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3752 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3753 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3754 target_fl64
->l_pid
= tswapl(fl64
.l_pid
);
3755 unlock_user_struct(target_fl64
, arg
, 1);
3758 case TARGET_F_SETLK64
:
3759 case TARGET_F_SETLKW64
:
3760 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3761 return -TARGET_EFAULT
;
3762 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3763 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3764 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3765 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3766 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3767 unlock_user_struct(target_fl64
, arg
, 0);
3768 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3771 case TARGET_F_GETFL
:
3772 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3774 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
3778 case TARGET_F_SETFL
:
3779 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
3782 case TARGET_F_SETOWN
:
3783 case TARGET_F_GETOWN
:
3784 case TARGET_F_SETSIG
:
3785 case TARGET_F_GETSIG
:
3786 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3790 ret
= get_errno(fcntl(fd
, cmd
, arg
));
/* Clamp a full-width uid to the 16-bit range used by the legacy *16
 * syscalls; ids that don't fit are reported as the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

/* Same clamping for group ids. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit uid to full width; the 16-bit value -1 (0xffff) means
 * "unchanged" and must stay -1 after widening. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

/* Same widening rule for group ids. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
3830 #endif /* USE_UID16 */
/* One-time setup for the syscall emulation layer: registers struct
 * layouts for ioctl argument thunking, patches target ioctl numbers
 * whose size field was left as all-ones, and builds the errno
 * translation table. */
3832 void syscall_init(void)
3835 const argtype
*arg_type
;
/* Pull in syscall_types.h with STRUCT()/STRUCT_SPECIAL() expanding to
 * thunk registration calls, so every listed struct layout becomes
 * known to the ioctl conversion machinery. */
3839 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3840 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3841 #include "syscall_types.h"
3843 #undef STRUCT_SPECIAL
3845 /* we patch the ioctl size if necessary. We rely on the fact that
3846 no ioctl has all the bits at '1' in the size field */
/* NOTE(review): `ie` presumably walks the ioctl entries table; its
 * initialization is outside this excerpt -- confirm before editing. */
3848 while (ie
->target_cmd
!= 0) {
/* An all-ones size field marks an entry whose real size must be
 * computed from the thunk type description. */
3849 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
3850 TARGET_IOC_SIZEMASK
) {
3851 arg_type
= ie
->arg_type
;
/* Only a pointer argument carries a payload whose size can be patched. */
3852 if (arg_type
[0] != TYPE_PTR
) {
3853 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
3858 size
= thunk_type_size(arg_type
, 0);
/* Splice the computed size into the command number's size field. */
3859 ie
->target_cmd
= (ie
->target_cmd
&
3860 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
3861 (size
<< TARGET_IOC_SIZESHIFT
);
3864 /* Build target_to_host_errno_table[] table from
3865 * host_to_target_errno_table[]. */
3866 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
3867 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
3869 /* automatic consistency check if same arch */
3870 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3871 (defined(__x86_64__) && defined(TARGET_X86_64))
/* When target and host ABIs are identical, the ioctl numbers must agree;
 * mismatches indicate a wrong table entry. */
3872 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
3873 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3874 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the two 32-bit halves a 32-bit
 * ABI passes in separate registers; which half holds the high word
 * depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On a 64-bit ABI the offset already arrives whole in the first
 * argument; the second is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64: the 64-bit length arrives split across two
 * abi_long registers and is recombined with target_offset64().
 * Returns 0 or -TARGET_E* via get_errno(). */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    /* ARM EABI aligns the 64-bit pair to an even register, so the
     * halves shift up by one argument slot. */
    if (((CPUARMState *)cpu_env)->eabi) {
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64: like target_truncate64() but operating on an
 * already-open descriptor instead of a path.
 * Returns 0 or -TARGET_E* via get_errno(). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    /* ARM EABI aligns the 64-bit pair to an even register, so the
     * halves shift up by one argument slot. */
    if (((CPUARMState *)cpu_env)->eabi) {
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
3931 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
3932 abi_ulong target_addr
)
3934 struct target_timespec
*target_ts
;
3936 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
3937 return -TARGET_EFAULT
;
3938 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
3939 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
3940 unlock_user_struct(target_ts
, target_addr
, 0);
3944 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
3945 struct timespec
*host_ts
)
3947 struct target_timespec
*target_ts
;
3949 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
3950 return -TARGET_EFAULT
;
3951 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
3952 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
3953 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Write *HOST_ST into the guest's 64-bit stat buffer at TARGET_ADDR,
 * picking the structure layout that matches the target ABI.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is
 * unwritable. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* ARM EABI has its own padded stat64 layout. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields don't leak host data. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets also carry the inode in a second, narrower field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_LONG_BITS == 64
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
/* Emulate the futex syscall.  Only VAL/VAL3 are byte-swapped, and only
 * for the sub-operations where the kernel compares them against guest
 * memory.  Returns the host result or -TARGET_ENOSYS for unknown ops. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* VAL is compared against the guest word, so present it in
         * guest byte order. */
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* The terminating signal lives in the low 7 bits; remap just it. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* The stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4089 int get_osversion(void)
4091 static int osversion
;
4092 struct new_utsname buf
;
4097 if (qemu_uname_release
&& *qemu_uname_release
) {
4098 s
= qemu_uname_release
;
4100 if (sys_uname(&buf
))
4105 for (i
= 0; i
< 3; i
++) {
4107 while (*s
>= '0' && *s
<= '9') {
4112 tmp
= (tmp
<< 8) + n
;
4120 /* do_syscall() should always have a single exit point at the end so
4121 that actions, such as logging of syscall results, can be performed.
4122 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4123 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4124 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4125 abi_long arg5
, abi_long arg6
)
4133 gemu_log("syscall %d", num
);
4136 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4139 case TARGET_NR_exit
:
4140 #ifdef CONFIG_USE_NPTL
4141 /* In old applications this may be used to implement _exit(2).
4142 However in threaded applictions it is used for thread termination,
4143 and _exit_group is used for application termination.
4144 Do thread termination if we have more then one thread. */
4145 /* FIXME: This probably breaks if a signal arrives. We should probably
4146 be disabling signals. */
4147 if (first_cpu
->next_cpu
) {
4155 while (p
&& p
!= (CPUState
*)cpu_env
) {
4156 lastp
= &p
->next_cpu
;
4159 /* If we didn't find the CPU for this thread then something is
4163 /* Remove the CPU from the list. */
4164 *lastp
= p
->next_cpu
;
4166 ts
= ((CPUState
*)cpu_env
)->opaque
;
4167 if (ts
->child_tidptr
) {
4168 put_user_u32(0, ts
->child_tidptr
);
4169 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4172 /* TODO: Free CPU state. */
4179 gdb_exit(cpu_env
, arg1
);
4181 ret
= 0; /* avoid warning */
4183 case TARGET_NR_read
:
4187 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4189 ret
= get_errno(read(arg1
, p
, arg3
));
4190 unlock_user(p
, arg2
, ret
);
4193 case TARGET_NR_write
:
4194 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4196 ret
= get_errno(write(arg1
, p
, arg3
));
4197 unlock_user(p
, arg2
, 0);
4199 case TARGET_NR_open
:
4200 if (!(p
= lock_user_string(arg1
)))
4202 ret
= get_errno(open(path(p
),
4203 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4205 unlock_user(p
, arg1
, 0);
4207 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4208 case TARGET_NR_openat
:
4209 if (!(p
= lock_user_string(arg2
)))
4211 ret
= get_errno(sys_openat(arg1
,
4213 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4215 unlock_user(p
, arg2
, 0);
4218 case TARGET_NR_close
:
4219 ret
= get_errno(close(arg1
));
4224 case TARGET_NR_fork
:
4225 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4227 #ifdef TARGET_NR_waitpid
4228 case TARGET_NR_waitpid
:
4231 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4232 if (!is_error(ret
) && arg2
4233 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4238 #ifdef TARGET_NR_waitid
4239 case TARGET_NR_waitid
:
4243 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4244 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4245 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4247 host_to_target_siginfo(p
, &info
);
4248 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4253 #ifdef TARGET_NR_creat /* not on alpha */
4254 case TARGET_NR_creat
:
4255 if (!(p
= lock_user_string(arg1
)))
4257 ret
= get_errno(creat(p
, arg2
));
4258 unlock_user(p
, arg1
, 0);
4261 case TARGET_NR_link
:
4264 p
= lock_user_string(arg1
);
4265 p2
= lock_user_string(arg2
);
4267 ret
= -TARGET_EFAULT
;
4269 ret
= get_errno(link(p
, p2
));
4270 unlock_user(p2
, arg2
, 0);
4271 unlock_user(p
, arg1
, 0);
4274 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4275 case TARGET_NR_linkat
:
4280 p
= lock_user_string(arg2
);
4281 p2
= lock_user_string(arg4
);
4283 ret
= -TARGET_EFAULT
;
4285 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4286 unlock_user(p
, arg2
, 0);
4287 unlock_user(p2
, arg4
, 0);
4291 case TARGET_NR_unlink
:
4292 if (!(p
= lock_user_string(arg1
)))
4294 ret
= get_errno(unlink(p
));
4295 unlock_user(p
, arg1
, 0);
4297 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4298 case TARGET_NR_unlinkat
:
4299 if (!(p
= lock_user_string(arg2
)))
4301 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4302 unlock_user(p
, arg2
, 0);
4305 case TARGET_NR_execve
:
4307 char **argp
, **envp
;
4310 abi_ulong guest_argp
;
4311 abi_ulong guest_envp
;
4317 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4318 if (get_user_ual(addr
, gp
))
4326 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4327 if (get_user_ual(addr
, gp
))
4334 argp
= alloca((argc
+ 1) * sizeof(void *));
4335 envp
= alloca((envc
+ 1) * sizeof(void *));
4337 for (gp
= guest_argp
, q
= argp
; gp
;
4338 gp
+= sizeof(abi_ulong
), q
++) {
4339 if (get_user_ual(addr
, gp
))
4343 if (!(*q
= lock_user_string(addr
)))
4348 for (gp
= guest_envp
, q
= envp
; gp
;
4349 gp
+= sizeof(abi_ulong
), q
++) {
4350 if (get_user_ual(addr
, gp
))
4354 if (!(*q
= lock_user_string(addr
)))
4359 if (!(p
= lock_user_string(arg1
)))
4361 ret
= get_errno(execve(p
, argp
, envp
));
4362 unlock_user(p
, arg1
, 0);
4367 ret
= -TARGET_EFAULT
;
4370 for (gp
= guest_argp
, q
= argp
; *q
;
4371 gp
+= sizeof(abi_ulong
), q
++) {
4372 if (get_user_ual(addr
, gp
)
4375 unlock_user(*q
, addr
, 0);
4377 for (gp
= guest_envp
, q
= envp
; *q
;
4378 gp
+= sizeof(abi_ulong
), q
++) {
4379 if (get_user_ual(addr
, gp
)
4382 unlock_user(*q
, addr
, 0);
4386 case TARGET_NR_chdir
:
4387 if (!(p
= lock_user_string(arg1
)))
4389 ret
= get_errno(chdir(p
));
4390 unlock_user(p
, arg1
, 0);
4392 #ifdef TARGET_NR_time
4393 case TARGET_NR_time
:
4396 ret
= get_errno(time(&host_time
));
4399 && put_user_sal(host_time
, arg1
))
4404 case TARGET_NR_mknod
:
4405 if (!(p
= lock_user_string(arg1
)))
4407 ret
= get_errno(mknod(p
, arg2
, arg3
));
4408 unlock_user(p
, arg1
, 0);
4410 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4411 case TARGET_NR_mknodat
:
4412 if (!(p
= lock_user_string(arg2
)))
4414 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4415 unlock_user(p
, arg2
, 0);
4418 case TARGET_NR_chmod
:
4419 if (!(p
= lock_user_string(arg1
)))
4421 ret
= get_errno(chmod(p
, arg2
));
4422 unlock_user(p
, arg1
, 0);
4424 #ifdef TARGET_NR_break
4425 case TARGET_NR_break
:
4428 #ifdef TARGET_NR_oldstat
4429 case TARGET_NR_oldstat
:
4432 case TARGET_NR_lseek
:
4433 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4435 #ifdef TARGET_NR_getxpid
4436 case TARGET_NR_getxpid
:
4438 case TARGET_NR_getpid
:
4440 ret
= get_errno(getpid());
4442 case TARGET_NR_mount
:
4444 /* need to look at the data field */
4446 p
= lock_user_string(arg1
);
4447 p2
= lock_user_string(arg2
);
4448 p3
= lock_user_string(arg3
);
4449 if (!p
|| !p2
|| !p3
)
4450 ret
= -TARGET_EFAULT
;
4452 /* FIXME - arg5 should be locked, but it isn't clear how to
4453 * do that since it's not guaranteed to be a NULL-terminated
4456 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4457 unlock_user(p
, arg1
, 0);
4458 unlock_user(p2
, arg2
, 0);
4459 unlock_user(p3
, arg3
, 0);
4462 #ifdef TARGET_NR_umount
4463 case TARGET_NR_umount
:
4464 if (!(p
= lock_user_string(arg1
)))
4466 ret
= get_errno(umount(p
));
4467 unlock_user(p
, arg1
, 0);
4470 #ifdef TARGET_NR_stime /* not on alpha */
4471 case TARGET_NR_stime
:
4474 if (get_user_sal(host_time
, arg1
))
4476 ret
= get_errno(stime(&host_time
));
4480 case TARGET_NR_ptrace
:
4482 #ifdef TARGET_NR_alarm /* not on alpha */
4483 case TARGET_NR_alarm
:
4487 #ifdef TARGET_NR_oldfstat
4488 case TARGET_NR_oldfstat
:
4491 #ifdef TARGET_NR_pause /* not on alpha */
4492 case TARGET_NR_pause
:
4493 ret
= get_errno(pause());
4496 #ifdef TARGET_NR_utime
4497 case TARGET_NR_utime
:
4499 struct utimbuf tbuf
, *host_tbuf
;
4500 struct target_utimbuf
*target_tbuf
;
4502 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4504 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4505 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4506 unlock_user_struct(target_tbuf
, arg2
, 0);
4511 if (!(p
= lock_user_string(arg1
)))
4513 ret
= get_errno(utime(p
, host_tbuf
));
4514 unlock_user(p
, arg1
, 0);
4518 case TARGET_NR_utimes
:
4520 struct timeval
*tvp
, tv
[2];
4522 if (copy_from_user_timeval(&tv
[0], arg2
)
4523 || copy_from_user_timeval(&tv
[1],
4524 arg2
+ sizeof(struct target_timeval
)))
4530 if (!(p
= lock_user_string(arg1
)))
4532 ret
= get_errno(utimes(p
, tvp
));
4533 unlock_user(p
, arg1
, 0);
4536 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4537 case TARGET_NR_futimesat
:
4539 struct timeval
*tvp
, tv
[2];
4541 if (copy_from_user_timeval(&tv
[0], arg3
)
4542 || copy_from_user_timeval(&tv
[1],
4543 arg3
+ sizeof(struct target_timeval
)))
4549 if (!(p
= lock_user_string(arg2
)))
4551 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
4552 unlock_user(p
, arg2
, 0);
4556 #ifdef TARGET_NR_stty
4557 case TARGET_NR_stty
:
4560 #ifdef TARGET_NR_gtty
4561 case TARGET_NR_gtty
:
4564 case TARGET_NR_access
:
4565 if (!(p
= lock_user_string(arg1
)))
4567 ret
= get_errno(access(path(p
), arg2
));
4568 unlock_user(p
, arg1
, 0);
4570 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4571 case TARGET_NR_faccessat
:
4572 if (!(p
= lock_user_string(arg2
)))
4574 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
4575 unlock_user(p
, arg2
, 0);
4578 #ifdef TARGET_NR_nice /* not on alpha */
4579 case TARGET_NR_nice
:
4580 ret
= get_errno(nice(arg1
));
4583 #ifdef TARGET_NR_ftime
4584 case TARGET_NR_ftime
:
4587 case TARGET_NR_sync
:
4591 case TARGET_NR_kill
:
4592 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4594 case TARGET_NR_rename
:
4597 p
= lock_user_string(arg1
);
4598 p2
= lock_user_string(arg2
);
4600 ret
= -TARGET_EFAULT
;
4602 ret
= get_errno(rename(p
, p2
));
4603 unlock_user(p2
, arg2
, 0);
4604 unlock_user(p
, arg1
, 0);
4607 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4608 case TARGET_NR_renameat
:
4611 p
= lock_user_string(arg2
);
4612 p2
= lock_user_string(arg4
);
4614 ret
= -TARGET_EFAULT
;
4616 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
4617 unlock_user(p2
, arg4
, 0);
4618 unlock_user(p
, arg2
, 0);
4622 case TARGET_NR_mkdir
:
4623 if (!(p
= lock_user_string(arg1
)))
4625 ret
= get_errno(mkdir(p
, arg2
));
4626 unlock_user(p
, arg1
, 0);
4628 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4629 case TARGET_NR_mkdirat
:
4630 if (!(p
= lock_user_string(arg2
)))
4632 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4633 unlock_user(p
, arg2
, 0);
4636 case TARGET_NR_rmdir
:
4637 if (!(p
= lock_user_string(arg1
)))
4639 ret
= get_errno(rmdir(p
));
4640 unlock_user(p
, arg1
, 0);
4643 ret
= get_errno(dup(arg1
));
4645 case TARGET_NR_pipe
:
4646 ret
= do_pipe(cpu_env
, arg1
, 0);
4648 #ifdef TARGET_NR_pipe2
4649 case TARGET_NR_pipe2
:
4650 ret
= do_pipe(cpu_env
, arg1
, arg2
);
4653 case TARGET_NR_times
:
4655 struct target_tms
*tmsp
;
4657 ret
= get_errno(times(&tms
));
4659 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4662 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4663 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4664 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4665 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4668 ret
= host_to_target_clock_t(ret
);
4671 #ifdef TARGET_NR_prof
4672 case TARGET_NR_prof
:
4675 #ifdef TARGET_NR_signal
4676 case TARGET_NR_signal
:
4679 case TARGET_NR_acct
:
4681 ret
= get_errno(acct(NULL
));
4683 if (!(p
= lock_user_string(arg1
)))
4685 ret
= get_errno(acct(path(p
)));
4686 unlock_user(p
, arg1
, 0);
4689 #ifdef TARGET_NR_umount2 /* not on alpha */
4690 case TARGET_NR_umount2
:
4691 if (!(p
= lock_user_string(arg1
)))
4693 ret
= get_errno(umount2(p
, arg2
));
4694 unlock_user(p
, arg1
, 0);
4697 #ifdef TARGET_NR_lock
4698 case TARGET_NR_lock
:
4701 case TARGET_NR_ioctl
:
4702 ret
= do_ioctl(arg1
, arg2
, arg3
);
4704 case TARGET_NR_fcntl
:
4705 ret
= do_fcntl(arg1
, arg2
, arg3
);
4707 #ifdef TARGET_NR_mpx
4711 case TARGET_NR_setpgid
:
4712 ret
= get_errno(setpgid(arg1
, arg2
));
4714 #ifdef TARGET_NR_ulimit
4715 case TARGET_NR_ulimit
:
4718 #ifdef TARGET_NR_oldolduname
4719 case TARGET_NR_oldolduname
:
4722 case TARGET_NR_umask
:
4723 ret
= get_errno(umask(arg1
));
4725 case TARGET_NR_chroot
:
4726 if (!(p
= lock_user_string(arg1
)))
4728 ret
= get_errno(chroot(p
));
4729 unlock_user(p
, arg1
, 0);
4731 case TARGET_NR_ustat
:
4733 case TARGET_NR_dup2
:
4734 ret
= get_errno(dup2(arg1
, arg2
));
4736 #ifdef TARGET_NR_getppid /* not on alpha */
4737 case TARGET_NR_getppid
:
4738 ret
= get_errno(getppid());
4741 case TARGET_NR_getpgrp
:
4742 ret
= get_errno(getpgrp());
4744 case TARGET_NR_setsid
:
4745 ret
= get_errno(setsid());
4747 #ifdef TARGET_NR_sigaction
4748 case TARGET_NR_sigaction
:
4750 #if !defined(TARGET_MIPS)
4751 struct target_old_sigaction
*old_act
;
4752 struct target_sigaction act
, oact
, *pact
;
4754 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4756 act
._sa_handler
= old_act
->_sa_handler
;
4757 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4758 act
.sa_flags
= old_act
->sa_flags
;
4759 act
.sa_restorer
= old_act
->sa_restorer
;
4760 unlock_user_struct(old_act
, arg2
, 0);
4765 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4766 if (!is_error(ret
) && arg3
) {
4767 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4769 old_act
->_sa_handler
= oact
._sa_handler
;
4770 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4771 old_act
->sa_flags
= oact
.sa_flags
;
4772 old_act
->sa_restorer
= oact
.sa_restorer
;
4773 unlock_user_struct(old_act
, arg3
, 1);
4776 struct target_sigaction act
, oact
, *pact
, *old_act
;
4779 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4781 act
._sa_handler
= old_act
->_sa_handler
;
4782 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4783 act
.sa_flags
= old_act
->sa_flags
;
4784 unlock_user_struct(old_act
, arg2
, 0);
4790 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4792 if (!is_error(ret
) && arg3
) {
4793 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4795 old_act
->_sa_handler
= oact
._sa_handler
;
4796 old_act
->sa_flags
= oact
.sa_flags
;
4797 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4798 old_act
->sa_mask
.sig
[1] = 0;
4799 old_act
->sa_mask
.sig
[2] = 0;
4800 old_act
->sa_mask
.sig
[3] = 0;
4801 unlock_user_struct(old_act
, arg3
, 1);
4807 case TARGET_NR_rt_sigaction
:
4809 struct target_sigaction
*act
;
4810 struct target_sigaction
*oact
;
4813 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4818 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4819 ret
= -TARGET_EFAULT
;
4820 goto rt_sigaction_fail
;
4824 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4827 unlock_user_struct(act
, arg2
, 0);
4829 unlock_user_struct(oact
, arg3
, 1);
4832 #ifdef TARGET_NR_sgetmask /* not on alpha */
4833 case TARGET_NR_sgetmask
:
4836 abi_ulong target_set
;
4837 sigprocmask(0, NULL
, &cur_set
);
4838 host_to_target_old_sigset(&target_set
, &cur_set
);
4843 #ifdef TARGET_NR_ssetmask /* not on alpha */
4844 case TARGET_NR_ssetmask
:
4846 sigset_t set
, oset
, cur_set
;
4847 abi_ulong target_set
= arg1
;
4848 sigprocmask(0, NULL
, &cur_set
);
4849 target_to_host_old_sigset(&set
, &target_set
);
4850 sigorset(&set
, &set
, &cur_set
);
4851 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4852 host_to_target_old_sigset(&target_set
, &oset
);
4857 #ifdef TARGET_NR_sigprocmask
4858 case TARGET_NR_sigprocmask
:
4861 sigset_t set
, oldset
, *set_ptr
;
4865 case TARGET_SIG_BLOCK
:
4868 case TARGET_SIG_UNBLOCK
:
4871 case TARGET_SIG_SETMASK
:
4875 ret
= -TARGET_EINVAL
;
4878 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4880 target_to_host_old_sigset(&set
, p
);
4881 unlock_user(p
, arg2
, 0);
4887 ret
= get_errno(sigprocmask(arg1
, set_ptr
, &oldset
));
4888 if (!is_error(ret
) && arg3
) {
4889 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4891 host_to_target_old_sigset(p
, &oldset
);
4892 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4897 case TARGET_NR_rt_sigprocmask
:
4900 sigset_t set
, oldset
, *set_ptr
;
4904 case TARGET_SIG_BLOCK
:
4907 case TARGET_SIG_UNBLOCK
:
4910 case TARGET_SIG_SETMASK
:
4914 ret
= -TARGET_EINVAL
;
4917 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4919 target_to_host_sigset(&set
, p
);
4920 unlock_user(p
, arg2
, 0);
4926 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
4927 if (!is_error(ret
) && arg3
) {
4928 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4930 host_to_target_sigset(p
, &oldset
);
4931 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4935 #ifdef TARGET_NR_sigpending
4936 case TARGET_NR_sigpending
:
4939 ret
= get_errno(sigpending(&set
));
4940 if (!is_error(ret
)) {
4941 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4943 host_to_target_old_sigset(p
, &set
);
4944 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4949 case TARGET_NR_rt_sigpending
:
4952 ret
= get_errno(sigpending(&set
));
4953 if (!is_error(ret
)) {
4954 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4956 host_to_target_sigset(p
, &set
);
4957 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4961 #ifdef TARGET_NR_sigsuspend
4962 case TARGET_NR_sigsuspend
:
4965 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4967 target_to_host_old_sigset(&set
, p
);
4968 unlock_user(p
, arg1
, 0);
4969 ret
= get_errno(sigsuspend(&set
));
4973 case TARGET_NR_rt_sigsuspend
:
4976 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4978 target_to_host_sigset(&set
, p
);
4979 unlock_user(p
, arg1
, 0);
4980 ret
= get_errno(sigsuspend(&set
));
4983 case TARGET_NR_rt_sigtimedwait
:
4986 struct timespec uts
, *puts
;
4989 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4991 target_to_host_sigset(&set
, p
);
4992 unlock_user(p
, arg1
, 0);
4995 target_to_host_timespec(puts
, arg3
);
4999 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5000 if (!is_error(ret
) && arg2
) {
5001 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5003 host_to_target_siginfo(p
, &uinfo
);
5004 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5008 case TARGET_NR_rt_sigqueueinfo
:
5011 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5013 target_to_host_siginfo(&uinfo
, p
);
5014 unlock_user(p
, arg1
, 0);
5015 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5018 #ifdef TARGET_NR_sigreturn
5019 case TARGET_NR_sigreturn
:
5020 /* NOTE: ret is eax, so not transcoding must be done */
5021 ret
= do_sigreturn(cpu_env
);
5024 case TARGET_NR_rt_sigreturn
:
5025 /* NOTE: ret is eax, so not transcoding must be done */
5026 ret
= do_rt_sigreturn(cpu_env
);
5028 case TARGET_NR_sethostname
:
5029 if (!(p
= lock_user_string(arg1
)))
5031 ret
= get_errno(sethostname(p
, arg2
));
5032 unlock_user(p
, arg1
, 0);
5034 case TARGET_NR_setrlimit
:
5036 /* XXX: convert resource ? */
5037 int resource
= arg1
;
5038 struct target_rlimit
*target_rlim
;
5040 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5042 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
5043 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
5044 unlock_user_struct(target_rlim
, arg2
, 0);
5045 ret
= get_errno(setrlimit(resource
, &rlim
));
5048 case TARGET_NR_getrlimit
:
5050 /* XXX: convert resource ? */
5051 int resource
= arg1
;
5052 struct target_rlimit
*target_rlim
;
5055 ret
= get_errno(getrlimit(resource
, &rlim
));
5056 if (!is_error(ret
)) {
5057 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5059 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
5060 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
5061 unlock_user_struct(target_rlim
, arg2
, 1);
5065 case TARGET_NR_getrusage
:
5067 struct rusage rusage
;
5068 ret
= get_errno(getrusage(arg1
, &rusage
));
5069 if (!is_error(ret
)) {
5070 host_to_target_rusage(arg2
, &rusage
);
5074 case TARGET_NR_gettimeofday
:
5077 ret
= get_errno(gettimeofday(&tv
, NULL
));
5078 if (!is_error(ret
)) {
5079 if (copy_to_user_timeval(arg1
, &tv
))
5084 case TARGET_NR_settimeofday
:
5087 if (copy_from_user_timeval(&tv
, arg1
))
5089 ret
= get_errno(settimeofday(&tv
, NULL
));
5092 #ifdef TARGET_NR_select
5093 case TARGET_NR_select
:
5095 struct target_sel_arg_struct
*sel
;
5096 abi_ulong inp
, outp
, exp
, tvp
;
5099 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5101 nsel
= tswapl(sel
->n
);
5102 inp
= tswapl(sel
->inp
);
5103 outp
= tswapl(sel
->outp
);
5104 exp
= tswapl(sel
->exp
);
5105 tvp
= tswapl(sel
->tvp
);
5106 unlock_user_struct(sel
, arg1
, 0);
5107 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5111 case TARGET_NR_symlink
:
5114 p
= lock_user_string(arg1
);
5115 p2
= lock_user_string(arg2
);
5117 ret
= -TARGET_EFAULT
;
5119 ret
= get_errno(symlink(p
, p2
));
5120 unlock_user(p2
, arg2
, 0);
5121 unlock_user(p
, arg1
, 0);
5124 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5125 case TARGET_NR_symlinkat
:
5128 p
= lock_user_string(arg1
);
5129 p2
= lock_user_string(arg3
);
5131 ret
= -TARGET_EFAULT
;
5133 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5134 unlock_user(p2
, arg3
, 0);
5135 unlock_user(p
, arg1
, 0);
5139 #ifdef TARGET_NR_oldlstat
5140 case TARGET_NR_oldlstat
:
5143 case TARGET_NR_readlink
:
5146 p
= lock_user_string(arg1
);
5147 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5149 ret
= -TARGET_EFAULT
;
5151 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5152 char real
[PATH_MAX
];
5153 temp
= realpath(exec_path
,real
);
5154 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5155 snprintf((char *)p2
, arg3
, "%s", real
);
5158 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5160 unlock_user(p2
, arg2
, ret
);
5161 unlock_user(p
, arg1
, 0);
5164 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5165 case TARGET_NR_readlinkat
:
5168 p
= lock_user_string(arg2
);
5169 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5171 ret
= -TARGET_EFAULT
;
5173 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5174 unlock_user(p2
, arg3
, ret
);
5175 unlock_user(p
, arg2
, 0);
5179 #ifdef TARGET_NR_uselib
5180 case TARGET_NR_uselib
:
5183 #ifdef TARGET_NR_swapon
5184 case TARGET_NR_swapon
:
5185 if (!(p
= lock_user_string(arg1
)))
5187 ret
= get_errno(swapon(p
, arg2
));
5188 unlock_user(p
, arg1
, 0);
5191 case TARGET_NR_reboot
:
5193 #ifdef TARGET_NR_readdir
5194 case TARGET_NR_readdir
:
5197 #ifdef TARGET_NR_mmap
5198 case TARGET_NR_mmap
:
5199 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5202 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5203 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5211 unlock_user(v
, arg1
, 0);
5212 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5213 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5217 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5218 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5224 #ifdef TARGET_NR_mmap2
5225 case TARGET_NR_mmap2
:
5227 #define MMAP_SHIFT 12
5229 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5230 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5232 arg6
<< MMAP_SHIFT
));
5235 case TARGET_NR_munmap
:
5236 ret
= get_errno(target_munmap(arg1
, arg2
));
5238 case TARGET_NR_mprotect
:
5239 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5241 #ifdef TARGET_NR_mremap
5242 case TARGET_NR_mremap
:
5243 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5246 /* ??? msync/mlock/munlock are broken for softmmu. */
5247 #ifdef TARGET_NR_msync
5248 case TARGET_NR_msync
:
5249 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5252 #ifdef TARGET_NR_mlock
5253 case TARGET_NR_mlock
:
5254 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5257 #ifdef TARGET_NR_munlock
5258 case TARGET_NR_munlock
:
5259 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5262 #ifdef TARGET_NR_mlockall
5263 case TARGET_NR_mlockall
:
5264 ret
= get_errno(mlockall(arg1
));
5267 #ifdef TARGET_NR_munlockall
5268 case TARGET_NR_munlockall
:
5269 ret
= get_errno(munlockall());
5272 case TARGET_NR_truncate
:
5273 if (!(p
= lock_user_string(arg1
)))
5275 ret
= get_errno(truncate(p
, arg2
));
5276 unlock_user(p
, arg1
, 0);
5278 case TARGET_NR_ftruncate
:
5279 ret
= get_errno(ftruncate(arg1
, arg2
));
5281 case TARGET_NR_fchmod
:
5282 ret
= get_errno(fchmod(arg1
, arg2
));
5284 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5285 case TARGET_NR_fchmodat
:
5286 if (!(p
= lock_user_string(arg2
)))
5288 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5289 unlock_user(p
, arg2
, 0);
5292 case TARGET_NR_getpriority
:
5293 /* libc does special remapping of the return value of
5294 * sys_getpriority() so it's just easiest to call
5295 * sys_getpriority() directly rather than through libc. */
5296 ret
= sys_getpriority(arg1
, arg2
);
5298 case TARGET_NR_setpriority
:
5299 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5301 #ifdef TARGET_NR_profil
5302 case TARGET_NR_profil
:
5305 case TARGET_NR_statfs
:
5306 if (!(p
= lock_user_string(arg1
)))
5308 ret
= get_errno(statfs(path(p
), &stfs
));
5309 unlock_user(p
, arg1
, 0);
5311 if (!is_error(ret
)) {
5312 struct target_statfs
*target_stfs
;
5314 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5316 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5317 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5318 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5319 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5320 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5321 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5322 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5323 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5324 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5325 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5326 unlock_user_struct(target_stfs
, arg2
, 1);
5329 case TARGET_NR_fstatfs
:
5330 ret
= get_errno(fstatfs(arg1
, &stfs
));
5331 goto convert_statfs
;
5332 #ifdef TARGET_NR_statfs64
5333 case TARGET_NR_statfs64
:
5334 if (!(p
= lock_user_string(arg1
)))
5336 ret
= get_errno(statfs(path(p
), &stfs
));
5337 unlock_user(p
, arg1
, 0);
5339 if (!is_error(ret
)) {
5340 struct target_statfs64
*target_stfs
;
5342 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5344 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5345 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5346 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5347 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5348 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5349 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5350 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5351 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5352 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5353 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5354 unlock_user_struct(target_stfs
, arg3
, 1);
5357 case TARGET_NR_fstatfs64
:
5358 ret
= get_errno(fstatfs(arg1
, &stfs
));
5359 goto convert_statfs64
;
5361 #ifdef TARGET_NR_ioperm
5362 case TARGET_NR_ioperm
:
5365 #ifdef TARGET_NR_socketcall
5366 case TARGET_NR_socketcall
:
5367 ret
= do_socketcall(arg1
, arg2
);
5370 #ifdef TARGET_NR_accept
5371 case TARGET_NR_accept
:
5372 ret
= do_accept(arg1
, arg2
, arg3
);
5375 #ifdef TARGET_NR_bind
5376 case TARGET_NR_bind
:
5377 ret
= do_bind(arg1
, arg2
, arg3
);
5380 #ifdef TARGET_NR_connect
5381 case TARGET_NR_connect
:
5382 ret
= do_connect(arg1
, arg2
, arg3
);
5385 #ifdef TARGET_NR_getpeername
5386 case TARGET_NR_getpeername
:
5387 ret
= do_getpeername(arg1
, arg2
, arg3
);
5390 #ifdef TARGET_NR_getsockname
5391 case TARGET_NR_getsockname
:
5392 ret
= do_getsockname(arg1
, arg2
, arg3
);
5395 #ifdef TARGET_NR_getsockopt
5396 case TARGET_NR_getsockopt
:
5397 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5400 #ifdef TARGET_NR_listen
5401 case TARGET_NR_listen
:
5402 ret
= get_errno(listen(arg1
, arg2
));
5405 #ifdef TARGET_NR_recv
5406 case TARGET_NR_recv
:
5407 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5410 #ifdef TARGET_NR_recvfrom
5411 case TARGET_NR_recvfrom
:
5412 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5415 #ifdef TARGET_NR_recvmsg
5416 case TARGET_NR_recvmsg
:
5417 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5420 #ifdef TARGET_NR_send
5421 case TARGET_NR_send
:
5422 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5425 #ifdef TARGET_NR_sendmsg
5426 case TARGET_NR_sendmsg
:
5427 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5430 #ifdef TARGET_NR_sendto
5431 case TARGET_NR_sendto
:
5432 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5435 #ifdef TARGET_NR_shutdown
5436 case TARGET_NR_shutdown
:
5437 ret
= get_errno(shutdown(arg1
, arg2
));
5440 #ifdef TARGET_NR_socket
5441 case TARGET_NR_socket
:
5442 ret
= do_socket(arg1
, arg2
, arg3
);
5445 #ifdef TARGET_NR_socketpair
5446 case TARGET_NR_socketpair
:
5447 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5450 #ifdef TARGET_NR_setsockopt
5451 case TARGET_NR_setsockopt
:
5452 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5456 case TARGET_NR_syslog
:
5457 if (!(p
= lock_user_string(arg2
)))
5459 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5460 unlock_user(p
, arg2
, 0);
5463 case TARGET_NR_setitimer
:
5465 struct itimerval value
, ovalue
, *pvalue
;
5469 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5470 || copy_from_user_timeval(&pvalue
->it_value
,
5471 arg2
+ sizeof(struct target_timeval
)))
5476 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5477 if (!is_error(ret
) && arg3
) {
5478 if (copy_to_user_timeval(arg3
,
5479 &ovalue
.it_interval
)
5480 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5486 case TARGET_NR_getitimer
:
5488 struct itimerval value
;
5490 ret
= get_errno(getitimer(arg1
, &value
));
5491 if (!is_error(ret
) && arg2
) {
5492 if (copy_to_user_timeval(arg2
,
5494 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5500 case TARGET_NR_stat
:
5501 if (!(p
= lock_user_string(arg1
)))
5503 ret
= get_errno(stat(path(p
), &st
));
5504 unlock_user(p
, arg1
, 0);
5506 case TARGET_NR_lstat
:
5507 if (!(p
= lock_user_string(arg1
)))
5509 ret
= get_errno(lstat(path(p
), &st
));
5510 unlock_user(p
, arg1
, 0);
5512 case TARGET_NR_fstat
:
5514 ret
= get_errno(fstat(arg1
, &st
));
5516 if (!is_error(ret
)) {
5517 struct target_stat
*target_st
;
5519 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5521 __put_user(st
.st_dev
, &target_st
->st_dev
);
5522 __put_user(st
.st_ino
, &target_st
->st_ino
);
5523 __put_user(st
.st_mode
, &target_st
->st_mode
);
5524 __put_user(st
.st_uid
, &target_st
->st_uid
);
5525 __put_user(st
.st_gid
, &target_st
->st_gid
);
5526 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5527 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5528 __put_user(st
.st_size
, &target_st
->st_size
);
5529 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5530 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5531 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5532 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5533 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5534 unlock_user_struct(target_st
, arg2
, 1);
5538 #ifdef TARGET_NR_olduname
5539 case TARGET_NR_olduname
:
5542 #ifdef TARGET_NR_iopl
5543 case TARGET_NR_iopl
:
5546 case TARGET_NR_vhangup
:
5547 ret
= get_errno(vhangup());
5549 #ifdef TARGET_NR_idle
5550 case TARGET_NR_idle
:
5553 #ifdef TARGET_NR_syscall
5554 case TARGET_NR_syscall
:
5555 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5558 case TARGET_NR_wait4
:
5561 abi_long status_ptr
= arg2
;
5562 struct rusage rusage
, *rusage_ptr
;
5563 abi_ulong target_rusage
= arg4
;
5565 rusage_ptr
= &rusage
;
5568 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5569 if (!is_error(ret
)) {
5571 status
= host_to_target_waitstatus(status
);
5572 if (put_user_s32(status
, status_ptr
))
5576 host_to_target_rusage(target_rusage
, &rusage
);
5580 #ifdef TARGET_NR_swapoff
5581 case TARGET_NR_swapoff
:
5582 if (!(p
= lock_user_string(arg1
)))
5584 ret
= get_errno(swapoff(p
));
5585 unlock_user(p
, arg1
, 0);
5588 case TARGET_NR_sysinfo
:
5590 struct target_sysinfo
*target_value
;
5591 struct sysinfo value
;
5592 ret
= get_errno(sysinfo(&value
));
5593 if (!is_error(ret
) && arg1
)
5595 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5597 __put_user(value
.uptime
, &target_value
->uptime
);
5598 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5599 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5600 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5601 __put_user(value
.totalram
, &target_value
->totalram
);
5602 __put_user(value
.freeram
, &target_value
->freeram
);
5603 __put_user(value
.sharedram
, &target_value
->sharedram
);
5604 __put_user(value
.bufferram
, &target_value
->bufferram
);
5605 __put_user(value
.totalswap
, &target_value
->totalswap
);
5606 __put_user(value
.freeswap
, &target_value
->freeswap
);
5607 __put_user(value
.procs
, &target_value
->procs
);
5608 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5609 __put_user(value
.freehigh
, &target_value
->freehigh
);
5610 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5611 unlock_user_struct(target_value
, arg1
, 1);
5615 #ifdef TARGET_NR_ipc
5617 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5620 #ifdef TARGET_NR_semget
5621 case TARGET_NR_semget
:
5622 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5625 #ifdef TARGET_NR_semop
5626 case TARGET_NR_semop
:
5627 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5630 #ifdef TARGET_NR_semctl
5631 case TARGET_NR_semctl
:
5632 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5635 #ifdef TARGET_NR_msgctl
5636 case TARGET_NR_msgctl
:
5637 ret
= do_msgctl(arg1
, arg2
, arg3
);
5640 #ifdef TARGET_NR_msgget
5641 case TARGET_NR_msgget
:
5642 ret
= get_errno(msgget(arg1
, arg2
));
5645 #ifdef TARGET_NR_msgrcv
5646 case TARGET_NR_msgrcv
:
5647 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5650 #ifdef TARGET_NR_msgsnd
5651 case TARGET_NR_msgsnd
:
5652 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5655 #ifdef TARGET_NR_shmget
5656 case TARGET_NR_shmget
:
5657 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
5660 #ifdef TARGET_NR_shmctl
5661 case TARGET_NR_shmctl
:
5662 ret
= do_shmctl(arg1
, arg2
, arg3
);
5665 #ifdef TARGET_NR_shmat
5666 case TARGET_NR_shmat
:
5667 ret
= do_shmat(arg1
, arg2
, arg3
);
5670 #ifdef TARGET_NR_shmdt
5671 case TARGET_NR_shmdt
:
5672 ret
= do_shmdt(arg1
);
5675 case TARGET_NR_fsync
:
5676 ret
= get_errno(fsync(arg1
));
5678 case TARGET_NR_clone
:
5679 #if defined(TARGET_SH4)
5680 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5681 #elif defined(TARGET_CRIS)
5682 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5684 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5687 #ifdef __NR_exit_group
5688 /* new thread calls */
5689 case TARGET_NR_exit_group
:
5693 gdb_exit(cpu_env
, arg1
);
5694 ret
= get_errno(exit_group(arg1
));
5697 case TARGET_NR_setdomainname
:
5698 if (!(p
= lock_user_string(arg1
)))
5700 ret
= get_errno(setdomainname(p
, arg2
));
5701 unlock_user(p
, arg1
, 0);
5703 case TARGET_NR_uname
:
5704 /* no need to transcode because we use the linux syscall */
5706 struct new_utsname
* buf
;
5708 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
5710 ret
= get_errno(sys_uname(buf
));
5711 if (!is_error(ret
)) {
5712 /* Overwrite the native machine name with whatever is being
5714 strcpy (buf
->machine
, UNAME_MACHINE
);
5715 /* Allow the user to override the reported release. */
5716 if (qemu_uname_release
&& *qemu_uname_release
)
5717 strcpy (buf
->release
, qemu_uname_release
);
5719 unlock_user_struct(buf
, arg1
, 1);
5723 case TARGET_NR_modify_ldt
:
5724 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5726 #if !defined(TARGET_X86_64)
5727 case TARGET_NR_vm86old
:
5729 case TARGET_NR_vm86
:
5730 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5734 case TARGET_NR_adjtimex
:
5736 #ifdef TARGET_NR_create_module
5737 case TARGET_NR_create_module
:
5739 case TARGET_NR_init_module
:
5740 case TARGET_NR_delete_module
:
5741 #ifdef TARGET_NR_get_kernel_syms
5742 case TARGET_NR_get_kernel_syms
:
5745 case TARGET_NR_quotactl
:
5747 case TARGET_NR_getpgid
:
5748 ret
= get_errno(getpgid(arg1
));
5750 case TARGET_NR_fchdir
:
5751 ret
= get_errno(fchdir(arg1
));
5753 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5754 case TARGET_NR_bdflush
:
5757 #ifdef TARGET_NR_sysfs
5758 case TARGET_NR_sysfs
:
5761 case TARGET_NR_personality
:
5762 ret
= get_errno(personality(arg1
));
5764 #ifdef TARGET_NR_afs_syscall
5765 case TARGET_NR_afs_syscall
:
5768 #ifdef TARGET_NR__llseek /* Not on alpha */
5769 case TARGET_NR__llseek
:
5771 #if defined (__x86_64__)
5772 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5773 if (put_user_s64(ret
, arg4
))
5777 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5778 if (put_user_s64(res
, arg4
))
5784 case TARGET_NR_getdents
:
5785 #if TARGET_ABI_BITS != 32
5787 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5789 struct target_dirent
*target_dirp
;
5790 struct linux_dirent
*dirp
;
5791 abi_long count
= arg3
;
5793 dirp
= malloc(count
);
5795 ret
= -TARGET_ENOMEM
;
5799 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5800 if (!is_error(ret
)) {
5801 struct linux_dirent
*de
;
5802 struct target_dirent
*tde
;
5804 int reclen
, treclen
;
5805 int count1
, tnamelen
;
5809 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5813 reclen
= de
->d_reclen
;
5814 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
5815 tde
->d_reclen
= tswap16(treclen
);
5816 tde
->d_ino
= tswapl(de
->d_ino
);
5817 tde
->d_off
= tswapl(de
->d_off
);
5818 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
5821 /* XXX: may not be correct */
5822 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
5823 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5825 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
5829 unlock_user(target_dirp
, arg2
, ret
);
5835 struct linux_dirent
*dirp
;
5836 abi_long count
= arg3
;
5838 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5840 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5841 if (!is_error(ret
)) {
5842 struct linux_dirent
*de
;
5847 reclen
= de
->d_reclen
;
5850 de
->d_reclen
= tswap16(reclen
);
5851 tswapls(&de
->d_ino
);
5852 tswapls(&de
->d_off
);
5853 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5857 unlock_user(dirp
, arg2
, ret
);
5861 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5862 case TARGET_NR_getdents64
:
5864 struct linux_dirent64
*dirp
;
5865 abi_long count
= arg3
;
5866 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5868 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
5869 if (!is_error(ret
)) {
5870 struct linux_dirent64
*de
;
5875 reclen
= de
->d_reclen
;
5878 de
->d_reclen
= tswap16(reclen
);
5879 tswap64s((uint64_t *)&de
->d_ino
);
5880 tswap64s((uint64_t *)&de
->d_off
);
5881 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
5885 unlock_user(dirp
, arg2
, ret
);
5888 #endif /* TARGET_NR_getdents64 */
5889 #ifdef TARGET_NR__newselect
5890 case TARGET_NR__newselect
:
5891 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
5894 #ifdef TARGET_NR_poll
5895 case TARGET_NR_poll
:
5897 struct target_pollfd
*target_pfd
;
5898 unsigned int nfds
= arg2
;
5903 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
5906 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
5907 for(i
= 0; i
< nfds
; i
++) {
5908 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
5909 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
5911 ret
= get_errno(poll(pfd
, nfds
, timeout
));
5912 if (!is_error(ret
)) {
5913 for(i
= 0; i
< nfds
; i
++) {
5914 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
5916 ret
+= nfds
* (sizeof(struct target_pollfd
)
5917 - sizeof(struct pollfd
));
5919 unlock_user(target_pfd
, arg1
, ret
);
5923 case TARGET_NR_flock
:
5924 /* NOTE: the flock constant seems to be the same for every
5926 ret
= get_errno(flock(arg1
, arg2
));
5928 case TARGET_NR_readv
:
5933 vec
= alloca(count
* sizeof(struct iovec
));
5934 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
5936 ret
= get_errno(readv(arg1
, vec
, count
));
5937 unlock_iovec(vec
, arg2
, count
, 1);
5940 case TARGET_NR_writev
:
5945 vec
= alloca(count
* sizeof(struct iovec
));
5946 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
5948 ret
= get_errno(writev(arg1
, vec
, count
));
5949 unlock_iovec(vec
, arg2
, count
, 0);
5952 case TARGET_NR_getsid
:
5953 ret
= get_errno(getsid(arg1
));
5955 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5956 case TARGET_NR_fdatasync
:
5957 ret
= get_errno(fdatasync(arg1
));
5960 case TARGET_NR__sysctl
:
5961 /* We don't implement this, but ENOTDIR is always a safe
5963 ret
= -TARGET_ENOTDIR
;
5965 case TARGET_NR_sched_setparam
:
5967 struct sched_param
*target_schp
;
5968 struct sched_param schp
;
5970 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
5972 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5973 unlock_user_struct(target_schp
, arg2
, 0);
5974 ret
= get_errno(sched_setparam(arg1
, &schp
));
5977 case TARGET_NR_sched_getparam
:
5979 struct sched_param
*target_schp
;
5980 struct sched_param schp
;
5981 ret
= get_errno(sched_getparam(arg1
, &schp
));
5982 if (!is_error(ret
)) {
5983 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
5985 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
5986 unlock_user_struct(target_schp
, arg2
, 1);
5990 case TARGET_NR_sched_setscheduler
:
5992 struct sched_param
*target_schp
;
5993 struct sched_param schp
;
5994 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
5996 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5997 unlock_user_struct(target_schp
, arg3
, 0);
5998 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6001 case TARGET_NR_sched_getscheduler
:
6002 ret
= get_errno(sched_getscheduler(arg1
));
6004 case TARGET_NR_sched_yield
:
6005 ret
= get_errno(sched_yield());
6007 case TARGET_NR_sched_get_priority_max
:
6008 ret
= get_errno(sched_get_priority_max(arg1
));
6010 case TARGET_NR_sched_get_priority_min
:
6011 ret
= get_errno(sched_get_priority_min(arg1
));
6013 case TARGET_NR_sched_rr_get_interval
:
6016 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6017 if (!is_error(ret
)) {
6018 host_to_target_timespec(arg2
, &ts
);
6022 case TARGET_NR_nanosleep
:
6024 struct timespec req
, rem
;
6025 target_to_host_timespec(&req
, arg1
);
6026 ret
= get_errno(nanosleep(&req
, &rem
));
6027 if (is_error(ret
) && arg2
) {
6028 host_to_target_timespec(arg2
, &rem
);
6032 #ifdef TARGET_NR_query_module
6033 case TARGET_NR_query_module
:
6036 #ifdef TARGET_NR_nfsservctl
6037 case TARGET_NR_nfsservctl
:
6040 case TARGET_NR_prctl
:
6043 case PR_GET_PDEATHSIG
:
6046 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6047 if (!is_error(ret
) && arg2
6048 && put_user_ual(deathsig
, arg2
))
6053 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6057 #ifdef TARGET_NR_arch_prctl
6058 case TARGET_NR_arch_prctl
:
6059 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6060 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6066 #ifdef TARGET_NR_pread
6067 case TARGET_NR_pread
:
6069 if (((CPUARMState
*)cpu_env
)->eabi
)
6072 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6074 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6075 unlock_user(p
, arg2
, ret
);
6077 case TARGET_NR_pwrite
:
6079 if (((CPUARMState
*)cpu_env
)->eabi
)
6082 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6084 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6085 unlock_user(p
, arg2
, 0);
6088 #ifdef TARGET_NR_pread64
6089 case TARGET_NR_pread64
:
6090 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6092 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6093 unlock_user(p
, arg2
, ret
);
6095 case TARGET_NR_pwrite64
:
6096 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6098 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6099 unlock_user(p
, arg2
, 0);
6102 case TARGET_NR_getcwd
:
6103 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6105 ret
= get_errno(sys_getcwd1(p
, arg2
));
6106 unlock_user(p
, arg1
, ret
);
6108 case TARGET_NR_capget
:
6110 case TARGET_NR_capset
:
6112 case TARGET_NR_sigaltstack
:
6113 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6114 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
6115 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6120 case TARGET_NR_sendfile
:
6122 #ifdef TARGET_NR_getpmsg
6123 case TARGET_NR_getpmsg
:
6126 #ifdef TARGET_NR_putpmsg
6127 case TARGET_NR_putpmsg
:
6130 #ifdef TARGET_NR_vfork
6131 case TARGET_NR_vfork
:
6132 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6136 #ifdef TARGET_NR_ugetrlimit
6137 case TARGET_NR_ugetrlimit
:
6140 ret
= get_errno(getrlimit(arg1
, &rlim
));
6141 if (!is_error(ret
)) {
6142 struct target_rlimit
*target_rlim
;
6143 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6145 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
6146 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
6147 unlock_user_struct(target_rlim
, arg2
, 1);
6152 #ifdef TARGET_NR_truncate64
6153 case TARGET_NR_truncate64
:
6154 if (!(p
= lock_user_string(arg1
)))
6156 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6157 unlock_user(p
, arg1
, 0);
6160 #ifdef TARGET_NR_ftruncate64
6161 case TARGET_NR_ftruncate64
:
6162 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6165 #ifdef TARGET_NR_stat64
6166 case TARGET_NR_stat64
:
6167 if (!(p
= lock_user_string(arg1
)))
6169 ret
= get_errno(stat(path(p
), &st
));
6170 unlock_user(p
, arg1
, 0);
6172 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6175 #ifdef TARGET_NR_lstat64
6176 case TARGET_NR_lstat64
:
6177 if (!(p
= lock_user_string(arg1
)))
6179 ret
= get_errno(lstat(path(p
), &st
));
6180 unlock_user(p
, arg1
, 0);
6182 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6185 #ifdef TARGET_NR_fstat64
6186 case TARGET_NR_fstat64
:
6187 ret
= get_errno(fstat(arg1
, &st
));
6189 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6192 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6193 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6194 #ifdef TARGET_NR_fstatat64
6195 case TARGET_NR_fstatat64
:
6197 #ifdef TARGET_NR_newfstatat
6198 case TARGET_NR_newfstatat
:
6200 if (!(p
= lock_user_string(arg2
)))
6202 #ifdef __NR_fstatat64
6203 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6205 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6208 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6212 case TARGET_NR_lchown
:
6213 if (!(p
= lock_user_string(arg1
)))
6215 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6216 unlock_user(p
, arg1
, 0);
6218 case TARGET_NR_getuid
:
6219 ret
= get_errno(high2lowuid(getuid()));
6221 case TARGET_NR_getgid
:
6222 ret
= get_errno(high2lowgid(getgid()));
6224 case TARGET_NR_geteuid
:
6225 ret
= get_errno(high2lowuid(geteuid()));
6227 case TARGET_NR_getegid
:
6228 ret
= get_errno(high2lowgid(getegid()));
6230 case TARGET_NR_setreuid
:
6231 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6233 case TARGET_NR_setregid
:
6234 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6236 case TARGET_NR_getgroups
:
6238 int gidsetsize
= arg1
;
6239 uint16_t *target_grouplist
;
6243 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6244 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6245 if (gidsetsize
== 0)
6247 if (!is_error(ret
)) {
6248 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6249 if (!target_grouplist
)
6251 for(i
= 0;i
< ret
; i
++)
6252 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6253 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6257 case TARGET_NR_setgroups
:
6259 int gidsetsize
= arg1
;
6260 uint16_t *target_grouplist
;
6264 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6265 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6266 if (!target_grouplist
) {
6267 ret
= -TARGET_EFAULT
;
6270 for(i
= 0;i
< gidsetsize
; i
++)
6271 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6272 unlock_user(target_grouplist
, arg2
, 0);
6273 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6276 case TARGET_NR_fchown
:
6277 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6279 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6280 case TARGET_NR_fchownat
:
6281 if (!(p
= lock_user_string(arg2
)))
6283 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6284 unlock_user(p
, arg2
, 0);
6287 #ifdef TARGET_NR_setresuid
6288 case TARGET_NR_setresuid
:
6289 ret
= get_errno(setresuid(low2highuid(arg1
),
6291 low2highuid(arg3
)));
6294 #ifdef TARGET_NR_getresuid
6295 case TARGET_NR_getresuid
:
6297 uid_t ruid
, euid
, suid
;
6298 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6299 if (!is_error(ret
)) {
6300 if (put_user_u16(high2lowuid(ruid
), arg1
)
6301 || put_user_u16(high2lowuid(euid
), arg2
)
6302 || put_user_u16(high2lowuid(suid
), arg3
))
6308 #ifdef TARGET_NR_getresgid
6309 case TARGET_NR_setresgid
:
6310 ret
= get_errno(setresgid(low2highgid(arg1
),
6312 low2highgid(arg3
)));
6315 #ifdef TARGET_NR_getresgid
6316 case TARGET_NR_getresgid
:
6318 gid_t rgid
, egid
, sgid
;
6319 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6320 if (!is_error(ret
)) {
6321 if (put_user_u16(high2lowgid(rgid
), arg1
)
6322 || put_user_u16(high2lowgid(egid
), arg2
)
6323 || put_user_u16(high2lowgid(sgid
), arg3
))
6329 case TARGET_NR_chown
:
6330 if (!(p
= lock_user_string(arg1
)))
6332 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6333 unlock_user(p
, arg1
, 0);
6335 case TARGET_NR_setuid
:
6336 ret
= get_errno(setuid(low2highuid(arg1
)));
6338 case TARGET_NR_setgid
:
6339 ret
= get_errno(setgid(low2highgid(arg1
)));
6341 case TARGET_NR_setfsuid
:
6342 ret
= get_errno(setfsuid(arg1
));
6344 case TARGET_NR_setfsgid
:
6345 ret
= get_errno(setfsgid(arg1
));
6347 #endif /* USE_UID16 */
6349 #ifdef TARGET_NR_lchown32
6350 case TARGET_NR_lchown32
:
6351 if (!(p
= lock_user_string(arg1
)))
6353 ret
= get_errno(lchown(p
, arg2
, arg3
));
6354 unlock_user(p
, arg1
, 0);
6357 #ifdef TARGET_NR_getuid32
6358 case TARGET_NR_getuid32
:
6359 ret
= get_errno(getuid());
6363 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6364 /* Alpha specific */
6365 case TARGET_NR_getxuid
:
6369 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6371 ret
= get_errno(getuid());
6374 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6375 /* Alpha specific */
6376 case TARGET_NR_getxgid
:
6380 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6382 ret
= get_errno(getgid());
6386 #ifdef TARGET_NR_getgid32
6387 case TARGET_NR_getgid32
:
6388 ret
= get_errno(getgid());
6391 #ifdef TARGET_NR_geteuid32
6392 case TARGET_NR_geteuid32
:
6393 ret
= get_errno(geteuid());
6396 #ifdef TARGET_NR_getegid32
6397 case TARGET_NR_getegid32
:
6398 ret
= get_errno(getegid());
6401 #ifdef TARGET_NR_setreuid32
6402 case TARGET_NR_setreuid32
:
6403 ret
= get_errno(setreuid(arg1
, arg2
));
6406 #ifdef TARGET_NR_setregid32
6407 case TARGET_NR_setregid32
:
6408 ret
= get_errno(setregid(arg1
, arg2
));
6411 #ifdef TARGET_NR_getgroups32
6412 case TARGET_NR_getgroups32
:
6414 int gidsetsize
= arg1
;
6415 uint32_t *target_grouplist
;
6419 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6420 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6421 if (gidsetsize
== 0)
6423 if (!is_error(ret
)) {
6424 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
6425 if (!target_grouplist
) {
6426 ret
= -TARGET_EFAULT
;
6429 for(i
= 0;i
< ret
; i
++)
6430 target_grouplist
[i
] = tswap32(grouplist
[i
]);
6431 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
6436 #ifdef TARGET_NR_setgroups32
6437 case TARGET_NR_setgroups32
:
6439 int gidsetsize
= arg1
;
6440 uint32_t *target_grouplist
;
6444 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6445 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
6446 if (!target_grouplist
) {
6447 ret
= -TARGET_EFAULT
;
6450 for(i
= 0;i
< gidsetsize
; i
++)
6451 grouplist
[i
] = tswap32(target_grouplist
[i
]);
6452 unlock_user(target_grouplist
, arg2
, 0);
6453 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6457 #ifdef TARGET_NR_fchown32
6458 case TARGET_NR_fchown32
:
6459 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
6462 #ifdef TARGET_NR_setresuid32
6463 case TARGET_NR_setresuid32
:
6464 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
6467 #ifdef TARGET_NR_getresuid32
6468 case TARGET_NR_getresuid32
:
6470 uid_t ruid
, euid
, suid
;
6471 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6472 if (!is_error(ret
)) {
6473 if (put_user_u32(ruid
, arg1
)
6474 || put_user_u32(euid
, arg2
)
6475 || put_user_u32(suid
, arg3
))
6481 #ifdef TARGET_NR_setresgid32
6482 case TARGET_NR_setresgid32
:
6483 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
6486 #ifdef TARGET_NR_getresgid32
6487 case TARGET_NR_getresgid32
:
6489 gid_t rgid
, egid
, sgid
;
6490 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6491 if (!is_error(ret
)) {
6492 if (put_user_u32(rgid
, arg1
)
6493 || put_user_u32(egid
, arg2
)
6494 || put_user_u32(sgid
, arg3
))
6500 #ifdef TARGET_NR_chown32
6501 case TARGET_NR_chown32
:
6502 if (!(p
= lock_user_string(arg1
)))
6504 ret
= get_errno(chown(p
, arg2
, arg3
));
6505 unlock_user(p
, arg1
, 0);
6508 #ifdef TARGET_NR_setuid32
6509 case TARGET_NR_setuid32
:
6510 ret
= get_errno(setuid(arg1
));
6513 #ifdef TARGET_NR_setgid32
6514 case TARGET_NR_setgid32
:
6515 ret
= get_errno(setgid(arg1
));
6518 #ifdef TARGET_NR_setfsuid32
6519 case TARGET_NR_setfsuid32
:
6520 ret
= get_errno(setfsuid(arg1
));
6523 #ifdef TARGET_NR_setfsgid32
6524 case TARGET_NR_setfsgid32
:
6525 ret
= get_errno(setfsgid(arg1
));
6529 case TARGET_NR_pivot_root
:
6531 #ifdef TARGET_NR_mincore
6532 case TARGET_NR_mincore
:
6535 ret
= -TARGET_EFAULT
;
6536 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6538 if (!(p
= lock_user_string(arg3
)))
6540 ret
= get_errno(mincore(a
, arg2
, p
));
6541 unlock_user(p
, arg3
, ret
);
6543 unlock_user(a
, arg1
, 0);
6547 #ifdef TARGET_NR_arm_fadvise64_64
6548 case TARGET_NR_arm_fadvise64_64
:
6551 * arm_fadvise64_64 looks like fadvise64_64 but
6552 * with different argument order
6560 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6561 #ifdef TARGET_NR_fadvise64_64
6562 case TARGET_NR_fadvise64_64
:
6564 /* This is a hint, so ignoring and returning success is ok. */
6568 #ifdef TARGET_NR_madvise
6569 case TARGET_NR_madvise
:
6570 /* A straight passthrough may not be safe because qemu sometimes
6571 turns private flie-backed mappings into anonymous mappings.
6572 This will break MADV_DONTNEED.
6573 This is a hint, so ignoring and returning success is ok. */
6577 #if TARGET_ABI_BITS == 32
6578 case TARGET_NR_fcntl64
:
6582 struct target_flock64
*target_fl
;
6584 struct target_eabi_flock64
*target_efl
;
6587 cmd
= target_to_host_fcntl_cmd(arg2
);
6588 if (cmd
== -TARGET_EINVAL
)
6592 case TARGET_F_GETLK64
:
6594 if (((CPUARMState
*)cpu_env
)->eabi
) {
6595 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6597 fl
.l_type
= tswap16(target_efl
->l_type
);
6598 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6599 fl
.l_start
= tswap64(target_efl
->l_start
);
6600 fl
.l_len
= tswap64(target_efl
->l_len
);
6601 fl
.l_pid
= tswapl(target_efl
->l_pid
);
6602 unlock_user_struct(target_efl
, arg3
, 0);
6606 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6608 fl
.l_type
= tswap16(target_fl
->l_type
);
6609 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6610 fl
.l_start
= tswap64(target_fl
->l_start
);
6611 fl
.l_len
= tswap64(target_fl
->l_len
);
6612 fl
.l_pid
= tswapl(target_fl
->l_pid
);
6613 unlock_user_struct(target_fl
, arg3
, 0);
6615 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6618 if (((CPUARMState
*)cpu_env
)->eabi
) {
6619 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
6621 target_efl
->l_type
= tswap16(fl
.l_type
);
6622 target_efl
->l_whence
= tswap16(fl
.l_whence
);
6623 target_efl
->l_start
= tswap64(fl
.l_start
);
6624 target_efl
->l_len
= tswap64(fl
.l_len
);
6625 target_efl
->l_pid
= tswapl(fl
.l_pid
);
6626 unlock_user_struct(target_efl
, arg3
, 1);
6630 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
6632 target_fl
->l_type
= tswap16(fl
.l_type
);
6633 target_fl
->l_whence
= tswap16(fl
.l_whence
);
6634 target_fl
->l_start
= tswap64(fl
.l_start
);
6635 target_fl
->l_len
= tswap64(fl
.l_len
);
6636 target_fl
->l_pid
= tswapl(fl
.l_pid
);
6637 unlock_user_struct(target_fl
, arg3
, 1);
6642 case TARGET_F_SETLK64
:
6643 case TARGET_F_SETLKW64
:
6645 if (((CPUARMState
*)cpu_env
)->eabi
) {
6646 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6648 fl
.l_type
= tswap16(target_efl
->l_type
);
6649 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6650 fl
.l_start
= tswap64(target_efl
->l_start
);
6651 fl
.l_len
= tswap64(target_efl
->l_len
);
6652 fl
.l_pid
= tswapl(target_efl
->l_pid
);
6653 unlock_user_struct(target_efl
, arg3
, 0);
6657 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6659 fl
.l_type
= tswap16(target_fl
->l_type
);
6660 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6661 fl
.l_start
= tswap64(target_fl
->l_start
);
6662 fl
.l_len
= tswap64(target_fl
->l_len
);
6663 fl
.l_pid
= tswapl(target_fl
->l_pid
);
6664 unlock_user_struct(target_fl
, arg3
, 0);
6666 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6669 ret
= do_fcntl(arg1
, arg2
, arg3
);
6675 #ifdef TARGET_NR_cacheflush
6676 case TARGET_NR_cacheflush
:
6677 /* self-modifying code is handled automatically, so nothing needed */
6681 #ifdef TARGET_NR_security
6682 case TARGET_NR_security
:
6685 #ifdef TARGET_NR_getpagesize
6686 case TARGET_NR_getpagesize
:
6687 ret
= TARGET_PAGE_SIZE
;
6690 case TARGET_NR_gettid
:
6691 ret
= get_errno(gettid());
6693 #ifdef TARGET_NR_readahead
6694 case TARGET_NR_readahead
:
6695 #if TARGET_ABI_BITS == 32
6697 if (((CPUARMState
*)cpu_env
)->eabi
)
6704 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
6706 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
6710 #ifdef TARGET_NR_setxattr
6711 case TARGET_NR_setxattr
:
6712 case TARGET_NR_lsetxattr
:
6713 case TARGET_NR_fsetxattr
:
6714 case TARGET_NR_getxattr
:
6715 case TARGET_NR_lgetxattr
:
6716 case TARGET_NR_fgetxattr
:
6717 case TARGET_NR_listxattr
:
6718 case TARGET_NR_llistxattr
:
6719 case TARGET_NR_flistxattr
:
6720 case TARGET_NR_removexattr
:
6721 case TARGET_NR_lremovexattr
:
6722 case TARGET_NR_fremovexattr
:
6723 ret
= -TARGET_EOPNOTSUPP
;
6726 #ifdef TARGET_NR_set_thread_area
6727 case TARGET_NR_set_thread_area
:
6728 #if defined(TARGET_MIPS)
6729 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
6732 #elif defined(TARGET_CRIS)
6734 ret
= -TARGET_EINVAL
;
6736 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
6740 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6741 ret
= do_set_thread_area(cpu_env
, arg1
);
6744 goto unimplemented_nowarn
;
6747 #ifdef TARGET_NR_get_thread_area
6748 case TARGET_NR_get_thread_area
:
6749 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6750 ret
= do_get_thread_area(cpu_env
, arg1
);
6752 goto unimplemented_nowarn
;
6755 #ifdef TARGET_NR_getdomainname
6756 case TARGET_NR_getdomainname
:
6757 goto unimplemented_nowarn
;
6760 #ifdef TARGET_NR_clock_gettime
6761 case TARGET_NR_clock_gettime
:
6764 ret
= get_errno(clock_gettime(arg1
, &ts
));
6765 if (!is_error(ret
)) {
6766 host_to_target_timespec(arg2
, &ts
);
6771 #ifdef TARGET_NR_clock_getres
6772 case TARGET_NR_clock_getres
:
6775 ret
= get_errno(clock_getres(arg1
, &ts
));
6776 if (!is_error(ret
)) {
6777 host_to_target_timespec(arg2
, &ts
);
6782 #ifdef TARGET_NR_clock_nanosleep
6783 case TARGET_NR_clock_nanosleep
:
6786 target_to_host_timespec(&ts
, arg3
);
6787 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
6789 host_to_target_timespec(arg4
, &ts
);
6794 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6795 case TARGET_NR_set_tid_address
:
6796 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
6800 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6801 case TARGET_NR_tkill
:
6802 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
6806 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6807 case TARGET_NR_tgkill
:
6808 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
6809 target_to_host_signal(arg3
)));
6813 #ifdef TARGET_NR_set_robust_list
6814 case TARGET_NR_set_robust_list
:
6815 goto unimplemented_nowarn
;
6818 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6819 case TARGET_NR_utimensat
:
6821 struct timespec
*tsp
, ts
[2];
6825 target_to_host_timespec(ts
, arg3
);
6826 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
6830 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
6832 if (!(p
= lock_user_string(arg2
))) {
6833 ret
= -TARGET_EFAULT
;
6836 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
6837 unlock_user(p
, arg2
, 0);
6842 #if defined(CONFIG_USE_NPTL)
6843 case TARGET_NR_futex
:
6844 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6847 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6848 case TARGET_NR_inotify_init
:
6849 ret
= get_errno(sys_inotify_init());
6852 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6853 case TARGET_NR_inotify_add_watch
:
6854 p
= lock_user_string(arg2
);
6855 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
6856 unlock_user(p
, arg2
, 0);
6859 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6860 case TARGET_NR_inotify_rm_watch
:
6861 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
6865 #ifdef TARGET_NR_mq_open
6866 case TARGET_NR_mq_open
:
6868 struct mq_attr posix_mq_attr
;
6870 p
= lock_user_string(arg1
- 1);
6872 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
6873 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
6874 unlock_user (p
, arg1
, 0);
6878 case TARGET_NR_mq_unlink
:
6879 p
= lock_user_string(arg1
- 1);
6880 ret
= get_errno(mq_unlink(p
));
6881 unlock_user (p
, arg1
, 0);
6884 case TARGET_NR_mq_timedsend
:
6888 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6890 target_to_host_timespec(&ts
, arg5
);
6891 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
6892 host_to_target_timespec(arg5
, &ts
);
6895 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
6896 unlock_user (p
, arg2
, arg3
);
6900 case TARGET_NR_mq_timedreceive
:
6905 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6907 target_to_host_timespec(&ts
, arg5
);
6908 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
6909 host_to_target_timespec(arg5
, &ts
);
6912 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
6913 unlock_user (p
, arg2
, arg3
);
6915 put_user_u32(prio
, arg4
);
6919 /* Not implemented for now... */
6920 /* case TARGET_NR_mq_notify: */
6923 case TARGET_NR_mq_getsetattr
:
6925 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
6928 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
6929 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
6932 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
6933 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
6940 #ifdef CONFIG_SPLICE
6941 #ifdef TARGET_NR_tee
6944 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
6948 #ifdef TARGET_NR_splice
6949 case TARGET_NR_splice
:
6951 loff_t loff_in
, loff_out
;
6952 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
6954 get_user_u64(loff_in
, arg2
);
6955 ploff_in
= &loff_in
;
6958 get_user_u64(loff_out
, arg2
);
6959 ploff_out
= &loff_out
;
6961 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
6965 #ifdef TARGET_NR_vmsplice
6966 case TARGET_NR_vmsplice
:
6971 vec
= alloca(count
* sizeof(struct iovec
));
6972 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6974 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
6975 unlock_iovec(vec
, arg2
, count
, 0);
6979 #endif /* CONFIG_SPLICE */
6980 #ifdef CONFIG_EVENTFD
6981 #if defined(TARGET_NR_eventfd)
6982 case TARGET_NR_eventfd
:
6983 ret
= get_errno(eventfd(arg1
, 0));
6986 #if defined(TARGET_NR_eventfd2)
6987 case TARGET_NR_eventfd2
:
6988 ret
= get_errno(eventfd(arg1
, arg2
));
6991 #endif /* CONFIG_EVENTFD */
6994 gemu_log("qemu: Unsupported syscall: %d\n", num
);
6995 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6996 unimplemented_nowarn
:
6998 ret
= -TARGET_ENOSYS
;
7003 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
7006 print_syscall_ret(num
, ret
);
7009 ret
= -TARGET_EFAULT
;