4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
63 #include <sys/eventfd.h>
66 #include <sys/epoll.h>
69 #include "qemu/xattr.h"
71 #ifdef CONFIG_SENDFILE
72 #include <sys/sendfile.h>
75 #define termios host_termios
76 #define winsize host_winsize
77 #define termio host_termio
78 #define sgttyb host_sgttyb /* same as target */
79 #define tchars host_tchars /* same as target */
80 #define ltchars host_ltchars /* same as target */
82 #include <linux/termios.h>
83 #include <linux/unistd.h>
84 #include <linux/cdrom.h>
85 #include <linux/hdreg.h>
86 #include <linux/soundcard.h>
88 #include <linux/mtio.h>
90 #if defined(CONFIG_FIEMAP)
91 #include <linux/fiemap.h>
94 #if defined(CONFIG_USBFS)
95 #include <linux/usbdevice_fs.h>
96 #include <linux/usb/ch9.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #include "linux_loop.h"
110 #include "qemu/guest-random.h"
111 #include "qapi/error.h"
112 #include "fd-trans.h"
115 #define CLONE_IO 0x80000000 /* Clone io context */
118 /* We can't directly call the host clone syscall, because this will
119 * badly confuse libc (breaking mutexes, for example). So we must
120 * divide clone flags into:
121 * * flag combinations that look like pthread_create()
122 * * flag combinations that look like fork()
123 * * flags we can implement within QEMU itself
124 * * flags we can't support and will return an error for
126 /* For thread creation, all these flags must be present; for
127 * fork, none must be present.
129 #define CLONE_THREAD_FLAGS \
130 (CLONE_VM | CLONE_FS | CLONE_FILES | \
131 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
133 /* These flags are ignored:
134 * CLONE_DETACHED is now ignored by the kernel;
135 * CLONE_IO is just an optimisation hint to the I/O scheduler
137 #define CLONE_IGNORED_FLAGS \
138 (CLONE_DETACHED | CLONE_IO)
140 /* Flags for fork which we can implement within QEMU itself */
141 #define CLONE_OPTIONAL_FORK_FLAGS \
142 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
143 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
145 /* Flags for thread creation which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_THREAD_FLAGS \
147 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
148 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
150 #define CLONE_INVALID_FORK_FLAGS \
151 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
153 #define CLONE_INVALID_THREAD_FLAGS \
154 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
155 CLONE_IGNORED_FLAGS))
157 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
158 * have almost all been allocated. We cannot support any of
159 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
160 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
161 * The checks against the invalid thread masks above will catch these.
162 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
165 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
166 * once. This exercises the codepaths for restart.
168 //#define DEBUG_ERESTARTSYS
170 //#include <linux/msdos_fs.h>
171 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
172 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
182 #define _syscall0(type,name) \
183 static type name (void) \
185 return syscall(__NR_##name); \
188 #define _syscall1(type,name,type1,arg1) \
189 static type name (type1 arg1) \
191 return syscall(__NR_##name, arg1); \
194 #define _syscall2(type,name,type1,arg1,type2,arg2) \
195 static type name (type1 arg1,type2 arg2) \
197 return syscall(__NR_##name, arg1, arg2); \
200 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
201 static type name (type1 arg1,type2 arg2,type3 arg3) \
203 return syscall(__NR_##name, arg1, arg2, arg3); \
206 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
207 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
209 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
212 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
220 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
221 type5,arg5,type6,arg6) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
229 #define __NR_sys_uname __NR_uname
230 #define __NR_sys_getcwd1 __NR_getcwd
231 #define __NR_sys_getdents __NR_getdents
232 #define __NR_sys_getdents64 __NR_getdents64
233 #define __NR_sys_getpriority __NR_getpriority
234 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
235 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
236 #define __NR_sys_syslog __NR_syslog
237 #define __NR_sys_futex __NR_futex
238 #define __NR_sys_inotify_init __NR_inotify_init
239 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
240 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
242 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
243 #define __NR__llseek __NR_lseek
246 /* Newer kernel ports have llseek() instead of _llseek() */
247 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
248 #define TARGET_NR__llseek TARGET_NR_llseek
251 #define __NR_sys_gettid __NR_gettid
252 _syscall0(int, sys_gettid
)
254 /* For the 64-bit guest on 32-bit host case we must emulate
255 * getdents using getdents64, because otherwise the host
256 * might hand us back more dirent records than we can fit
257 * into the guest buffer after structure format conversion.
258 * Otherwise we emulate getdents with getdents if the host has it.
260 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
261 #define EMULATE_GETDENTS_WITH_GETDENTS
264 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
265 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
267 #if (defined(TARGET_NR_getdents) && \
268 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
269 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
270 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
272 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
273 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
274 loff_t
*, res
, uint
, wh
);
276 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
277 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
279 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
280 #ifdef __NR_exit_group
281 _syscall1(int,exit_group
,int,error_code
)
283 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
284 _syscall1(int,set_tid_address
,int *,tidptr
)
286 #if defined(TARGET_NR_futex) && defined(__NR_futex)
287 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
288 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
290 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
291 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
292 unsigned long *, user_mask_ptr
);
293 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
294 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
295 unsigned long *, user_mask_ptr
);
296 #define __NR_sys_getcpu __NR_getcpu
297 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
298 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
300 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
301 struct __user_cap_data_struct
*, data
);
302 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
303 struct __user_cap_data_struct
*, data
);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get
, int, which
, int, who
)
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
316 unsigned long, idx1
, unsigned long, idx2
)
319 static bitmask_transtbl fcntl_flags_tbl
[] = {
320 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
321 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
322 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
323 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
324 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
325 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
326 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
327 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
328 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
329 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
330 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
331 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
332 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
333 #if defined(O_DIRECT)
334 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
336 #if defined(O_NOATIME)
337 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
339 #if defined(O_CLOEXEC)
340 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
343 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
345 #if defined(O_TMPFILE)
346 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
348 /* Don't terminate the list prematurely on 64-bit host+guest. */
349 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
350 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd() wrapper matching the getcwd syscall convention: on success
 * return the length of the path *including* the trailing NUL, on
 * failure return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
364 #ifdef TARGET_NR_utimensat
365 #if defined(__NR_utimensat)
366 #define __NR_sys_utimensat __NR_utimensat
367 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
368 const struct timespec
*,tsp
,int,flags
)
370 static int sys_utimensat(int dirfd
, const char *pathname
,
371 const struct timespec times
[2], int flags
)
377 #endif /* TARGET_NR_utimensat */
379 #ifdef TARGET_NR_renameat2
380 #if defined(__NR_renameat2)
381 #define __NR_sys_renameat2 __NR_renameat2
382 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
383 const char *, new, unsigned int, flags
)
385 static int sys_renameat2(int oldfd
, const char *old
,
386 int newfd
, const char *new, int flags
)
389 return renameat(oldfd
, old
, newfd
, new);
395 #endif /* TARGET_NR_renameat2 */
397 #ifdef CONFIG_INOTIFY
398 #include <sys/inotify.h>
400 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
401 static int sys_inotify_init(void)
403 return (inotify_init());
406 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
407 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
409 return (inotify_add_watch(fd
, pathname
, mask
));
412 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
413 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
415 return (inotify_rm_watch(fd
, wd
));
418 #ifdef CONFIG_INOTIFY1
419 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
420 static int sys_inotify_init1(int flags
)
422 return (inotify_init1(flags
));
427 /* Userspace can usually survive runtime without inotify */
428 #undef TARGET_NR_inotify_init
429 #undef TARGET_NR_inotify_init1
430 #undef TARGET_NR_inotify_add_watch
431 #undef TARGET_NR_inotify_rm_watch
432 #endif /* CONFIG_INOTIFY */
434 #if defined(TARGET_NR_prlimit64)
435 #ifndef __NR_prlimit64
436 # define __NR_prlimit64 -1
438 #define __NR_sys_prlimit64 __NR_prlimit64
439 /* The glibc rlimit structure may not be that used by the underlying syscall */
440 struct host_rlimit64
{
444 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
445 const struct host_rlimit64
*, new_limit
,
446 struct host_rlimit64
*, old_limit
)
450 #if defined(TARGET_NR_timer_create)
451 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
452 static timer_t g_posix_timers
[32] = { 0, } ;
454 static inline int next_free_host_timer(void)
457 /* FIXME: Does finding the next free slot require a lock? */
458 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
459 if (g_posix_timers
[k
] == 0) {
460 g_posix_timers
[k
] = (timer_t
) 1;
468 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
470 static inline int regpairs_aligned(void *cpu_env
, int num
)
472 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
474 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
475 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
476 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
477 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
478 * of registers which translates to the same as ARM/MIPS, because we start with
480 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
481 #elif defined(TARGET_SH4)
482 /* SH4 doesn't align register pairs, except for p{read,write}64 */
483 static inline int regpairs_aligned(void *cpu_env
, int num
)
486 case TARGET_NR_pread64
:
487 case TARGET_NR_pwrite64
:
494 #elif defined(TARGET_XTENSA)
495 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
497 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
500 #define ERRNO_TABLE_SIZE 1200
502 /* target_to_host_errno_table[] is initialized from
503 * host_to_target_errno_table[] in syscall_init(). */
504 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
508 * This list is the union of errno values overridden in asm-<arch>/errno.h
509 * minus the errnos that are not actually generic to all archs.
511 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
512 [EAGAIN
] = TARGET_EAGAIN
,
513 [EIDRM
] = TARGET_EIDRM
,
514 [ECHRNG
] = TARGET_ECHRNG
,
515 [EL2NSYNC
] = TARGET_EL2NSYNC
,
516 [EL3HLT
] = TARGET_EL3HLT
,
517 [EL3RST
] = TARGET_EL3RST
,
518 [ELNRNG
] = TARGET_ELNRNG
,
519 [EUNATCH
] = TARGET_EUNATCH
,
520 [ENOCSI
] = TARGET_ENOCSI
,
521 [EL2HLT
] = TARGET_EL2HLT
,
522 [EDEADLK
] = TARGET_EDEADLK
,
523 [ENOLCK
] = TARGET_ENOLCK
,
524 [EBADE
] = TARGET_EBADE
,
525 [EBADR
] = TARGET_EBADR
,
526 [EXFULL
] = TARGET_EXFULL
,
527 [ENOANO
] = TARGET_ENOANO
,
528 [EBADRQC
] = TARGET_EBADRQC
,
529 [EBADSLT
] = TARGET_EBADSLT
,
530 [EBFONT
] = TARGET_EBFONT
,
531 [ENOSTR
] = TARGET_ENOSTR
,
532 [ENODATA
] = TARGET_ENODATA
,
533 [ETIME
] = TARGET_ETIME
,
534 [ENOSR
] = TARGET_ENOSR
,
535 [ENONET
] = TARGET_ENONET
,
536 [ENOPKG
] = TARGET_ENOPKG
,
537 [EREMOTE
] = TARGET_EREMOTE
,
538 [ENOLINK
] = TARGET_ENOLINK
,
539 [EADV
] = TARGET_EADV
,
540 [ESRMNT
] = TARGET_ESRMNT
,
541 [ECOMM
] = TARGET_ECOMM
,
542 [EPROTO
] = TARGET_EPROTO
,
543 [EDOTDOT
] = TARGET_EDOTDOT
,
544 [EMULTIHOP
] = TARGET_EMULTIHOP
,
545 [EBADMSG
] = TARGET_EBADMSG
,
546 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
547 [EOVERFLOW
] = TARGET_EOVERFLOW
,
548 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
549 [EBADFD
] = TARGET_EBADFD
,
550 [EREMCHG
] = TARGET_EREMCHG
,
551 [ELIBACC
] = TARGET_ELIBACC
,
552 [ELIBBAD
] = TARGET_ELIBBAD
,
553 [ELIBSCN
] = TARGET_ELIBSCN
,
554 [ELIBMAX
] = TARGET_ELIBMAX
,
555 [ELIBEXEC
] = TARGET_ELIBEXEC
,
556 [EILSEQ
] = TARGET_EILSEQ
,
557 [ENOSYS
] = TARGET_ENOSYS
,
558 [ELOOP
] = TARGET_ELOOP
,
559 [ERESTART
] = TARGET_ERESTART
,
560 [ESTRPIPE
] = TARGET_ESTRPIPE
,
561 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
562 [EUSERS
] = TARGET_EUSERS
,
563 [ENOTSOCK
] = TARGET_ENOTSOCK
,
564 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
565 [EMSGSIZE
] = TARGET_EMSGSIZE
,
566 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
567 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
568 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
569 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
570 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
571 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
572 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
573 [EADDRINUSE
] = TARGET_EADDRINUSE
,
574 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
575 [ENETDOWN
] = TARGET_ENETDOWN
,
576 [ENETUNREACH
] = TARGET_ENETUNREACH
,
577 [ENETRESET
] = TARGET_ENETRESET
,
578 [ECONNABORTED
] = TARGET_ECONNABORTED
,
579 [ECONNRESET
] = TARGET_ECONNRESET
,
580 [ENOBUFS
] = TARGET_ENOBUFS
,
581 [EISCONN
] = TARGET_EISCONN
,
582 [ENOTCONN
] = TARGET_ENOTCONN
,
583 [EUCLEAN
] = TARGET_EUCLEAN
,
584 [ENOTNAM
] = TARGET_ENOTNAM
,
585 [ENAVAIL
] = TARGET_ENAVAIL
,
586 [EISNAM
] = TARGET_EISNAM
,
587 [EREMOTEIO
] = TARGET_EREMOTEIO
,
588 [EDQUOT
] = TARGET_EDQUOT
,
589 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
590 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
591 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
592 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
593 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
594 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
595 [EALREADY
] = TARGET_EALREADY
,
596 [EINPROGRESS
] = TARGET_EINPROGRESS
,
597 [ESTALE
] = TARGET_ESTALE
,
598 [ECANCELED
] = TARGET_ECANCELED
,
599 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
600 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
602 [ENOKEY
] = TARGET_ENOKEY
,
605 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
608 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
611 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
614 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
616 #ifdef ENOTRECOVERABLE
617 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
620 [ENOMSG
] = TARGET_ENOMSG
,
623 [ERFKILL
] = TARGET_ERFKILL
,
626 [EHWPOISON
] = TARGET_EHWPOISON
,
630 static inline int host_to_target_errno(int err
)
632 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
633 host_to_target_errno_table
[err
]) {
634 return host_to_target_errno_table
[err
];
639 static inline int target_to_host_errno(int err
)
641 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
642 target_to_host_errno_table
[err
]) {
643 return target_to_host_errno_table
[err
];
648 static inline abi_long
get_errno(abi_long ret
)
651 return -host_to_target_errno(errno
);
656 const char *target_strerror(int err
)
658 if (err
== TARGET_ERESTARTSYS
) {
659 return "To be restarted";
661 if (err
== TARGET_QEMU_ESIGRETURN
) {
662 return "Successful exit from sigreturn";
665 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
668 return strerror(target_to_host_errno(err
));
671 #define safe_syscall0(type, name) \
672 static type safe_##name(void) \
674 return safe_syscall(__NR_##name); \
677 #define safe_syscall1(type, name, type1, arg1) \
678 static type safe_##name(type1 arg1) \
680 return safe_syscall(__NR_##name, arg1); \
683 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
684 static type safe_##name(type1 arg1, type2 arg2) \
686 return safe_syscall(__NR_##name, arg1, arg2); \
689 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
690 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
692 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
695 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
697 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
699 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
702 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
703 type4, arg4, type5, arg5) \
704 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
707 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
710 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
711 type4, arg4, type5, arg5, type6, arg6) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713 type5 arg5, type6 arg6) \
715 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
718 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
719 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
720 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
721 int, flags
, mode_t
, mode
)
722 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
723 struct rusage
*, rusage
)
724 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
725 int, options
, struct rusage
*, rusage
)
726 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
727 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
728 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
729 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
730 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
732 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
733 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
735 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
736 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
737 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
738 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
739 safe_syscall2(int, tkill
, int, tid
, int, sig
)
740 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
741 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
742 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
743 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
744 unsigned long, pos_l
, unsigned long, pos_h
)
745 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
746 unsigned long, pos_l
, unsigned long, pos_h
)
747 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
749 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
750 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
751 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
752 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
753 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
754 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
755 safe_syscall2(int, flock
, int, fd
, int, operation
)
756 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
757 const struct timespec
*, uts
, size_t, sigsetsize
)
758 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
760 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
761 struct timespec
*, rem
)
762 #ifdef TARGET_NR_clock_nanosleep
763 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
764 const struct timespec
*, req
, struct timespec
*, rem
)
766 #if !defined(__NR_msgsnd) || !defined(__NR_msgrcv) || !defined(__NR_semtimedop)
767 /* This host kernel architecture uses a single ipc syscall; fake up
768 * wrappers for the sub-operations to hide this implementation detail.
769 * Annoyingly we can't include linux/ipc.h to get the constant definitions
770 * for the call parameter because some structs in there conflict with the
771 * sys/ipc.h ones. So we just define them here, and rely on them being
772 * the same for all host architectures.
774 #define Q_SEMTIMEDOP 4
777 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
779 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
780 void *, ptr
, long, fifth
)
783 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
786 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
788 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
792 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
793 long, msgtype
, int, flags
)
795 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
797 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
800 #ifdef __NR_semtimedop
801 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
802 unsigned, nsops
, const struct timespec
*, timeout
)
804 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
805 const struct timespec
*timeout
)
807 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
813 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
814 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
815 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818 * "third argument might be integer or pointer or not present" behaviour of
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824 * use the flock64 struct rather than unsuffixed flock
825 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
833 static inline int host_to_target_sock_type(int host_type
)
837 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
839 target_type
= TARGET_SOCK_DGRAM
;
842 target_type
= TARGET_SOCK_STREAM
;
845 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
849 #if defined(SOCK_CLOEXEC)
850 if (host_type
& SOCK_CLOEXEC
) {
851 target_type
|= TARGET_SOCK_CLOEXEC
;
855 #if defined(SOCK_NONBLOCK)
856 if (host_type
& SOCK_NONBLOCK
) {
857 target_type
|= TARGET_SOCK_NONBLOCK
;
864 static abi_ulong target_brk
;
865 static abi_ulong target_original_brk
;
866 static abi_ulong brk_page
;
868 void target_set_brk(abi_ulong new_brk
)
870 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
871 brk_page
= HOST_PAGE_ALIGN(target_brk
);
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
877 /* do_brk() must return target values and target errnos. */
878 abi_long
do_brk(abi_ulong new_brk
)
880 abi_long mapped_addr
;
881 abi_ulong new_alloc_size
;
883 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
886 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
889 if (new_brk
< target_original_brk
) {
890 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
895 /* If the new brk is less than the highest page reserved to the
896 * target heap allocation, set it and we're almost done... */
897 if (new_brk
<= brk_page
) {
898 /* Heap contents are initialized to zero, as for anonymous
900 if (new_brk
> target_brk
) {
901 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
903 target_brk
= new_brk
;
904 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
908 /* We need to allocate more memory after the brk... Note that
909 * we don't use MAP_FIXED because that will map over the top of
910 * any existing mapping (like the one with the host libc or qemu
911 * itself); instead we treat "mapped but at wrong address" as
912 * a failure and unmap again.
914 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
915 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
916 PROT_READ
|PROT_WRITE
,
917 MAP_ANON
|MAP_PRIVATE
, 0, 0));
919 if (mapped_addr
== brk_page
) {
920 /* Heap contents are initialized to zero, as for anonymous
921 * mapped pages. Technically the new pages are already
922 * initialized to zero since they *are* anonymous mapped
923 * pages, however we have to take care with the contents that
924 * come from the remaining part of the previous page: it may
925 * contains garbage data due to a previous heap usage (grown
927 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
929 target_brk
= new_brk
;
930 brk_page
= HOST_PAGE_ALIGN(target_brk
);
931 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
934 } else if (mapped_addr
!= -1) {
935 /* Mapped but at wrong address, meaning there wasn't actually
936 * enough space for this brk.
938 target_munmap(mapped_addr
, new_alloc_size
);
940 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
943 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
946 #if defined(TARGET_ALPHA)
947 /* We (partially) emulate OSF/1 on Alpha, which requires we
948 return a proper errno, not an unchanged brk value. */
949 return -TARGET_ENOMEM
;
951 /* For everything else, return the previous break. */
955 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
956 abi_ulong target_fds_addr
,
960 abi_ulong b
, *target_fds
;
962 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
963 if (!(target_fds
= lock_user(VERIFY_READ
,
965 sizeof(abi_ulong
) * nw
,
967 return -TARGET_EFAULT
;
971 for (i
= 0; i
< nw
; i
++) {
972 /* grab the abi_ulong */
973 __get_user(b
, &target_fds
[i
]);
974 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
975 /* check the bit inside the abi_ulong */
982 unlock_user(target_fds
, target_fds_addr
, 0);
987 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
988 abi_ulong target_fds_addr
,
991 if (target_fds_addr
) {
992 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
993 return -TARGET_EFAULT
;
1001 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1007 abi_ulong
*target_fds
;
1009 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1010 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1012 sizeof(abi_ulong
) * nw
,
1014 return -TARGET_EFAULT
;
1017 for (i
= 0; i
< nw
; i
++) {
1019 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1020 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1023 __put_user(v
, &target_fds
[i
]);
1026 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1037 static inline abi_long
host_to_target_clock_t(long ticks
)
1039 #if HOST_HZ == TARGET_HZ
1042 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1046 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1047 const struct rusage
*rusage
)
1049 struct target_rusage
*target_rusage
;
1051 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1052 return -TARGET_EFAULT
;
1053 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1054 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1055 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1056 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1057 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1058 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1059 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1060 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1061 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1062 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1063 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1064 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1065 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1066 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1067 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1068 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1069 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1070 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1071 unlock_user_struct(target_rusage
, target_addr
, 1);
1076 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1078 abi_ulong target_rlim_swap
;
1081 target_rlim_swap
= tswapal(target_rlim
);
1082 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1083 return RLIM_INFINITY
;
1085 result
= target_rlim_swap
;
1086 if (target_rlim_swap
!= (rlim_t
)result
)
1087 return RLIM_INFINITY
;
1092 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1094 abi_ulong target_rlim_swap
;
1097 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1098 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1100 target_rlim_swap
= rlim
;
1101 result
= tswapal(target_rlim_swap
);
1106 static inline int target_to_host_resource(int code
)
1109 case TARGET_RLIMIT_AS
:
1111 case TARGET_RLIMIT_CORE
:
1113 case TARGET_RLIMIT_CPU
:
1115 case TARGET_RLIMIT_DATA
:
1117 case TARGET_RLIMIT_FSIZE
:
1118 return RLIMIT_FSIZE
;
1119 case TARGET_RLIMIT_LOCKS
:
1120 return RLIMIT_LOCKS
;
1121 case TARGET_RLIMIT_MEMLOCK
:
1122 return RLIMIT_MEMLOCK
;
1123 case TARGET_RLIMIT_MSGQUEUE
:
1124 return RLIMIT_MSGQUEUE
;
1125 case TARGET_RLIMIT_NICE
:
1127 case TARGET_RLIMIT_NOFILE
:
1128 return RLIMIT_NOFILE
;
1129 case TARGET_RLIMIT_NPROC
:
1130 return RLIMIT_NPROC
;
1131 case TARGET_RLIMIT_RSS
:
1133 case TARGET_RLIMIT_RTPRIO
:
1134 return RLIMIT_RTPRIO
;
1135 case TARGET_RLIMIT_SIGPENDING
:
1136 return RLIMIT_SIGPENDING
;
1137 case TARGET_RLIMIT_STACK
:
1138 return RLIMIT_STACK
;
1144 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1145 abi_ulong target_tv_addr
)
1147 struct target_timeval
*target_tv
;
1149 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1150 return -TARGET_EFAULT
;
1152 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1153 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1155 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1160 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1161 const struct timeval
*tv
)
1163 struct target_timeval
*target_tv
;
1165 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1166 return -TARGET_EFAULT
;
1168 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1169 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1171 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1176 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1177 abi_ulong target_tz_addr
)
1179 struct target_timezone
*target_tz
;
1181 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1182 return -TARGET_EFAULT
;
1185 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1186 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1188 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1196 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1197 abi_ulong target_mq_attr_addr
)
1199 struct target_mq_attr
*target_mq_attr
;
1201 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1202 target_mq_attr_addr
, 1))
1203 return -TARGET_EFAULT
;
1205 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1206 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1207 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1208 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1210 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1215 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1216 const struct mq_attr
*attr
)
1218 struct target_mq_attr
*target_mq_attr
;
1220 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1221 target_mq_attr_addr
, 0))
1222 return -TARGET_EFAULT
;
1224 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1225 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1226 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1227 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1229 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long
do_select(int n
,
1238 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1239 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1241 fd_set rfds
, wfds
, efds
;
1242 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1244 struct timespec ts
, *ts_ptr
;
1247 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1251 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1255 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1260 if (target_tv_addr
) {
1261 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1262 return -TARGET_EFAULT
;
1263 ts
.tv_sec
= tv
.tv_sec
;
1264 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1270 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1273 if (!is_error(ret
)) {
1274 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1275 return -TARGET_EFAULT
;
1276 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1277 return -TARGET_EFAULT
;
1278 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1279 return -TARGET_EFAULT
;
1281 if (target_tv_addr
) {
1282 tv
.tv_sec
= ts
.tv_sec
;
1283 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1284 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1285 return -TARGET_EFAULT
;
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long
do_old_select(abi_ulong arg1
)
1296 struct target_sel_arg_struct
*sel
;
1297 abi_ulong inp
, outp
, exp
, tvp
;
1300 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1301 return -TARGET_EFAULT
;
1304 nsel
= tswapal(sel
->n
);
1305 inp
= tswapal(sel
->inp
);
1306 outp
= tswapal(sel
->outp
);
1307 exp
= tswapal(sel
->exp
);
1308 tvp
= tswapal(sel
->tvp
);
1310 unlock_user_struct(sel
, arg1
, 0);
1312 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1317 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1320 return pipe2(host_pipe
, flags
);
1326 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1327 int flags
, int is_pipe2
)
1331 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1334 return get_errno(ret
);
1336 /* Several targets have special calling conventions for the original
1337 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1339 #if defined(TARGET_ALPHA)
1340 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1341 return host_pipe
[0];
1342 #elif defined(TARGET_MIPS)
1343 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1344 return host_pipe
[0];
1345 #elif defined(TARGET_SH4)
1346 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1347 return host_pipe
[0];
1348 #elif defined(TARGET_SPARC)
1349 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1350 return host_pipe
[0];
1354 if (put_user_s32(host_pipe
[0], pipedes
)
1355 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1356 return -TARGET_EFAULT
;
1357 return get_errno(ret
);
1360 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1361 abi_ulong target_addr
,
1364 struct target_ip_mreqn
*target_smreqn
;
1366 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1368 return -TARGET_EFAULT
;
1369 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1370 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1371 if (len
== sizeof(struct target_ip_mreqn
))
1372 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1373 unlock_user(target_smreqn
, target_addr
, 0);
1378 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1379 abi_ulong target_addr
,
1382 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1383 sa_family_t sa_family
;
1384 struct target_sockaddr
*target_saddr
;
1386 if (fd_trans_target_to_host_addr(fd
)) {
1387 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1390 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1392 return -TARGET_EFAULT
;
1394 sa_family
= tswap16(target_saddr
->sa_family
);
1396 /* Oops. The caller might send a incomplete sun_path; sun_path
1397 * must be terminated by \0 (see the manual page), but
1398 * unfortunately it is quite common to specify sockaddr_un
1399 * length as "strlen(x->sun_path)" while it should be
1400 * "strlen(...) + 1". We'll fix that here if needed.
1401 * Linux kernel has a similar feature.
1404 if (sa_family
== AF_UNIX
) {
1405 if (len
< unix_maxlen
&& len
> 0) {
1406 char *cp
= (char*)target_saddr
;
1408 if ( cp
[len
-1] && !cp
[len
] )
1411 if (len
> unix_maxlen
)
1415 memcpy(addr
, target_saddr
, len
);
1416 addr
->sa_family
= sa_family
;
1417 if (sa_family
== AF_NETLINK
) {
1418 struct sockaddr_nl
*nladdr
;
1420 nladdr
= (struct sockaddr_nl
*)addr
;
1421 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1422 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1423 } else if (sa_family
== AF_PACKET
) {
1424 struct target_sockaddr_ll
*lladdr
;
1426 lladdr
= (struct target_sockaddr_ll
*)addr
;
1427 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1428 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1430 unlock_user(target_saddr
, target_addr
, 0);
1435 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1436 struct sockaddr
*addr
,
1439 struct target_sockaddr
*target_saddr
;
1446 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1448 return -TARGET_EFAULT
;
1449 memcpy(target_saddr
, addr
, len
);
1450 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1451 sizeof(target_saddr
->sa_family
)) {
1452 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1454 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1455 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1456 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1457 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1458 } else if (addr
->sa_family
== AF_PACKET
) {
1459 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1460 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1461 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1462 } else if (addr
->sa_family
== AF_INET6
&&
1463 len
>= sizeof(struct target_sockaddr_in6
)) {
1464 struct target_sockaddr_in6
*target_in6
=
1465 (struct target_sockaddr_in6
*)target_saddr
;
1466 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1468 unlock_user(target_saddr
, target_addr
, len
);
1473 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1474 struct target_msghdr
*target_msgh
)
1476 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1477 abi_long msg_controllen
;
1478 abi_ulong target_cmsg_addr
;
1479 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1480 socklen_t space
= 0;
1482 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1483 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1485 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1486 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1487 target_cmsg_start
= target_cmsg
;
1489 return -TARGET_EFAULT
;
1491 while (cmsg
&& target_cmsg
) {
1492 void *data
= CMSG_DATA(cmsg
);
1493 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1495 int len
= tswapal(target_cmsg
->cmsg_len
)
1496 - sizeof(struct target_cmsghdr
);
1498 space
+= CMSG_SPACE(len
);
1499 if (space
> msgh
->msg_controllen
) {
1500 space
-= CMSG_SPACE(len
);
1501 /* This is a QEMU bug, since we allocated the payload
1502 * area ourselves (unlike overflow in host-to-target
1503 * conversion, which is just the guest giving us a buffer
1504 * that's too small). It can't happen for the payload types
1505 * we currently support; if it becomes an issue in future
1506 * we would need to improve our allocation strategy to
1507 * something more intelligent than "twice the size of the
1508 * target buffer we're reading from".
1510 gemu_log("Host cmsg overflow\n");
1514 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1515 cmsg
->cmsg_level
= SOL_SOCKET
;
1517 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1519 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1520 cmsg
->cmsg_len
= CMSG_LEN(len
);
1522 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1523 int *fd
= (int *)data
;
1524 int *target_fd
= (int *)target_data
;
1525 int i
, numfds
= len
/ sizeof(int);
1527 for (i
= 0; i
< numfds
; i
++) {
1528 __get_user(fd
[i
], target_fd
+ i
);
1530 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1531 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1532 struct ucred
*cred
= (struct ucred
*)data
;
1533 struct target_ucred
*target_cred
=
1534 (struct target_ucred
*)target_data
;
1536 __get_user(cred
->pid
, &target_cred
->pid
);
1537 __get_user(cred
->uid
, &target_cred
->uid
);
1538 __get_user(cred
->gid
, &target_cred
->gid
);
1540 gemu_log("Unsupported ancillary data: %d/%d\n",
1541 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1542 memcpy(data
, target_data
, len
);
1545 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1546 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1549 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1551 msgh
->msg_controllen
= space
;
1555 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1556 struct msghdr
*msgh
)
1558 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1559 abi_long msg_controllen
;
1560 abi_ulong target_cmsg_addr
;
1561 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1562 socklen_t space
= 0;
1564 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1565 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1567 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1568 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1569 target_cmsg_start
= target_cmsg
;
1571 return -TARGET_EFAULT
;
1573 while (cmsg
&& target_cmsg
) {
1574 void *data
= CMSG_DATA(cmsg
);
1575 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1577 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1578 int tgt_len
, tgt_space
;
1580 /* We never copy a half-header but may copy half-data;
1581 * this is Linux's behaviour in put_cmsg(). Note that
1582 * truncation here is a guest problem (which we report
1583 * to the guest via the CTRUNC bit), unlike truncation
1584 * in target_to_host_cmsg, which is a QEMU bug.
1586 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1587 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1591 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1592 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1594 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1596 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1598 /* Payload types which need a different size of payload on
1599 * the target must adjust tgt_len here.
1602 switch (cmsg
->cmsg_level
) {
1604 switch (cmsg
->cmsg_type
) {
1606 tgt_len
= sizeof(struct target_timeval
);
1616 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1617 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1618 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1621 /* We must now copy-and-convert len bytes of payload
1622 * into tgt_len bytes of destination space. Bear in mind
1623 * that in both source and destination we may be dealing
1624 * with a truncated value!
1626 switch (cmsg
->cmsg_level
) {
1628 switch (cmsg
->cmsg_type
) {
1631 int *fd
= (int *)data
;
1632 int *target_fd
= (int *)target_data
;
1633 int i
, numfds
= tgt_len
/ sizeof(int);
1635 for (i
= 0; i
< numfds
; i
++) {
1636 __put_user(fd
[i
], target_fd
+ i
);
1642 struct timeval
*tv
= (struct timeval
*)data
;
1643 struct target_timeval
*target_tv
=
1644 (struct target_timeval
*)target_data
;
1646 if (len
!= sizeof(struct timeval
) ||
1647 tgt_len
!= sizeof(struct target_timeval
)) {
1651 /* copy struct timeval to target */
1652 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1653 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1656 case SCM_CREDENTIALS
:
1658 struct ucred
*cred
= (struct ucred
*)data
;
1659 struct target_ucred
*target_cred
=
1660 (struct target_ucred
*)target_data
;
1662 __put_user(cred
->pid
, &target_cred
->pid
);
1663 __put_user(cred
->uid
, &target_cred
->uid
);
1664 __put_user(cred
->gid
, &target_cred
->gid
);
1673 switch (cmsg
->cmsg_type
) {
1676 uint32_t *v
= (uint32_t *)data
;
1677 uint32_t *t_int
= (uint32_t *)target_data
;
1679 if (len
!= sizeof(uint32_t) ||
1680 tgt_len
!= sizeof(uint32_t)) {
1683 __put_user(*v
, t_int
);
1689 struct sock_extended_err ee
;
1690 struct sockaddr_in offender
;
1692 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1693 struct errhdr_t
*target_errh
=
1694 (struct errhdr_t
*)target_data
;
1696 if (len
!= sizeof(struct errhdr_t
) ||
1697 tgt_len
!= sizeof(struct errhdr_t
)) {
1700 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1701 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1702 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1703 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1704 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1705 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1706 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1707 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1708 (void *) &errh
->offender
, sizeof(errh
->offender
));
1717 switch (cmsg
->cmsg_type
) {
1720 uint32_t *v
= (uint32_t *)data
;
1721 uint32_t *t_int
= (uint32_t *)target_data
;
1723 if (len
!= sizeof(uint32_t) ||
1724 tgt_len
!= sizeof(uint32_t)) {
1727 __put_user(*v
, t_int
);
1733 struct sock_extended_err ee
;
1734 struct sockaddr_in6 offender
;
1736 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1737 struct errhdr6_t
*target_errh
=
1738 (struct errhdr6_t
*)target_data
;
1740 if (len
!= sizeof(struct errhdr6_t
) ||
1741 tgt_len
!= sizeof(struct errhdr6_t
)) {
1744 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1745 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1746 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1747 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1748 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1749 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1750 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1751 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1752 (void *) &errh
->offender
, sizeof(errh
->offender
));
1762 gemu_log("Unsupported ancillary data: %d/%d\n",
1763 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1764 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1765 if (tgt_len
> len
) {
1766 memset(target_data
+ len
, 0, tgt_len
- len
);
1770 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1771 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1772 if (msg_controllen
< tgt_space
) {
1773 tgt_space
= msg_controllen
;
1775 msg_controllen
-= tgt_space
;
1777 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1778 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1781 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1783 target_msgh
->msg_controllen
= tswapal(space
);
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1789 abi_ulong optval_addr
, socklen_t optlen
)
1793 struct ip_mreqn
*ip_mreq
;
1794 struct ip_mreq_source
*ip_mreq_source
;
1798 /* TCP options all take an 'int' value. */
1799 if (optlen
< sizeof(uint32_t))
1800 return -TARGET_EINVAL
;
1802 if (get_user_u32(val
, optval_addr
))
1803 return -TARGET_EFAULT
;
1804 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1811 case IP_ROUTER_ALERT
:
1815 case IP_MTU_DISCOVER
:
1822 case IP_MULTICAST_TTL
:
1823 case IP_MULTICAST_LOOP
:
1825 if (optlen
>= sizeof(uint32_t)) {
1826 if (get_user_u32(val
, optval_addr
))
1827 return -TARGET_EFAULT
;
1828 } else if (optlen
>= 1) {
1829 if (get_user_u8(val
, optval_addr
))
1830 return -TARGET_EFAULT
;
1832 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1834 case IP_ADD_MEMBERSHIP
:
1835 case IP_DROP_MEMBERSHIP
:
1836 if (optlen
< sizeof (struct target_ip_mreq
) ||
1837 optlen
> sizeof (struct target_ip_mreqn
))
1838 return -TARGET_EINVAL
;
1840 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1841 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1842 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1845 case IP_BLOCK_SOURCE
:
1846 case IP_UNBLOCK_SOURCE
:
1847 case IP_ADD_SOURCE_MEMBERSHIP
:
1848 case IP_DROP_SOURCE_MEMBERSHIP
:
1849 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1850 return -TARGET_EINVAL
;
1852 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1853 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1854 unlock_user (ip_mreq_source
, optval_addr
, 0);
1863 case IPV6_MTU_DISCOVER
:
1866 case IPV6_RECVPKTINFO
:
1867 case IPV6_UNICAST_HOPS
:
1868 case IPV6_MULTICAST_HOPS
:
1869 case IPV6_MULTICAST_LOOP
:
1871 case IPV6_RECVHOPLIMIT
:
1872 case IPV6_2292HOPLIMIT
:
1875 case IPV6_2292PKTINFO
:
1876 case IPV6_RECVTCLASS
:
1877 case IPV6_RECVRTHDR
:
1878 case IPV6_2292RTHDR
:
1879 case IPV6_RECVHOPOPTS
:
1880 case IPV6_2292HOPOPTS
:
1881 case IPV6_RECVDSTOPTS
:
1882 case IPV6_2292DSTOPTS
:
1884 #ifdef IPV6_RECVPATHMTU
1885 case IPV6_RECVPATHMTU
:
1887 #ifdef IPV6_TRANSPARENT
1888 case IPV6_TRANSPARENT
:
1890 #ifdef IPV6_FREEBIND
1893 #ifdef IPV6_RECVORIGDSTADDR
1894 case IPV6_RECVORIGDSTADDR
:
1897 if (optlen
< sizeof(uint32_t)) {
1898 return -TARGET_EINVAL
;
1900 if (get_user_u32(val
, optval_addr
)) {
1901 return -TARGET_EFAULT
;
1903 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1904 &val
, sizeof(val
)));
1908 struct in6_pktinfo pki
;
1910 if (optlen
< sizeof(pki
)) {
1911 return -TARGET_EINVAL
;
1914 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1915 return -TARGET_EFAULT
;
1918 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1920 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1921 &pki
, sizeof(pki
)));
1932 struct icmp6_filter icmp6f
;
1934 if (optlen
> sizeof(icmp6f
)) {
1935 optlen
= sizeof(icmp6f
);
1938 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
1939 return -TARGET_EFAULT
;
1942 for (val
= 0; val
< 8; val
++) {
1943 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
1946 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1958 /* those take an u32 value */
1959 if (optlen
< sizeof(uint32_t)) {
1960 return -TARGET_EINVAL
;
1963 if (get_user_u32(val
, optval_addr
)) {
1964 return -TARGET_EFAULT
;
1966 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1967 &val
, sizeof(val
)));
1974 case TARGET_SOL_SOCKET
:
1976 case TARGET_SO_RCVTIMEO
:
1980 optname
= SO_RCVTIMEO
;
1983 if (optlen
!= sizeof(struct target_timeval
)) {
1984 return -TARGET_EINVAL
;
1987 if (copy_from_user_timeval(&tv
, optval_addr
)) {
1988 return -TARGET_EFAULT
;
1991 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
1995 case TARGET_SO_SNDTIMEO
:
1996 optname
= SO_SNDTIMEO
;
1998 case TARGET_SO_ATTACH_FILTER
:
2000 struct target_sock_fprog
*tfprog
;
2001 struct target_sock_filter
*tfilter
;
2002 struct sock_fprog fprog
;
2003 struct sock_filter
*filter
;
2006 if (optlen
!= sizeof(*tfprog
)) {
2007 return -TARGET_EINVAL
;
2009 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2010 return -TARGET_EFAULT
;
2012 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2013 tswapal(tfprog
->filter
), 0)) {
2014 unlock_user_struct(tfprog
, optval_addr
, 1);
2015 return -TARGET_EFAULT
;
2018 fprog
.len
= tswap16(tfprog
->len
);
2019 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2020 if (filter
== NULL
) {
2021 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2022 unlock_user_struct(tfprog
, optval_addr
, 1);
2023 return -TARGET_ENOMEM
;
2025 for (i
= 0; i
< fprog
.len
; i
++) {
2026 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2027 filter
[i
].jt
= tfilter
[i
].jt
;
2028 filter
[i
].jf
= tfilter
[i
].jf
;
2029 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2031 fprog
.filter
= filter
;
2033 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2034 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2037 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2038 unlock_user_struct(tfprog
, optval_addr
, 1);
2041 case TARGET_SO_BINDTODEVICE
:
2043 char *dev_ifname
, *addr_ifname
;
2045 if (optlen
> IFNAMSIZ
- 1) {
2046 optlen
= IFNAMSIZ
- 1;
2048 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2050 return -TARGET_EFAULT
;
2052 optname
= SO_BINDTODEVICE
;
2053 addr_ifname
= alloca(IFNAMSIZ
);
2054 memcpy(addr_ifname
, dev_ifname
, optlen
);
2055 addr_ifname
[optlen
] = 0;
2056 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2057 addr_ifname
, optlen
));
2058 unlock_user (dev_ifname
, optval_addr
, 0);
2061 case TARGET_SO_LINGER
:
2064 struct target_linger
*tlg
;
2066 if (optlen
!= sizeof(struct target_linger
)) {
2067 return -TARGET_EINVAL
;
2069 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2070 return -TARGET_EFAULT
;
2072 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2073 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2074 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2076 unlock_user_struct(tlg
, optval_addr
, 0);
2079 /* Options with 'int' argument. */
2080 case TARGET_SO_DEBUG
:
2083 case TARGET_SO_REUSEADDR
:
2084 optname
= SO_REUSEADDR
;
2087 case TARGET_SO_REUSEPORT
:
2088 optname
= SO_REUSEPORT
;
2091 case TARGET_SO_TYPE
:
2094 case TARGET_SO_ERROR
:
2097 case TARGET_SO_DONTROUTE
:
2098 optname
= SO_DONTROUTE
;
2100 case TARGET_SO_BROADCAST
:
2101 optname
= SO_BROADCAST
;
2103 case TARGET_SO_SNDBUF
:
2104 optname
= SO_SNDBUF
;
2106 case TARGET_SO_SNDBUFFORCE
:
2107 optname
= SO_SNDBUFFORCE
;
2109 case TARGET_SO_RCVBUF
:
2110 optname
= SO_RCVBUF
;
2112 case TARGET_SO_RCVBUFFORCE
:
2113 optname
= SO_RCVBUFFORCE
;
2115 case TARGET_SO_KEEPALIVE
:
2116 optname
= SO_KEEPALIVE
;
2118 case TARGET_SO_OOBINLINE
:
2119 optname
= SO_OOBINLINE
;
2121 case TARGET_SO_NO_CHECK
:
2122 optname
= SO_NO_CHECK
;
2124 case TARGET_SO_PRIORITY
:
2125 optname
= SO_PRIORITY
;
2128 case TARGET_SO_BSDCOMPAT
:
2129 optname
= SO_BSDCOMPAT
;
2132 case TARGET_SO_PASSCRED
:
2133 optname
= SO_PASSCRED
;
2135 case TARGET_SO_PASSSEC
:
2136 optname
= SO_PASSSEC
;
2138 case TARGET_SO_TIMESTAMP
:
2139 optname
= SO_TIMESTAMP
;
2141 case TARGET_SO_RCVLOWAT
:
2142 optname
= SO_RCVLOWAT
;
2147 if (optlen
< sizeof(uint32_t))
2148 return -TARGET_EINVAL
;
2150 if (get_user_u32(val
, optval_addr
))
2151 return -TARGET_EFAULT
;
2152 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2156 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2157 ret
= -TARGET_ENOPROTOOPT
;
2162 /* do_getsockopt() Must return target values and target errnos. */
2163 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2164 abi_ulong optval_addr
, abi_ulong optlen
)
2171 case TARGET_SOL_SOCKET
:
2174 /* These don't just return a single integer */
2175 case TARGET_SO_RCVTIMEO
:
2176 case TARGET_SO_SNDTIMEO
:
2177 case TARGET_SO_PEERNAME
:
2179 case TARGET_SO_PEERCRED
: {
2182 struct target_ucred
*tcr
;
2184 if (get_user_u32(len
, optlen
)) {
2185 return -TARGET_EFAULT
;
2188 return -TARGET_EINVAL
;
2192 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2200 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2201 return -TARGET_EFAULT
;
2203 __put_user(cr
.pid
, &tcr
->pid
);
2204 __put_user(cr
.uid
, &tcr
->uid
);
2205 __put_user(cr
.gid
, &tcr
->gid
);
2206 unlock_user_struct(tcr
, optval_addr
, 1);
2207 if (put_user_u32(len
, optlen
)) {
2208 return -TARGET_EFAULT
;
2212 case TARGET_SO_LINGER
:
2216 struct target_linger
*tlg
;
2218 if (get_user_u32(len
, optlen
)) {
2219 return -TARGET_EFAULT
;
2222 return -TARGET_EINVAL
;
2226 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2234 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2235 return -TARGET_EFAULT
;
2237 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2238 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2239 unlock_user_struct(tlg
, optval_addr
, 1);
2240 if (put_user_u32(len
, optlen
)) {
2241 return -TARGET_EFAULT
;
2245 /* Options with 'int' argument. */
2246 case TARGET_SO_DEBUG
:
2249 case TARGET_SO_REUSEADDR
:
2250 optname
= SO_REUSEADDR
;
2253 case TARGET_SO_REUSEPORT
:
2254 optname
= SO_REUSEPORT
;
2257 case TARGET_SO_TYPE
:
2260 case TARGET_SO_ERROR
:
2263 case TARGET_SO_DONTROUTE
:
2264 optname
= SO_DONTROUTE
;
2266 case TARGET_SO_BROADCAST
:
2267 optname
= SO_BROADCAST
;
2269 case TARGET_SO_SNDBUF
:
2270 optname
= SO_SNDBUF
;
2272 case TARGET_SO_RCVBUF
:
2273 optname
= SO_RCVBUF
;
2275 case TARGET_SO_KEEPALIVE
:
2276 optname
= SO_KEEPALIVE
;
2278 case TARGET_SO_OOBINLINE
:
2279 optname
= SO_OOBINLINE
;
2281 case TARGET_SO_NO_CHECK
:
2282 optname
= SO_NO_CHECK
;
2284 case TARGET_SO_PRIORITY
:
2285 optname
= SO_PRIORITY
;
2288 case TARGET_SO_BSDCOMPAT
:
2289 optname
= SO_BSDCOMPAT
;
2292 case TARGET_SO_PASSCRED
:
2293 optname
= SO_PASSCRED
;
2295 case TARGET_SO_TIMESTAMP
:
2296 optname
= SO_TIMESTAMP
;
2298 case TARGET_SO_RCVLOWAT
:
2299 optname
= SO_RCVLOWAT
;
2301 case TARGET_SO_ACCEPTCONN
:
2302 optname
= SO_ACCEPTCONN
;
2309 /* TCP options all take an 'int' value. */
2311 if (get_user_u32(len
, optlen
))
2312 return -TARGET_EFAULT
;
2314 return -TARGET_EINVAL
;
2316 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2319 if (optname
== SO_TYPE
) {
2320 val
= host_to_target_sock_type(val
);
2325 if (put_user_u32(val
, optval_addr
))
2326 return -TARGET_EFAULT
;
2328 if (put_user_u8(val
, optval_addr
))
2329 return -TARGET_EFAULT
;
2331 if (put_user_u32(len
, optlen
))
2332 return -TARGET_EFAULT
;
2339 case IP_ROUTER_ALERT
:
2343 case IP_MTU_DISCOVER
:
2349 case IP_MULTICAST_TTL
:
2350 case IP_MULTICAST_LOOP
:
2351 if (get_user_u32(len
, optlen
))
2352 return -TARGET_EFAULT
;
2354 return -TARGET_EINVAL
;
2356 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2359 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2361 if (put_user_u32(len
, optlen
)
2362 || put_user_u8(val
, optval_addr
))
2363 return -TARGET_EFAULT
;
2365 if (len
> sizeof(int))
2367 if (put_user_u32(len
, optlen
)
2368 || put_user_u32(val
, optval_addr
))
2369 return -TARGET_EFAULT
;
2373 ret
= -TARGET_ENOPROTOOPT
;
2379 case IPV6_MTU_DISCOVER
:
2382 case IPV6_RECVPKTINFO
:
2383 case IPV6_UNICAST_HOPS
:
2384 case IPV6_MULTICAST_HOPS
:
2385 case IPV6_MULTICAST_LOOP
:
2387 case IPV6_RECVHOPLIMIT
:
2388 case IPV6_2292HOPLIMIT
:
2391 case IPV6_2292PKTINFO
:
2392 case IPV6_RECVTCLASS
:
2393 case IPV6_RECVRTHDR
:
2394 case IPV6_2292RTHDR
:
2395 case IPV6_RECVHOPOPTS
:
2396 case IPV6_2292HOPOPTS
:
2397 case IPV6_RECVDSTOPTS
:
2398 case IPV6_2292DSTOPTS
:
2400 #ifdef IPV6_RECVPATHMTU
2401 case IPV6_RECVPATHMTU
:
2403 #ifdef IPV6_TRANSPARENT
2404 case IPV6_TRANSPARENT
:
2406 #ifdef IPV6_FREEBIND
2409 #ifdef IPV6_RECVORIGDSTADDR
2410 case IPV6_RECVORIGDSTADDR
:
2412 if (get_user_u32(len
, optlen
))
2413 return -TARGET_EFAULT
;
2415 return -TARGET_EINVAL
;
2417 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2420 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2422 if (put_user_u32(len
, optlen
)
2423 || put_user_u8(val
, optval_addr
))
2424 return -TARGET_EFAULT
;
2426 if (len
> sizeof(int))
2428 if (put_user_u32(len
, optlen
)
2429 || put_user_u32(val
, optval_addr
))
2430 return -TARGET_EFAULT
;
2434 ret
= -TARGET_ENOPROTOOPT
;
2440 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2442 ret
= -TARGET_EOPNOTSUPP
;
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Reassemble the 64-bit offset from the guest's two half-words.
     * The shift is done in two steps of TARGET_LONG_BITS/2 so that on
     * 64-bit targets (where thigh is unused padding) the total shift
     * equals the type width without invoking UB from a single
     * full-width shift. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    /* Same two-step trick for the host side: on a 64-bit host this
     * yields 0 in *hhigh, on a 32-bit host the true upper word. */
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
/* Translate a guest iovec array at target_addr into a host struct iovec
 * array with every buffer locked into host memory.
 *
 * type: VERIFY_READ or VERIFY_WRITE (direction of the eventual transfer)
 * copy: passed through to lock_user() - copy guest data into the lock
 * Returns the host vector, or NULL with errno set on failure.
 * The caller must release it with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len,
             * mirroring the kernel's iovec total-size limit. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (entries with a
     * positive guest length are the only ones that were locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release a host iovec previously built by lock_iovec().
 * copy != 0 means the host buffers were written to (a read-style
 * syscall), so their contents must be copied back to the guest.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    /* Re-lock the guest descriptor array to recover each entry's
     * original guest base address for unlock_user(). */
    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped here too; nothing beyond was locked. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/* Convert a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host equivalent, in place.
 * Returns 0 on success, -TARGET_EINVAL if the host cannot express a
 * requested flag at socket-creation time.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other types happen to share numbering between targets/hosts. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* With O_NONBLOCK available, sock_flags_fixup() emulates it
         * post-creation instead of failing here. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, approximate it by setting O_NONBLOCK
 * with fcntl() after the fact.  Returns fd on success, or a negative
 * target errno (closing fd) on failure.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* BUGFIX: check F_GETFL's result before ORing; on failure it
         * returns -1 and "O_NONBLOCK | -1" would set every flag bit. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2625 /* do_socket() Must return target values and target errnos. */
2626 static abi_long
do_socket(int domain
, int type
, int protocol
)
2628 int target_type
= type
;
2631 ret
= target_to_host_sock_type(&type
);
2636 if (domain
== PF_NETLINK
&& !(
2637 #ifdef CONFIG_RTNETLINK
2638 protocol
== NETLINK_ROUTE
||
2640 protocol
== NETLINK_KOBJECT_UEVENT
||
2641 protocol
== NETLINK_AUDIT
)) {
2642 return -EPFNOSUPPORT
;
2645 if (domain
== AF_PACKET
||
2646 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2647 protocol
= tswap16(protocol
);
2650 ret
= get_errno(socket(domain
, type
, protocol
));
2652 ret
= sock_flags_fixup(ret
, target_type
);
2653 if (type
== SOCK_PACKET
) {
2654 /* Manage an obsolete case :
2655 * if socket type is SOCK_PACKET, bind by name
2657 fd_trans_register(ret
, &target_packet_trans
);
2658 } else if (domain
== PF_NETLINK
) {
2660 #ifdef CONFIG_RTNETLINK
2662 fd_trans_register(ret
, &target_netlink_route_trans
);
2665 case NETLINK_KOBJECT_UEVENT
:
2666 /* nothing to do: messages are strings */
2669 fd_trans_register(ret
, &target_netlink_audit_trans
);
2672 g_assert_not_reached();
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    /* Guest passed a negative length (socklen_t is unsigned). */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for the NUL that AF_UNIX path fixup may need. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    /* Guest passed a negative length (socklen_t is unsigned). */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for the NUL that AF_UNIX path fixup may need. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    /* safe_connect: restartable wrapper honouring pending guest signals. */
    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: msgp is the guest msghdr already
 * locked by the caller.  send != 0 selects sendmsg, else recvmsg.
 * Translates name, control data and iovec between guest and host
 * layouts, applying any per-fd data translator.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsgs can be larger than the guest's (64- vs 32-bit
     * alignment), so reserve twice the guest control length. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translator may rewrite the payload: work on a copy of
             * the first iovec element. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* On success return the byte count, not the cmsg status. */
                ret = len;
            }
        }
    }

out:
    /* copy-back to the guest only for the receive direction */
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
/* Lock the guest msghdr and dispatch to do_sendrecvmsg_locked().
 * send selects direction and therefore the lock mode: the struct is
 * read for sendmsg, written back (copy=1 on unlock) for recvmsg.
 */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
2843 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2844 * so it might not have this *mmsg-specific flag either.
2846 #ifndef MSG_WAITFORONE
2847 #define MSG_WAITFORONE 0x10000
/* Emulate sendmmsg/recvmmsg as a loop of single-message operations.
 * Mirrors kernel semantics: returns the number of datagrams processed
 * if any succeeded, otherwise the first error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        /* Kernel silently clamps oversized vectors rather than failing. */
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually filled in. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        /* Caller doesn't want the peer address. */
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    /* The kernel updates the length in place; keep the guest's value
     * so we can clamp the copy-back. */
    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    /* Keep the guest-supplied length to clamp the copy-back. */
    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    /* Keep the guest-supplied length to clamp the copy-back. */
    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
2991 /* do_socketpair() Must return target values and target errnos. */
2992 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2993 abi_ulong target_tab_addr
)
2998 target_to_host_sock_type(&type
);
3000 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3001 if (!is_error(ret
)) {
3002 if (put_user_s32(tab
[0], target_tab_addr
)
3003 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3004 ret
= -TARGET_EFAULT
;
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* original locked buffer when a translator runs */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translator may rewrite the payload: work on a heap copy and
         * remember the locked original for unlock. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 leaves room for the NUL that AF_UNIX path fixup may need. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy, restore the locked original. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        /* Keep the guest-supplied length to clamp the copy-back. */
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Per-fd translator may rewrite the received payload. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy received bytes back to the guest buffer. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Failure: release the buffer without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Table tracking guest SysV shared-memory attachments so shmdt/munmap
 * can find the mapping's size again.
 * NOTE(review): field list reconstructed - confirm against original. */
static struct shm_region {
    abi_ulong start;   /* guest address the segment is attached at */
    abi_ulong size;    /* segment size in bytes */
    bool in_use;       /* slot occupied */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;       /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;       /* padding: times are 64-bit on 32-bit ABIs */
#endif
    abi_ulong sem_ctime;       /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;       /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Copy the sem_perm member of a guest semid64_ds at target_addr into a
 * host ipc_perm, byteswapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode/__seq widths differ per target ABI. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host ipc_perm into the sem_perm member of the guest semid64_ds
 * at target_addr, byteswapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode/__seq widths differ per target ABI. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3285 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3286 abi_ulong target_addr
)
3288 struct target_semid64_ds
*target_sd
;
3290 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3291 return -TARGET_EFAULT
;
3292 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3293 return -TARGET_EFAULT
;
3294 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3295 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3296 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3297 unlock_user_struct(target_sd
, target_addr
, 0);
3301 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3302 struct semid_ds
*host_sd
)
3304 struct target_semid64_ds
*target_sd
;
3306 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3307 return -TARGET_EFAULT
;
3308 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3309 return -TARGET_EFAULT
;
3310 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3311 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3312 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3313 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO results).
 * Field list matches the __put_user copies in host_to_target_seminfo(). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host seminfo into the guest target_seminfo at target_addr,
 * field by field with byteswap.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
    int val;                 /* SETVAL value */
    struct semid_ds *buf;    /* IPC_STAT / IPC_SET buffer */
    unsigned short *array;   /* GETALL / SETALL array */
    struct seminfo *__buf;   /* IPC_INFO / SEM_INFO buffer */
};

/* Guest-side equivalent: pointers are guest addresses (abi_ulong). */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Read the guest's SETALL array of semaphore values into a freshly
 * allocated host array (*host_array, caller frees via
 * host_to_target_semarray()).  The element count is obtained from the
 * kernel via IPC_STAT.  Returns 0 or a negative target errno.
 */
static inline abi_long target_to_host_semarray(int semid,
                                               unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3400 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3401 unsigned short **host_array
)
3404 unsigned short *array
;
3406 struct semid_ds semid_ds
;
3409 semun
.buf
= &semid_ds
;
3411 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3413 return get_errno(ret
);
3415 nsems
= semid_ds
.sem_nsems
;
3417 array
= lock_user(VERIFY_WRITE
, target_addr
,
3418 nsems
*sizeof(unsigned short), 0);
3420 return -TARGET_EFAULT
;
3422 for(i
=0; i
<nsems
; i
++) {
3423 __put_user((*host_array
)[i
], &array
[i
]);
3425 g_free(*host_array
);
3426 unlock_user(array
, target_addr
, 1);
/* Emulate semctl(): translate the guest's semun argument per command,
 * call the host semctl, and translate results back.
 * Returns the host result or a negative target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* Commands that ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest layout of struct sembuf (one semop() operation).
 * Field list matches the __get_user copies in target_to_host_sembuf(). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: add/subtract/wait-for-zero */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO */
};
/* Copy nsops guest sembuf entries at target_addr into the host array
 * host_sembuf, byteswapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/* Emulate semop() via semtimedop with no timeout. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    /* NOTE(review): VLA sized by a guest-controlled nsops; an enormous
     * value could overflow the host stack before the kernel rejects it
     * with E2BIG - consider bounding or heap-allocating.  TODO confirm
     * what upper bound callers already enforce. */
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest layout of struct msqid64_ds (asm-generic).
 * NOTE(review): msg_qnum was lost in extraction and is reconstructed
 * from the converters below - confirm against original. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;       /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;       /* padding: times are 64-bit on 32-bit ABIs */
#endif
    abi_ulong msg_rtime;       /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;       /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;    /* current bytes in queue */
    abi_ulong msg_qnum;        /* current messages in queue */
    abi_ulong msg_qbytes;      /* max bytes allowed in queue */
    abi_ulong msg_lspid;       /* pid of last msgsnd */
    abi_ulong msg_lrpid;       /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3564 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3565 abi_ulong target_addr
)
3567 struct target_msqid_ds
*target_md
;
3569 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3570 return -TARGET_EFAULT
;
3571 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3572 return -TARGET_EFAULT
;
3573 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3574 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3575 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3576 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3577 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3578 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3579 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3580 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3581 unlock_user_struct(target_md
, target_addr
, 0);
3585 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3586 struct msqid_ds
*host_md
)
3588 struct target_msqid_ds
*target_md
;
3590 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3591 return -TARGET_EFAULT
;
3592 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3593 return -TARGET_EFAULT
;
3594 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3595 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3596 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3597 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3598 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3599 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3600 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3601 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3602 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (IPC_INFO/MSG_INFO results).
 * Field list matches the __put_user copies in host_to_target_msginfo(). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Copy a host msginfo into the guest target_msginfo at target_addr,
 * field by field with byteswap.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(): translate the guest buffer per command, call the
 * host msgctl, and translate results back.
 * Returns the host result or a negative target errno.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel ABI: INFO commands return a msginfo through the
         * msqid_ds pointer argument. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
3667 struct target_msgbuf
{
3672 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3673 ssize_t msgsz
, int msgflg
)
3675 struct target_msgbuf
*target_mb
;
3676 struct msgbuf
*host_mb
;
3680 return -TARGET_EINVAL
;
3683 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3684 return -TARGET_EFAULT
;
3685 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3687 unlock_user_struct(target_mb
, msgp
, 0);
3688 return -TARGET_ENOMEM
;
3690 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3691 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3692 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3694 unlock_user_struct(target_mb
, msgp
, 0);
3699 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3700 ssize_t msgsz
, abi_long msgtyp
,
3703 struct target_msgbuf
*target_mb
;
3705 struct msgbuf
*host_mb
;
3709 return -TARGET_EINVAL
;
3712 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3713 return -TARGET_EFAULT
;
3715 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3717 ret
= -TARGET_ENOMEM
;
3720 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3723 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3724 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3725 if (!target_mtext
) {
3726 ret
= -TARGET_EFAULT
;
3729 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3730 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3733 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3737 unlock_user_struct(target_mb
, msgp
, 1);
3742 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3743 abi_ulong target_addr
)
3745 struct target_shmid_ds
*target_sd
;
3747 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3748 return -TARGET_EFAULT
;
3749 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3750 return -TARGET_EFAULT
;
3751 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3752 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3753 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3754 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3755 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3756 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3757 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3758 unlock_user_struct(target_sd
, target_addr
, 0);
3762 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3763 struct shmid_ds
*host_sd
)
3765 struct target_shmid_ds
*target_sd
;
3767 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3768 return -TARGET_EFAULT
;
3769 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3770 return -TARGET_EFAULT
;
3771 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3772 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3773 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3774 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3775 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3776 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3777 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3778 unlock_user_struct(target_sd
, target_addr
, 1);
3782 struct target_shminfo
{
3790 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3791 struct shminfo
*host_shminfo
)
3793 struct target_shminfo
*target_shminfo
;
3794 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3795 return -TARGET_EFAULT
;
3796 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3797 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3798 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3799 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3800 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3801 unlock_user_struct(target_shminfo
, target_addr
, 1);
3805 struct target_shm_info
{
3810 abi_ulong swap_attempts
;
3811 abi_ulong swap_successes
;
3814 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3815 struct shm_info
*host_shm_info
)
3817 struct target_shm_info
*target_shm_info
;
3818 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3819 return -TARGET_EFAULT
;
3820 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3821 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3822 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3823 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3824 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3825 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3826 unlock_user_struct(target_shm_info
, target_addr
, 1);
3830 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3832 struct shmid_ds dsarg
;
3833 struct shminfo shminfo
;
3834 struct shm_info shm_info
;
3835 abi_long ret
= -TARGET_EINVAL
;
3843 if (target_to_host_shmid_ds(&dsarg
, buf
))
3844 return -TARGET_EFAULT
;
3845 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3846 if (host_to_target_shmid_ds(buf
, &dsarg
))
3847 return -TARGET_EFAULT
;
3850 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3851 if (host_to_target_shminfo(buf
, &shminfo
))
3852 return -TARGET_EFAULT
;
3855 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3856 if (host_to_target_shm_info(buf
, &shm_info
))
3857 return -TARGET_EFAULT
;
3862 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3869 #ifndef TARGET_FORCE_SHMLBA
3870 /* For most architectures, SHMLBA is the same as the page size;
3871 * some architectures have larger values, in which case they should
3872 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3873 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3874 * and defining its own value for SHMLBA.
3876 * The kernel also permits SHMLBA to be set by the architecture to a
3877 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3878 * this means that addresses are rounded to the large size if
3879 * SHM_RND is set but addresses not aligned to that size are not rejected
3880 * as long as they are at least page-aligned. Since the only architecture
3881 * which uses this is ia64 this code doesn't provide for that oddity.
3883 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
3885 return TARGET_PAGE_SIZE
;
3889 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
3890 int shmid
, abi_ulong shmaddr
, int shmflg
)
3894 struct shmid_ds shm_info
;
3898 /* find out the length of the shared memory segment */
3899 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
3900 if (is_error(ret
)) {
3901 /* can't get length, bail out */
3905 shmlba
= target_shmlba(cpu_env
);
3907 if (shmaddr
& (shmlba
- 1)) {
3908 if (shmflg
& SHM_RND
) {
3909 shmaddr
&= ~(shmlba
- 1);
3911 return -TARGET_EINVAL
;
3914 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
3915 return -TARGET_EINVAL
;
3921 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
3923 abi_ulong mmap_start
;
3925 /* In order to use the host shmat, we need to honor host SHMLBA. */
3926 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
3928 if (mmap_start
== -1) {
3930 host_raddr
= (void *)-1;
3932 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
3935 if (host_raddr
== (void *)-1) {
3937 return get_errno((long)host_raddr
);
3939 raddr
=h2g((unsigned long)host_raddr
);
3941 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
3942 PAGE_VALID
| PAGE_READ
|
3943 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
3945 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
3946 if (!shm_regions
[i
].in_use
) {
3947 shm_regions
[i
].in_use
= true;
3948 shm_regions
[i
].start
= raddr
;
3949 shm_regions
[i
].size
= shm_info
.shm_segsz
;
3959 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
3966 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
3967 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
3968 shm_regions
[i
].in_use
= false;
3969 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
3973 rv
= get_errno(shmdt(g2h(shmaddr
)));
3980 #ifdef TARGET_NR_ipc
3981 /* ??? This only works with linear mappings. */
3982 /* do_ipc() must return target values and target errnos. */
3983 static abi_long
do_ipc(CPUArchState
*cpu_env
,
3984 unsigned int call
, abi_long first
,
3985 abi_long second
, abi_long third
,
3986 abi_long ptr
, abi_long fifth
)
3991 version
= call
>> 16;
3996 ret
= do_semop(first
, ptr
, second
);
4000 ret
= get_errno(semget(first
, second
, third
));
4003 case IPCOP_semctl
: {
4004 /* The semun argument to semctl is passed by value, so dereference the
4007 get_user_ual(atptr
, ptr
);
4008 ret
= do_semctl(first
, second
, third
, atptr
);
4013 ret
= get_errno(msgget(first
, second
));
4017 ret
= do_msgsnd(first
, ptr
, second
, third
);
4021 ret
= do_msgctl(first
, second
, ptr
);
4028 struct target_ipc_kludge
{
4033 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4034 ret
= -TARGET_EFAULT
;
4038 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4040 unlock_user_struct(tmp
, ptr
, 0);
4044 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4053 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4054 if (is_error(raddr
))
4055 return get_errno(raddr
);
4056 if (put_user_ual(raddr
, third
))
4057 return -TARGET_EFAULT
;
4061 ret
= -TARGET_EINVAL
;
4066 ret
= do_shmdt(ptr
);
4070 /* IPC_* flag values are the same on all linux platforms */
4071 ret
= get_errno(shmget(first
, second
, third
));
4074 /* IPC_* and SHM_* command values are the same on all linux platforms */
4076 ret
= do_shmctl(first
, second
, ptr
);
4079 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4080 ret
= -TARGET_ENOSYS
;
4087 /* kernel structure types definitions */
4089 #define STRUCT(name, ...) STRUCT_ ## name,
4090 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4092 #include "syscall_types.h"
4096 #undef STRUCT_SPECIAL
4098 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4099 #define STRUCT_SPECIAL(name)
4100 #include "syscall_types.h"
4102 #undef STRUCT_SPECIAL
4104 typedef struct IOCTLEntry IOCTLEntry
;
4106 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4107 int fd
, int cmd
, abi_long arg
);
4111 unsigned int host_cmd
;
4114 do_ioctl_fn
*do_ioctl
;
4115 const argtype arg_type
[5];
4118 #define IOC_R 0x0001
4119 #define IOC_W 0x0002
4120 #define IOC_RW (IOC_R | IOC_W)
4122 #define MAX_STRUCT_SIZE 4096
4124 #ifdef CONFIG_FIEMAP
4125 /* So fiemap access checks don't overflow on 32 bit systems.
4126 * This is very slightly smaller than the limit imposed by
4127 * the underlying kernel.
4129 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4130 / sizeof(struct fiemap_extent))
4132 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4133 int fd
, int cmd
, abi_long arg
)
4135 /* The parameter for this ioctl is a struct fiemap followed
4136 * by an array of struct fiemap_extent whose size is set
4137 * in fiemap->fm_extent_count. The array is filled in by the
4140 int target_size_in
, target_size_out
;
4142 const argtype
*arg_type
= ie
->arg_type
;
4143 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4146 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4150 assert(arg_type
[0] == TYPE_PTR
);
4151 assert(ie
->access
== IOC_RW
);
4153 target_size_in
= thunk_type_size(arg_type
, 0);
4154 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4156 return -TARGET_EFAULT
;
4158 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4159 unlock_user(argptr
, arg
, 0);
4160 fm
= (struct fiemap
*)buf_temp
;
4161 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4162 return -TARGET_EINVAL
;
4165 outbufsz
= sizeof (*fm
) +
4166 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4168 if (outbufsz
> MAX_STRUCT_SIZE
) {
4169 /* We can't fit all the extents into the fixed size buffer.
4170 * Allocate one that is large enough and use it instead.
4172 fm
= g_try_malloc(outbufsz
);
4174 return -TARGET_ENOMEM
;
4176 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4179 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4180 if (!is_error(ret
)) {
4181 target_size_out
= target_size_in
;
4182 /* An extent_count of 0 means we were only counting the extents
4183 * so there are no structs to copy
4185 if (fm
->fm_extent_count
!= 0) {
4186 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4188 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4190 ret
= -TARGET_EFAULT
;
4192 /* Convert the struct fiemap */
4193 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4194 if (fm
->fm_extent_count
!= 0) {
4195 p
= argptr
+ target_size_in
;
4196 /* ...and then all the struct fiemap_extents */
4197 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4198 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4203 unlock_user(argptr
, arg
, target_size_out
);
4213 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4214 int fd
, int cmd
, abi_long arg
)
4216 const argtype
*arg_type
= ie
->arg_type
;
4220 struct ifconf
*host_ifconf
;
4222 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4223 int target_ifreq_size
;
4228 abi_long target_ifc_buf
;
4232 assert(arg_type
[0] == TYPE_PTR
);
4233 assert(ie
->access
== IOC_RW
);
4236 target_size
= thunk_type_size(arg_type
, 0);
4238 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4240 return -TARGET_EFAULT
;
4241 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4242 unlock_user(argptr
, arg
, 0);
4244 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4245 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4246 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4248 if (target_ifc_buf
!= 0) {
4249 target_ifc_len
= host_ifconf
->ifc_len
;
4250 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4251 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4253 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4254 if (outbufsz
> MAX_STRUCT_SIZE
) {
4256 * We can't fit all the extents into the fixed size buffer.
4257 * Allocate one that is large enough and use it instead.
4259 host_ifconf
= malloc(outbufsz
);
4261 return -TARGET_ENOMEM
;
4263 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4266 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4268 host_ifconf
->ifc_len
= host_ifc_len
;
4270 host_ifc_buf
= NULL
;
4272 host_ifconf
->ifc_buf
= host_ifc_buf
;
4274 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4275 if (!is_error(ret
)) {
4276 /* convert host ifc_len to target ifc_len */
4278 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4279 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4280 host_ifconf
->ifc_len
= target_ifc_len
;
4282 /* restore target ifc_buf */
4284 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4286 /* copy struct ifconf to target user */
4288 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4290 return -TARGET_EFAULT
;
4291 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4292 unlock_user(argptr
, arg
, target_size
);
4294 if (target_ifc_buf
!= 0) {
4295 /* copy ifreq[] to target user */
4296 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4297 for (i
= 0; i
< nb_ifreq
; i
++) {
4298 thunk_convert(argptr
+ i
* target_ifreq_size
,
4299 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4300 ifreq_arg_type
, THUNK_TARGET
);
4302 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4313 #if defined(CONFIG_USBFS)
4314 #if HOST_LONG_BITS > 64
4315 #error USBDEVFS thunks do not support >64 bit hosts yet.
4318 uint64_t target_urb_adr
;
4319 uint64_t target_buf_adr
;
4320 char *target_buf_ptr
;
4321 struct usbdevfs_urb host_urb
;
4324 static GHashTable
*usbdevfs_urb_hashtable(void)
4326 static GHashTable
*urb_hashtable
;
4328 if (!urb_hashtable
) {
4329 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4331 return urb_hashtable
;
4334 static void urb_hashtable_insert(struct live_urb
*urb
)
4336 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4337 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4340 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4342 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4343 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4346 static void urb_hashtable_remove(struct live_urb
*urb
)
4348 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4349 g_hash_table_remove(urb_hashtable
, urb
);
4353 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4354 int fd
, int cmd
, abi_long arg
)
4356 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4357 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4358 struct live_urb
*lurb
;
4362 uintptr_t target_urb_adr
;
4365 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4367 memset(buf_temp
, 0, sizeof(uint64_t));
4368 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4369 if (is_error(ret
)) {
4373 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4374 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4375 if (!lurb
->target_urb_adr
) {
4376 return -TARGET_EFAULT
;
4378 urb_hashtable_remove(lurb
);
4379 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4380 lurb
->host_urb
.buffer_length
);
4381 lurb
->target_buf_ptr
= NULL
;
4383 /* restore the guest buffer pointer */
4384 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4386 /* update the guest urb struct */
4387 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4390 return -TARGET_EFAULT
;
4392 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4393 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4395 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4396 /* write back the urb handle */
4397 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4400 return -TARGET_EFAULT
;
4403 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4404 target_urb_adr
= lurb
->target_urb_adr
;
4405 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4406 unlock_user(argptr
, arg
, target_size
);
4413 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4414 uint8_t *buf_temp
__attribute__((unused
)),
4415 int fd
, int cmd
, abi_long arg
)
4417 struct live_urb
*lurb
;
4419 /* map target address back to host URB with metadata. */
4420 lurb
= urb_hashtable_lookup(arg
);
4422 return -TARGET_EFAULT
;
4424 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4428 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4429 int fd
, int cmd
, abi_long arg
)
4431 const argtype
*arg_type
= ie
->arg_type
;
4436 struct live_urb
*lurb
;
4439 * each submitted URB needs to map to a unique ID for the
4440 * kernel, and that unique ID needs to be a pointer to
4441 * host memory. hence, we need to malloc for each URB.
4442 * isochronous transfers have a variable length struct.
4445 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4447 /* construct host copy of urb and metadata */
4448 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4450 return -TARGET_ENOMEM
;
4453 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4456 return -TARGET_EFAULT
;
4458 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4459 unlock_user(argptr
, arg
, 0);
4461 lurb
->target_urb_adr
= arg
;
4462 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4464 /* buffer space used depends on endpoint type so lock the entire buffer */
4465 /* control type urbs should check the buffer contents for true direction */
4466 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4467 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4468 lurb
->host_urb
.buffer_length
, 1);
4469 if (lurb
->target_buf_ptr
== NULL
) {
4471 return -TARGET_EFAULT
;
4474 /* update buffer pointer in host copy */
4475 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4477 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4478 if (is_error(ret
)) {
4479 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4482 urb_hashtable_insert(lurb
);
4487 #endif /* CONFIG_USBFS */
4489 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4490 int cmd
, abi_long arg
)
4493 struct dm_ioctl
*host_dm
;
4494 abi_long guest_data
;
4495 uint32_t guest_data_size
;
4497 const argtype
*arg_type
= ie
->arg_type
;
4499 void *big_buf
= NULL
;
4503 target_size
= thunk_type_size(arg_type
, 0);
4504 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4506 ret
= -TARGET_EFAULT
;
4509 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4510 unlock_user(argptr
, arg
, 0);
4512 /* buf_temp is too small, so fetch things into a bigger buffer */
4513 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4514 memcpy(big_buf
, buf_temp
, target_size
);
4518 guest_data
= arg
+ host_dm
->data_start
;
4519 if ((guest_data
- arg
) < 0) {
4520 ret
= -TARGET_EINVAL
;
4523 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4524 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4526 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4528 ret
= -TARGET_EFAULT
;
4532 switch (ie
->host_cmd
) {
4534 case DM_LIST_DEVICES
:
4537 case DM_DEV_SUSPEND
:
4540 case DM_TABLE_STATUS
:
4541 case DM_TABLE_CLEAR
:
4543 case DM_LIST_VERSIONS
:
4547 case DM_DEV_SET_GEOMETRY
:
4548 /* data contains only strings */
4549 memcpy(host_data
, argptr
, guest_data_size
);
4552 memcpy(host_data
, argptr
, guest_data_size
);
4553 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4557 void *gspec
= argptr
;
4558 void *cur_data
= host_data
;
4559 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4560 int spec_size
= thunk_type_size(arg_type
, 0);
4563 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4564 struct dm_target_spec
*spec
= cur_data
;
4568 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4569 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4571 spec
->next
= sizeof(*spec
) + slen
;
4572 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4574 cur_data
+= spec
->next
;
4579 ret
= -TARGET_EINVAL
;
4580 unlock_user(argptr
, guest_data
, 0);
4583 unlock_user(argptr
, guest_data
, 0);
4585 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4586 if (!is_error(ret
)) {
4587 guest_data
= arg
+ host_dm
->data_start
;
4588 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4589 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4590 switch (ie
->host_cmd
) {
4595 case DM_DEV_SUSPEND
:
4598 case DM_TABLE_CLEAR
:
4600 case DM_DEV_SET_GEOMETRY
:
4601 /* no return data */
4603 case DM_LIST_DEVICES
:
4605 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4606 uint32_t remaining_data
= guest_data_size
;
4607 void *cur_data
= argptr
;
4608 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4609 int nl_size
= 12; /* can't use thunk_size due to alignment */
4612 uint32_t next
= nl
->next
;
4614 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4616 if (remaining_data
< nl
->next
) {
4617 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4620 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4621 strcpy(cur_data
+ nl_size
, nl
->name
);
4622 cur_data
+= nl
->next
;
4623 remaining_data
-= nl
->next
;
4627 nl
= (void*)nl
+ next
;
4632 case DM_TABLE_STATUS
:
4634 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4635 void *cur_data
= argptr
;
4636 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4637 int spec_size
= thunk_type_size(arg_type
, 0);
4640 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4641 uint32_t next
= spec
->next
;
4642 int slen
= strlen((char*)&spec
[1]) + 1;
4643 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4644 if (guest_data_size
< spec
->next
) {
4645 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4648 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4649 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4650 cur_data
= argptr
+ spec
->next
;
4651 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4657 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4658 int count
= *(uint32_t*)hdata
;
4659 uint64_t *hdev
= hdata
+ 8;
4660 uint64_t *gdev
= argptr
+ 8;
4663 *(uint32_t*)argptr
= tswap32(count
);
4664 for (i
= 0; i
< count
; i
++) {
4665 *gdev
= tswap64(*hdev
);
4671 case DM_LIST_VERSIONS
:
4673 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4674 uint32_t remaining_data
= guest_data_size
;
4675 void *cur_data
= argptr
;
4676 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4677 int vers_size
= thunk_type_size(arg_type
, 0);
4680 uint32_t next
= vers
->next
;
4682 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4684 if (remaining_data
< vers
->next
) {
4685 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4688 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4689 strcpy(cur_data
+ vers_size
, vers
->name
);
4690 cur_data
+= vers
->next
;
4691 remaining_data
-= vers
->next
;
4695 vers
= (void*)vers
+ next
;
4700 unlock_user(argptr
, guest_data
, 0);
4701 ret
= -TARGET_EINVAL
;
4704 unlock_user(argptr
, guest_data
, guest_data_size
);
4706 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4708 ret
= -TARGET_EFAULT
;
4711 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4712 unlock_user(argptr
, arg
, target_size
);
4719 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4720 int cmd
, abi_long arg
)
4724 const argtype
*arg_type
= ie
->arg_type
;
4725 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4728 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4729 struct blkpg_partition host_part
;
4731 /* Read and convert blkpg */
4733 target_size
= thunk_type_size(arg_type
, 0);
4734 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4736 ret
= -TARGET_EFAULT
;
4739 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4740 unlock_user(argptr
, arg
, 0);
4742 switch (host_blkpg
->op
) {
4743 case BLKPG_ADD_PARTITION
:
4744 case BLKPG_DEL_PARTITION
:
4745 /* payload is struct blkpg_partition */
4748 /* Unknown opcode */
4749 ret
= -TARGET_EINVAL
;
4753 /* Read and convert blkpg->data */
4754 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4755 target_size
= thunk_type_size(part_arg_type
, 0);
4756 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4758 ret
= -TARGET_EFAULT
;
4761 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4762 unlock_user(argptr
, arg
, 0);
4764 /* Swizzle the data pointer to our local copy and call! */
4765 host_blkpg
->data
= &host_part
;
4766 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4772 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4773 int fd
, int cmd
, abi_long arg
)
4775 const argtype
*arg_type
= ie
->arg_type
;
4776 const StructEntry
*se
;
4777 const argtype
*field_types
;
4778 const int *dst_offsets
, *src_offsets
;
4781 abi_ulong
*target_rt_dev_ptr
= NULL
;
4782 unsigned long *host_rt_dev_ptr
= NULL
;
4786 assert(ie
->access
== IOC_W
);
4787 assert(*arg_type
== TYPE_PTR
);
4789 assert(*arg_type
== TYPE_STRUCT
);
4790 target_size
= thunk_type_size(arg_type
, 0);
4791 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4793 return -TARGET_EFAULT
;
4796 assert(*arg_type
== (int)STRUCT_rtentry
);
4797 se
= struct_entries
+ *arg_type
++;
4798 assert(se
->convert
[0] == NULL
);
4799 /* convert struct here to be able to catch rt_dev string */
4800 field_types
= se
->field_types
;
4801 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4802 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4803 for (i
= 0; i
< se
->nb_fields
; i
++) {
4804 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4805 assert(*field_types
== TYPE_PTRVOID
);
4806 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4807 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4808 if (*target_rt_dev_ptr
!= 0) {
4809 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4810 tswapal(*target_rt_dev_ptr
));
4811 if (!*host_rt_dev_ptr
) {
4812 unlock_user(argptr
, arg
, 0);
4813 return -TARGET_EFAULT
;
4816 *host_rt_dev_ptr
= 0;
4821 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4822 argptr
+ src_offsets
[i
],
4823 field_types
, THUNK_HOST
);
4825 unlock_user(argptr
, arg
, 0);
4827 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4829 assert(host_rt_dev_ptr
!= NULL
);
4830 assert(target_rt_dev_ptr
!= NULL
);
4831 if (*host_rt_dev_ptr
!= 0) {
4832 unlock_user((void *)*host_rt_dev_ptr
,
4833 *target_rt_dev_ptr
, 0);
4838 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4839 int fd
, int cmd
, abi_long arg
)
4841 int sig
= target_to_host_signal(arg
);
4842 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4846 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4847 int fd
, int cmd
, abi_long arg
)
4849 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
4850 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
4854 static IOCTLEntry ioctl_entries
[] = {
4855 #define IOCTL(cmd, access, ...) \
4856 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4857 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4858 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4859 #define IOCTL_IGNORE(cmd) \
4860 { TARGET_ ## cmd, 0, #cmd },
4865 /* ??? Implement proper locking for ioctls. */
4866 /* do_ioctl() Must return target values and target errnos. */
4867 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
4869 const IOCTLEntry
*ie
;
4870 const argtype
*arg_type
;
4872 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
4878 if (ie
->target_cmd
== 0) {
4879 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
4880 return -TARGET_ENOSYS
;
4882 if (ie
->target_cmd
== cmd
)
4886 arg_type
= ie
->arg_type
;
4888 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
4889 } else if (!ie
->host_cmd
) {
4890 /* Some architectures define BSD ioctls in their headers
4891 that are not implemented in Linux. */
4892 return -TARGET_ENOSYS
;
4895 switch(arg_type
[0]) {
4898 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
4902 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
4906 target_size
= thunk_type_size(arg_type
, 0);
4907 switch(ie
->access
) {
4909 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4910 if (!is_error(ret
)) {
4911 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4913 return -TARGET_EFAULT
;
4914 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4915 unlock_user(argptr
, arg
, target_size
);
4919 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4921 return -TARGET_EFAULT
;
4922 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4923 unlock_user(argptr
, arg
, 0);
4924 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4928 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4930 return -TARGET_EFAULT
;
4931 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4932 unlock_user(argptr
, arg
, 0);
4933 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4934 if (!is_error(ret
)) {
4935 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4937 return -TARGET_EFAULT
;
4938 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4939 unlock_user(argptr
, arg
, target_size
);
4945 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4946 (long)cmd
, arg_type
[0]);
4947 ret
= -TARGET_ENOSYS
;
4953 static const bitmask_transtbl iflag_tbl
[] = {
4954 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
4955 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
4956 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
4957 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
4958 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
4959 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
4960 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
4961 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
4962 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
4963 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
4964 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
4965 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
4966 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
4967 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
4971 static const bitmask_transtbl oflag_tbl
[] = {
4972 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
4973 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
4974 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
4975 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
4976 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
4977 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
4978 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
4979 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
4980 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
4981 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
4982 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
4983 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
4984 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
4985 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
4986 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
4987 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
4988 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
4989 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
4990 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
4991 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
4992 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
4993 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
4994 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
4995 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
4999 static const bitmask_transtbl cflag_tbl
[] = {
5000 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5001 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5002 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5003 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5004 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5005 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5006 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5007 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5008 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5009 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5010 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5011 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5012 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5013 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5014 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5015 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5016 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5017 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5018 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5019 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5020 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5021 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5022 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5023 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5024 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5025 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5026 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5027 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5028 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5029 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5030 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5034 static const bitmask_transtbl lflag_tbl
[] = {
5035 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5036 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5037 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5038 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5039 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5040 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5041 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5042 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5043 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5044 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5045 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5046 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5047 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5048 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5049 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5053 static void target_to_host_termios (void *dst
, const void *src
)
5055 struct host_termios
*host
= dst
;
5056 const struct target_termios
*target
= src
;
5059 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5061 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5063 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5065 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5066 host
->c_line
= target
->c_line
;
5068 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5069 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5070 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5071 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5072 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5073 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5074 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5075 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5076 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5077 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5078 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5079 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5080 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5081 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5082 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5083 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5084 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5085 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5088 static void host_to_target_termios (void *dst
, const void *src
)
5090 struct target_termios
*target
= dst
;
5091 const struct host_termios
*host
= src
;
5094 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5096 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5098 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5100 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5101 target
->c_line
= host
->c_line
;
5103 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5104 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5105 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5106 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5107 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5108 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5109 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5110 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5111 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5112 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5113 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5114 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5115 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5116 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5117 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5118 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5119 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5120 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5123 static const StructEntry struct_termios_def
= {
5124 .convert
= { host_to_target_termios
, target_to_host_termios
},
5125 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5126 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5129 static bitmask_transtbl mmap_flags_tbl
[] = {
5130 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5131 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5132 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5133 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5134 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5135 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5136 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5137 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5138 MAP_DENYWRITE
, MAP_DENYWRITE
},
5139 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5140 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5141 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5142 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5143 MAP_NORESERVE
, MAP_NORESERVE
},
5144 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5145 /* MAP_STACK had been ignored by the kernel for quite some time.
5146 Recognize it for the target insofar as we do not want to pass
5147 it through to the host. */
5148 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5152 #if defined(TARGET_I386)
5154 /* NOTE: there is really one LDT for all the threads */
5155 static uint8_t *ldt_table
;
5157 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5164 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5165 if (size
> bytecount
)
5167 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5169 return -TARGET_EFAULT
;
5170 /* ??? Should this by byteswapped? */
5171 memcpy(p
, ldt_table
, size
);
5172 unlock_user(p
, ptr
, size
);
5176 /* XXX: add locking support */
5177 static abi_long
write_ldt(CPUX86State
*env
,
5178 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5180 struct target_modify_ldt_ldt_s ldt_info
;
5181 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5182 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5183 int seg_not_present
, useable
, lm
;
5184 uint32_t *lp
, entry_1
, entry_2
;
5186 if (bytecount
!= sizeof(ldt_info
))
5187 return -TARGET_EINVAL
;
5188 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5189 return -TARGET_EFAULT
;
5190 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5191 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5192 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5193 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5194 unlock_user_struct(target_ldt_info
, ptr
, 0);
5196 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5197 return -TARGET_EINVAL
;
5198 seg_32bit
= ldt_info
.flags
& 1;
5199 contents
= (ldt_info
.flags
>> 1) & 3;
5200 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5201 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5202 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5203 useable
= (ldt_info
.flags
>> 6) & 1;
5207 lm
= (ldt_info
.flags
>> 7) & 1;
5209 if (contents
== 3) {
5211 return -TARGET_EINVAL
;
5212 if (seg_not_present
== 0)
5213 return -TARGET_EINVAL
;
5215 /* allocate the LDT */
5217 env
->ldt
.base
= target_mmap(0,
5218 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5219 PROT_READ
|PROT_WRITE
,
5220 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5221 if (env
->ldt
.base
== -1)
5222 return -TARGET_ENOMEM
;
5223 memset(g2h(env
->ldt
.base
), 0,
5224 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5225 env
->ldt
.limit
= 0xffff;
5226 ldt_table
= g2h(env
->ldt
.base
);
5229 /* NOTE: same code as Linux kernel */
5230 /* Allow LDTs to be cleared by the user. */
5231 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5234 read_exec_only
== 1 &&
5236 limit_in_pages
== 0 &&
5237 seg_not_present
== 1 &&
5245 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5246 (ldt_info
.limit
& 0x0ffff);
5247 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5248 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5249 (ldt_info
.limit
& 0xf0000) |
5250 ((read_exec_only
^ 1) << 9) |
5252 ((seg_not_present
^ 1) << 15) |
5254 (limit_in_pages
<< 23) |
5258 entry_2
|= (useable
<< 20);
5260 /* Install the new entry ... */
5262 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5263 lp
[0] = tswap32(entry_1
);
5264 lp
[1] = tswap32(entry_2
);
5268 /* specific and weird i386 syscalls */
5269 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5270 unsigned long bytecount
)
5276 ret
= read_ldt(ptr
, bytecount
);
5279 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5282 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5285 ret
= -TARGET_ENOSYS
;
5291 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5292 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5294 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5295 struct target_modify_ldt_ldt_s ldt_info
;
5296 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5297 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5298 int seg_not_present
, useable
, lm
;
5299 uint32_t *lp
, entry_1
, entry_2
;
5302 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5303 if (!target_ldt_info
)
5304 return -TARGET_EFAULT
;
5305 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5306 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5307 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5308 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5309 if (ldt_info
.entry_number
== -1) {
5310 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5311 if (gdt_table
[i
] == 0) {
5312 ldt_info
.entry_number
= i
;
5313 target_ldt_info
->entry_number
= tswap32(i
);
5318 unlock_user_struct(target_ldt_info
, ptr
, 1);
5320 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5321 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5322 return -TARGET_EINVAL
;
5323 seg_32bit
= ldt_info
.flags
& 1;
5324 contents
= (ldt_info
.flags
>> 1) & 3;
5325 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5326 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5327 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5328 useable
= (ldt_info
.flags
>> 6) & 1;
5332 lm
= (ldt_info
.flags
>> 7) & 1;
5335 if (contents
== 3) {
5336 if (seg_not_present
== 0)
5337 return -TARGET_EINVAL
;
5340 /* NOTE: same code as Linux kernel */
5341 /* Allow LDTs to be cleared by the user. */
5342 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5343 if ((contents
== 0 &&
5344 read_exec_only
== 1 &&
5346 limit_in_pages
== 0 &&
5347 seg_not_present
== 1 &&
5355 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5356 (ldt_info
.limit
& 0x0ffff);
5357 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5358 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5359 (ldt_info
.limit
& 0xf0000) |
5360 ((read_exec_only
^ 1) << 9) |
5362 ((seg_not_present
^ 1) << 15) |
5364 (limit_in_pages
<< 23) |
5369 /* Install the new entry ... */
5371 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5372 lp
[0] = tswap32(entry_1
);
5373 lp
[1] = tswap32(entry_2
);
5377 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5379 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5380 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5381 uint32_t base_addr
, limit
, flags
;
5382 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5383 int seg_not_present
, useable
, lm
;
5384 uint32_t *lp
, entry_1
, entry_2
;
5386 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5387 if (!target_ldt_info
)
5388 return -TARGET_EFAULT
;
5389 idx
= tswap32(target_ldt_info
->entry_number
);
5390 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5391 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5392 unlock_user_struct(target_ldt_info
, ptr
, 1);
5393 return -TARGET_EINVAL
;
5395 lp
= (uint32_t *)(gdt_table
+ idx
);
5396 entry_1
= tswap32(lp
[0]);
5397 entry_2
= tswap32(lp
[1]);
5399 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5400 contents
= (entry_2
>> 10) & 3;
5401 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5402 seg_32bit
= (entry_2
>> 22) & 1;
5403 limit_in_pages
= (entry_2
>> 23) & 1;
5404 useable
= (entry_2
>> 20) & 1;
5408 lm
= (entry_2
>> 21) & 1;
5410 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5411 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5412 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5413 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5414 base_addr
= (entry_1
>> 16) |
5415 (entry_2
& 0xff000000) |
5416 ((entry_2
& 0xff) << 16);
5417 target_ldt_info
->base_addr
= tswapal(base_addr
);
5418 target_ldt_info
->limit
= tswap32(limit
);
5419 target_ldt_info
->flags
= tswap32(flags
);
5420 unlock_user_struct(target_ldt_info
, ptr
, 1);
5423 #endif /* TARGET_I386 && TARGET_ABI32 */
5425 #ifndef TARGET_ABI32
5426 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5433 case TARGET_ARCH_SET_GS
:
5434 case TARGET_ARCH_SET_FS
:
5435 if (code
== TARGET_ARCH_SET_GS
)
5439 cpu_x86_load_seg(env
, idx
, 0);
5440 env
->segs
[idx
].base
= addr
;
5442 case TARGET_ARCH_GET_GS
:
5443 case TARGET_ARCH_GET_FS
:
5444 if (code
== TARGET_ARCH_GET_GS
)
5448 val
= env
->segs
[idx
].base
;
5449 if (put_user(val
, addr
, abi_ulong
))
5450 ret
= -TARGET_EFAULT
;
5453 ret
= -TARGET_EINVAL
;
5460 #endif /* defined(TARGET_I386) */
5462 #define NEW_STACK_SIZE 0x40000
5465 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5468 pthread_mutex_t mutex
;
5469 pthread_cond_t cond
;
5472 abi_ulong child_tidptr
;
5473 abi_ulong parent_tidptr
;
5477 static void *clone_func(void *arg
)
5479 new_thread_info
*info
= arg
;
5484 rcu_register_thread();
5485 tcg_register_thread();
5487 cpu
= ENV_GET_CPU(env
);
5489 ts
= (TaskState
*)cpu
->opaque
;
5490 info
->tid
= sys_gettid();
5492 if (info
->child_tidptr
)
5493 put_user_u32(info
->tid
, info
->child_tidptr
);
5494 if (info
->parent_tidptr
)
5495 put_user_u32(info
->tid
, info
->parent_tidptr
);
5496 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
5497 /* Enable signals. */
5498 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5499 /* Signal to the parent that we're ready. */
5500 pthread_mutex_lock(&info
->mutex
);
5501 pthread_cond_broadcast(&info
->cond
);
5502 pthread_mutex_unlock(&info
->mutex
);
5503 /* Wait until the parent has finished initializing the tls state. */
5504 pthread_mutex_lock(&clone_lock
);
5505 pthread_mutex_unlock(&clone_lock
);
5511 /* do_fork() Must return host values and target errnos (unlike most
5512 do_*() functions). */
5513 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5514 abi_ulong parent_tidptr
, target_ulong newtls
,
5515 abi_ulong child_tidptr
)
5517 CPUState
*cpu
= ENV_GET_CPU(env
);
5521 CPUArchState
*new_env
;
5524 flags
&= ~CLONE_IGNORED_FLAGS
;
5526 /* Emulate vfork() with fork() */
5527 if (flags
& CLONE_VFORK
)
5528 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5530 if (flags
& CLONE_VM
) {
5531 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5532 new_thread_info info
;
5533 pthread_attr_t attr
;
5535 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5536 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5537 return -TARGET_EINVAL
;
5540 ts
= g_new0(TaskState
, 1);
5541 init_task_state(ts
);
5543 /* Grab a mutex so that thread setup appears atomic. */
5544 pthread_mutex_lock(&clone_lock
);
5546 /* we create a new CPU instance. */
5547 new_env
= cpu_copy(env
);
5548 /* Init regs that differ from the parent. */
5549 cpu_clone_regs(new_env
, newsp
);
5550 new_cpu
= ENV_GET_CPU(new_env
);
5551 new_cpu
->opaque
= ts
;
5552 ts
->bprm
= parent_ts
->bprm
;
5553 ts
->info
= parent_ts
->info
;
5554 ts
->signal_mask
= parent_ts
->signal_mask
;
5556 if (flags
& CLONE_CHILD_CLEARTID
) {
5557 ts
->child_tidptr
= child_tidptr
;
5560 if (flags
& CLONE_SETTLS
) {
5561 cpu_set_tls (new_env
, newtls
);
5564 memset(&info
, 0, sizeof(info
));
5565 pthread_mutex_init(&info
.mutex
, NULL
);
5566 pthread_mutex_lock(&info
.mutex
);
5567 pthread_cond_init(&info
.cond
, NULL
);
5569 if (flags
& CLONE_CHILD_SETTID
) {
5570 info
.child_tidptr
= child_tidptr
;
5572 if (flags
& CLONE_PARENT_SETTID
) {
5573 info
.parent_tidptr
= parent_tidptr
;
5576 ret
= pthread_attr_init(&attr
);
5577 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5578 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5579 /* It is not safe to deliver signals until the child has finished
5580 initializing, so temporarily block all signals. */
5581 sigfillset(&sigmask
);
5582 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5583 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
5585 /* If this is our first additional thread, we need to ensure we
5586 * generate code for parallel execution and flush old translations.
5588 if (!parallel_cpus
) {
5589 parallel_cpus
= true;
5593 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5594 /* TODO: Free new CPU state if thread creation failed. */
5596 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5597 pthread_attr_destroy(&attr
);
5599 /* Wait for the child to initialize. */
5600 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5605 pthread_mutex_unlock(&info
.mutex
);
5606 pthread_cond_destroy(&info
.cond
);
5607 pthread_mutex_destroy(&info
.mutex
);
5608 pthread_mutex_unlock(&clone_lock
);
5610 /* if no CLONE_VM, we consider it is a fork */
5611 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5612 return -TARGET_EINVAL
;
5615 /* We can't support custom termination signals */
5616 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5617 return -TARGET_EINVAL
;
5620 if (block_signals()) {
5621 return -TARGET_ERESTARTSYS
;
5627 /* Child Process. */
5628 cpu_clone_regs(env
, newsp
);
5630 /* There is a race condition here. The parent process could
5631 theoretically read the TID in the child process before the child
5632 tid is set. This would require using either ptrace
5633 (not implemented) or having *_tidptr to point at a shared memory
5634 mapping. We can't repeat the spinlock hack used above because
5635 the child process gets its own copy of the lock. */
5636 if (flags
& CLONE_CHILD_SETTID
)
5637 put_user_u32(sys_gettid(), child_tidptr
);
5638 if (flags
& CLONE_PARENT_SETTID
)
5639 put_user_u32(sys_gettid(), parent_tidptr
);
5640 ts
= (TaskState
*)cpu
->opaque
;
5641 if (flags
& CLONE_SETTLS
)
5642 cpu_set_tls (env
, newtls
);
5643 if (flags
& CLONE_CHILD_CLEARTID
)
5644 ts
->child_tidptr
= child_tidptr
;
5652 /* warning : doesn't handle linux specific flags... */
5653 static int target_to_host_fcntl_cmd(int cmd
)
5658 case TARGET_F_DUPFD
:
5659 case TARGET_F_GETFD
:
5660 case TARGET_F_SETFD
:
5661 case TARGET_F_GETFL
:
5662 case TARGET_F_SETFL
:
5665 case TARGET_F_GETLK
:
5668 case TARGET_F_SETLK
:
5671 case TARGET_F_SETLKW
:
5674 case TARGET_F_GETOWN
:
5677 case TARGET_F_SETOWN
:
5680 case TARGET_F_GETSIG
:
5683 case TARGET_F_SETSIG
:
5686 #if TARGET_ABI_BITS == 32
5687 case TARGET_F_GETLK64
:
5690 case TARGET_F_SETLK64
:
5693 case TARGET_F_SETLKW64
:
5697 case TARGET_F_SETLEASE
:
5700 case TARGET_F_GETLEASE
:
5703 #ifdef F_DUPFD_CLOEXEC
5704 case TARGET_F_DUPFD_CLOEXEC
:
5705 ret
= F_DUPFD_CLOEXEC
;
5708 case TARGET_F_NOTIFY
:
5712 case TARGET_F_GETOWN_EX
:
5717 case TARGET_F_SETOWN_EX
:
5722 case TARGET_F_SETPIPE_SZ
:
5725 case TARGET_F_GETPIPE_SZ
:
5730 ret
= -TARGET_EINVAL
;
5734 #if defined(__powerpc64__)
5735 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5736 * is not supported by kernel. The glibc fcntl call actually adjusts
5737 * them to 5, 6 and 7 before making the syscall(). Since we make the
5738 * syscall directly, adjust to what is supported by the kernel.
5740 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5741 ret
-= F_GETLK64
- 5;
5748 #define FLOCK_TRANSTBL \
5750 TRANSTBL_CONVERT(F_RDLCK); \
5751 TRANSTBL_CONVERT(F_WRLCK); \
5752 TRANSTBL_CONVERT(F_UNLCK); \
5753 TRANSTBL_CONVERT(F_EXLCK); \
5754 TRANSTBL_CONVERT(F_SHLCK); \
5757 static int target_to_host_flock(int type
)
5759 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5761 #undef TRANSTBL_CONVERT
5762 return -TARGET_EINVAL
;
5765 static int host_to_target_flock(int type
)
5767 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5769 #undef TRANSTBL_CONVERT
5770 /* if we don't know how to convert the value coming
5771 * from the host we copy to the target field as-is
5776 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5777 abi_ulong target_flock_addr
)
5779 struct target_flock
*target_fl
;
5782 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5783 return -TARGET_EFAULT
;
5786 __get_user(l_type
, &target_fl
->l_type
);
5787 l_type
= target_to_host_flock(l_type
);
5791 fl
->l_type
= l_type
;
5792 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5793 __get_user(fl
->l_start
, &target_fl
->l_start
);
5794 __get_user(fl
->l_len
, &target_fl
->l_len
);
5795 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5796 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5800 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5801 const struct flock64
*fl
)
5803 struct target_flock
*target_fl
;
5806 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5807 return -TARGET_EFAULT
;
5810 l_type
= host_to_target_flock(fl
->l_type
);
5811 __put_user(l_type
, &target_fl
->l_type
);
5812 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5813 __put_user(fl
->l_start
, &target_fl
->l_start
);
5814 __put_user(fl
->l_len
, &target_fl
->l_len
);
5815 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5816 unlock_user_struct(target_fl
, target_flock_addr
, 1);
/* Function-pointer types used to select the right flock64 copy helper
   (regular vs. ARM OABI layout) at the fcntl64 call sites. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM OABI lays out struct flock64 differently from EABI, so the OABI
   variants use target_oabi_flock64; logic mirrors the regular helpers. */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
5869 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
5870 abi_ulong target_flock_addr
)
5872 struct target_flock64
*target_fl
;
5875 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5876 return -TARGET_EFAULT
;
5879 __get_user(l_type
, &target_fl
->l_type
);
5880 l_type
= target_to_host_flock(l_type
);
5884 fl
->l_type
= l_type
;
5885 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5886 __get_user(fl
->l_start
, &target_fl
->l_start
);
5887 __get_user(fl
->l_len
, &target_fl
->l_len
);
5888 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5889 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5893 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
5894 const struct flock64
*fl
)
5896 struct target_flock64
*target_fl
;
5899 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5900 return -TARGET_EFAULT
;
5903 l_type
= host_to_target_flock(fl
->l_type
);
5904 __put_user(l_type
, &target_fl
->l_type
);
5905 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5906 __put_user(fl
->l_start
, &target_fl
->l_start
);
5907 __put_user(fl
->l_len
, &target_fl
->l_len
);
5908 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5909 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5913 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
5915 struct flock64 fl64
;
5917 struct f_owner_ex fox
;
5918 struct target_f_owner_ex
*target_fox
;
5921 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
5923 if (host_cmd
== -TARGET_EINVAL
)
5927 case TARGET_F_GETLK
:
5928 ret
= copy_from_user_flock(&fl64
, arg
);
5932 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5934 ret
= copy_to_user_flock(arg
, &fl64
);
5938 case TARGET_F_SETLK
:
5939 case TARGET_F_SETLKW
:
5940 ret
= copy_from_user_flock(&fl64
, arg
);
5944 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5947 case TARGET_F_GETLK64
:
5948 ret
= copy_from_user_flock64(&fl64
, arg
);
5952 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5954 ret
= copy_to_user_flock64(arg
, &fl64
);
5957 case TARGET_F_SETLK64
:
5958 case TARGET_F_SETLKW64
:
5959 ret
= copy_from_user_flock64(&fl64
, arg
);
5963 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
5966 case TARGET_F_GETFL
:
5967 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
5969 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
5973 case TARGET_F_SETFL
:
5974 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
5975 target_to_host_bitmask(arg
,
5980 case TARGET_F_GETOWN_EX
:
5981 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
5983 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
5984 return -TARGET_EFAULT
;
5985 target_fox
->type
= tswap32(fox
.type
);
5986 target_fox
->pid
= tswap32(fox
.pid
);
5987 unlock_user_struct(target_fox
, arg
, 1);
5993 case TARGET_F_SETOWN_EX
:
5994 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
5995 return -TARGET_EFAULT
;
5996 fox
.type
= tswap32(target_fox
->type
);
5997 fox
.pid
= tswap32(target_fox
->pid
);
5998 unlock_user_struct(target_fox
, arg
, 0);
5999 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6003 case TARGET_F_SETOWN
:
6004 case TARGET_F_GETOWN
:
6005 case TARGET_F_SETSIG
:
6006 case TARGET_F_GETSIG
:
6007 case TARGET_F_SETLEASE
:
6008 case TARGET_F_GETLEASE
:
6009 case TARGET_F_SETPIPE_SZ
:
6010 case TARGET_F_GETPIPE_SZ
:
6011 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6015 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* 16-bit vs 32-bit uid/gid conversion helpers.  With USE_UID16 the
   target ABI carries 16-bit IDs: values above 65535 are clamped to the
   overflow id 65534 on the way down, and 16-bit -1 is sign-extended on
   the way up.  Without USE_UID16 these are identity functions. */
#ifdef USE_UID16
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif
6116 _syscall1(int, sys_setuid
, uid_t
, uid
)
6117 _syscall1(int, sys_setgid
, gid_t
, gid
)
6118 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6119 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6121 void syscall_init(void)
6124 const argtype
*arg_type
;
6128 thunk_init(STRUCT_MAX
);
6130 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6131 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6132 #include "syscall_types.h"
6134 #undef STRUCT_SPECIAL
6136 /* Build target_to_host_errno_table[] table from
6137 * host_to_target_errno_table[]. */
6138 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6139 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6142 /* we patch the ioctl size if necessary. We rely on the fact that
6143 no ioctl has all the bits at '1' in the size field */
6145 while (ie
->target_cmd
!= 0) {
6146 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6147 TARGET_IOC_SIZEMASK
) {
6148 arg_type
= ie
->arg_type
;
6149 if (arg_type
[0] != TYPE_PTR
) {
6150 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6155 size
= thunk_type_size(arg_type
, 0);
6156 ie
->target_cmd
= (ie
->target_cmd
&
6157 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6158 (size
<< TARGET_IOC_SIZESHIFT
);
6161 /* automatic consistency check if same arch */
6162 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6163 (defined(__x86_64__) && defined(TARGET_X86_64))
6164 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6165 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6166 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit file offset that a 32-bit guest ABI passes as two
 * 32-bit syscall arguments.  Which register holds the high half depends
 * on guest endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    /* Big-endian guests pass the high word first. */
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6189 #ifdef TARGET_NR_truncate64
6190 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6195 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
6199 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6203 #ifdef TARGET_NR_ftruncate64
6204 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6209 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6213 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6217 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6218 abi_ulong target_addr
)
6220 struct target_timespec
*target_ts
;
6222 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6223 return -TARGET_EFAULT
;
6224 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6225 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6226 unlock_user_struct(target_ts
, target_addr
, 0);
6230 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6231 struct timespec
*host_ts
)
6233 struct target_timespec
*target_ts
;
6235 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6236 return -TARGET_EFAULT
;
6237 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6238 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6239 unlock_user_struct(target_ts
, target_addr
, 1);
6243 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6244 abi_ulong target_addr
)
6246 struct target_itimerspec
*target_itspec
;
6248 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6249 return -TARGET_EFAULT
;
6252 host_itspec
->it_interval
.tv_sec
=
6253 tswapal(target_itspec
->it_interval
.tv_sec
);
6254 host_itspec
->it_interval
.tv_nsec
=
6255 tswapal(target_itspec
->it_interval
.tv_nsec
);
6256 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6257 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6259 unlock_user_struct(target_itspec
, target_addr
, 1);
6263 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6264 struct itimerspec
*host_its
)
6266 struct target_itimerspec
*target_itspec
;
6268 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6269 return -TARGET_EFAULT
;
6272 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6273 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6275 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6276 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6278 unlock_user_struct(target_itspec
, target_addr
, 0);
6282 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6283 abi_long target_addr
)
6285 struct target_timex
*target_tx
;
6287 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6288 return -TARGET_EFAULT
;
6291 __get_user(host_tx
->modes
, &target_tx
->modes
);
6292 __get_user(host_tx
->offset
, &target_tx
->offset
);
6293 __get_user(host_tx
->freq
, &target_tx
->freq
);
6294 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6295 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6296 __get_user(host_tx
->status
, &target_tx
->status
);
6297 __get_user(host_tx
->constant
, &target_tx
->constant
);
6298 __get_user(host_tx
->precision
, &target_tx
->precision
);
6299 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6300 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6301 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6302 __get_user(host_tx
->tick
, &target_tx
->tick
);
6303 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6304 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6305 __get_user(host_tx
->shift
, &target_tx
->shift
);
6306 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6307 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6308 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6309 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6310 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6311 __get_user(host_tx
->tai
, &target_tx
->tai
);
6313 unlock_user_struct(target_tx
, target_addr
, 0);
6317 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6318 struct timex
*host_tx
)
6320 struct target_timex
*target_tx
;
6322 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6323 return -TARGET_EFAULT
;
6326 __put_user(host_tx
->modes
, &target_tx
->modes
);
6327 __put_user(host_tx
->offset
, &target_tx
->offset
);
6328 __put_user(host_tx
->freq
, &target_tx
->freq
);
6329 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6330 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6331 __put_user(host_tx
->status
, &target_tx
->status
);
6332 __put_user(host_tx
->constant
, &target_tx
->constant
);
6333 __put_user(host_tx
->precision
, &target_tx
->precision
);
6334 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6335 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6336 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6337 __put_user(host_tx
->tick
, &target_tx
->tick
);
6338 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6339 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6340 __put_user(host_tx
->shift
, &target_tx
->shift
);
6341 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6342 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6343 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6344 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6345 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6346 __put_user(host_tx
->tai
, &target_tx
->tai
);
6348 unlock_user_struct(target_tx
, target_addr
, 1);
6353 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6354 abi_ulong target_addr
)
6356 struct target_sigevent
*target_sevp
;
6358 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6359 return -TARGET_EFAULT
;
6362 /* This union is awkward on 64 bit systems because it has a 32 bit
6363 * integer and a pointer in it; we follow the conversion approach
6364 * used for handling sigval types in signal.c so the guest should get
6365 * the correct value back even if we did a 64 bit byteswap and it's
6366 * using the 32 bit integer.
6368 host_sevp
->sigev_value
.sival_ptr
=
6369 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6370 host_sevp
->sigev_signo
=
6371 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6372 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6373 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6375 unlock_user_struct(target_sevp
, target_addr
, 1);
6379 #if defined(TARGET_NR_mlockall)
6380 static inline int target_to_host_mlockall_arg(int arg
)
6384 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6385 result
|= MCL_CURRENT
;
6387 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6388 result
|= MCL_FUTURE
;
6394 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6395 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6396 defined(TARGET_NR_newfstatat))
6397 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6398 abi_ulong target_addr
,
6399 struct stat
*host_st
)
6401 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6402 if (((CPUARMState
*)cpu_env
)->eabi
) {
6403 struct target_eabi_stat64
*target_st
;
6405 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6406 return -TARGET_EFAULT
;
6407 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6408 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6409 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6410 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6411 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6413 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6414 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6415 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6416 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6417 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6418 __put_user(host_st
->st_size
, &target_st
->st_size
);
6419 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6420 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6421 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6422 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6423 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6424 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6425 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
6426 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
6427 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
6429 unlock_user_struct(target_st
, target_addr
, 1);
6433 #if defined(TARGET_HAS_STRUCT_STAT64)
6434 struct target_stat64
*target_st
;
6436 struct target_stat
*target_st
;
6439 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6440 return -TARGET_EFAULT
;
6441 memset(target_st
, 0, sizeof(*target_st
));
6442 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6443 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6444 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6445 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6447 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6448 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6449 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6450 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6451 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6452 /* XXX: better use of kernel struct */
6453 __put_user(host_st
->st_size
, &target_st
->st_size
);
6454 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6455 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6456 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6457 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6458 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6459 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6460 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
6461 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
6462 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
6464 unlock_user_struct(target_st
, target_addr
, 1);
6471 /* ??? Using host futex calls even when target atomic operations
6472 are not really atomic probably breaks things. However implementing
6473 futexes locally would make futexes shared between multiple processes
6474 tricky. However they're probably useless because guest atomic
6475 operations won't work either. */
6476 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6477 target_ulong uaddr2
, int val3
)
6479 struct timespec ts
, *pts
;
6482 /* ??? We assume FUTEX_* constants are the same on both host
6484 #ifdef FUTEX_CMD_MASK
6485 base_op
= op
& FUTEX_CMD_MASK
;
6491 case FUTEX_WAIT_BITSET
:
6494 target_to_host_timespec(pts
, timeout
);
6498 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6501 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6503 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6505 case FUTEX_CMP_REQUEUE
:
6507 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6508 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6509 But the prototype takes a `struct timespec *'; insert casts
6510 to satisfy the compiler. We do not need to tswap TIMEOUT
6511 since it's not compared to guest memory. */
6512 pts
= (struct timespec
*)(uintptr_t) timeout
;
6513 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6515 (base_op
== FUTEX_CMP_REQUEUE
6519 return -TARGET_ENOSYS
;
6522 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6523 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
6524 abi_long handle
, abi_long mount_id
,
6527 struct file_handle
*target_fh
;
6528 struct file_handle
*fh
;
6532 unsigned int size
, total_size
;
6534 if (get_user_s32(size
, handle
)) {
6535 return -TARGET_EFAULT
;
6538 name
= lock_user_string(pathname
);
6540 return -TARGET_EFAULT
;
6543 total_size
= sizeof(struct file_handle
) + size
;
6544 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
6546 unlock_user(name
, pathname
, 0);
6547 return -TARGET_EFAULT
;
6550 fh
= g_malloc0(total_size
);
6551 fh
->handle_bytes
= size
;
6553 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
6554 unlock_user(name
, pathname
, 0);
6556 /* man name_to_handle_at(2):
6557 * Other than the use of the handle_bytes field, the caller should treat
6558 * the file_handle structure as an opaque data type
6561 memcpy(target_fh
, fh
, total_size
);
6562 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
6563 target_fh
->handle_type
= tswap32(fh
->handle_type
);
6565 unlock_user(target_fh
, handle
, total_size
);
6567 if (put_user_s32(mid
, mount_id
)) {
6568 return -TARGET_EFAULT
;
6576 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6577 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
6580 struct file_handle
*target_fh
;
6581 struct file_handle
*fh
;
6582 unsigned int size
, total_size
;
6585 if (get_user_s32(size
, handle
)) {
6586 return -TARGET_EFAULT
;
6589 total_size
= sizeof(struct file_handle
) + size
;
6590 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
6592 return -TARGET_EFAULT
;
6595 fh
= g_memdup(target_fh
, total_size
);
6596 fh
->handle_bytes
= size
;
6597 fh
->handle_type
= tswap32(target_fh
->handle_type
);
6599 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
6600 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
6604 unlock_user(target_fh
, handle
, total_size
);
6610 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6612 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6615 target_sigset_t
*target_mask
;
6619 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6620 return -TARGET_EINVAL
;
6622 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6623 return -TARGET_EFAULT
;
6626 target_to_host_sigset(&host_mask
, target_mask
);
6628 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6630 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6632 fd_trans_register(ret
, &target_signalfd_trans
);
6635 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits carry the terminating signal; remap just those. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
6655 static int open_self_cmdline(void *cpu_env
, int fd
)
6657 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6658 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6661 for (i
= 0; i
< bprm
->argc
; i
++) {
6662 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6664 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6672 static int open_self_maps(void *cpu_env
, int fd
)
6674 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6675 TaskState
*ts
= cpu
->opaque
;
6681 fp
= fopen("/proc/self/maps", "r");
6686 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6687 int fields
, dev_maj
, dev_min
, inode
;
6688 uint64_t min
, max
, offset
;
6689 char flag_r
, flag_w
, flag_x
, flag_p
;
6690 char path
[512] = "";
6691 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6692 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6693 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6695 if ((fields
< 10) || (fields
> 11)) {
6698 if (h2g_valid(min
)) {
6699 int flags
= page_get_flags(h2g(min
));
6700 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6701 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6704 if (h2g(min
) == ts
->info
->stack_limit
) {
6705 pstrcpy(path
, sizeof(path
), " [stack]");
6707 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
6708 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6709 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6710 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6711 path
[0] ? " " : "", path
);
6721 static int open_self_stat(void *cpu_env
, int fd
)
6723 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6724 TaskState
*ts
= cpu
->opaque
;
6725 abi_ulong start_stack
= ts
->info
->start_stack
;
6728 for (i
= 0; i
< 44; i
++) {
6736 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6737 } else if (i
== 1) {
6739 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6740 } else if (i
== 27) {
6743 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6745 /* for the rest, there is MasterCard */
6746 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6750 if (write(fd
, buf
, len
) != len
) {
6758 static int open_self_auxv(void *cpu_env
, int fd
)
6760 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
6761 TaskState
*ts
= cpu
->opaque
;
6762 abi_ulong auxv
= ts
->info
->saved_auxv
;
6763 abi_ulong len
= ts
->info
->auxv_len
;
6767 * Auxiliary vector is stored in target process stack.
6768 * read in whole auxv vector and copy it to file
6770 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6774 r
= write(fd
, ptr
, len
);
6781 lseek(fd
, 0, SEEK_SET
);
6782 unlock_user(ptr
, auxv
, len
);
/*
 * Return nonzero if FILENAME refers to the /proc entry ENTRY of the
 * current process, i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>".
 * Anything else (other pids, other paths) returns 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
6812 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6813 defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-match comparator for emulated absolute /proc paths. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6820 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6821 static int open_net_route(void *cpu_env
, int fd
)
6828 fp
= fopen("/proc/net/route", "r");
6835 read
= getline(&line
, &len
, fp
);
6836 dprintf(fd
, "%s", line
);
6840 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6842 uint32_t dest
, gw
, mask
;
6843 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
6846 fields
= sscanf(line
,
6847 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6848 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
6849 &mask
, &mtu
, &window
, &irtt
);
6853 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6854 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
6855 metric
, tswap32(mask
), mtu
, window
, irtt
);
6865 #if defined(TARGET_SPARC)
/* Fill an emulated /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
6873 #if defined(TARGET_M68K)
/* Fill an emulated /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
6881 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
6884 const char *filename
;
6885 int (*fill
)(void *cpu_env
, int fd
);
6886 int (*cmp
)(const char *s1
, const char *s2
);
6888 const struct fake_open
*fake_open
;
6889 static const struct fake_open fakes
[] = {
6890 { "maps", open_self_maps
, is_proc_myself
},
6891 { "stat", open_self_stat
, is_proc_myself
},
6892 { "auxv", open_self_auxv
, is_proc_myself
},
6893 { "cmdline", open_self_cmdline
, is_proc_myself
},
6894 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6895 { "/proc/net/route", open_net_route
, is_proc
},
6897 #if defined(TARGET_SPARC)
6898 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
6900 #if defined(TARGET_M68K)
6901 { "/proc/hardware", open_hardware
, is_proc
},
6903 { NULL
, NULL
, NULL
}
6906 if (is_proc_myself(pathname
, "exe")) {
6907 int execfd
= qemu_getauxval(AT_EXECFD
);
6908 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
6911 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
6912 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
6917 if (fake_open
->filename
) {
6919 char filename
[PATH_MAX
];
6922 /* create temporary file to map stat to */
6923 tmpdir
= getenv("TMPDIR");
6926 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
6927 fd
= mkstemp(filename
);
6933 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
6939 lseek(fd
, 0, SEEK_SET
);
6944 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
6947 #define TIMER_MAGIC 0x0caf0000
6948 #define TIMER_MAGIC_MASK 0xffff0000
6950 /* Convert QEMU provided timer ID back to internal 16bit index format */
6951 static target_timer_t
get_timer_id(abi_long arg
)
6953 target_timer_t timerid
= arg
;
6955 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
6956 return -TARGET_EINVAL
;
6961 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
6962 return -TARGET_EINVAL
;
6968 static int target_to_host_cpu_mask(unsigned long *host_mask
,
6970 abi_ulong target_addr
,
6973 unsigned target_bits
= sizeof(abi_ulong
) * 8;
6974 unsigned host_bits
= sizeof(*host_mask
) * 8;
6975 abi_ulong
*target_mask
;
6978 assert(host_size
>= target_size
);
6980 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
6982 return -TARGET_EFAULT
;
6984 memset(host_mask
, 0, host_size
);
6986 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
6987 unsigned bit
= i
* target_bits
;
6990 __get_user(val
, &target_mask
[i
]);
6991 for (j
= 0; j
< target_bits
; j
++, bit
++) {
6992 if (val
& (1UL << j
)) {
6993 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
6998 unlock_user(target_mask
, target_addr
, 0);
7002 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7004 abi_ulong target_addr
,
7007 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7008 unsigned host_bits
= sizeof(*host_mask
) * 8;
7009 abi_ulong
*target_mask
;
7012 assert(host_size
>= target_size
);
7014 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7016 return -TARGET_EFAULT
;
7019 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7020 unsigned bit
= i
* target_bits
;
7023 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7024 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7028 __put_user(val
, &target_mask
[i
]);
7031 unlock_user(target_mask
, target_addr
, target_size
);
7035 /* This is an internal helper for do_syscall so that it is easier
7036 * to have a single return point, so that actions, such as logging
7037 * of syscall results, can be performed.
7038 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7040 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7041 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7042 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7045 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7047 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7048 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7049 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
7052 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7053 || defined(TARGET_NR_fstatfs)
7059 case TARGET_NR_exit
:
7060 /* In old applications this may be used to implement _exit(2).
7061 However in threaded applictions it is used for thread termination,
7062 and _exit_group is used for application termination.
7063 Do thread termination if we have more then one thread. */
7065 if (block_signals()) {
7066 return -TARGET_ERESTARTSYS
;
7071 if (CPU_NEXT(first_cpu
)) {
7074 /* Remove the CPU from the list. */
7075 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
7080 if (ts
->child_tidptr
) {
7081 put_user_u32(0, ts
->child_tidptr
);
7082 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7086 object_unref(OBJECT(cpu
));
7088 rcu_unregister_thread();
7093 preexit_cleanup(cpu_env
, arg1
);
7095 return 0; /* avoid warning */
7096 case TARGET_NR_read
:
7097 if (arg2
== 0 && arg3
== 0) {
7098 return get_errno(safe_read(arg1
, 0, 0));
7100 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7101 return -TARGET_EFAULT
;
7102 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7104 fd_trans_host_to_target_data(arg1
)) {
7105 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7107 unlock_user(p
, arg2
, ret
);
7110 case TARGET_NR_write
:
7111 if (arg2
== 0 && arg3
== 0) {
7112 return get_errno(safe_write(arg1
, 0, 0));
7114 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7115 return -TARGET_EFAULT
;
7116 if (fd_trans_target_to_host_data(arg1
)) {
7117 void *copy
= g_malloc(arg3
);
7118 memcpy(copy
, p
, arg3
);
7119 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7121 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7125 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7127 unlock_user(p
, arg2
, 0);
7130 #ifdef TARGET_NR_open
7131 case TARGET_NR_open
:
7132 if (!(p
= lock_user_string(arg1
)))
7133 return -TARGET_EFAULT
;
7134 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7135 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7137 fd_trans_unregister(ret
);
7138 unlock_user(p
, arg1
, 0);
7141 case TARGET_NR_openat
:
7142 if (!(p
= lock_user_string(arg2
)))
7143 return -TARGET_EFAULT
;
7144 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7145 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7147 fd_trans_unregister(ret
);
7148 unlock_user(p
, arg2
, 0);
7150 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7151 case TARGET_NR_name_to_handle_at
:
7152 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7155 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7156 case TARGET_NR_open_by_handle_at
:
7157 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7158 fd_trans_unregister(ret
);
7161 case TARGET_NR_close
:
7162 fd_trans_unregister(arg1
);
7163 return get_errno(close(arg1
));
7166 return do_brk(arg1
);
7167 #ifdef TARGET_NR_fork
7168 case TARGET_NR_fork
:
7169 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7171 #ifdef TARGET_NR_waitpid
7172 case TARGET_NR_waitpid
:
7175 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7176 if (!is_error(ret
) && arg2
&& ret
7177 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7178 return -TARGET_EFAULT
;
7182 #ifdef TARGET_NR_waitid
7183 case TARGET_NR_waitid
:
7187 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7188 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7189 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7190 return -TARGET_EFAULT
;
7191 host_to_target_siginfo(p
, &info
);
7192 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7197 #ifdef TARGET_NR_creat /* not on alpha */
7198 case TARGET_NR_creat
:
7199 if (!(p
= lock_user_string(arg1
)))
7200 return -TARGET_EFAULT
;
7201 ret
= get_errno(creat(p
, arg2
));
7202 fd_trans_unregister(ret
);
7203 unlock_user(p
, arg1
, 0);
7206 #ifdef TARGET_NR_link
7207 case TARGET_NR_link
:
7210 p
= lock_user_string(arg1
);
7211 p2
= lock_user_string(arg2
);
7213 ret
= -TARGET_EFAULT
;
7215 ret
= get_errno(link(p
, p2
));
7216 unlock_user(p2
, arg2
, 0);
7217 unlock_user(p
, arg1
, 0);
7221 #if defined(TARGET_NR_linkat)
7222 case TARGET_NR_linkat
:
7226 return -TARGET_EFAULT
;
7227 p
= lock_user_string(arg2
);
7228 p2
= lock_user_string(arg4
);
7230 ret
= -TARGET_EFAULT
;
7232 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7233 unlock_user(p
, arg2
, 0);
7234 unlock_user(p2
, arg4
, 0);
7238 #ifdef TARGET_NR_unlink
7239 case TARGET_NR_unlink
:
7240 if (!(p
= lock_user_string(arg1
)))
7241 return -TARGET_EFAULT
;
7242 ret
= get_errno(unlink(p
));
7243 unlock_user(p
, arg1
, 0);
7246 #if defined(TARGET_NR_unlinkat)
7247 case TARGET_NR_unlinkat
:
7248 if (!(p
= lock_user_string(arg2
)))
7249 return -TARGET_EFAULT
;
7250 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7251 unlock_user(p
, arg2
, 0);
7254 case TARGET_NR_execve
:
7256 char **argp
, **envp
;
7259 abi_ulong guest_argp
;
7260 abi_ulong guest_envp
;
7267 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7268 if (get_user_ual(addr
, gp
))
7269 return -TARGET_EFAULT
;
7276 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7277 if (get_user_ual(addr
, gp
))
7278 return -TARGET_EFAULT
;
7284 argp
= g_new0(char *, argc
+ 1);
7285 envp
= g_new0(char *, envc
+ 1);
7287 for (gp
= guest_argp
, q
= argp
; gp
;
7288 gp
+= sizeof(abi_ulong
), q
++) {
7289 if (get_user_ual(addr
, gp
))
7293 if (!(*q
= lock_user_string(addr
)))
7295 total_size
+= strlen(*q
) + 1;
7299 for (gp
= guest_envp
, q
= envp
; gp
;
7300 gp
+= sizeof(abi_ulong
), q
++) {
7301 if (get_user_ual(addr
, gp
))
7305 if (!(*q
= lock_user_string(addr
)))
7307 total_size
+= strlen(*q
) + 1;
7311 if (!(p
= lock_user_string(arg1
)))
7313 /* Although execve() is not an interruptible syscall it is
7314 * a special case where we must use the safe_syscall wrapper:
7315 * if we allow a signal to happen before we make the host
7316 * syscall then we will 'lose' it, because at the point of
7317 * execve the process leaves QEMU's control. So we use the
7318 * safe syscall wrapper to ensure that we either take the
7319 * signal as a guest signal, or else it does not happen
7320 * before the execve completes and makes it the other
7321 * program's problem.
7323 ret
= get_errno(safe_execve(p
, argp
, envp
));
7324 unlock_user(p
, arg1
, 0);
7329 ret
= -TARGET_EFAULT
;
7332 for (gp
= guest_argp
, q
= argp
; *q
;
7333 gp
+= sizeof(abi_ulong
), q
++) {
7334 if (get_user_ual(addr
, gp
)
7337 unlock_user(*q
, addr
, 0);
7339 for (gp
= guest_envp
, q
= envp
; *q
;
7340 gp
+= sizeof(abi_ulong
), q
++) {
7341 if (get_user_ual(addr
, gp
)
7344 unlock_user(*q
, addr
, 0);
7351 case TARGET_NR_chdir
:
7352 if (!(p
= lock_user_string(arg1
)))
7353 return -TARGET_EFAULT
;
7354 ret
= get_errno(chdir(p
));
7355 unlock_user(p
, arg1
, 0);
7357 #ifdef TARGET_NR_time
7358 case TARGET_NR_time
:
7361 ret
= get_errno(time(&host_time
));
7364 && put_user_sal(host_time
, arg1
))
7365 return -TARGET_EFAULT
;
7369 #ifdef TARGET_NR_mknod
7370 case TARGET_NR_mknod
:
7371 if (!(p
= lock_user_string(arg1
)))
7372 return -TARGET_EFAULT
;
7373 ret
= get_errno(mknod(p
, arg2
, arg3
));
7374 unlock_user(p
, arg1
, 0);
7377 #if defined(TARGET_NR_mknodat)
7378 case TARGET_NR_mknodat
:
7379 if (!(p
= lock_user_string(arg2
)))
7380 return -TARGET_EFAULT
;
7381 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7382 unlock_user(p
, arg2
, 0);
7385 #ifdef TARGET_NR_chmod
7386 case TARGET_NR_chmod
:
7387 if (!(p
= lock_user_string(arg1
)))
7388 return -TARGET_EFAULT
;
7389 ret
= get_errno(chmod(p
, arg2
));
7390 unlock_user(p
, arg1
, 0);
7393 #ifdef TARGET_NR_lseek
7394 case TARGET_NR_lseek
:
7395 return get_errno(lseek(arg1
, arg2
, arg3
));
7397 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7398 /* Alpha specific */
7399 case TARGET_NR_getxpid
:
7400 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7401 return get_errno(getpid());
7403 #ifdef TARGET_NR_getpid
7404 case TARGET_NR_getpid
:
7405 return get_errno(getpid());
7407 case TARGET_NR_mount
:
7409 /* need to look at the data field */
7413 p
= lock_user_string(arg1
);
7415 return -TARGET_EFAULT
;
7421 p2
= lock_user_string(arg2
);
7424 unlock_user(p
, arg1
, 0);
7426 return -TARGET_EFAULT
;
7430 p3
= lock_user_string(arg3
);
7433 unlock_user(p
, arg1
, 0);
7435 unlock_user(p2
, arg2
, 0);
7436 return -TARGET_EFAULT
;
7442 /* FIXME - arg5 should be locked, but it isn't clear how to
7443 * do that since it's not guaranteed to be a NULL-terminated
7447 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7449 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7451 ret
= get_errno(ret
);
7454 unlock_user(p
, arg1
, 0);
7456 unlock_user(p2
, arg2
, 0);
7458 unlock_user(p3
, arg3
, 0);
7462 #ifdef TARGET_NR_umount
7463 case TARGET_NR_umount
:
7464 if (!(p
= lock_user_string(arg1
)))
7465 return -TARGET_EFAULT
;
7466 ret
= get_errno(umount(p
));
7467 unlock_user(p
, arg1
, 0);
7470 #ifdef TARGET_NR_stime /* not on alpha */
7471 case TARGET_NR_stime
:
7474 if (get_user_sal(host_time
, arg1
))
7475 return -TARGET_EFAULT
;
7476 return get_errno(stime(&host_time
));
7479 #ifdef TARGET_NR_alarm /* not on alpha */
7480 case TARGET_NR_alarm
:
7483 #ifdef TARGET_NR_pause /* not on alpha */
7484 case TARGET_NR_pause
:
7485 if (!block_signals()) {
7486 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7488 return -TARGET_EINTR
;
7490 #ifdef TARGET_NR_utime
7491 case TARGET_NR_utime
:
7493 struct utimbuf tbuf
, *host_tbuf
;
7494 struct target_utimbuf
*target_tbuf
;
7496 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7497 return -TARGET_EFAULT
;
7498 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7499 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7500 unlock_user_struct(target_tbuf
, arg2
, 0);
7505 if (!(p
= lock_user_string(arg1
)))
7506 return -TARGET_EFAULT
;
7507 ret
= get_errno(utime(p
, host_tbuf
));
7508 unlock_user(p
, arg1
, 0);
7512 #ifdef TARGET_NR_utimes
7513 case TARGET_NR_utimes
:
7515 struct timeval
*tvp
, tv
[2];
7517 if (copy_from_user_timeval(&tv
[0], arg2
)
7518 || copy_from_user_timeval(&tv
[1],
7519 arg2
+ sizeof(struct target_timeval
)))
7520 return -TARGET_EFAULT
;
7525 if (!(p
= lock_user_string(arg1
)))
7526 return -TARGET_EFAULT
;
7527 ret
= get_errno(utimes(p
, tvp
));
7528 unlock_user(p
, arg1
, 0);
7532 #if defined(TARGET_NR_futimesat)
7533 case TARGET_NR_futimesat
:
7535 struct timeval
*tvp
, tv
[2];
7537 if (copy_from_user_timeval(&tv
[0], arg3
)
7538 || copy_from_user_timeval(&tv
[1],
7539 arg3
+ sizeof(struct target_timeval
)))
7540 return -TARGET_EFAULT
;
7545 if (!(p
= lock_user_string(arg2
))) {
7546 return -TARGET_EFAULT
;
7548 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7549 unlock_user(p
, arg2
, 0);
7553 #ifdef TARGET_NR_access
7554 case TARGET_NR_access
:
7555 if (!(p
= lock_user_string(arg1
))) {
7556 return -TARGET_EFAULT
;
7558 ret
= get_errno(access(path(p
), arg2
));
7559 unlock_user(p
, arg1
, 0);
7562 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7563 case TARGET_NR_faccessat
:
7564 if (!(p
= lock_user_string(arg2
))) {
7565 return -TARGET_EFAULT
;
7567 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7568 unlock_user(p
, arg2
, 0);
7571 #ifdef TARGET_NR_nice /* not on alpha */
7572 case TARGET_NR_nice
:
7573 return get_errno(nice(arg1
));
7575 case TARGET_NR_sync
:
7578 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7579 case TARGET_NR_syncfs
:
7580 return get_errno(syncfs(arg1
));
7582 case TARGET_NR_kill
:
7583 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7584 #ifdef TARGET_NR_rename
7585 case TARGET_NR_rename
:
7588 p
= lock_user_string(arg1
);
7589 p2
= lock_user_string(arg2
);
7591 ret
= -TARGET_EFAULT
;
7593 ret
= get_errno(rename(p
, p2
));
7594 unlock_user(p2
, arg2
, 0);
7595 unlock_user(p
, arg1
, 0);
7599 #if defined(TARGET_NR_renameat)
7600 case TARGET_NR_renameat
:
7603 p
= lock_user_string(arg2
);
7604 p2
= lock_user_string(arg4
);
7606 ret
= -TARGET_EFAULT
;
7608 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7609 unlock_user(p2
, arg4
, 0);
7610 unlock_user(p
, arg2
, 0);
7614 #if defined(TARGET_NR_renameat2)
7615 case TARGET_NR_renameat2
:
7618 p
= lock_user_string(arg2
);
7619 p2
= lock_user_string(arg4
);
7621 ret
= -TARGET_EFAULT
;
7623 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7625 unlock_user(p2
, arg4
, 0);
7626 unlock_user(p
, arg2
, 0);
7630 #ifdef TARGET_NR_mkdir
7631 case TARGET_NR_mkdir
:
7632 if (!(p
= lock_user_string(arg1
)))
7633 return -TARGET_EFAULT
;
7634 ret
= get_errno(mkdir(p
, arg2
));
7635 unlock_user(p
, arg1
, 0);
7638 #if defined(TARGET_NR_mkdirat)
7639 case TARGET_NR_mkdirat
:
7640 if (!(p
= lock_user_string(arg2
)))
7641 return -TARGET_EFAULT
;
7642 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7643 unlock_user(p
, arg2
, 0);
7646 #ifdef TARGET_NR_rmdir
7647 case TARGET_NR_rmdir
:
7648 if (!(p
= lock_user_string(arg1
)))
7649 return -TARGET_EFAULT
;
7650 ret
= get_errno(rmdir(p
));
7651 unlock_user(p
, arg1
, 0);
7655 ret
= get_errno(dup(arg1
));
7657 fd_trans_dup(arg1
, ret
);
7660 #ifdef TARGET_NR_pipe
7661 case TARGET_NR_pipe
:
7662 return do_pipe(cpu_env
, arg1
, 0, 0);
7664 #ifdef TARGET_NR_pipe2
7665 case TARGET_NR_pipe2
:
7666 return do_pipe(cpu_env
, arg1
,
7667 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7669 case TARGET_NR_times
:
7671 struct target_tms
*tmsp
;
7673 ret
= get_errno(times(&tms
));
7675 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7677 return -TARGET_EFAULT
;
7678 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7679 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7680 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7681 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7684 ret
= host_to_target_clock_t(ret
);
7687 case TARGET_NR_acct
:
7689 ret
= get_errno(acct(NULL
));
7691 if (!(p
= lock_user_string(arg1
))) {
7692 return -TARGET_EFAULT
;
7694 ret
= get_errno(acct(path(p
)));
7695 unlock_user(p
, arg1
, 0);
7698 #ifdef TARGET_NR_umount2
7699 case TARGET_NR_umount2
:
7700 if (!(p
= lock_user_string(arg1
)))
7701 return -TARGET_EFAULT
;
7702 ret
= get_errno(umount2(p
, arg2
));
7703 unlock_user(p
, arg1
, 0);
7706 case TARGET_NR_ioctl
:
7707 return do_ioctl(arg1
, arg2
, arg3
);
7708 #ifdef TARGET_NR_fcntl
7709 case TARGET_NR_fcntl
:
7710 return do_fcntl(arg1
, arg2
, arg3
);
7712 case TARGET_NR_setpgid
:
7713 return get_errno(setpgid(arg1
, arg2
));
7714 case TARGET_NR_umask
:
7715 return get_errno(umask(arg1
));
7716 case TARGET_NR_chroot
:
7717 if (!(p
= lock_user_string(arg1
)))
7718 return -TARGET_EFAULT
;
7719 ret
= get_errno(chroot(p
));
7720 unlock_user(p
, arg1
, 0);
7722 #ifdef TARGET_NR_dup2
7723 case TARGET_NR_dup2
:
7724 ret
= get_errno(dup2(arg1
, arg2
));
7726 fd_trans_dup(arg1
, arg2
);
7730 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7731 case TARGET_NR_dup3
:
7735 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7738 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7739 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7741 fd_trans_dup(arg1
, arg2
);
7746 #ifdef TARGET_NR_getppid /* not on alpha */
7747 case TARGET_NR_getppid
:
7748 return get_errno(getppid());
7750 #ifdef TARGET_NR_getpgrp
7751 case TARGET_NR_getpgrp
:
7752 return get_errno(getpgrp());
7754 case TARGET_NR_setsid
:
7755 return get_errno(setsid());
7756 #ifdef TARGET_NR_sigaction
7757 case TARGET_NR_sigaction
:
7759 #if defined(TARGET_ALPHA)
7760 struct target_sigaction act
, oact
, *pact
= 0;
7761 struct target_old_sigaction
*old_act
;
7763 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7764 return -TARGET_EFAULT
;
7765 act
._sa_handler
= old_act
->_sa_handler
;
7766 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7767 act
.sa_flags
= old_act
->sa_flags
;
7768 act
.sa_restorer
= 0;
7769 unlock_user_struct(old_act
, arg2
, 0);
7772 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7773 if (!is_error(ret
) && arg3
) {
7774 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7775 return -TARGET_EFAULT
;
7776 old_act
->_sa_handler
= oact
._sa_handler
;
7777 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7778 old_act
->sa_flags
= oact
.sa_flags
;
7779 unlock_user_struct(old_act
, arg3
, 1);
7781 #elif defined(TARGET_MIPS)
7782 struct target_sigaction act
, oact
, *pact
, *old_act
;
7785 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7786 return -TARGET_EFAULT
;
7787 act
._sa_handler
= old_act
->_sa_handler
;
7788 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7789 act
.sa_flags
= old_act
->sa_flags
;
7790 unlock_user_struct(old_act
, arg2
, 0);
7796 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7798 if (!is_error(ret
) && arg3
) {
7799 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7800 return -TARGET_EFAULT
;
7801 old_act
->_sa_handler
= oact
._sa_handler
;
7802 old_act
->sa_flags
= oact
.sa_flags
;
7803 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7804 old_act
->sa_mask
.sig
[1] = 0;
7805 old_act
->sa_mask
.sig
[2] = 0;
7806 old_act
->sa_mask
.sig
[3] = 0;
7807 unlock_user_struct(old_act
, arg3
, 1);
7810 struct target_old_sigaction
*old_act
;
7811 struct target_sigaction act
, oact
, *pact
;
7813 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7814 return -TARGET_EFAULT
;
7815 act
._sa_handler
= old_act
->_sa_handler
;
7816 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7817 act
.sa_flags
= old_act
->sa_flags
;
7818 act
.sa_restorer
= old_act
->sa_restorer
;
7819 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7820 act
.ka_restorer
= 0;
7822 unlock_user_struct(old_act
, arg2
, 0);
7827 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7828 if (!is_error(ret
) && arg3
) {
7829 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7830 return -TARGET_EFAULT
;
7831 old_act
->_sa_handler
= oact
._sa_handler
;
7832 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7833 old_act
->sa_flags
= oact
.sa_flags
;
7834 old_act
->sa_restorer
= oact
.sa_restorer
;
7835 unlock_user_struct(old_act
, arg3
, 1);
7841 case TARGET_NR_rt_sigaction
:
7843 #if defined(TARGET_ALPHA)
7844 /* For Alpha and SPARC this is a 5 argument syscall, with
7845 * a 'restorer' parameter which must be copied into the
7846 * sa_restorer field of the sigaction struct.
7847 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7848 * and arg5 is the sigsetsize.
7849 * Alpha also has a separate rt_sigaction struct that it uses
7850 * here; SPARC uses the usual sigaction struct.
7852 struct target_rt_sigaction
*rt_act
;
7853 struct target_sigaction act
, oact
, *pact
= 0;
7855 if (arg4
!= sizeof(target_sigset_t
)) {
7856 return -TARGET_EINVAL
;
7859 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
7860 return -TARGET_EFAULT
;
7861 act
._sa_handler
= rt_act
->_sa_handler
;
7862 act
.sa_mask
= rt_act
->sa_mask
;
7863 act
.sa_flags
= rt_act
->sa_flags
;
7864 act
.sa_restorer
= arg5
;
7865 unlock_user_struct(rt_act
, arg2
, 0);
7868 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7869 if (!is_error(ret
) && arg3
) {
7870 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
7871 return -TARGET_EFAULT
;
7872 rt_act
->_sa_handler
= oact
._sa_handler
;
7873 rt_act
->sa_mask
= oact
.sa_mask
;
7874 rt_act
->sa_flags
= oact
.sa_flags
;
7875 unlock_user_struct(rt_act
, arg3
, 1);
7879 target_ulong restorer
= arg4
;
7880 target_ulong sigsetsize
= arg5
;
7882 target_ulong sigsetsize
= arg4
;
7884 struct target_sigaction
*act
;
7885 struct target_sigaction
*oact
;
7887 if (sigsetsize
!= sizeof(target_sigset_t
)) {
7888 return -TARGET_EINVAL
;
7891 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
7892 return -TARGET_EFAULT
;
7894 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7895 act
->ka_restorer
= restorer
;
7901 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
7902 ret
= -TARGET_EFAULT
;
7903 goto rt_sigaction_fail
;
7907 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
7910 unlock_user_struct(act
, arg2
, 0);
7912 unlock_user_struct(oact
, arg3
, 1);
7916 #ifdef TARGET_NR_sgetmask /* not on alpha */
7917 case TARGET_NR_sgetmask
:
7920 abi_ulong target_set
;
7921 ret
= do_sigprocmask(0, NULL
, &cur_set
);
7923 host_to_target_old_sigset(&target_set
, &cur_set
);
7929 #ifdef TARGET_NR_ssetmask /* not on alpha */
7930 case TARGET_NR_ssetmask
:
7933 abi_ulong target_set
= arg1
;
7934 target_to_host_old_sigset(&set
, &target_set
);
7935 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
7937 host_to_target_old_sigset(&target_set
, &oset
);
7943 #ifdef TARGET_NR_sigprocmask
7944 case TARGET_NR_sigprocmask
:
7946 #if defined(TARGET_ALPHA)
7947 sigset_t set
, oldset
;
7952 case TARGET_SIG_BLOCK
:
7955 case TARGET_SIG_UNBLOCK
:
7958 case TARGET_SIG_SETMASK
:
7962 return -TARGET_EINVAL
;
7965 target_to_host_old_sigset(&set
, &mask
);
7967 ret
= do_sigprocmask(how
, &set
, &oldset
);
7968 if (!is_error(ret
)) {
7969 host_to_target_old_sigset(&mask
, &oldset
);
7971 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
7974 sigset_t set
, oldset
, *set_ptr
;
7979 case TARGET_SIG_BLOCK
:
7982 case TARGET_SIG_UNBLOCK
:
7985 case TARGET_SIG_SETMASK
:
7989 return -TARGET_EINVAL
;
7991 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
7992 return -TARGET_EFAULT
;
7993 target_to_host_old_sigset(&set
, p
);
7994 unlock_user(p
, arg2
, 0);
8000 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8001 if (!is_error(ret
) && arg3
) {
8002 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8003 return -TARGET_EFAULT
;
8004 host_to_target_old_sigset(p
, &oldset
);
8005 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8011 case TARGET_NR_rt_sigprocmask
:
8014 sigset_t set
, oldset
, *set_ptr
;
8016 if (arg4
!= sizeof(target_sigset_t
)) {
8017 return -TARGET_EINVAL
;
8022 case TARGET_SIG_BLOCK
:
8025 case TARGET_SIG_UNBLOCK
:
8028 case TARGET_SIG_SETMASK
:
8032 return -TARGET_EINVAL
;
8034 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8035 return -TARGET_EFAULT
;
8036 target_to_host_sigset(&set
, p
);
8037 unlock_user(p
, arg2
, 0);
8043 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8044 if (!is_error(ret
) && arg3
) {
8045 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8046 return -TARGET_EFAULT
;
8047 host_to_target_sigset(p
, &oldset
);
8048 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8052 #ifdef TARGET_NR_sigpending
8053 case TARGET_NR_sigpending
:
8056 ret
= get_errno(sigpending(&set
));
8057 if (!is_error(ret
)) {
8058 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8059 return -TARGET_EFAULT
;
8060 host_to_target_old_sigset(p
, &set
);
8061 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8066 case TARGET_NR_rt_sigpending
:
8070 /* Yes, this check is >, not != like most. We follow the kernel's
8071 * logic and it does it like this because it implements
8072 * NR_sigpending through the same code path, and in that case
8073 * the old_sigset_t is smaller in size.
8075 if (arg2
> sizeof(target_sigset_t
)) {
8076 return -TARGET_EINVAL
;
8079 ret
= get_errno(sigpending(&set
));
8080 if (!is_error(ret
)) {
8081 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8082 return -TARGET_EFAULT
;
8083 host_to_target_sigset(p
, &set
);
8084 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8088 #ifdef TARGET_NR_sigsuspend
8089 case TARGET_NR_sigsuspend
:
8091 TaskState
*ts
= cpu
->opaque
;
8092 #if defined(TARGET_ALPHA)
8093 abi_ulong mask
= arg1
;
8094 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8096 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8097 return -TARGET_EFAULT
;
8098 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8099 unlock_user(p
, arg1
, 0);
8101 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8103 if (ret
!= -TARGET_ERESTARTSYS
) {
8104 ts
->in_sigsuspend
= 1;
8109 case TARGET_NR_rt_sigsuspend
:
8111 TaskState
*ts
= cpu
->opaque
;
8113 if (arg2
!= sizeof(target_sigset_t
)) {
8114 return -TARGET_EINVAL
;
8116 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8117 return -TARGET_EFAULT
;
8118 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8119 unlock_user(p
, arg1
, 0);
8120 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8122 if (ret
!= -TARGET_ERESTARTSYS
) {
8123 ts
->in_sigsuspend
= 1;
8127 case TARGET_NR_rt_sigtimedwait
:
8130 struct timespec uts
, *puts
;
8133 if (arg4
!= sizeof(target_sigset_t
)) {
8134 return -TARGET_EINVAL
;
8137 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8138 return -TARGET_EFAULT
;
8139 target_to_host_sigset(&set
, p
);
8140 unlock_user(p
, arg1
, 0);
8143 target_to_host_timespec(puts
, arg3
);
8147 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8149 if (!is_error(ret
)) {
8151 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8154 return -TARGET_EFAULT
;
8156 host_to_target_siginfo(p
, &uinfo
);
8157 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8159 ret
= host_to_target_signal(ret
);
8163 case TARGET_NR_rt_sigqueueinfo
:
8167 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8169 return -TARGET_EFAULT
;
8171 target_to_host_siginfo(&uinfo
, p
);
8172 unlock_user(p
, arg3
, 0);
8173 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8176 case TARGET_NR_rt_tgsigqueueinfo
:
8180 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8182 return -TARGET_EFAULT
;
8184 target_to_host_siginfo(&uinfo
, p
);
8185 unlock_user(p
, arg4
, 0);
8186 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8189 #ifdef TARGET_NR_sigreturn
8190 case TARGET_NR_sigreturn
:
8191 if (block_signals()) {
8192 return -TARGET_ERESTARTSYS
;
8194 return do_sigreturn(cpu_env
);
8196 case TARGET_NR_rt_sigreturn
:
8197 if (block_signals()) {
8198 return -TARGET_ERESTARTSYS
;
8200 return do_rt_sigreturn(cpu_env
);
8201 case TARGET_NR_sethostname
:
8202 if (!(p
= lock_user_string(arg1
)))
8203 return -TARGET_EFAULT
;
8204 ret
= get_errno(sethostname(p
, arg2
));
8205 unlock_user(p
, arg1
, 0);
8207 #ifdef TARGET_NR_setrlimit
8208 case TARGET_NR_setrlimit
:
8210 int resource
= target_to_host_resource(arg1
);
8211 struct target_rlimit
*target_rlim
;
8213 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8214 return -TARGET_EFAULT
;
8215 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8216 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8217 unlock_user_struct(target_rlim
, arg2
, 0);
8219 * If we just passed through resource limit settings for memory then
8220 * they would also apply to QEMU's own allocations, and QEMU will
8221 * crash or hang or die if its allocations fail. Ideally we would
8222 * track the guest allocations in QEMU and apply the limits ourselves.
8223 * For now, just tell the guest the call succeeded but don't actually
8226 if (resource
!= RLIMIT_AS
&&
8227 resource
!= RLIMIT_DATA
&&
8228 resource
!= RLIMIT_STACK
) {
8229 return get_errno(setrlimit(resource
, &rlim
));
8235 #ifdef TARGET_NR_getrlimit
8236 case TARGET_NR_getrlimit
:
8238 int resource
= target_to_host_resource(arg1
);
8239 struct target_rlimit
*target_rlim
;
8242 ret
= get_errno(getrlimit(resource
, &rlim
));
8243 if (!is_error(ret
)) {
8244 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8245 return -TARGET_EFAULT
;
8246 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8247 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8248 unlock_user_struct(target_rlim
, arg2
, 1);
8253 case TARGET_NR_getrusage
:
8255 struct rusage rusage
;
8256 ret
= get_errno(getrusage(arg1
, &rusage
));
8257 if (!is_error(ret
)) {
8258 ret
= host_to_target_rusage(arg2
, &rusage
);
8262 case TARGET_NR_gettimeofday
:
8265 ret
= get_errno(gettimeofday(&tv
, NULL
));
8266 if (!is_error(ret
)) {
8267 if (copy_to_user_timeval(arg1
, &tv
))
8268 return -TARGET_EFAULT
;
8272 case TARGET_NR_settimeofday
:
8274 struct timeval tv
, *ptv
= NULL
;
8275 struct timezone tz
, *ptz
= NULL
;
8278 if (copy_from_user_timeval(&tv
, arg1
)) {
8279 return -TARGET_EFAULT
;
8285 if (copy_from_user_timezone(&tz
, arg2
)) {
8286 return -TARGET_EFAULT
;
8291 return get_errno(settimeofday(ptv
, ptz
));
8293 #if defined(TARGET_NR_select)
8294 case TARGET_NR_select
:
8295 #if defined(TARGET_WANT_NI_OLD_SELECT)
8296 /* some architectures used to have old_select here
8297 * but now ENOSYS it.
8299 ret
= -TARGET_ENOSYS
;
8300 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8301 ret
= do_old_select(arg1
);
8303 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8307 #ifdef TARGET_NR_pselect6
8308 case TARGET_NR_pselect6
:
8310 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8311 fd_set rfds
, wfds
, efds
;
8312 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8313 struct timespec ts
, *ts_ptr
;
8316 * The 6th arg is actually two args smashed together,
8317 * so we cannot use the C library.
8325 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8326 target_sigset_t
*target_sigset
;
8334 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8338 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8342 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8348 * This takes a timespec, and not a timeval, so we cannot
8349 * use the do_select() helper ...
8352 if (target_to_host_timespec(&ts
, ts_addr
)) {
8353 return -TARGET_EFAULT
;
8360 /* Extract the two packed args for the sigset */
8363 sig
.size
= SIGSET_T_SIZE
;
8365 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8367 return -TARGET_EFAULT
;
8369 arg_sigset
= tswapal(arg7
[0]);
8370 arg_sigsize
= tswapal(arg7
[1]);
8371 unlock_user(arg7
, arg6
, 0);
8375 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8376 /* Like the kernel, we enforce correct size sigsets */
8377 return -TARGET_EINVAL
;
8379 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8380 sizeof(*target_sigset
), 1);
8381 if (!target_sigset
) {
8382 return -TARGET_EFAULT
;
8384 target_to_host_sigset(&set
, target_sigset
);
8385 unlock_user(target_sigset
, arg_sigset
, 0);
8393 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8396 if (!is_error(ret
)) {
8397 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8398 return -TARGET_EFAULT
;
8399 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8400 return -TARGET_EFAULT
;
8401 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8402 return -TARGET_EFAULT
;
8404 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8405 return -TARGET_EFAULT
;
8410 #ifdef TARGET_NR_symlink
8411 case TARGET_NR_symlink
:
8414 p
= lock_user_string(arg1
);
8415 p2
= lock_user_string(arg2
);
8417 ret
= -TARGET_EFAULT
;
8419 ret
= get_errno(symlink(p
, p2
));
8420 unlock_user(p2
, arg2
, 0);
8421 unlock_user(p
, arg1
, 0);
8425 #if defined(TARGET_NR_symlinkat)
8426 case TARGET_NR_symlinkat
:
8429 p
= lock_user_string(arg1
);
8430 p2
= lock_user_string(arg3
);
8432 ret
= -TARGET_EFAULT
;
8434 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8435 unlock_user(p2
, arg3
, 0);
8436 unlock_user(p
, arg1
, 0);
8440 #ifdef TARGET_NR_readlink
8441 case TARGET_NR_readlink
:
8444 p
= lock_user_string(arg1
);
8445 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8447 ret
= -TARGET_EFAULT
;
8449 /* Short circuit this for the magic exe check. */
8450 ret
= -TARGET_EINVAL
;
8451 } else if (is_proc_myself((const char *)p
, "exe")) {
8452 char real
[PATH_MAX
], *temp
;
8453 temp
= realpath(exec_path
, real
);
8454 /* Return value is # of bytes that we wrote to the buffer. */
8456 ret
= get_errno(-1);
8458 /* Don't worry about sign mismatch as earlier mapping
8459 * logic would have thrown a bad address error. */
8460 ret
= MIN(strlen(real
), arg3
);
8461 /* We cannot NUL terminate the string. */
8462 memcpy(p2
, real
, ret
);
8465 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8467 unlock_user(p2
, arg2
, ret
);
8468 unlock_user(p
, arg1
, 0);
8472 #if defined(TARGET_NR_readlinkat)
8473 case TARGET_NR_readlinkat
:
8476 p
= lock_user_string(arg2
);
8477 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8479 ret
= -TARGET_EFAULT
;
8480 } else if (is_proc_myself((const char *)p
, "exe")) {
8481 char real
[PATH_MAX
], *temp
;
8482 temp
= realpath(exec_path
, real
);
8483 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8484 snprintf((char *)p2
, arg4
, "%s", real
);
8486 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8488 unlock_user(p2
, arg3
, ret
);
8489 unlock_user(p
, arg2
, 0);
8493 #ifdef TARGET_NR_swapon
8494 case TARGET_NR_swapon
:
8495 if (!(p
= lock_user_string(arg1
)))
8496 return -TARGET_EFAULT
;
8497 ret
= get_errno(swapon(p
, arg2
));
8498 unlock_user(p
, arg1
, 0);
8501 case TARGET_NR_reboot
:
8502 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8503 /* arg4 must be ignored in all other cases */
8504 p
= lock_user_string(arg4
);
8506 return -TARGET_EFAULT
;
8508 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8509 unlock_user(p
, arg4
, 0);
8511 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8514 #ifdef TARGET_NR_mmap
8515 case TARGET_NR_mmap
:
8516 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8517 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8518 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8519 || defined(TARGET_S390X)
8522 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8523 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8524 return -TARGET_EFAULT
;
8531 unlock_user(v
, arg1
, 0);
8532 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8533 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8537 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8538 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8544 #ifdef TARGET_NR_mmap2
8545 case TARGET_NR_mmap2
:
8547 #define MMAP_SHIFT 12
8549 ret
= target_mmap(arg1
, arg2
, arg3
,
8550 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8551 arg5
, arg6
<< MMAP_SHIFT
);
8552 return get_errno(ret
);
8554 case TARGET_NR_munmap
:
8555 return get_errno(target_munmap(arg1
, arg2
));
8556 case TARGET_NR_mprotect
:
8558 TaskState
*ts
= cpu
->opaque
;
8559 /* Special hack to detect libc making the stack executable. */
8560 if ((arg3
& PROT_GROWSDOWN
)
8561 && arg1
>= ts
->info
->stack_limit
8562 && arg1
<= ts
->info
->start_stack
) {
8563 arg3
&= ~PROT_GROWSDOWN
;
8564 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8565 arg1
= ts
->info
->stack_limit
;
8568 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8569 #ifdef TARGET_NR_mremap
8570 case TARGET_NR_mremap
:
8571 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8573 /* ??? msync/mlock/munlock are broken for softmmu. */
8574 #ifdef TARGET_NR_msync
8575 case TARGET_NR_msync
:
8576 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8578 #ifdef TARGET_NR_mlock
8579 case TARGET_NR_mlock
:
8580 return get_errno(mlock(g2h(arg1
), arg2
));
8582 #ifdef TARGET_NR_munlock
8583 case TARGET_NR_munlock
:
8584 return get_errno(munlock(g2h(arg1
), arg2
));
8586 #ifdef TARGET_NR_mlockall
8587 case TARGET_NR_mlockall
:
8588 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8590 #ifdef TARGET_NR_munlockall
8591 case TARGET_NR_munlockall
:
8592 return get_errno(munlockall());
8594 #ifdef TARGET_NR_truncate
8595 case TARGET_NR_truncate
:
8596 if (!(p
= lock_user_string(arg1
)))
8597 return -TARGET_EFAULT
;
8598 ret
= get_errno(truncate(p
, arg2
));
8599 unlock_user(p
, arg1
, 0);
8602 #ifdef TARGET_NR_ftruncate
8603 case TARGET_NR_ftruncate
:
8604 return get_errno(ftruncate(arg1
, arg2
));
8606 case TARGET_NR_fchmod
:
8607 return get_errno(fchmod(arg1
, arg2
));
8608 #if defined(TARGET_NR_fchmodat)
8609 case TARGET_NR_fchmodat
:
8610 if (!(p
= lock_user_string(arg2
)))
8611 return -TARGET_EFAULT
;
8612 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8613 unlock_user(p
, arg2
, 0);
8616 case TARGET_NR_getpriority
:
8617 /* Note that negative values are valid for getpriority, so we must
8618 differentiate based on errno settings. */
8620 ret
= getpriority(arg1
, arg2
);
8621 if (ret
== -1 && errno
!= 0) {
8622 return -host_to_target_errno(errno
);
8625 /* Return value is the unbiased priority. Signal no error. */
8626 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8628 /* Return value is a biased priority to avoid negative numbers. */
8632 case TARGET_NR_setpriority
:
8633 return get_errno(setpriority(arg1
, arg2
, arg3
));
8634 #ifdef TARGET_NR_statfs
8635 case TARGET_NR_statfs
:
8636 if (!(p
= lock_user_string(arg1
))) {
8637 return -TARGET_EFAULT
;
8639 ret
= get_errno(statfs(path(p
), &stfs
));
8640 unlock_user(p
, arg1
, 0);
8642 if (!is_error(ret
)) {
8643 struct target_statfs
*target_stfs
;
8645 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8646 return -TARGET_EFAULT
;
8647 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8648 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8649 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8650 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8651 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8652 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8653 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8654 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8655 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8656 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8657 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8658 #ifdef _STATFS_F_FLAGS
8659 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8661 __put_user(0, &target_stfs
->f_flags
);
8663 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8664 unlock_user_struct(target_stfs
, arg2
, 1);
8668 #ifdef TARGET_NR_fstatfs
8669 case TARGET_NR_fstatfs
:
8670 ret
= get_errno(fstatfs(arg1
, &stfs
));
8671 goto convert_statfs
;
8673 #ifdef TARGET_NR_statfs64
8674 case TARGET_NR_statfs64
:
8675 if (!(p
= lock_user_string(arg1
))) {
8676 return -TARGET_EFAULT
;
8678 ret
= get_errno(statfs(path(p
), &stfs
));
8679 unlock_user(p
, arg1
, 0);
8681 if (!is_error(ret
)) {
8682 struct target_statfs64
*target_stfs
;
8684 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8685 return -TARGET_EFAULT
;
8686 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8687 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8688 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8689 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8690 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8691 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8692 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8693 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8694 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8695 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8696 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8697 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8698 unlock_user_struct(target_stfs
, arg3
, 1);
8701 case TARGET_NR_fstatfs64
:
8702 ret
= get_errno(fstatfs(arg1
, &stfs
));
8703 goto convert_statfs64
;
8705 #ifdef TARGET_NR_socketcall
8706 case TARGET_NR_socketcall
:
8707 return do_socketcall(arg1
, arg2
);
8709 #ifdef TARGET_NR_accept
8710 case TARGET_NR_accept
:
8711 return do_accept4(arg1
, arg2
, arg3
, 0);
8713 #ifdef TARGET_NR_accept4
8714 case TARGET_NR_accept4
:
8715 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8717 #ifdef TARGET_NR_bind
8718 case TARGET_NR_bind
:
8719 return do_bind(arg1
, arg2
, arg3
);
8721 #ifdef TARGET_NR_connect
8722 case TARGET_NR_connect
:
8723 return do_connect(arg1
, arg2
, arg3
);
8725 #ifdef TARGET_NR_getpeername
8726 case TARGET_NR_getpeername
:
8727 return do_getpeername(arg1
, arg2
, arg3
);
8729 #ifdef TARGET_NR_getsockname
8730 case TARGET_NR_getsockname
:
8731 return do_getsockname(arg1
, arg2
, arg3
);
8733 #ifdef TARGET_NR_getsockopt
8734 case TARGET_NR_getsockopt
:
8735 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8737 #ifdef TARGET_NR_listen
8738 case TARGET_NR_listen
:
8739 return get_errno(listen(arg1
, arg2
));
8741 #ifdef TARGET_NR_recv
8742 case TARGET_NR_recv
:
8743 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8745 #ifdef TARGET_NR_recvfrom
8746 case TARGET_NR_recvfrom
:
8747 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8749 #ifdef TARGET_NR_recvmsg
8750 case TARGET_NR_recvmsg
:
8751 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8753 #ifdef TARGET_NR_send
8754 case TARGET_NR_send
:
8755 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8757 #ifdef TARGET_NR_sendmsg
8758 case TARGET_NR_sendmsg
:
8759 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8761 #ifdef TARGET_NR_sendmmsg
8762 case TARGET_NR_sendmmsg
:
8763 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8764 case TARGET_NR_recvmmsg
:
8765 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8767 #ifdef TARGET_NR_sendto
8768 case TARGET_NR_sendto
:
8769 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8771 #ifdef TARGET_NR_shutdown
8772 case TARGET_NR_shutdown
:
8773 return get_errno(shutdown(arg1
, arg2
));
8775 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8776 case TARGET_NR_getrandom
:
8777 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8779 return -TARGET_EFAULT
;
8781 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8782 unlock_user(p
, arg1
, ret
);
8785 #ifdef TARGET_NR_socket
8786 case TARGET_NR_socket
:
8787 return do_socket(arg1
, arg2
, arg3
);
8789 #ifdef TARGET_NR_socketpair
8790 case TARGET_NR_socketpair
:
8791 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8793 #ifdef TARGET_NR_setsockopt
8794 case TARGET_NR_setsockopt
:
8795 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8797 #if defined(TARGET_NR_syslog)
8798 case TARGET_NR_syslog
:
8803 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8804 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8805 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8806 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8807 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8808 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8809 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8810 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8811 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8812 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8813 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
8814 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
8817 return -TARGET_EINVAL
;
8822 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8824 return -TARGET_EFAULT
;
8826 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
8827 unlock_user(p
, arg2
, arg3
);
8831 return -TARGET_EINVAL
;
8836 case TARGET_NR_setitimer
:
8838 struct itimerval value
, ovalue
, *pvalue
;
8842 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
8843 || copy_from_user_timeval(&pvalue
->it_value
,
8844 arg2
+ sizeof(struct target_timeval
)))
8845 return -TARGET_EFAULT
;
8849 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
8850 if (!is_error(ret
) && arg3
) {
8851 if (copy_to_user_timeval(arg3
,
8852 &ovalue
.it_interval
)
8853 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
8855 return -TARGET_EFAULT
;
8859 case TARGET_NR_getitimer
:
8861 struct itimerval value
;
8863 ret
= get_errno(getitimer(arg1
, &value
));
8864 if (!is_error(ret
) && arg2
) {
8865 if (copy_to_user_timeval(arg2
,
8867 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
8869 return -TARGET_EFAULT
;
8873 #ifdef TARGET_NR_stat
8874 case TARGET_NR_stat
:
8875 if (!(p
= lock_user_string(arg1
))) {
8876 return -TARGET_EFAULT
;
8878 ret
= get_errno(stat(path(p
), &st
));
8879 unlock_user(p
, arg1
, 0);
8882 #ifdef TARGET_NR_lstat
8883 case TARGET_NR_lstat
:
8884 if (!(p
= lock_user_string(arg1
))) {
8885 return -TARGET_EFAULT
;
8887 ret
= get_errno(lstat(path(p
), &st
));
8888 unlock_user(p
, arg1
, 0);
8891 #ifdef TARGET_NR_fstat
8892 case TARGET_NR_fstat
:
8894 ret
= get_errno(fstat(arg1
, &st
));
8895 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8898 if (!is_error(ret
)) {
8899 struct target_stat
*target_st
;
8901 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
8902 return -TARGET_EFAULT
;
8903 memset(target_st
, 0, sizeof(*target_st
));
8904 __put_user(st
.st_dev
, &target_st
->st_dev
);
8905 __put_user(st
.st_ino
, &target_st
->st_ino
);
8906 __put_user(st
.st_mode
, &target_st
->st_mode
);
8907 __put_user(st
.st_uid
, &target_st
->st_uid
);
8908 __put_user(st
.st_gid
, &target_st
->st_gid
);
8909 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
8910 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
8911 __put_user(st
.st_size
, &target_st
->st_size
);
8912 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
8913 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
8914 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
8915 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
8916 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
8917 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
8918 defined(TARGET_STAT_HAVE_NSEC)
8919 __put_user(st
.st_atim
.tv_nsec
,
8920 &target_st
->target_st_atime_nsec
);
8921 __put_user(st
.st_mtim
.tv_nsec
,
8922 &target_st
->target_st_mtime_nsec
);
8923 __put_user(st
.st_ctim
.tv_nsec
,
8924 &target_st
->target_st_ctime_nsec
);
8926 unlock_user_struct(target_st
, arg2
, 1);
8931 case TARGET_NR_vhangup
:
8932 return get_errno(vhangup());
8933 #ifdef TARGET_NR_syscall
8934 case TARGET_NR_syscall
:
8935 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
8936 arg6
, arg7
, arg8
, 0);
8938 case TARGET_NR_wait4
:
8941 abi_long status_ptr
= arg2
;
8942 struct rusage rusage
, *rusage_ptr
;
8943 abi_ulong target_rusage
= arg4
;
8944 abi_long rusage_err
;
8946 rusage_ptr
= &rusage
;
8949 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
8950 if (!is_error(ret
)) {
8951 if (status_ptr
&& ret
) {
8952 status
= host_to_target_waitstatus(status
);
8953 if (put_user_s32(status
, status_ptr
))
8954 return -TARGET_EFAULT
;
8956 if (target_rusage
) {
8957 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
8965 #ifdef TARGET_NR_swapoff
8966 case TARGET_NR_swapoff
:
8967 if (!(p
= lock_user_string(arg1
)))
8968 return -TARGET_EFAULT
;
8969 ret
= get_errno(swapoff(p
));
8970 unlock_user(p
, arg1
, 0);
8973 case TARGET_NR_sysinfo
:
8975 struct target_sysinfo
*target_value
;
8976 struct sysinfo value
;
8977 ret
= get_errno(sysinfo(&value
));
8978 if (!is_error(ret
) && arg1
)
8980 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
8981 return -TARGET_EFAULT
;
8982 __put_user(value
.uptime
, &target_value
->uptime
);
8983 __put_user(value
.loads
[0], &target_value
->loads
[0]);
8984 __put_user(value
.loads
[1], &target_value
->loads
[1]);
8985 __put_user(value
.loads
[2], &target_value
->loads
[2]);
8986 __put_user(value
.totalram
, &target_value
->totalram
);
8987 __put_user(value
.freeram
, &target_value
->freeram
);
8988 __put_user(value
.sharedram
, &target_value
->sharedram
);
8989 __put_user(value
.bufferram
, &target_value
->bufferram
);
8990 __put_user(value
.totalswap
, &target_value
->totalswap
);
8991 __put_user(value
.freeswap
, &target_value
->freeswap
);
8992 __put_user(value
.procs
, &target_value
->procs
);
8993 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
8994 __put_user(value
.freehigh
, &target_value
->freehigh
);
8995 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
8996 unlock_user_struct(target_value
, arg1
, 1);
9000 #ifdef TARGET_NR_ipc
9002 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9004 #ifdef TARGET_NR_semget
9005 case TARGET_NR_semget
:
9006 return get_errno(semget(arg1
, arg2
, arg3
));
9008 #ifdef TARGET_NR_semop
9009 case TARGET_NR_semop
:
9010 return do_semop(arg1
, arg2
, arg3
);
9012 #ifdef TARGET_NR_semctl
9013 case TARGET_NR_semctl
:
9014 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9016 #ifdef TARGET_NR_msgctl
9017 case TARGET_NR_msgctl
:
9018 return do_msgctl(arg1
, arg2
, arg3
);
9020 #ifdef TARGET_NR_msgget
9021 case TARGET_NR_msgget
:
9022 return get_errno(msgget(arg1
, arg2
));
9024 #ifdef TARGET_NR_msgrcv
9025 case TARGET_NR_msgrcv
:
9026 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9028 #ifdef TARGET_NR_msgsnd
9029 case TARGET_NR_msgsnd
:
9030 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9032 #ifdef TARGET_NR_shmget
9033 case TARGET_NR_shmget
:
9034 return get_errno(shmget(arg1
, arg2
, arg3
));
9036 #ifdef TARGET_NR_shmctl
9037 case TARGET_NR_shmctl
:
9038 return do_shmctl(arg1
, arg2
, arg3
);
9040 #ifdef TARGET_NR_shmat
9041 case TARGET_NR_shmat
:
9042 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9044 #ifdef TARGET_NR_shmdt
9045 case TARGET_NR_shmdt
:
9046 return do_shmdt(arg1
);
9048 case TARGET_NR_fsync
:
9049 return get_errno(fsync(arg1
));
9050 case TARGET_NR_clone
:
9051 /* Linux manages to have three different orderings for its
9052 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9053 * match the kernel's CONFIG_CLONE_* settings.
9054 * Microblaze is further special in that it uses a sixth
9055 * implicit argument to clone for the TLS pointer.
9057 #if defined(TARGET_MICROBLAZE)
9058 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9059 #elif defined(TARGET_CLONE_BACKWARDS)
9060 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9061 #elif defined(TARGET_CLONE_BACKWARDS2)
9062 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9064 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9067 #ifdef __NR_exit_group
9068 /* new thread calls */
9069 case TARGET_NR_exit_group
:
9070 preexit_cleanup(cpu_env
, arg1
);
9071 return get_errno(exit_group(arg1
));
9073 case TARGET_NR_setdomainname
:
9074 if (!(p
= lock_user_string(arg1
)))
9075 return -TARGET_EFAULT
;
9076 ret
= get_errno(setdomainname(p
, arg2
));
9077 unlock_user(p
, arg1
, 0);
9079 case TARGET_NR_uname
:
9080 /* no need to transcode because we use the linux syscall */
9082 struct new_utsname
* buf
;
9084 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9085 return -TARGET_EFAULT
;
9086 ret
= get_errno(sys_uname(buf
));
9087 if (!is_error(ret
)) {
9088 /* Overwrite the native machine name with whatever is being
9090 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9091 sizeof(buf
->machine
));
9092 /* Allow the user to override the reported release. */
9093 if (qemu_uname_release
&& *qemu_uname_release
) {
9094 g_strlcpy(buf
->release
, qemu_uname_release
,
9095 sizeof(buf
->release
));
9098 unlock_user_struct(buf
, arg1
, 1);
9102 case TARGET_NR_modify_ldt
:
9103 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9104 #if !defined(TARGET_X86_64)
9105 case TARGET_NR_vm86
:
9106 return do_vm86(cpu_env
, arg1
, arg2
);
9109 case TARGET_NR_adjtimex
:
9111 struct timex host_buf
;
9113 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9114 return -TARGET_EFAULT
;
9116 ret
= get_errno(adjtimex(&host_buf
));
9117 if (!is_error(ret
)) {
9118 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9119 return -TARGET_EFAULT
;
9124 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9125 case TARGET_NR_clock_adjtime
:
9127 struct timex htx
, *phtx
= &htx
;
9129 if (target_to_host_timex(phtx
, arg2
) != 0) {
9130 return -TARGET_EFAULT
;
9132 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9133 if (!is_error(ret
) && phtx
) {
9134 if (host_to_target_timex(arg2
, phtx
) != 0) {
9135 return -TARGET_EFAULT
;
9141 case TARGET_NR_getpgid
:
9142 return get_errno(getpgid(arg1
));
9143 case TARGET_NR_fchdir
:
9144 return get_errno(fchdir(arg1
));
9145 case TARGET_NR_personality
:
9146 return get_errno(personality(arg1
));
9147 #ifdef TARGET_NR__llseek /* Not on alpha */
9148 case TARGET_NR__llseek
:
9151 #if !defined(__NR_llseek)
9152 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9154 ret
= get_errno(res
);
9159 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9161 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9162 return -TARGET_EFAULT
;
9167 #ifdef TARGET_NR_getdents
9168 case TARGET_NR_getdents
:
9169 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9170 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9172 struct target_dirent
*target_dirp
;
9173 struct linux_dirent
*dirp
;
9174 abi_long count
= arg3
;
9176 dirp
= g_try_malloc(count
);
9178 return -TARGET_ENOMEM
;
9181 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9182 if (!is_error(ret
)) {
9183 struct linux_dirent
*de
;
9184 struct target_dirent
*tde
;
9186 int reclen
, treclen
;
9187 int count1
, tnamelen
;
9191 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9192 return -TARGET_EFAULT
;
9195 reclen
= de
->d_reclen
;
9196 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9197 assert(tnamelen
>= 0);
9198 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9199 assert(count1
+ treclen
<= count
);
9200 tde
->d_reclen
= tswap16(treclen
);
9201 tde
->d_ino
= tswapal(de
->d_ino
);
9202 tde
->d_off
= tswapal(de
->d_off
);
9203 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9204 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9206 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9210 unlock_user(target_dirp
, arg2
, ret
);
9216 struct linux_dirent
*dirp
;
9217 abi_long count
= arg3
;
9219 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9220 return -TARGET_EFAULT
;
9221 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9222 if (!is_error(ret
)) {
9223 struct linux_dirent
*de
;
9228 reclen
= de
->d_reclen
;
9231 de
->d_reclen
= tswap16(reclen
);
9232 tswapls(&de
->d_ino
);
9233 tswapls(&de
->d_off
);
9234 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9238 unlock_user(dirp
, arg2
, ret
);
9242 /* Implement getdents in terms of getdents64 */
9244 struct linux_dirent64
*dirp
;
9245 abi_long count
= arg3
;
9247 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9249 return -TARGET_EFAULT
;
9251 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9252 if (!is_error(ret
)) {
9253 /* Convert the dirent64 structs to target dirent. We do this
9254 * in-place, since we can guarantee that a target_dirent is no
9255 * larger than a dirent64; however this means we have to be
9256 * careful to read everything before writing in the new format.
9258 struct linux_dirent64
*de
;
9259 struct target_dirent
*tde
;
9264 tde
= (struct target_dirent
*)dirp
;
9266 int namelen
, treclen
;
9267 int reclen
= de
->d_reclen
;
9268 uint64_t ino
= de
->d_ino
;
9269 int64_t off
= de
->d_off
;
9270 uint8_t type
= de
->d_type
;
9272 namelen
= strlen(de
->d_name
);
9273 treclen
= offsetof(struct target_dirent
, d_name
)
9275 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9277 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9278 tde
->d_ino
= tswapal(ino
);
9279 tde
->d_off
= tswapal(off
);
9280 tde
->d_reclen
= tswap16(treclen
);
9281 /* The target_dirent type is in what was formerly a padding
9282 * byte at the end of the structure:
9284 *(((char *)tde
) + treclen
- 1) = type
;
9286 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9287 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9293 unlock_user(dirp
, arg2
, ret
);
9297 #endif /* TARGET_NR_getdents */
9298 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9299 case TARGET_NR_getdents64
:
9301 struct linux_dirent64
*dirp
;
9302 abi_long count
= arg3
;
9303 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9304 return -TARGET_EFAULT
;
9305 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9306 if (!is_error(ret
)) {
9307 struct linux_dirent64
*de
;
9312 reclen
= de
->d_reclen
;
9315 de
->d_reclen
= tswap16(reclen
);
9316 tswap64s((uint64_t *)&de
->d_ino
);
9317 tswap64s((uint64_t *)&de
->d_off
);
9318 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9322 unlock_user(dirp
, arg2
, ret
);
9325 #endif /* TARGET_NR_getdents64 */
9326 #if defined(TARGET_NR__newselect)
9327 case TARGET_NR__newselect
:
9328 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9330 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9331 # ifdef TARGET_NR_poll
9332 case TARGET_NR_poll
:
9334 # ifdef TARGET_NR_ppoll
9335 case TARGET_NR_ppoll
:
9338 struct target_pollfd
*target_pfd
;
9339 unsigned int nfds
= arg2
;
9346 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9347 return -TARGET_EINVAL
;
9350 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9351 sizeof(struct target_pollfd
) * nfds
, 1);
9353 return -TARGET_EFAULT
;
9356 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9357 for (i
= 0; i
< nfds
; i
++) {
9358 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9359 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9364 # ifdef TARGET_NR_ppoll
9365 case TARGET_NR_ppoll
:
9367 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9368 target_sigset_t
*target_set
;
9369 sigset_t _set
, *set
= &_set
;
9372 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9373 unlock_user(target_pfd
, arg1
, 0);
9374 return -TARGET_EFAULT
;
9381 if (arg5
!= sizeof(target_sigset_t
)) {
9382 unlock_user(target_pfd
, arg1
, 0);
9383 return -TARGET_EINVAL
;
9386 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9388 unlock_user(target_pfd
, arg1
, 0);
9389 return -TARGET_EFAULT
;
9391 target_to_host_sigset(set
, target_set
);
9396 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9397 set
, SIGSET_T_SIZE
));
9399 if (!is_error(ret
) && arg3
) {
9400 host_to_target_timespec(arg3
, timeout_ts
);
9403 unlock_user(target_set
, arg4
, 0);
9408 # ifdef TARGET_NR_poll
9409 case TARGET_NR_poll
:
9411 struct timespec ts
, *pts
;
9414 /* Convert ms to secs, ns */
9415 ts
.tv_sec
= arg3
/ 1000;
9416 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9419 /* -ve poll() timeout means "infinite" */
9422 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9427 g_assert_not_reached();
9430 if (!is_error(ret
)) {
9431 for(i
= 0; i
< nfds
; i
++) {
9432 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9435 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9439 case TARGET_NR_flock
:
9440 /* NOTE: the flock constant seems to be the same for every
9442 return get_errno(safe_flock(arg1
, arg2
));
9443 case TARGET_NR_readv
:
9445 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9447 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9448 unlock_iovec(vec
, arg2
, arg3
, 1);
9450 ret
= -host_to_target_errno(errno
);
9454 case TARGET_NR_writev
:
9456 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9458 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9459 unlock_iovec(vec
, arg2
, arg3
, 0);
9461 ret
= -host_to_target_errno(errno
);
9465 #if defined(TARGET_NR_preadv)
9466 case TARGET_NR_preadv
:
9468 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9470 unsigned long low
, high
;
9472 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9473 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9474 unlock_iovec(vec
, arg2
, arg3
, 1);
9476 ret
= -host_to_target_errno(errno
);
9481 #if defined(TARGET_NR_pwritev)
9482 case TARGET_NR_pwritev
:
9484 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9486 unsigned long low
, high
;
9488 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9489 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9490 unlock_iovec(vec
, arg2
, arg3
, 0);
9492 ret
= -host_to_target_errno(errno
);
9497 case TARGET_NR_getsid
:
9498 return get_errno(getsid(arg1
));
9499 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9500 case TARGET_NR_fdatasync
:
9501 return get_errno(fdatasync(arg1
));
9503 #ifdef TARGET_NR__sysctl
9504 case TARGET_NR__sysctl
:
9505 /* We don't implement this, but ENOTDIR is always a safe
9507 return -TARGET_ENOTDIR
;
9509 case TARGET_NR_sched_getaffinity
:
9511 unsigned int mask_size
;
9512 unsigned long *mask
;
9515 * sched_getaffinity needs multiples of ulong, so need to take
9516 * care of mismatches between target ulong and host ulong sizes.
9518 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9519 return -TARGET_EINVAL
;
9521 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9523 mask
= alloca(mask_size
);
9524 memset(mask
, 0, mask_size
);
9525 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9527 if (!is_error(ret
)) {
9529 /* More data returned than the caller's buffer will fit.
9530 * This only happens if sizeof(abi_long) < sizeof(long)
9531 * and the caller passed us a buffer holding an odd number
9532 * of abi_longs. If the host kernel is actually using the
9533 * extra 4 bytes then fail EINVAL; otherwise we can just
9534 * ignore them and only copy the interesting part.
9536 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9537 if (numcpus
> arg2
* 8) {
9538 return -TARGET_EINVAL
;
9543 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9544 return -TARGET_EFAULT
;
9549 case TARGET_NR_sched_setaffinity
:
9551 unsigned int mask_size
;
9552 unsigned long *mask
;
9555 * sched_setaffinity needs multiples of ulong, so need to take
9556 * care of mismatches between target ulong and host ulong sizes.
9558 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9559 return -TARGET_EINVAL
;
9561 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9562 mask
= alloca(mask_size
);
9564 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9569 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9571 case TARGET_NR_getcpu
:
9574 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9575 arg2
? &node
: NULL
,
9577 if (is_error(ret
)) {
9580 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9581 return -TARGET_EFAULT
;
9583 if (arg2
&& put_user_u32(node
, arg2
)) {
9584 return -TARGET_EFAULT
;
9588 case TARGET_NR_sched_setparam
:
9590 struct sched_param
*target_schp
;
9591 struct sched_param schp
;
9594 return -TARGET_EINVAL
;
9596 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9597 return -TARGET_EFAULT
;
9598 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9599 unlock_user_struct(target_schp
, arg2
, 0);
9600 return get_errno(sched_setparam(arg1
, &schp
));
9602 case TARGET_NR_sched_getparam
:
9604 struct sched_param
*target_schp
;
9605 struct sched_param schp
;
9608 return -TARGET_EINVAL
;
9610 ret
= get_errno(sched_getparam(arg1
, &schp
));
9611 if (!is_error(ret
)) {
9612 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9613 return -TARGET_EFAULT
;
9614 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9615 unlock_user_struct(target_schp
, arg2
, 1);
9619 case TARGET_NR_sched_setscheduler
:
9621 struct sched_param
*target_schp
;
9622 struct sched_param schp
;
9624 return -TARGET_EINVAL
;
9626 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9627 return -TARGET_EFAULT
;
9628 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9629 unlock_user_struct(target_schp
, arg3
, 0);
9630 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9632 case TARGET_NR_sched_getscheduler
:
9633 return get_errno(sched_getscheduler(arg1
));
9634 case TARGET_NR_sched_yield
:
9635 return get_errno(sched_yield());
9636 case TARGET_NR_sched_get_priority_max
:
9637 return get_errno(sched_get_priority_max(arg1
));
9638 case TARGET_NR_sched_get_priority_min
:
9639 return get_errno(sched_get_priority_min(arg1
));
9640 case TARGET_NR_sched_rr_get_interval
:
9643 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9644 if (!is_error(ret
)) {
9645 ret
= host_to_target_timespec(arg2
, &ts
);
9649 case TARGET_NR_nanosleep
:
9651 struct timespec req
, rem
;
9652 target_to_host_timespec(&req
, arg1
);
9653 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9654 if (is_error(ret
) && arg2
) {
9655 host_to_target_timespec(arg2
, &rem
);
9659 case TARGET_NR_prctl
:
9661 case PR_GET_PDEATHSIG
:
9664 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9665 if (!is_error(ret
) && arg2
9666 && put_user_ual(deathsig
, arg2
)) {
9667 return -TARGET_EFAULT
;
9674 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9676 return -TARGET_EFAULT
;
9678 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9680 unlock_user(name
, arg2
, 16);
9685 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9687 return -TARGET_EFAULT
;
9689 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9691 unlock_user(name
, arg2
, 0);
9696 case TARGET_PR_GET_FP_MODE
:
9698 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9700 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9701 ret
|= TARGET_PR_FP_MODE_FR
;
9703 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
9704 ret
|= TARGET_PR_FP_MODE_FRE
;
9708 case TARGET_PR_SET_FP_MODE
:
9710 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9711 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
9712 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
9713 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
9714 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
9716 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
9717 TARGET_PR_FP_MODE_FRE
;
9719 /* If nothing to change, return right away, successfully. */
9720 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
9723 /* Check the value is valid */
9724 if (arg2
& ~known_bits
) {
9725 return -TARGET_EOPNOTSUPP
;
9727 /* Setting FRE without FR is not supported. */
9728 if (new_fre
&& !new_fr
) {
9729 return -TARGET_EOPNOTSUPP
;
9731 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
9732 /* FR1 is not supported */
9733 return -TARGET_EOPNOTSUPP
;
9735 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
9736 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
9737 /* cannot set FR=0 */
9738 return -TARGET_EOPNOTSUPP
;
9740 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
9741 /* Cannot set FRE=1 */
9742 return -TARGET_EOPNOTSUPP
;
9746 fpr_t
*fpr
= env
->active_fpu
.fpr
;
9747 for (i
= 0; i
< 32 ; i
+= 2) {
9748 if (!old_fr
&& new_fr
) {
9749 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
9750 } else if (old_fr
&& !new_fr
) {
9751 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
9756 env
->CP0_Status
|= (1 << CP0St_FR
);
9757 env
->hflags
|= MIPS_HFLAG_F64
;
9759 env
->CP0_Status
&= ~(1 << CP0St_FR
);
9760 env
->hflags
&= ~MIPS_HFLAG_F64
;
9763 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
9764 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
9765 env
->hflags
|= MIPS_HFLAG_FRE
;
9768 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
9769 env
->hflags
&= ~MIPS_HFLAG_FRE
;
9775 #ifdef TARGET_AARCH64
9776 case TARGET_PR_SVE_SET_VL
:
9778 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9779 * PR_SVE_VL_INHERIT. Note the kernel definition
9780 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9781 * even though the current architectural maximum is VQ=16.
9783 ret
= -TARGET_EINVAL
;
9784 if (cpu_isar_feature(aa64_sve
, arm_env_get_cpu(cpu_env
))
9785 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9786 CPUARMState
*env
= cpu_env
;
9787 ARMCPU
*cpu
= arm_env_get_cpu(env
);
9788 uint32_t vq
, old_vq
;
9790 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9791 vq
= MAX(arg2
/ 16, 1);
9792 vq
= MIN(vq
, cpu
->sve_max_vq
);
9795 aarch64_sve_narrow_vq(env
, vq
);
9797 env
->vfp
.zcr_el
[1] = vq
- 1;
9801 case TARGET_PR_SVE_GET_VL
:
9802 ret
= -TARGET_EINVAL
;
9804 ARMCPU
*cpu
= arm_env_get_cpu(cpu_env
);
9805 if (cpu_isar_feature(aa64_sve
, cpu
)) {
9806 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9810 case TARGET_PR_PAC_RESET_KEYS
:
9812 CPUARMState
*env
= cpu_env
;
9813 ARMCPU
*cpu
= arm_env_get_cpu(env
);
9815 if (arg3
|| arg4
|| arg5
) {
9816 return -TARGET_EINVAL
;
9818 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
9819 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
9820 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
9821 TARGET_PR_PAC_APGAKEY
);
9827 } else if (arg2
& ~all
) {
9828 return -TARGET_EINVAL
;
9830 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
9831 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
9832 sizeof(ARMPACKey
), &err
);
9834 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
9835 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
9836 sizeof(ARMPACKey
), &err
);
9838 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
9839 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
9840 sizeof(ARMPACKey
), &err
);
9842 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
9843 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
9844 sizeof(ARMPACKey
), &err
);
9846 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
9847 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
9848 sizeof(ARMPACKey
), &err
);
9852 * Some unknown failure in the crypto. The best
9853 * we can do is log it and fail the syscall.
9854 * The real syscall cannot fail this way.
9856 qemu_log_mask(LOG_UNIMP
,
9857 "PR_PAC_RESET_KEYS: Crypto failure: %s",
9858 error_get_pretty(err
));
9865 return -TARGET_EINVAL
;
9866 #endif /* AARCH64 */
9867 case PR_GET_SECCOMP
:
9868 case PR_SET_SECCOMP
:
9869 /* Disable seccomp to prevent the target disabling syscalls we
9871 return -TARGET_EINVAL
;
9873 /* Most prctl options have no pointer arguments */
9874 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9877 #ifdef TARGET_NR_arch_prctl
9878 case TARGET_NR_arch_prctl
:
9879 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9880 return do_arch_prctl(cpu_env
, arg1
, arg2
);
9885 #ifdef TARGET_NR_pread64
9886 case TARGET_NR_pread64
:
9887 if (regpairs_aligned(cpu_env
, num
)) {
9891 if (arg2
== 0 && arg3
== 0) {
9892 /* Special-case NULL buffer and zero length, which should succeed */
9895 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9897 return -TARGET_EFAULT
;
9900 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9901 unlock_user(p
, arg2
, ret
);
9903 case TARGET_NR_pwrite64
:
9904 if (regpairs_aligned(cpu_env
, num
)) {
9908 if (arg2
== 0 && arg3
== 0) {
9909 /* Special-case NULL buffer and zero length, which should succeed */
9912 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
9914 return -TARGET_EFAULT
;
9917 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
9918 unlock_user(p
, arg2
, 0);
9921 case TARGET_NR_getcwd
:
9922 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
9923 return -TARGET_EFAULT
;
9924 ret
= get_errno(sys_getcwd1(p
, arg2
));
9925 unlock_user(p
, arg1
, ret
);
9927 case TARGET_NR_capget
:
9928 case TARGET_NR_capset
:
9930 struct target_user_cap_header
*target_header
;
9931 struct target_user_cap_data
*target_data
= NULL
;
9932 struct __user_cap_header_struct header
;
9933 struct __user_cap_data_struct data
[2];
9934 struct __user_cap_data_struct
*dataptr
= NULL
;
9935 int i
, target_datalen
;
9938 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
9939 return -TARGET_EFAULT
;
9941 header
.version
= tswap32(target_header
->version
);
9942 header
.pid
= tswap32(target_header
->pid
);
9944 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
9945 /* Version 2 and up takes pointer to two user_data structs */
9949 target_datalen
= sizeof(*target_data
) * data_items
;
9952 if (num
== TARGET_NR_capget
) {
9953 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
9955 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
9958 unlock_user_struct(target_header
, arg1
, 0);
9959 return -TARGET_EFAULT
;
9962 if (num
== TARGET_NR_capset
) {
9963 for (i
= 0; i
< data_items
; i
++) {
9964 data
[i
].effective
= tswap32(target_data
[i
].effective
);
9965 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
9966 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
9973 if (num
== TARGET_NR_capget
) {
9974 ret
= get_errno(capget(&header
, dataptr
));
9976 ret
= get_errno(capset(&header
, dataptr
));
9979 /* The kernel always updates version for both capget and capset */
9980 target_header
->version
= tswap32(header
.version
);
9981 unlock_user_struct(target_header
, arg1
, 1);
9984 if (num
== TARGET_NR_capget
) {
9985 for (i
= 0; i
< data_items
; i
++) {
9986 target_data
[i
].effective
= tswap32(data
[i
].effective
);
9987 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
9988 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
9990 unlock_user(target_data
, arg2
, target_datalen
);
9992 unlock_user(target_data
, arg2
, 0);
9997 case TARGET_NR_sigaltstack
:
9998 return do_sigaltstack(arg1
, arg2
,
9999 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10001 #ifdef CONFIG_SENDFILE
10002 #ifdef TARGET_NR_sendfile
10003 case TARGET_NR_sendfile
:
10005 off_t
*offp
= NULL
;
10008 ret
= get_user_sal(off
, arg3
);
10009 if (is_error(ret
)) {
10014 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10015 if (!is_error(ret
) && arg3
) {
10016 abi_long ret2
= put_user_sal(off
, arg3
);
10017 if (is_error(ret2
)) {
10024 #ifdef TARGET_NR_sendfile64
10025 case TARGET_NR_sendfile64
:
10027 off_t
*offp
= NULL
;
10030 ret
= get_user_s64(off
, arg3
);
10031 if (is_error(ret
)) {
10036 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10037 if (!is_error(ret
) && arg3
) {
10038 abi_long ret2
= put_user_s64(off
, arg3
);
10039 if (is_error(ret2
)) {
10047 #ifdef TARGET_NR_vfork
10048 case TARGET_NR_vfork
:
10049 return get_errno(do_fork(cpu_env
,
10050 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10053 #ifdef TARGET_NR_ugetrlimit
10054 case TARGET_NR_ugetrlimit
:
10056 struct rlimit rlim
;
10057 int resource
= target_to_host_resource(arg1
);
10058 ret
= get_errno(getrlimit(resource
, &rlim
));
10059 if (!is_error(ret
)) {
10060 struct target_rlimit
*target_rlim
;
10061 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10062 return -TARGET_EFAULT
;
10063 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10064 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10065 unlock_user_struct(target_rlim
, arg2
, 1);
10070 #ifdef TARGET_NR_truncate64
10071 case TARGET_NR_truncate64
:
10072 if (!(p
= lock_user_string(arg1
)))
10073 return -TARGET_EFAULT
;
10074 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10075 unlock_user(p
, arg1
, 0);
10078 #ifdef TARGET_NR_ftruncate64
10079 case TARGET_NR_ftruncate64
:
10080 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10082 #ifdef TARGET_NR_stat64
10083 case TARGET_NR_stat64
:
10084 if (!(p
= lock_user_string(arg1
))) {
10085 return -TARGET_EFAULT
;
10087 ret
= get_errno(stat(path(p
), &st
));
10088 unlock_user(p
, arg1
, 0);
10089 if (!is_error(ret
))
10090 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10093 #ifdef TARGET_NR_lstat64
10094 case TARGET_NR_lstat64
:
10095 if (!(p
= lock_user_string(arg1
))) {
10096 return -TARGET_EFAULT
;
10098 ret
= get_errno(lstat(path(p
), &st
));
10099 unlock_user(p
, arg1
, 0);
10100 if (!is_error(ret
))
10101 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10104 #ifdef TARGET_NR_fstat64
10105 case TARGET_NR_fstat64
:
10106 ret
= get_errno(fstat(arg1
, &st
));
10107 if (!is_error(ret
))
10108 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10111 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10112 #ifdef TARGET_NR_fstatat64
10113 case TARGET_NR_fstatat64
:
10115 #ifdef TARGET_NR_newfstatat
10116 case TARGET_NR_newfstatat
:
10118 if (!(p
= lock_user_string(arg2
))) {
10119 return -TARGET_EFAULT
;
10121 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10122 unlock_user(p
, arg2
, 0);
10123 if (!is_error(ret
))
10124 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10127 #ifdef TARGET_NR_lchown
10128 case TARGET_NR_lchown
:
10129 if (!(p
= lock_user_string(arg1
)))
10130 return -TARGET_EFAULT
;
10131 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10132 unlock_user(p
, arg1
, 0);
10135 #ifdef TARGET_NR_getuid
10136 case TARGET_NR_getuid
:
10137 return get_errno(high2lowuid(getuid()));
10139 #ifdef TARGET_NR_getgid
10140 case TARGET_NR_getgid
:
10141 return get_errno(high2lowgid(getgid()));
10143 #ifdef TARGET_NR_geteuid
10144 case TARGET_NR_geteuid
:
10145 return get_errno(high2lowuid(geteuid()));
10147 #ifdef TARGET_NR_getegid
10148 case TARGET_NR_getegid
:
10149 return get_errno(high2lowgid(getegid()));
10151 case TARGET_NR_setreuid
:
10152 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10153 case TARGET_NR_setregid
:
10154 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10155 case TARGET_NR_getgroups
:
10157 int gidsetsize
= arg1
;
10158 target_id
*target_grouplist
;
10162 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10163 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10164 if (gidsetsize
== 0)
10166 if (!is_error(ret
)) {
10167 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10168 if (!target_grouplist
)
10169 return -TARGET_EFAULT
;
10170 for(i
= 0;i
< ret
; i
++)
10171 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10172 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10176 case TARGET_NR_setgroups
:
10178 int gidsetsize
= arg1
;
10179 target_id
*target_grouplist
;
10180 gid_t
*grouplist
= NULL
;
10183 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10184 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10185 if (!target_grouplist
) {
10186 return -TARGET_EFAULT
;
10188 for (i
= 0; i
< gidsetsize
; i
++) {
10189 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10191 unlock_user(target_grouplist
, arg2
, 0);
10193 return get_errno(setgroups(gidsetsize
, grouplist
));
10195 case TARGET_NR_fchown
:
10196 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10197 #if defined(TARGET_NR_fchownat)
10198 case TARGET_NR_fchownat
:
10199 if (!(p
= lock_user_string(arg2
)))
10200 return -TARGET_EFAULT
;
10201 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10202 low2highgid(arg4
), arg5
));
10203 unlock_user(p
, arg2
, 0);
10206 #ifdef TARGET_NR_setresuid
10207 case TARGET_NR_setresuid
:
10208 return get_errno(sys_setresuid(low2highuid(arg1
),
10210 low2highuid(arg3
)));
10212 #ifdef TARGET_NR_getresuid
10213 case TARGET_NR_getresuid
:
10215 uid_t ruid
, euid
, suid
;
10216 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10217 if (!is_error(ret
)) {
10218 if (put_user_id(high2lowuid(ruid
), arg1
)
10219 || put_user_id(high2lowuid(euid
), arg2
)
10220 || put_user_id(high2lowuid(suid
), arg3
))
10221 return -TARGET_EFAULT
;
10226 #ifdef TARGET_NR_getresgid
10227 case TARGET_NR_setresgid
:
10228 return get_errno(sys_setresgid(low2highgid(arg1
),
10230 low2highgid(arg3
)));
10232 #ifdef TARGET_NR_getresgid
10233 case TARGET_NR_getresgid
:
10235 gid_t rgid
, egid
, sgid
;
10236 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10237 if (!is_error(ret
)) {
10238 if (put_user_id(high2lowgid(rgid
), arg1
)
10239 || put_user_id(high2lowgid(egid
), arg2
)
10240 || put_user_id(high2lowgid(sgid
), arg3
))
10241 return -TARGET_EFAULT
;
10246 #ifdef TARGET_NR_chown
10247 case TARGET_NR_chown
:
10248 if (!(p
= lock_user_string(arg1
)))
10249 return -TARGET_EFAULT
;
10250 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10251 unlock_user(p
, arg1
, 0);
10254 case TARGET_NR_setuid
:
10255 return get_errno(sys_setuid(low2highuid(arg1
)));
10256 case TARGET_NR_setgid
:
10257 return get_errno(sys_setgid(low2highgid(arg1
)));
10258 case TARGET_NR_setfsuid
:
10259 return get_errno(setfsuid(arg1
));
10260 case TARGET_NR_setfsgid
:
10261 return get_errno(setfsgid(arg1
));
10263 #ifdef TARGET_NR_lchown32
10264 case TARGET_NR_lchown32
:
10265 if (!(p
= lock_user_string(arg1
)))
10266 return -TARGET_EFAULT
;
10267 ret
= get_errno(lchown(p
, arg2
, arg3
));
10268 unlock_user(p
, arg1
, 0);
10271 #ifdef TARGET_NR_getuid32
10272 case TARGET_NR_getuid32
:
10273 return get_errno(getuid());
10276 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10277 /* Alpha specific */
10278 case TARGET_NR_getxuid
:
10282 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10284 return get_errno(getuid());
10286 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10287 /* Alpha specific */
10288 case TARGET_NR_getxgid
:
10292 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10294 return get_errno(getgid());
10296 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10297 /* Alpha specific */
10298 case TARGET_NR_osf_getsysinfo
:
10299 ret
= -TARGET_EOPNOTSUPP
;
10301 case TARGET_GSI_IEEE_FP_CONTROL
:
10303 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10304 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10306 swcr
&= ~SWCR_STATUS_MASK
;
10307 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10309 if (put_user_u64 (swcr
, arg2
))
10310 return -TARGET_EFAULT
;
10315 /* case GSI_IEEE_STATE_AT_SIGNAL:
10316 -- Not implemented in linux kernel.
10318 -- Retrieves current unaligned access state; not much used.
10319 case GSI_PROC_TYPE:
10320 -- Retrieves implver information; surely not used.
10321 case GSI_GET_HWRPB:
10322 -- Grabs a copy of the HWRPB; surely not used.
10327 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10328 /* Alpha specific */
10329 case TARGET_NR_osf_setsysinfo
:
10330 ret
= -TARGET_EOPNOTSUPP
;
10332 case TARGET_SSI_IEEE_FP_CONTROL
:
10334 uint64_t swcr
, fpcr
;
10336 if (get_user_u64 (swcr
, arg2
)) {
10337 return -TARGET_EFAULT
;
10341 * The kernel calls swcr_update_status to update the
10342 * status bits from the fpcr at every point that it
10343 * could be queried. Therefore, we store the status
10344 * bits only in FPCR.
10346 ((CPUAlphaState
*)cpu_env
)->swcr
10347 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
10349 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10350 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
10351 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
10352 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10357 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10359 uint64_t exc
, fpcr
, fex
;
10361 if (get_user_u64(exc
, arg2
)) {
10362 return -TARGET_EFAULT
;
10364 exc
&= SWCR_STATUS_MASK
;
10365 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10367 /* Old exceptions are not signaled. */
10368 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
10370 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
10371 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
10373 /* Update the hardware fpcr. */
10374 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
10375 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10378 int si_code
= TARGET_FPE_FLTUNK
;
10379 target_siginfo_t info
;
10381 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
10382 si_code
= TARGET_FPE_FLTUND
;
10384 if (fex
& SWCR_TRAP_ENABLE_INE
) {
10385 si_code
= TARGET_FPE_FLTRES
;
10387 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
10388 si_code
= TARGET_FPE_FLTUND
;
10390 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
10391 si_code
= TARGET_FPE_FLTOVF
;
10393 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
10394 si_code
= TARGET_FPE_FLTDIV
;
10396 if (fex
& SWCR_TRAP_ENABLE_INV
) {
10397 si_code
= TARGET_FPE_FLTINV
;
10400 info
.si_signo
= SIGFPE
;
10402 info
.si_code
= si_code
;
10403 info
._sifields
._sigfault
._addr
10404 = ((CPUArchState
*)cpu_env
)->pc
;
10405 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10406 QEMU_SI_FAULT
, &info
);
10412 /* case SSI_NVPAIRS:
10413 -- Used with SSIN_UACPROC to enable unaligned accesses.
10414 case SSI_IEEE_STATE_AT_SIGNAL:
10415 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10416 -- Not implemented in linux kernel
10421 #ifdef TARGET_NR_osf_sigprocmask
10422 /* Alpha specific. */
10423 case TARGET_NR_osf_sigprocmask
:
10427 sigset_t set
, oldset
;
10430 case TARGET_SIG_BLOCK
:
10433 case TARGET_SIG_UNBLOCK
:
10436 case TARGET_SIG_SETMASK
:
10440 return -TARGET_EINVAL
;
10443 target_to_host_old_sigset(&set
, &mask
);
10444 ret
= do_sigprocmask(how
, &set
, &oldset
);
10446 host_to_target_old_sigset(&mask
, &oldset
);
10453 #ifdef TARGET_NR_getgid32
10454 case TARGET_NR_getgid32
:
10455 return get_errno(getgid());
10457 #ifdef TARGET_NR_geteuid32
10458 case TARGET_NR_geteuid32
:
10459 return get_errno(geteuid());
10461 #ifdef TARGET_NR_getegid32
10462 case TARGET_NR_getegid32
:
10463 return get_errno(getegid());
10465 #ifdef TARGET_NR_setreuid32
10466 case TARGET_NR_setreuid32
:
10467 return get_errno(setreuid(arg1
, arg2
));
10469 #ifdef TARGET_NR_setregid32
10470 case TARGET_NR_setregid32
:
10471 return get_errno(setregid(arg1
, arg2
));
10473 #ifdef TARGET_NR_getgroups32
10474 case TARGET_NR_getgroups32
:
10476 int gidsetsize
= arg1
;
10477 uint32_t *target_grouplist
;
10481 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10482 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10483 if (gidsetsize
== 0)
10485 if (!is_error(ret
)) {
10486 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10487 if (!target_grouplist
) {
10488 return -TARGET_EFAULT
;
10490 for(i
= 0;i
< ret
; i
++)
10491 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10492 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10497 #ifdef TARGET_NR_setgroups32
10498 case TARGET_NR_setgroups32
:
10500 int gidsetsize
= arg1
;
10501 uint32_t *target_grouplist
;
10505 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10506 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10507 if (!target_grouplist
) {
10508 return -TARGET_EFAULT
;
10510 for(i
= 0;i
< gidsetsize
; i
++)
10511 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10512 unlock_user(target_grouplist
, arg2
, 0);
10513 return get_errno(setgroups(gidsetsize
, grouplist
));
10516 #ifdef TARGET_NR_fchown32
10517 case TARGET_NR_fchown32
:
10518 return get_errno(fchown(arg1
, arg2
, arg3
));
10520 #ifdef TARGET_NR_setresuid32
10521 case TARGET_NR_setresuid32
:
10522 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10524 #ifdef TARGET_NR_getresuid32
10525 case TARGET_NR_getresuid32
:
10527 uid_t ruid
, euid
, suid
;
10528 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10529 if (!is_error(ret
)) {
10530 if (put_user_u32(ruid
, arg1
)
10531 || put_user_u32(euid
, arg2
)
10532 || put_user_u32(suid
, arg3
))
10533 return -TARGET_EFAULT
;
10538 #ifdef TARGET_NR_setresgid32
10539 case TARGET_NR_setresgid32
:
10540 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10542 #ifdef TARGET_NR_getresgid32
10543 case TARGET_NR_getresgid32
:
10545 gid_t rgid
, egid
, sgid
;
10546 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10547 if (!is_error(ret
)) {
10548 if (put_user_u32(rgid
, arg1
)
10549 || put_user_u32(egid
, arg2
)
10550 || put_user_u32(sgid
, arg3
))
10551 return -TARGET_EFAULT
;
10556 #ifdef TARGET_NR_chown32
10557 case TARGET_NR_chown32
:
10558 if (!(p
= lock_user_string(arg1
)))
10559 return -TARGET_EFAULT
;
10560 ret
= get_errno(chown(p
, arg2
, arg3
));
10561 unlock_user(p
, arg1
, 0);
10564 #ifdef TARGET_NR_setuid32
10565 case TARGET_NR_setuid32
:
10566 return get_errno(sys_setuid(arg1
));
10568 #ifdef TARGET_NR_setgid32
10569 case TARGET_NR_setgid32
:
10570 return get_errno(sys_setgid(arg1
));
10572 #ifdef TARGET_NR_setfsuid32
10573 case TARGET_NR_setfsuid32
:
10574 return get_errno(setfsuid(arg1
));
10576 #ifdef TARGET_NR_setfsgid32
10577 case TARGET_NR_setfsgid32
:
10578 return get_errno(setfsgid(arg1
));
10580 #ifdef TARGET_NR_mincore
10581 case TARGET_NR_mincore
:
10583 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10585 return -TARGET_ENOMEM
;
10587 p
= lock_user_string(arg3
);
10589 ret
= -TARGET_EFAULT
;
10591 ret
= get_errno(mincore(a
, arg2
, p
));
10592 unlock_user(p
, arg3
, ret
);
10594 unlock_user(a
, arg1
, 0);
10598 #ifdef TARGET_NR_arm_fadvise64_64
10599 case TARGET_NR_arm_fadvise64_64
:
10600 /* arm_fadvise64_64 looks like fadvise64_64 but
10601 * with different argument order: fd, advice, offset, len
10602 * rather than the usual fd, offset, len, advice.
10603 * Note that offset and len are both 64-bit so appear as
10604 * pairs of 32-bit registers.
10606 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10607 target_offset64(arg5
, arg6
), arg2
);
10608 return -host_to_target_errno(ret
);
10611 #if TARGET_ABI_BITS == 32
10613 #ifdef TARGET_NR_fadvise64_64
10614 case TARGET_NR_fadvise64_64
:
10615 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10616 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10624 /* 6 args: fd, offset (high, low), len (high, low), advice */
10625 if (regpairs_aligned(cpu_env
, num
)) {
10626 /* offset is in (3,4), len in (5,6) and advice in 7 */
10634 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10635 target_offset64(arg4
, arg5
), arg6
);
10636 return -host_to_target_errno(ret
);
10639 #ifdef TARGET_NR_fadvise64
10640 case TARGET_NR_fadvise64
:
10641 /* 5 args: fd, offset (high, low), len, advice */
10642 if (regpairs_aligned(cpu_env
, num
)) {
10643 /* offset is in (3,4), len in 5 and advice in 6 */
10649 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10650 return -host_to_target_errno(ret
);
10653 #else /* not a 32-bit ABI */
10654 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10655 #ifdef TARGET_NR_fadvise64_64
10656 case TARGET_NR_fadvise64_64
:
10658 #ifdef TARGET_NR_fadvise64
10659 case TARGET_NR_fadvise64
:
10661 #ifdef TARGET_S390X
10663 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10664 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10665 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10666 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10670 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10672 #endif /* end of 64-bit ABI fadvise handling */
10674 #ifdef TARGET_NR_madvise
10675 case TARGET_NR_madvise
:
10676 /* A straight passthrough may not be safe because qemu sometimes
10677 turns private file-backed mappings into anonymous mappings.
10678 This will break MADV_DONTNEED.
10679 This is a hint, so ignoring and returning success is ok. */
10682 #if TARGET_ABI_BITS == 32
10683 case TARGET_NR_fcntl64
:
10687 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10688 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10691 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10692 copyfrom
= copy_from_user_oabi_flock64
;
10693 copyto
= copy_to_user_oabi_flock64
;
10697 cmd
= target_to_host_fcntl_cmd(arg2
);
10698 if (cmd
== -TARGET_EINVAL
) {
10703 case TARGET_F_GETLK64
:
10704 ret
= copyfrom(&fl
, arg3
);
10708 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10710 ret
= copyto(arg3
, &fl
);
10714 case TARGET_F_SETLK64
:
10715 case TARGET_F_SETLKW64
:
10716 ret
= copyfrom(&fl
, arg3
);
10720 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10723 ret
= do_fcntl(arg1
, arg2
, arg3
);
10729 #ifdef TARGET_NR_cacheflush
10730 case TARGET_NR_cacheflush
:
10731 /* self-modifying code is handled automatically, so nothing needed */
10734 #ifdef TARGET_NR_getpagesize
10735 case TARGET_NR_getpagesize
:
10736 return TARGET_PAGE_SIZE
;
10738 case TARGET_NR_gettid
:
10739 return get_errno(sys_gettid());
10740 #ifdef TARGET_NR_readahead
10741 case TARGET_NR_readahead
:
10742 #if TARGET_ABI_BITS == 32
10743 if (regpairs_aligned(cpu_env
, num
)) {
10748 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10750 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10755 #ifdef TARGET_NR_setxattr
10756 case TARGET_NR_listxattr
:
10757 case TARGET_NR_llistxattr
:
10761 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10763 return -TARGET_EFAULT
;
10766 p
= lock_user_string(arg1
);
10768 if (num
== TARGET_NR_listxattr
) {
10769 ret
= get_errno(listxattr(p
, b
, arg3
));
10771 ret
= get_errno(llistxattr(p
, b
, arg3
));
10774 ret
= -TARGET_EFAULT
;
10776 unlock_user(p
, arg1
, 0);
10777 unlock_user(b
, arg2
, arg3
);
10780 case TARGET_NR_flistxattr
:
10784 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10786 return -TARGET_EFAULT
;
10789 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10790 unlock_user(b
, arg2
, arg3
);
10793 case TARGET_NR_setxattr
:
10794 case TARGET_NR_lsetxattr
:
10796 void *p
, *n
, *v
= 0;
10798 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10800 return -TARGET_EFAULT
;
10803 p
= lock_user_string(arg1
);
10804 n
= lock_user_string(arg2
);
10806 if (num
== TARGET_NR_setxattr
) {
10807 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10809 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10812 ret
= -TARGET_EFAULT
;
10814 unlock_user(p
, arg1
, 0);
10815 unlock_user(n
, arg2
, 0);
10816 unlock_user(v
, arg3
, 0);
10819 case TARGET_NR_fsetxattr
:
10823 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10825 return -TARGET_EFAULT
;
10828 n
= lock_user_string(arg2
);
10830 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10832 ret
= -TARGET_EFAULT
;
10834 unlock_user(n
, arg2
, 0);
10835 unlock_user(v
, arg3
, 0);
10838 case TARGET_NR_getxattr
:
10839 case TARGET_NR_lgetxattr
:
10841 void *p
, *n
, *v
= 0;
10843 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10845 return -TARGET_EFAULT
;
10848 p
= lock_user_string(arg1
);
10849 n
= lock_user_string(arg2
);
10851 if (num
== TARGET_NR_getxattr
) {
10852 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
10854 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
10857 ret
= -TARGET_EFAULT
;
10859 unlock_user(p
, arg1
, 0);
10860 unlock_user(n
, arg2
, 0);
10861 unlock_user(v
, arg3
, arg4
);
10864 case TARGET_NR_fgetxattr
:
10868 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10870 return -TARGET_EFAULT
;
10873 n
= lock_user_string(arg2
);
10875 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
10877 ret
= -TARGET_EFAULT
;
10879 unlock_user(n
, arg2
, 0);
10880 unlock_user(v
, arg3
, arg4
);
10883 case TARGET_NR_removexattr
:
10884 case TARGET_NR_lremovexattr
:
10887 p
= lock_user_string(arg1
);
10888 n
= lock_user_string(arg2
);
10890 if (num
== TARGET_NR_removexattr
) {
10891 ret
= get_errno(removexattr(p
, n
));
10893 ret
= get_errno(lremovexattr(p
, n
));
10896 ret
= -TARGET_EFAULT
;
10898 unlock_user(p
, arg1
, 0);
10899 unlock_user(n
, arg2
, 0);
10902 case TARGET_NR_fremovexattr
:
10905 n
= lock_user_string(arg2
);
10907 ret
= get_errno(fremovexattr(arg1
, n
));
10909 ret
= -TARGET_EFAULT
;
10911 unlock_user(n
, arg2
, 0);
10915 #endif /* CONFIG_ATTR */
10916 #ifdef TARGET_NR_set_thread_area
10917 case TARGET_NR_set_thread_area
:
10918 #if defined(TARGET_MIPS)
10919 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
10921 #elif defined(TARGET_CRIS)
10923 ret
= -TARGET_EINVAL
;
10925 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
10929 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10930 return do_set_thread_area(cpu_env
, arg1
);
10931 #elif defined(TARGET_M68K)
10933 TaskState
*ts
= cpu
->opaque
;
10934 ts
->tp_value
= arg1
;
10938 return -TARGET_ENOSYS
;
10941 #ifdef TARGET_NR_get_thread_area
10942 case TARGET_NR_get_thread_area
:
10943 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10944 return do_get_thread_area(cpu_env
, arg1
);
10945 #elif defined(TARGET_M68K)
10947 TaskState
*ts
= cpu
->opaque
;
10948 return ts
->tp_value
;
10951 return -TARGET_ENOSYS
;
10954 #ifdef TARGET_NR_getdomainname
10955 case TARGET_NR_getdomainname
:
10956 return -TARGET_ENOSYS
;
10959 #ifdef TARGET_NR_clock_settime
10960 case TARGET_NR_clock_settime
:
10962 struct timespec ts
;
10964 ret
= target_to_host_timespec(&ts
, arg2
);
10965 if (!is_error(ret
)) {
10966 ret
= get_errno(clock_settime(arg1
, &ts
));
10971 #ifdef TARGET_NR_clock_gettime
10972 case TARGET_NR_clock_gettime
:
10974 struct timespec ts
;
10975 ret
= get_errno(clock_gettime(arg1
, &ts
));
10976 if (!is_error(ret
)) {
10977 ret
= host_to_target_timespec(arg2
, &ts
);
10982 #ifdef TARGET_NR_clock_getres
10983 case TARGET_NR_clock_getres
:
10985 struct timespec ts
;
10986 ret
= get_errno(clock_getres(arg1
, &ts
));
10987 if (!is_error(ret
)) {
10988 host_to_target_timespec(arg2
, &ts
);
10993 #ifdef TARGET_NR_clock_nanosleep
10994 case TARGET_NR_clock_nanosleep
:
10996 struct timespec ts
;
10997 target_to_host_timespec(&ts
, arg3
);
10998 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
10999 &ts
, arg4
? &ts
: NULL
));
11001 host_to_target_timespec(arg4
, &ts
);
11003 #if defined(TARGET_PPC)
11004 /* clock_nanosleep is odd in that it returns positive errno values.
11005 * On PPC, CR0 bit 3 should be set in such a situation. */
11006 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11007 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11014 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11015 case TARGET_NR_set_tid_address
:
11016 return get_errno(set_tid_address((int *)g2h(arg1
)));
11019 case TARGET_NR_tkill
:
11020 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11022 case TARGET_NR_tgkill
:
11023 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11024 target_to_host_signal(arg3
)));
11026 #ifdef TARGET_NR_set_robust_list
11027 case TARGET_NR_set_robust_list
:
11028 case TARGET_NR_get_robust_list
:
11029 /* The ABI for supporting robust futexes has userspace pass
11030 * the kernel a pointer to a linked list which is updated by
11031 * userspace after the syscall; the list is walked by the kernel
11032 * when the thread exits. Since the linked list in QEMU guest
11033 * memory isn't a valid linked list for the host and we have
11034 * no way to reliably intercept the thread-death event, we can't
11035 * support these. Silently return ENOSYS so that guest userspace
11036 * falls back to a non-robust futex implementation (which should
11037 * be OK except in the corner case of the guest crashing while
11038 * holding a mutex that is shared with another process via
11041 return -TARGET_ENOSYS
;
11044 #if defined(TARGET_NR_utimensat)
11045 case TARGET_NR_utimensat
:
11047 struct timespec
*tsp
, ts
[2];
11051 target_to_host_timespec(ts
, arg3
);
11052 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11056 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11058 if (!(p
= lock_user_string(arg2
))) {
11059 return -TARGET_EFAULT
;
11061 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11062 unlock_user(p
, arg2
, 0);
11067 case TARGET_NR_futex
:
11068 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11069 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11070 case TARGET_NR_inotify_init
:
11071 ret
= get_errno(sys_inotify_init());
11073 fd_trans_register(ret
, &target_inotify_trans
);
11077 #ifdef CONFIG_INOTIFY1
11078 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11079 case TARGET_NR_inotify_init1
:
11080 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11081 fcntl_flags_tbl
)));
11083 fd_trans_register(ret
, &target_inotify_trans
);
11088 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11089 case TARGET_NR_inotify_add_watch
:
11090 p
= lock_user_string(arg2
);
11091 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11092 unlock_user(p
, arg2
, 0);
11095 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11096 case TARGET_NR_inotify_rm_watch
:
11097 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11100 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11101 case TARGET_NR_mq_open
:
11103 struct mq_attr posix_mq_attr
;
11104 struct mq_attr
*pposix_mq_attr
;
11107 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11108 pposix_mq_attr
= NULL
;
11110 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11111 return -TARGET_EFAULT
;
11113 pposix_mq_attr
= &posix_mq_attr
;
11115 p
= lock_user_string(arg1
- 1);
11117 return -TARGET_EFAULT
;
11119 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11120 unlock_user (p
, arg1
, 0);
11124 case TARGET_NR_mq_unlink
:
11125 p
= lock_user_string(arg1
- 1);
11127 return -TARGET_EFAULT
;
11129 ret
= get_errno(mq_unlink(p
));
11130 unlock_user (p
, arg1
, 0);
11133 case TARGET_NR_mq_timedsend
:
11135 struct timespec ts
;
11137 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11139 target_to_host_timespec(&ts
, arg5
);
11140 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11141 host_to_target_timespec(arg5
, &ts
);
11143 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11145 unlock_user (p
, arg2
, arg3
);
11149 case TARGET_NR_mq_timedreceive
:
11151 struct timespec ts
;
11154 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11156 target_to_host_timespec(&ts
, arg5
);
11157 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11159 host_to_target_timespec(arg5
, &ts
);
11161 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11164 unlock_user (p
, arg2
, arg3
);
11166 put_user_u32(prio
, arg4
);
11170 /* Not implemented for now... */
11171 /* case TARGET_NR_mq_notify: */
11174 case TARGET_NR_mq_getsetattr
:
11176 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11179 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11180 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11181 &posix_mq_attr_out
));
11182 } else if (arg3
!= 0) {
11183 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11185 if (ret
== 0 && arg3
!= 0) {
11186 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11192 #ifdef CONFIG_SPLICE
11193 #ifdef TARGET_NR_tee
11194 case TARGET_NR_tee
:
11196 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11200 #ifdef TARGET_NR_splice
11201 case TARGET_NR_splice
:
11203 loff_t loff_in
, loff_out
;
11204 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11206 if (get_user_u64(loff_in
, arg2
)) {
11207 return -TARGET_EFAULT
;
11209 ploff_in
= &loff_in
;
11212 if (get_user_u64(loff_out
, arg4
)) {
11213 return -TARGET_EFAULT
;
11215 ploff_out
= &loff_out
;
11217 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11219 if (put_user_u64(loff_in
, arg2
)) {
11220 return -TARGET_EFAULT
;
11224 if (put_user_u64(loff_out
, arg4
)) {
11225 return -TARGET_EFAULT
;
11231 #ifdef TARGET_NR_vmsplice
11232 case TARGET_NR_vmsplice
:
11234 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11236 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11237 unlock_iovec(vec
, arg2
, arg3
, 0);
11239 ret
= -host_to_target_errno(errno
);
11244 #endif /* CONFIG_SPLICE */
11245 #ifdef CONFIG_EVENTFD
11246 #if defined(TARGET_NR_eventfd)
11247 case TARGET_NR_eventfd
:
11248 ret
= get_errno(eventfd(arg1
, 0));
11250 fd_trans_register(ret
, &target_eventfd_trans
);
11254 #if defined(TARGET_NR_eventfd2)
11255 case TARGET_NR_eventfd2
:
11257 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11258 if (arg2
& TARGET_O_NONBLOCK
) {
11259 host_flags
|= O_NONBLOCK
;
11261 if (arg2
& TARGET_O_CLOEXEC
) {
11262 host_flags
|= O_CLOEXEC
;
11264 ret
= get_errno(eventfd(arg1
, host_flags
));
11266 fd_trans_register(ret
, &target_eventfd_trans
);
11271 #endif /* CONFIG_EVENTFD */
11272 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11273 case TARGET_NR_fallocate
:
11274 #if TARGET_ABI_BITS == 32
11275 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11276 target_offset64(arg5
, arg6
)));
11278 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11282 #if defined(CONFIG_SYNC_FILE_RANGE)
11283 #if defined(TARGET_NR_sync_file_range)
11284 case TARGET_NR_sync_file_range
:
11285 #if TARGET_ABI_BITS == 32
11286 #if defined(TARGET_MIPS)
11287 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11288 target_offset64(arg5
, arg6
), arg7
));
11290 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11291 target_offset64(arg4
, arg5
), arg6
));
11292 #endif /* !TARGET_MIPS */
11294 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11298 #if defined(TARGET_NR_sync_file_range2)
11299 case TARGET_NR_sync_file_range2
:
11300 /* This is like sync_file_range but the arguments are reordered */
11301 #if TARGET_ABI_BITS == 32
11302 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11303 target_offset64(arg5
, arg6
), arg2
));
11305 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11310 #if defined(TARGET_NR_signalfd4)
11311 case TARGET_NR_signalfd4
:
11312 return do_signalfd4(arg1
, arg2
, arg4
);
11314 #if defined(TARGET_NR_signalfd)
11315 case TARGET_NR_signalfd
:
11316 return do_signalfd4(arg1
, arg2
, 0);
11318 #if defined(CONFIG_EPOLL)
11319 #if defined(TARGET_NR_epoll_create)
11320 case TARGET_NR_epoll_create
:
11321 return get_errno(epoll_create(arg1
));
11323 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11324 case TARGET_NR_epoll_create1
:
11325 return get_errno(epoll_create1(arg1
));
11327 #if defined(TARGET_NR_epoll_ctl)
11328 case TARGET_NR_epoll_ctl
:
11330 struct epoll_event ep
;
11331 struct epoll_event
*epp
= 0;
11333 struct target_epoll_event
*target_ep
;
11334 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11335 return -TARGET_EFAULT
;
11337 ep
.events
= tswap32(target_ep
->events
);
11338 /* The epoll_data_t union is just opaque data to the kernel,
11339 * so we transfer all 64 bits across and need not worry what
11340 * actual data type it is.
11342 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11343 unlock_user_struct(target_ep
, arg4
, 0);
11346 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11350 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11351 #if defined(TARGET_NR_epoll_wait)
11352 case TARGET_NR_epoll_wait
:
11354 #if defined(TARGET_NR_epoll_pwait)
11355 case TARGET_NR_epoll_pwait
:
11358 struct target_epoll_event
*target_ep
;
11359 struct epoll_event
*ep
;
11361 int maxevents
= arg3
;
11362 int timeout
= arg4
;
11364 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11365 return -TARGET_EINVAL
;
11368 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11369 maxevents
* sizeof(struct target_epoll_event
), 1);
11371 return -TARGET_EFAULT
;
11374 ep
= g_try_new(struct epoll_event
, maxevents
);
11376 unlock_user(target_ep
, arg2
, 0);
11377 return -TARGET_ENOMEM
;
11381 #if defined(TARGET_NR_epoll_pwait)
11382 case TARGET_NR_epoll_pwait
:
11384 target_sigset_t
*target_set
;
11385 sigset_t _set
, *set
= &_set
;
11388 if (arg6
!= sizeof(target_sigset_t
)) {
11389 ret
= -TARGET_EINVAL
;
11393 target_set
= lock_user(VERIFY_READ
, arg5
,
11394 sizeof(target_sigset_t
), 1);
11396 ret
= -TARGET_EFAULT
;
11399 target_to_host_sigset(set
, target_set
);
11400 unlock_user(target_set
, arg5
, 0);
11405 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11406 set
, SIGSET_T_SIZE
));
11410 #if defined(TARGET_NR_epoll_wait)
11411 case TARGET_NR_epoll_wait
:
11412 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11417 ret
= -TARGET_ENOSYS
;
11419 if (!is_error(ret
)) {
11421 for (i
= 0; i
< ret
; i
++) {
11422 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11423 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11425 unlock_user(target_ep
, arg2
,
11426 ret
* sizeof(struct target_epoll_event
));
11428 unlock_user(target_ep
, arg2
, 0);
11435 #ifdef TARGET_NR_prlimit64
11436 case TARGET_NR_prlimit64
:
11438 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11439 struct target_rlimit64
*target_rnew
, *target_rold
;
11440 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11441 int resource
= target_to_host_resource(arg2
);
11443 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11444 return -TARGET_EFAULT
;
11446 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11447 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11448 unlock_user_struct(target_rnew
, arg3
, 0);
11452 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11453 if (!is_error(ret
) && arg4
) {
11454 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11455 return -TARGET_EFAULT
;
11457 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11458 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11459 unlock_user_struct(target_rold
, arg4
, 1);
11464 #ifdef TARGET_NR_gethostname
11465 case TARGET_NR_gethostname
:
11467 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11469 ret
= get_errno(gethostname(name
, arg2
));
11470 unlock_user(name
, arg1
, arg2
);
11472 ret
= -TARGET_EFAULT
;
11477 #ifdef TARGET_NR_atomic_cmpxchg_32
11478 case TARGET_NR_atomic_cmpxchg_32
:
11480 /* should use start_exclusive from main.c */
11481 abi_ulong mem_value
;
11482 if (get_user_u32(mem_value
, arg6
)) {
11483 target_siginfo_t info
;
11484 info
.si_signo
= SIGSEGV
;
11486 info
.si_code
= TARGET_SEGV_MAPERR
;
11487 info
._sifields
._sigfault
._addr
= arg6
;
11488 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11489 QEMU_SI_FAULT
, &info
);
11493 if (mem_value
== arg2
)
11494 put_user_u32(arg1
, arg6
);
11498 #ifdef TARGET_NR_atomic_barrier
11499 case TARGET_NR_atomic_barrier
:
11500 /* Like the kernel implementation and the
11501 qemu arm barrier, no-op this? */
11505 #ifdef TARGET_NR_timer_create
11506 case TARGET_NR_timer_create
:
11508 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11510 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11513 int timer_index
= next_free_host_timer();
11515 if (timer_index
< 0) {
11516 ret
= -TARGET_EAGAIN
;
11518 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11521 phost_sevp
= &host_sevp
;
11522 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11528 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11532 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11533 return -TARGET_EFAULT
;
11541 #ifdef TARGET_NR_timer_settime
11542 case TARGET_NR_timer_settime
:
11544 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11545 * struct itimerspec * old_value */
11546 target_timer_t timerid
= get_timer_id(arg1
);
11550 } else if (arg3
== 0) {
11551 ret
= -TARGET_EINVAL
;
11553 timer_t htimer
= g_posix_timers
[timerid
];
11554 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11556 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11557 return -TARGET_EFAULT
;
11560 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11561 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11562 return -TARGET_EFAULT
;
11569 #ifdef TARGET_NR_timer_gettime
11570 case TARGET_NR_timer_gettime
:
11572 /* args: timer_t timerid, struct itimerspec *curr_value */
11573 target_timer_t timerid
= get_timer_id(arg1
);
11577 } else if (!arg2
) {
11578 ret
= -TARGET_EFAULT
;
11580 timer_t htimer
= g_posix_timers
[timerid
];
11581 struct itimerspec hspec
;
11582 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11584 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11585 ret
= -TARGET_EFAULT
;
11592 #ifdef TARGET_NR_timer_getoverrun
11593 case TARGET_NR_timer_getoverrun
:
11595 /* args: timer_t timerid */
11596 target_timer_t timerid
= get_timer_id(arg1
);
11601 timer_t htimer
= g_posix_timers
[timerid
];
11602 ret
= get_errno(timer_getoverrun(htimer
));
11604 fd_trans_unregister(ret
);
11609 #ifdef TARGET_NR_timer_delete
11610 case TARGET_NR_timer_delete
:
11612 /* args: timer_t timerid */
11613 target_timer_t timerid
= get_timer_id(arg1
);
11618 timer_t htimer
= g_posix_timers
[timerid
];
11619 ret
= get_errno(timer_delete(htimer
));
11620 g_posix_timers
[timerid
] = 0;
11626 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11627 case TARGET_NR_timerfd_create
:
11628 return get_errno(timerfd_create(arg1
,
11629 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11632 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11633 case TARGET_NR_timerfd_gettime
:
11635 struct itimerspec its_curr
;
11637 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11639 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11640 return -TARGET_EFAULT
;
11646 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11647 case TARGET_NR_timerfd_settime
:
11649 struct itimerspec its_new
, its_old
, *p_new
;
11652 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11653 return -TARGET_EFAULT
;
11660 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11662 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11663 return -TARGET_EFAULT
;
11669 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11670 case TARGET_NR_ioprio_get
:
11671 return get_errno(ioprio_get(arg1
, arg2
));
11674 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11675 case TARGET_NR_ioprio_set
:
11676 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11679 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11680 case TARGET_NR_setns
:
11681 return get_errno(setns(arg1
, arg2
));
11683 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11684 case TARGET_NR_unshare
:
11685 return get_errno(unshare(arg1
));
11687 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11688 case TARGET_NR_kcmp
:
11689 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11691 #ifdef TARGET_NR_swapcontext
11692 case TARGET_NR_swapcontext
:
11693 /* PowerPC specific. */
11694 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11698 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11699 return -TARGET_ENOSYS
;
11704 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11705 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11706 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11709 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
11712 #ifdef DEBUG_ERESTARTSYS
11713 /* Debug-only code for exercising the syscall-restart code paths
11714 * in the per-architecture cpu main loops: restart every syscall
11715 * the guest makes once before letting it through.
11721 return -TARGET_ERESTARTSYS
;
11726 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11727 arg5
, arg6
, arg7
, arg8
);
11729 if (unlikely(do_strace
)) {
11730 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11731 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11732 arg5
, arg6
, arg7
, arg8
);
11733 print_syscall_ret(num
, ret
);
11735 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11736 arg5
, arg6
, arg7
, arg8
);
11739 trace_guest_user_syscall_ret(cpu
, num
, ret
);