4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/cdrom.h>
87 #include <linux/hdreg.h>
88 #include <linux/soundcard.h>
90 #include <linux/mtio.h>
92 #if defined(CONFIG_FIEMAP)
93 #include <linux/fiemap.h>
96 #if defined(CONFIG_USBFS)
97 #include <linux/usbdevice_fs.h>
98 #include <linux/usb/ch9.h>
100 #include <linux/vt.h>
101 #include <linux/dm-ioctl.h>
102 #include <linux/reboot.h>
103 #include <linux/route.h>
104 #include <linux/filter.h>
105 #include <linux/blkpg.h>
106 #include <netpacket/packet.h>
107 #include <linux/netlink.h>
108 #include <linux/if_alg.h>
109 #include "linux_loop.h"
113 #include "qemu/guest-random.h"
114 #include "qapi/error.h"
115 #include "fd-trans.h"
118 #define CLONE_IO 0x80000000 /* Clone io context */
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
169 * once. This exercises the codepaths for restart.
171 //#define DEBUG_ERESTARTSYS
173 //#include <linux/msdos_fs.h>
174 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
175 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
185 #define _syscall0(type,name) \
186 static type name (void) \
188 return syscall(__NR_##name); \
191 #define _syscall1(type,name,type1,arg1) \
192 static type name (type1 arg1) \
194 return syscall(__NR_##name, arg1); \
197 #define _syscall2(type,name,type1,arg1,type2,arg2) \
198 static type name (type1 arg1,type2 arg2) \
200 return syscall(__NR_##name, arg1, arg2); \
203 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
204 static type name (type1 arg1,type2 arg2,type3 arg3) \
206 return syscall(__NR_##name, arg1, arg2, arg3); \
209 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
210 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
212 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
219 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
223 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
224 type5,arg5,type6,arg6) \
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #define __NR_sys_statx __NR_statx
246 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
247 #define __NR__llseek __NR_lseek
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
255 #define __NR_sys_gettid __NR_gettid
256 _syscall0(int, sys_gettid
)
258 /* For the 64-bit guest on 32-bit host case we must emulate
259 * getdents using getdents64, because otherwise the host
260 * might hand us back more dirent records than we can fit
261 * into the guest buffer after structure format conversion.
262 * Otherwise we emulate getdents with getdents if the host has it.
264 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
265 #define EMULATE_GETDENTS_WITH_GETDENTS
268 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
269 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
271 #if (defined(TARGET_NR_getdents) && \
272 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
273 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
274 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
276 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
277 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
278 loff_t
*, res
, uint
, wh
);
280 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
281 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
283 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
284 #ifdef __NR_exit_group
285 _syscall1(int,exit_group
,int,error_code
)
287 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
288 _syscall1(int,set_tid_address
,int *,tidptr
)
290 #if defined(TARGET_NR_futex) && defined(__NR_futex)
291 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
292 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
294 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
295 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
296 unsigned long *, user_mask_ptr
);
297 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
298 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
299 unsigned long *, user_mask_ptr
);
300 #define __NR_sys_getcpu __NR_getcpu
301 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
302 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
304 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
305 struct __user_cap_data_struct
*, data
);
306 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
307 struct __user_cap_data_struct
*, data
);
308 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
309 _syscall2(int, ioprio_get
, int, which
, int, who
)
311 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
312 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
314 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
315 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
318 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
319 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
320 unsigned long, idx1
, unsigned long, idx2
)
324 * It is assumed that struct statx is architecture independent.
326 #if defined(TARGET_NR_statx) && defined(__NR_statx)
327 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
328 unsigned int, mask
, struct target_statx
*, statxbuf
)
331 static bitmask_transtbl fcntl_flags_tbl
[] = {
332 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
333 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
334 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
335 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
336 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
337 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
338 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
339 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
340 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
341 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
342 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
343 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
344 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
345 #if defined(O_DIRECT)
346 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
348 #if defined(O_NOATIME)
349 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
351 #if defined(O_CLOEXEC)
352 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
355 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
357 #if defined(O_TMPFILE)
358 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
360 /* Don't terminate the list prematurely on 64-bit host+guest. */
361 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
362 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* getcwd() wrapper with the kernel's return convention: on success,
 * return the length of the cwd string including the trailing NUL;
 * on failure return -1 (getcwd() has already set errno).
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
376 #ifdef TARGET_NR_utimensat
377 #if defined(__NR_utimensat)
378 #define __NR_sys_utimensat __NR_utimensat
379 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
380 const struct timespec
*,tsp
,int,flags
)
/* Fallback for hosts without the utimensat syscall: always fail with
 * ENOSYS so the caller can report the syscall as unimplemented.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
389 #endif /* TARGET_NR_utimensat */
391 #ifdef TARGET_NR_renameat2
392 #if defined(__NR_renameat2)
393 #define __NR_sys_renameat2 __NR_renameat2
394 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
395 const char *, new, unsigned int, flags
)
/* Fallback for hosts without the renameat2 syscall: a plain rename
 * (flags == 0) degrades to renameat(); any RENAME_* flag cannot be
 * emulated, so fail with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
407 #endif /* TARGET_NR_renameat2 */
409 #ifdef CONFIG_INOTIFY
410 #include <sys/inotify.h>
412 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch table can reference a uniform
 * sys_* name; delegates to the libc inotify_init().
 */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
418 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around the libc inotify_add_watch(). */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
424 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around the libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
430 #ifdef CONFIG_INOTIFY1
431 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around the libc inotify_init1(). */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
439 /* Userspace can usually survive runtime without inotify */
440 #undef TARGET_NR_inotify_init
441 #undef TARGET_NR_inotify_init1
442 #undef TARGET_NR_inotify_add_watch
443 #undef TARGET_NR_inotify_rm_watch
444 #endif /* CONFIG_INOTIFY */
446 #if defined(TARGET_NR_prlimit64)
447 #ifndef __NR_prlimit64
448 # define __NR_prlimit64 -1
450 #define __NR_sys_prlimit64 __NR_prlimit64
451 /* The glibc rlimit structure may not be that used by the underlying syscall */
452 struct host_rlimit64
{
456 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
457 const struct host_rlimit64
*, new_limit
,
458 struct host_rlimit64
*, old_limit
)
462 #if defined(TARGET_NR_timer_create)
463 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
464 static timer_t g_posix_timers
[32] = { 0, } ;
466 static inline int next_free_host_timer(void)
469 /* FIXME: Does finding the next free slot require a lock? */
470 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
471 if (g_posix_timers
[k
] == 0) {
472 g_posix_timers
[k
] = (timer_t
) 1;
480 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
482 static inline int regpairs_aligned(void *cpu_env
, int num
)
484 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
486 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
487 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
488 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
489 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
490 * of registers which translates to the same as ARM/MIPS, because we start with
492 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
493 #elif defined(TARGET_SH4)
494 /* SH4 doesn't align register pairs, except for p{read,write}64 */
495 static inline int regpairs_aligned(void *cpu_env
, int num
)
498 case TARGET_NR_pread64
:
499 case TARGET_NR_pwrite64
:
506 #elif defined(TARGET_XTENSA)
507 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
509 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
512 #define ERRNO_TABLE_SIZE 1200
514 /* target_to_host_errno_table[] is initialized from
515 * host_to_target_errno_table[] in syscall_init(). */
516 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
520 * This list is the union of errno values overridden in asm-<arch>/errno.h
521 * minus the errnos that are not actually generic to all archs.
523 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
524 [EAGAIN
] = TARGET_EAGAIN
,
525 [EIDRM
] = TARGET_EIDRM
,
526 [ECHRNG
] = TARGET_ECHRNG
,
527 [EL2NSYNC
] = TARGET_EL2NSYNC
,
528 [EL3HLT
] = TARGET_EL3HLT
,
529 [EL3RST
] = TARGET_EL3RST
,
530 [ELNRNG
] = TARGET_ELNRNG
,
531 [EUNATCH
] = TARGET_EUNATCH
,
532 [ENOCSI
] = TARGET_ENOCSI
,
533 [EL2HLT
] = TARGET_EL2HLT
,
534 [EDEADLK
] = TARGET_EDEADLK
,
535 [ENOLCK
] = TARGET_ENOLCK
,
536 [EBADE
] = TARGET_EBADE
,
537 [EBADR
] = TARGET_EBADR
,
538 [EXFULL
] = TARGET_EXFULL
,
539 [ENOANO
] = TARGET_ENOANO
,
540 [EBADRQC
] = TARGET_EBADRQC
,
541 [EBADSLT
] = TARGET_EBADSLT
,
542 [EBFONT
] = TARGET_EBFONT
,
543 [ENOSTR
] = TARGET_ENOSTR
,
544 [ENODATA
] = TARGET_ENODATA
,
545 [ETIME
] = TARGET_ETIME
,
546 [ENOSR
] = TARGET_ENOSR
,
547 [ENONET
] = TARGET_ENONET
,
548 [ENOPKG
] = TARGET_ENOPKG
,
549 [EREMOTE
] = TARGET_EREMOTE
,
550 [ENOLINK
] = TARGET_ENOLINK
,
551 [EADV
] = TARGET_EADV
,
552 [ESRMNT
] = TARGET_ESRMNT
,
553 [ECOMM
] = TARGET_ECOMM
,
554 [EPROTO
] = TARGET_EPROTO
,
555 [EDOTDOT
] = TARGET_EDOTDOT
,
556 [EMULTIHOP
] = TARGET_EMULTIHOP
,
557 [EBADMSG
] = TARGET_EBADMSG
,
558 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
559 [EOVERFLOW
] = TARGET_EOVERFLOW
,
560 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
561 [EBADFD
] = TARGET_EBADFD
,
562 [EREMCHG
] = TARGET_EREMCHG
,
563 [ELIBACC
] = TARGET_ELIBACC
,
564 [ELIBBAD
] = TARGET_ELIBBAD
,
565 [ELIBSCN
] = TARGET_ELIBSCN
,
566 [ELIBMAX
] = TARGET_ELIBMAX
,
567 [ELIBEXEC
] = TARGET_ELIBEXEC
,
568 [EILSEQ
] = TARGET_EILSEQ
,
569 [ENOSYS
] = TARGET_ENOSYS
,
570 [ELOOP
] = TARGET_ELOOP
,
571 [ERESTART
] = TARGET_ERESTART
,
572 [ESTRPIPE
] = TARGET_ESTRPIPE
,
573 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
574 [EUSERS
] = TARGET_EUSERS
,
575 [ENOTSOCK
] = TARGET_ENOTSOCK
,
576 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
577 [EMSGSIZE
] = TARGET_EMSGSIZE
,
578 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
579 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
580 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
581 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
582 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
583 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
584 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
585 [EADDRINUSE
] = TARGET_EADDRINUSE
,
586 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
587 [ENETDOWN
] = TARGET_ENETDOWN
,
588 [ENETUNREACH
] = TARGET_ENETUNREACH
,
589 [ENETRESET
] = TARGET_ENETRESET
,
590 [ECONNABORTED
] = TARGET_ECONNABORTED
,
591 [ECONNRESET
] = TARGET_ECONNRESET
,
592 [ENOBUFS
] = TARGET_ENOBUFS
,
593 [EISCONN
] = TARGET_EISCONN
,
594 [ENOTCONN
] = TARGET_ENOTCONN
,
595 [EUCLEAN
] = TARGET_EUCLEAN
,
596 [ENOTNAM
] = TARGET_ENOTNAM
,
597 [ENAVAIL
] = TARGET_ENAVAIL
,
598 [EISNAM
] = TARGET_EISNAM
,
599 [EREMOTEIO
] = TARGET_EREMOTEIO
,
600 [EDQUOT
] = TARGET_EDQUOT
,
601 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
602 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
603 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
604 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
605 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
606 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
607 [EALREADY
] = TARGET_EALREADY
,
608 [EINPROGRESS
] = TARGET_EINPROGRESS
,
609 [ESTALE
] = TARGET_ESTALE
,
610 [ECANCELED
] = TARGET_ECANCELED
,
611 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
612 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
614 [ENOKEY
] = TARGET_ENOKEY
,
617 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
620 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
623 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
626 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
628 #ifdef ENOTRECOVERABLE
629 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
632 [ENOMSG
] = TARGET_ENOMSG
,
635 [ERFKILL
] = TARGET_ERFKILL
,
638 [EHWPOISON
] = TARGET_EHWPOISON
,
642 static inline int host_to_target_errno(int err
)
644 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
645 host_to_target_errno_table
[err
]) {
646 return host_to_target_errno_table
[err
];
651 static inline int target_to_host_errno(int err
)
653 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
654 target_to_host_errno_table
[err
]) {
655 return target_to_host_errno_table
[err
];
660 static inline abi_long
get_errno(abi_long ret
)
663 return -host_to_target_errno(errno
);
668 const char *target_strerror(int err
)
670 if (err
== TARGET_ERESTARTSYS
) {
671 return "To be restarted";
673 if (err
== TARGET_QEMU_ESIGRETURN
) {
674 return "Successful exit from sigreturn";
677 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
680 return strerror(target_to_host_errno(err
));
/* Wrapper macros that define signal-race-free syscall entry points via
 * safe_syscall(), which guarantees a guest signal either interrupts
 * the syscall cleanly or is delivered after it completes.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
730 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
731 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
732 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
733 int, flags
, mode_t
, mode
)
734 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
735 struct rusage
*, rusage
)
736 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
737 int, options
, struct rusage
*, rusage
)
738 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
739 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
740 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
741 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
742 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
744 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
745 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
747 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
748 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
749 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
750 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
751 safe_syscall2(int, tkill
, int, tid
, int, sig
)
752 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
753 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
754 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
755 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
756 unsigned long, pos_l
, unsigned long, pos_h
)
757 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
758 unsigned long, pos_l
, unsigned long, pos_h
)
759 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
761 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
762 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
763 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
764 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
765 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
766 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
767 safe_syscall2(int, flock
, int, fd
, int, operation
)
768 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
769 const struct timespec
*, uts
, size_t, sigsetsize
)
770 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
772 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
773 struct timespec
*, rem
)
774 #ifdef TARGET_NR_clock_nanosleep
775 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
776 const struct timespec
*, req
, struct timespec
*, rem
)
779 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
780 void *, ptr
, long, fifth
)
783 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
787 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
788 long, msgtype
, int, flags
)
790 #ifdef __NR_semtimedop
791 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
792 unsigned, nsops
, const struct timespec
*, timeout
)
794 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
795 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
796 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
797 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
798 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
800 /* We do ioctl like this rather than via safe_syscall3 to preserve the
801 * "third argument might be integer or pointer or not present" behaviour of
804 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
805 /* Similarly for fcntl. Note that callers must always:
806 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
807 * use the flock64 struct rather than unsuffixed flock
808 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
811 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
813 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
816 static inline int host_to_target_sock_type(int host_type
)
820 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
822 target_type
= TARGET_SOCK_DGRAM
;
825 target_type
= TARGET_SOCK_STREAM
;
828 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
832 #if defined(SOCK_CLOEXEC)
833 if (host_type
& SOCK_CLOEXEC
) {
834 target_type
|= TARGET_SOCK_CLOEXEC
;
838 #if defined(SOCK_NONBLOCK)
839 if (host_type
& SOCK_NONBLOCK
) {
840 target_type
|= TARGET_SOCK_NONBLOCK
;
847 static abi_ulong target_brk
;
848 static abi_ulong target_original_brk
;
849 static abi_ulong brk_page
;
851 void target_set_brk(abi_ulong new_brk
)
853 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
854 brk_page
= HOST_PAGE_ALIGN(target_brk
);
857 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
858 #define DEBUGF_BRK(message, args...)
860 /* do_brk() must return target values and target errnos. */
861 abi_long
do_brk(abi_ulong new_brk
)
863 abi_long mapped_addr
;
864 abi_ulong new_alloc_size
;
866 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
869 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
872 if (new_brk
< target_original_brk
) {
873 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
878 /* If the new brk is less than the highest page reserved to the
879 * target heap allocation, set it and we're almost done... */
880 if (new_brk
<= brk_page
) {
881 /* Heap contents are initialized to zero, as for anonymous
883 if (new_brk
> target_brk
) {
884 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
886 target_brk
= new_brk
;
887 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
891 /* We need to allocate more memory after the brk... Note that
892 * we don't use MAP_FIXED because that will map over the top of
893 * any existing mapping (like the one with the host libc or qemu
894 * itself); instead we treat "mapped but at wrong address" as
895 * a failure and unmap again.
897 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
898 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
899 PROT_READ
|PROT_WRITE
,
900 MAP_ANON
|MAP_PRIVATE
, 0, 0));
902 if (mapped_addr
== brk_page
) {
903 /* Heap contents are initialized to zero, as for anonymous
904 * mapped pages. Technically the new pages are already
905 * initialized to zero since they *are* anonymous mapped
906 * pages, however we have to take care with the contents that
907 * come from the remaining part of the previous page: it may
908 * contains garbage data due to a previous heap usage (grown
910 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
912 target_brk
= new_brk
;
913 brk_page
= HOST_PAGE_ALIGN(target_brk
);
914 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
917 } else if (mapped_addr
!= -1) {
918 /* Mapped but at wrong address, meaning there wasn't actually
919 * enough space for this brk.
921 target_munmap(mapped_addr
, new_alloc_size
);
923 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
926 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
929 #if defined(TARGET_ALPHA)
930 /* We (partially) emulate OSF/1 on Alpha, which requires we
931 return a proper errno, not an unchanged brk value. */
932 return -TARGET_ENOMEM
;
934 /* For everything else, return the previous break. */
938 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
939 abi_ulong target_fds_addr
,
943 abi_ulong b
, *target_fds
;
945 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
946 if (!(target_fds
= lock_user(VERIFY_READ
,
948 sizeof(abi_ulong
) * nw
,
950 return -TARGET_EFAULT
;
954 for (i
= 0; i
< nw
; i
++) {
955 /* grab the abi_ulong */
956 __get_user(b
, &target_fds
[i
]);
957 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
958 /* check the bit inside the abi_ulong */
965 unlock_user(target_fds
, target_fds_addr
, 0);
970 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
971 abi_ulong target_fds_addr
,
974 if (target_fds_addr
) {
975 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
976 return -TARGET_EFAULT
;
984 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
990 abi_ulong
*target_fds
;
992 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
993 if (!(target_fds
= lock_user(VERIFY_WRITE
,
995 sizeof(abi_ulong
) * nw
,
997 return -TARGET_EFAULT
;
1000 for (i
= 0; i
< nw
; i
++) {
1002 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1003 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1006 __put_user(v
, &target_fds
[i
]);
1009 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1014 #if defined(__alpha__)
1015 #define HOST_HZ 1024
1020 static inline abi_long
host_to_target_clock_t(long ticks
)
1022 #if HOST_HZ == TARGET_HZ
1025 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1029 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1030 const struct rusage
*rusage
)
1032 struct target_rusage
*target_rusage
;
1034 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1035 return -TARGET_EFAULT
;
1036 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1037 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1038 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1039 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1040 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1041 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1042 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1043 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1044 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1045 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1046 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1047 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1048 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1049 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1050 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1051 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1052 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1053 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1054 unlock_user_struct(target_rusage
, target_addr
, 1);
1059 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1061 abi_ulong target_rlim_swap
;
1064 target_rlim_swap
= tswapal(target_rlim
);
1065 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1066 return RLIM_INFINITY
;
1068 result
= target_rlim_swap
;
1069 if (target_rlim_swap
!= (rlim_t
)result
)
1070 return RLIM_INFINITY
;
1075 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1077 abi_ulong target_rlim_swap
;
1080 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1081 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1083 target_rlim_swap
= rlim
;
1084 result
= tswapal(target_rlim_swap
);
1089 static inline int target_to_host_resource(int code
)
1092 case TARGET_RLIMIT_AS
:
1094 case TARGET_RLIMIT_CORE
:
1096 case TARGET_RLIMIT_CPU
:
1098 case TARGET_RLIMIT_DATA
:
1100 case TARGET_RLIMIT_FSIZE
:
1101 return RLIMIT_FSIZE
;
1102 case TARGET_RLIMIT_LOCKS
:
1103 return RLIMIT_LOCKS
;
1104 case TARGET_RLIMIT_MEMLOCK
:
1105 return RLIMIT_MEMLOCK
;
1106 case TARGET_RLIMIT_MSGQUEUE
:
1107 return RLIMIT_MSGQUEUE
;
1108 case TARGET_RLIMIT_NICE
:
1110 case TARGET_RLIMIT_NOFILE
:
1111 return RLIMIT_NOFILE
;
1112 case TARGET_RLIMIT_NPROC
:
1113 return RLIMIT_NPROC
;
1114 case TARGET_RLIMIT_RSS
:
1116 case TARGET_RLIMIT_RTPRIO
:
1117 return RLIMIT_RTPRIO
;
1118 case TARGET_RLIMIT_SIGPENDING
:
1119 return RLIMIT_SIGPENDING
;
1120 case TARGET_RLIMIT_STACK
:
1121 return RLIMIT_STACK
;
1127 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1128 abi_ulong target_tv_addr
)
1130 struct target_timeval
*target_tv
;
1132 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1133 return -TARGET_EFAULT
;
1136 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1137 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1139 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1144 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1145 const struct timeval
*tv
)
1147 struct target_timeval
*target_tv
;
1149 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1150 return -TARGET_EFAULT
;
1153 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1154 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1156 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1161 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1162 const struct timeval
*tv
)
1164 struct target__kernel_sock_timeval
*target_tv
;
1166 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1167 return -TARGET_EFAULT
;
1170 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1171 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1173 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1178 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1179 abi_ulong target_addr
)
1181 struct target_timespec
*target_ts
;
1183 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1184 return -TARGET_EFAULT
;
1186 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1187 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1188 unlock_user_struct(target_ts
, target_addr
, 0);
1192 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1193 struct timespec
*host_ts
)
1195 struct target_timespec
*target_ts
;
1197 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1198 return -TARGET_EFAULT
;
1200 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1201 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1202 unlock_user_struct(target_ts
, target_addr
, 1);
1206 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1207 struct timespec
*host_ts
)
1209 struct target__kernel_timespec
*target_ts
;
1211 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1212 return -TARGET_EFAULT
;
1214 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1215 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1216 unlock_user_struct(target_ts
, target_addr
, 1);
1220 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1221 abi_ulong target_tz_addr
)
1223 struct target_timezone
*target_tz
;
1225 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1226 return -TARGET_EFAULT
;
1229 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1230 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1232 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1237 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1240 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1241 abi_ulong target_mq_attr_addr
)
1243 struct target_mq_attr
*target_mq_attr
;
1245 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1246 target_mq_attr_addr
, 1))
1247 return -TARGET_EFAULT
;
1249 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1250 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1251 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1252 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1254 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1259 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1260 const struct mq_attr
*attr
)
1262 struct target_mq_attr
*target_mq_attr
;
1264 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1265 target_mq_attr_addr
, 0))
1266 return -TARGET_EFAULT
;
1268 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1269 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1270 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1271 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1273 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1279 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1280 /* do_select() must return target values and target errnos. */
1281 static abi_long
do_select(int n
,
1282 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1283 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1285 fd_set rfds
, wfds
, efds
;
1286 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1288 struct timespec ts
, *ts_ptr
;
1291 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1295 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1299 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1304 if (target_tv_addr
) {
1305 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1306 return -TARGET_EFAULT
;
1307 ts
.tv_sec
= tv
.tv_sec
;
1308 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1314 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1317 if (!is_error(ret
)) {
1318 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1319 return -TARGET_EFAULT
;
1320 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1321 return -TARGET_EFAULT
;
1322 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1323 return -TARGET_EFAULT
;
1325 if (target_tv_addr
) {
1326 tv
.tv_sec
= ts
.tv_sec
;
1327 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1328 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1329 return -TARGET_EFAULT
;
1337 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1338 static abi_long
do_old_select(abi_ulong arg1
)
1340 struct target_sel_arg_struct
*sel
;
1341 abi_ulong inp
, outp
, exp
, tvp
;
1344 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1345 return -TARGET_EFAULT
;
1348 nsel
= tswapal(sel
->n
);
1349 inp
= tswapal(sel
->inp
);
1350 outp
= tswapal(sel
->outp
);
1351 exp
= tswapal(sel
->exp
);
1352 tvp
= tswapal(sel
->tvp
);
1354 unlock_user_struct(sel
, arg1
, 0);
1356 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1361 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1364 return pipe2(host_pipe
, flags
);
1370 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1371 int flags
, int is_pipe2
)
1375 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1378 return get_errno(ret
);
1380 /* Several targets have special calling conventions for the original
1381 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1383 #if defined(TARGET_ALPHA)
1384 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1385 return host_pipe
[0];
1386 #elif defined(TARGET_MIPS)
1387 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1388 return host_pipe
[0];
1389 #elif defined(TARGET_SH4)
1390 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1391 return host_pipe
[0];
1392 #elif defined(TARGET_SPARC)
1393 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1394 return host_pipe
[0];
1398 if (put_user_s32(host_pipe
[0], pipedes
)
1399 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1400 return -TARGET_EFAULT
;
1401 return get_errno(ret
);
1404 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1405 abi_ulong target_addr
,
1408 struct target_ip_mreqn
*target_smreqn
;
1410 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1412 return -TARGET_EFAULT
;
1413 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1414 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1415 if (len
== sizeof(struct target_ip_mreqn
))
1416 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1417 unlock_user(target_smreqn
, target_addr
, 0);
1422 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1423 abi_ulong target_addr
,
1426 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1427 sa_family_t sa_family
;
1428 struct target_sockaddr
*target_saddr
;
1430 if (fd_trans_target_to_host_addr(fd
)) {
1431 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1434 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1436 return -TARGET_EFAULT
;
1438 sa_family
= tswap16(target_saddr
->sa_family
);
1440 /* Oops. The caller might send a incomplete sun_path; sun_path
1441 * must be terminated by \0 (see the manual page), but
1442 * unfortunately it is quite common to specify sockaddr_un
1443 * length as "strlen(x->sun_path)" while it should be
1444 * "strlen(...) + 1". We'll fix that here if needed.
1445 * Linux kernel has a similar feature.
1448 if (sa_family
== AF_UNIX
) {
1449 if (len
< unix_maxlen
&& len
> 0) {
1450 char *cp
= (char*)target_saddr
;
1452 if ( cp
[len
-1] && !cp
[len
] )
1455 if (len
> unix_maxlen
)
1459 memcpy(addr
, target_saddr
, len
);
1460 addr
->sa_family
= sa_family
;
1461 if (sa_family
== AF_NETLINK
) {
1462 struct sockaddr_nl
*nladdr
;
1464 nladdr
= (struct sockaddr_nl
*)addr
;
1465 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1466 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1467 } else if (sa_family
== AF_PACKET
) {
1468 struct target_sockaddr_ll
*lladdr
;
1470 lladdr
= (struct target_sockaddr_ll
*)addr
;
1471 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1472 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1474 unlock_user(target_saddr
, target_addr
, 0);
1479 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1480 struct sockaddr
*addr
,
1483 struct target_sockaddr
*target_saddr
;
1490 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1492 return -TARGET_EFAULT
;
1493 memcpy(target_saddr
, addr
, len
);
1494 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1495 sizeof(target_saddr
->sa_family
)) {
1496 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1498 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1499 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1500 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1501 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1502 } else if (addr
->sa_family
== AF_PACKET
) {
1503 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1504 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1505 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1506 } else if (addr
->sa_family
== AF_INET6
&&
1507 len
>= sizeof(struct target_sockaddr_in6
)) {
1508 struct target_sockaddr_in6
*target_in6
=
1509 (struct target_sockaddr_in6
*)target_saddr
;
1510 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1512 unlock_user(target_saddr
, target_addr
, len
);
1517 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1518 struct target_msghdr
*target_msgh
)
1520 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1521 abi_long msg_controllen
;
1522 abi_ulong target_cmsg_addr
;
1523 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1524 socklen_t space
= 0;
1526 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1527 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1529 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1530 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1531 target_cmsg_start
= target_cmsg
;
1533 return -TARGET_EFAULT
;
1535 while (cmsg
&& target_cmsg
) {
1536 void *data
= CMSG_DATA(cmsg
);
1537 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1539 int len
= tswapal(target_cmsg
->cmsg_len
)
1540 - sizeof(struct target_cmsghdr
);
1542 space
+= CMSG_SPACE(len
);
1543 if (space
> msgh
->msg_controllen
) {
1544 space
-= CMSG_SPACE(len
);
1545 /* This is a QEMU bug, since we allocated the payload
1546 * area ourselves (unlike overflow in host-to-target
1547 * conversion, which is just the guest giving us a buffer
1548 * that's too small). It can't happen for the payload types
1549 * we currently support; if it becomes an issue in future
1550 * we would need to improve our allocation strategy to
1551 * something more intelligent than "twice the size of the
1552 * target buffer we're reading from".
1554 gemu_log("Host cmsg overflow\n");
1558 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1559 cmsg
->cmsg_level
= SOL_SOCKET
;
1561 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1563 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1564 cmsg
->cmsg_len
= CMSG_LEN(len
);
1566 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1567 int *fd
= (int *)data
;
1568 int *target_fd
= (int *)target_data
;
1569 int i
, numfds
= len
/ sizeof(int);
1571 for (i
= 0; i
< numfds
; i
++) {
1572 __get_user(fd
[i
], target_fd
+ i
);
1574 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1575 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1576 struct ucred
*cred
= (struct ucred
*)data
;
1577 struct target_ucred
*target_cred
=
1578 (struct target_ucred
*)target_data
;
1580 __get_user(cred
->pid
, &target_cred
->pid
);
1581 __get_user(cred
->uid
, &target_cred
->uid
);
1582 __get_user(cred
->gid
, &target_cred
->gid
);
1584 gemu_log("Unsupported ancillary data: %d/%d\n",
1585 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1586 memcpy(data
, target_data
, len
);
1589 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1590 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1593 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1595 msgh
->msg_controllen
= space
;
1599 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1600 struct msghdr
*msgh
)
1602 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1603 abi_long msg_controllen
;
1604 abi_ulong target_cmsg_addr
;
1605 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1606 socklen_t space
= 0;
1608 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1609 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1611 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1612 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1613 target_cmsg_start
= target_cmsg
;
1615 return -TARGET_EFAULT
;
1617 while (cmsg
&& target_cmsg
) {
1618 void *data
= CMSG_DATA(cmsg
);
1619 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1621 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1622 int tgt_len
, tgt_space
;
1624 /* We never copy a half-header but may copy half-data;
1625 * this is Linux's behaviour in put_cmsg(). Note that
1626 * truncation here is a guest problem (which we report
1627 * to the guest via the CTRUNC bit), unlike truncation
1628 * in target_to_host_cmsg, which is a QEMU bug.
1630 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1631 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1635 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1636 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1638 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1640 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1642 /* Payload types which need a different size of payload on
1643 * the target must adjust tgt_len here.
1646 switch (cmsg
->cmsg_level
) {
1648 switch (cmsg
->cmsg_type
) {
1650 tgt_len
= sizeof(struct target_timeval
);
1660 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1661 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1662 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1665 /* We must now copy-and-convert len bytes of payload
1666 * into tgt_len bytes of destination space. Bear in mind
1667 * that in both source and destination we may be dealing
1668 * with a truncated value!
1670 switch (cmsg
->cmsg_level
) {
1672 switch (cmsg
->cmsg_type
) {
1675 int *fd
= (int *)data
;
1676 int *target_fd
= (int *)target_data
;
1677 int i
, numfds
= tgt_len
/ sizeof(int);
1679 for (i
= 0; i
< numfds
; i
++) {
1680 __put_user(fd
[i
], target_fd
+ i
);
1686 struct timeval
*tv
= (struct timeval
*)data
;
1687 struct target_timeval
*target_tv
=
1688 (struct target_timeval
*)target_data
;
1690 if (len
!= sizeof(struct timeval
) ||
1691 tgt_len
!= sizeof(struct target_timeval
)) {
1695 /* copy struct timeval to target */
1696 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1697 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1700 case SCM_CREDENTIALS
:
1702 struct ucred
*cred
= (struct ucred
*)data
;
1703 struct target_ucred
*target_cred
=
1704 (struct target_ucred
*)target_data
;
1706 __put_user(cred
->pid
, &target_cred
->pid
);
1707 __put_user(cred
->uid
, &target_cred
->uid
);
1708 __put_user(cred
->gid
, &target_cred
->gid
);
1717 switch (cmsg
->cmsg_type
) {
1720 uint32_t *v
= (uint32_t *)data
;
1721 uint32_t *t_int
= (uint32_t *)target_data
;
1723 if (len
!= sizeof(uint32_t) ||
1724 tgt_len
!= sizeof(uint32_t)) {
1727 __put_user(*v
, t_int
);
1733 struct sock_extended_err ee
;
1734 struct sockaddr_in offender
;
1736 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1737 struct errhdr_t
*target_errh
=
1738 (struct errhdr_t
*)target_data
;
1740 if (len
!= sizeof(struct errhdr_t
) ||
1741 tgt_len
!= sizeof(struct errhdr_t
)) {
1744 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1745 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1746 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1747 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1748 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1749 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1750 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1751 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1752 (void *) &errh
->offender
, sizeof(errh
->offender
));
1761 switch (cmsg
->cmsg_type
) {
1764 uint32_t *v
= (uint32_t *)data
;
1765 uint32_t *t_int
= (uint32_t *)target_data
;
1767 if (len
!= sizeof(uint32_t) ||
1768 tgt_len
!= sizeof(uint32_t)) {
1771 __put_user(*v
, t_int
);
1777 struct sock_extended_err ee
;
1778 struct sockaddr_in6 offender
;
1780 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1781 struct errhdr6_t
*target_errh
=
1782 (struct errhdr6_t
*)target_data
;
1784 if (len
!= sizeof(struct errhdr6_t
) ||
1785 tgt_len
!= sizeof(struct errhdr6_t
)) {
1788 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1789 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1790 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1791 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1792 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1793 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1794 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1795 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1796 (void *) &errh
->offender
, sizeof(errh
->offender
));
1806 gemu_log("Unsupported ancillary data: %d/%d\n",
1807 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1808 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1809 if (tgt_len
> len
) {
1810 memset(target_data
+ len
, 0, tgt_len
- len
);
1814 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1815 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1816 if (msg_controllen
< tgt_space
) {
1817 tgt_space
= msg_controllen
;
1819 msg_controllen
-= tgt_space
;
1821 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1822 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1825 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1827 target_msgh
->msg_controllen
= tswapal(space
);
1831 /* do_setsockopt() Must return target values and target errnos. */
1832 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1833 abi_ulong optval_addr
, socklen_t optlen
)
1837 struct ip_mreqn
*ip_mreq
;
1838 struct ip_mreq_source
*ip_mreq_source
;
1842 /* TCP options all take an 'int' value. */
1843 if (optlen
< sizeof(uint32_t))
1844 return -TARGET_EINVAL
;
1846 if (get_user_u32(val
, optval_addr
))
1847 return -TARGET_EFAULT
;
1848 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1855 case IP_ROUTER_ALERT
:
1859 case IP_MTU_DISCOVER
:
1866 case IP_MULTICAST_TTL
:
1867 case IP_MULTICAST_LOOP
:
1869 if (optlen
>= sizeof(uint32_t)) {
1870 if (get_user_u32(val
, optval_addr
))
1871 return -TARGET_EFAULT
;
1872 } else if (optlen
>= 1) {
1873 if (get_user_u8(val
, optval_addr
))
1874 return -TARGET_EFAULT
;
1876 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1878 case IP_ADD_MEMBERSHIP
:
1879 case IP_DROP_MEMBERSHIP
:
1880 if (optlen
< sizeof (struct target_ip_mreq
) ||
1881 optlen
> sizeof (struct target_ip_mreqn
))
1882 return -TARGET_EINVAL
;
1884 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1885 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1886 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1889 case IP_BLOCK_SOURCE
:
1890 case IP_UNBLOCK_SOURCE
:
1891 case IP_ADD_SOURCE_MEMBERSHIP
:
1892 case IP_DROP_SOURCE_MEMBERSHIP
:
1893 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1894 return -TARGET_EINVAL
;
1896 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1897 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1898 unlock_user (ip_mreq_source
, optval_addr
, 0);
1907 case IPV6_MTU_DISCOVER
:
1910 case IPV6_RECVPKTINFO
:
1911 case IPV6_UNICAST_HOPS
:
1912 case IPV6_MULTICAST_HOPS
:
1913 case IPV6_MULTICAST_LOOP
:
1915 case IPV6_RECVHOPLIMIT
:
1916 case IPV6_2292HOPLIMIT
:
1919 case IPV6_2292PKTINFO
:
1920 case IPV6_RECVTCLASS
:
1921 case IPV6_RECVRTHDR
:
1922 case IPV6_2292RTHDR
:
1923 case IPV6_RECVHOPOPTS
:
1924 case IPV6_2292HOPOPTS
:
1925 case IPV6_RECVDSTOPTS
:
1926 case IPV6_2292DSTOPTS
:
1928 #ifdef IPV6_RECVPATHMTU
1929 case IPV6_RECVPATHMTU
:
1931 #ifdef IPV6_TRANSPARENT
1932 case IPV6_TRANSPARENT
:
1934 #ifdef IPV6_FREEBIND
1937 #ifdef IPV6_RECVORIGDSTADDR
1938 case IPV6_RECVORIGDSTADDR
:
1941 if (optlen
< sizeof(uint32_t)) {
1942 return -TARGET_EINVAL
;
1944 if (get_user_u32(val
, optval_addr
)) {
1945 return -TARGET_EFAULT
;
1947 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1948 &val
, sizeof(val
)));
1952 struct in6_pktinfo pki
;
1954 if (optlen
< sizeof(pki
)) {
1955 return -TARGET_EINVAL
;
1958 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1959 return -TARGET_EFAULT
;
1962 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1964 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1965 &pki
, sizeof(pki
)));
1968 case IPV6_ADD_MEMBERSHIP
:
1969 case IPV6_DROP_MEMBERSHIP
:
1971 struct ipv6_mreq ipv6mreq
;
1973 if (optlen
< sizeof(ipv6mreq
)) {
1974 return -TARGET_EINVAL
;
1977 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
1978 return -TARGET_EFAULT
;
1981 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
1983 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1984 &ipv6mreq
, sizeof(ipv6mreq
)));
1995 struct icmp6_filter icmp6f
;
1997 if (optlen
> sizeof(icmp6f
)) {
1998 optlen
= sizeof(icmp6f
);
2001 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2002 return -TARGET_EFAULT
;
2005 for (val
= 0; val
< 8; val
++) {
2006 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2009 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2021 /* those take an u32 value */
2022 if (optlen
< sizeof(uint32_t)) {
2023 return -TARGET_EINVAL
;
2026 if (get_user_u32(val
, optval_addr
)) {
2027 return -TARGET_EFAULT
;
2029 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2030 &val
, sizeof(val
)));
2037 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2042 char *alg_key
= g_malloc(optlen
);
2045 return -TARGET_ENOMEM
;
2047 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2049 return -TARGET_EFAULT
;
2051 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2056 case ALG_SET_AEAD_AUTHSIZE
:
2058 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2067 case TARGET_SOL_SOCKET
:
2069 case TARGET_SO_RCVTIMEO
:
2073 optname
= SO_RCVTIMEO
;
2076 if (optlen
!= sizeof(struct target_timeval
)) {
2077 return -TARGET_EINVAL
;
2080 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2081 return -TARGET_EFAULT
;
2084 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2088 case TARGET_SO_SNDTIMEO
:
2089 optname
= SO_SNDTIMEO
;
2091 case TARGET_SO_ATTACH_FILTER
:
2093 struct target_sock_fprog
*tfprog
;
2094 struct target_sock_filter
*tfilter
;
2095 struct sock_fprog fprog
;
2096 struct sock_filter
*filter
;
2099 if (optlen
!= sizeof(*tfprog
)) {
2100 return -TARGET_EINVAL
;
2102 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2103 return -TARGET_EFAULT
;
2105 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2106 tswapal(tfprog
->filter
), 0)) {
2107 unlock_user_struct(tfprog
, optval_addr
, 1);
2108 return -TARGET_EFAULT
;
2111 fprog
.len
= tswap16(tfprog
->len
);
2112 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2113 if (filter
== NULL
) {
2114 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2115 unlock_user_struct(tfprog
, optval_addr
, 1);
2116 return -TARGET_ENOMEM
;
2118 for (i
= 0; i
< fprog
.len
; i
++) {
2119 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2120 filter
[i
].jt
= tfilter
[i
].jt
;
2121 filter
[i
].jf
= tfilter
[i
].jf
;
2122 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2124 fprog
.filter
= filter
;
2126 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2127 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2130 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2131 unlock_user_struct(tfprog
, optval_addr
, 1);
2134 case TARGET_SO_BINDTODEVICE
:
2136 char *dev_ifname
, *addr_ifname
;
2138 if (optlen
> IFNAMSIZ
- 1) {
2139 optlen
= IFNAMSIZ
- 1;
2141 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2143 return -TARGET_EFAULT
;
2145 optname
= SO_BINDTODEVICE
;
2146 addr_ifname
= alloca(IFNAMSIZ
);
2147 memcpy(addr_ifname
, dev_ifname
, optlen
);
2148 addr_ifname
[optlen
] = 0;
2149 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2150 addr_ifname
, optlen
));
2151 unlock_user (dev_ifname
, optval_addr
, 0);
2154 case TARGET_SO_LINGER
:
2157 struct target_linger
*tlg
;
2159 if (optlen
!= sizeof(struct target_linger
)) {
2160 return -TARGET_EINVAL
;
2162 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2163 return -TARGET_EFAULT
;
2165 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2166 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2167 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2169 unlock_user_struct(tlg
, optval_addr
, 0);
2172 /* Options with 'int' argument. */
2173 case TARGET_SO_DEBUG
:
2176 case TARGET_SO_REUSEADDR
:
2177 optname
= SO_REUSEADDR
;
2180 case TARGET_SO_REUSEPORT
:
2181 optname
= SO_REUSEPORT
;
2184 case TARGET_SO_TYPE
:
2187 case TARGET_SO_ERROR
:
2190 case TARGET_SO_DONTROUTE
:
2191 optname
= SO_DONTROUTE
;
2193 case TARGET_SO_BROADCAST
:
2194 optname
= SO_BROADCAST
;
2196 case TARGET_SO_SNDBUF
:
2197 optname
= SO_SNDBUF
;
2199 case TARGET_SO_SNDBUFFORCE
:
2200 optname
= SO_SNDBUFFORCE
;
2202 case TARGET_SO_RCVBUF
:
2203 optname
= SO_RCVBUF
;
2205 case TARGET_SO_RCVBUFFORCE
:
2206 optname
= SO_RCVBUFFORCE
;
2208 case TARGET_SO_KEEPALIVE
:
2209 optname
= SO_KEEPALIVE
;
2211 case TARGET_SO_OOBINLINE
:
2212 optname
= SO_OOBINLINE
;
2214 case TARGET_SO_NO_CHECK
:
2215 optname
= SO_NO_CHECK
;
2217 case TARGET_SO_PRIORITY
:
2218 optname
= SO_PRIORITY
;
2221 case TARGET_SO_BSDCOMPAT
:
2222 optname
= SO_BSDCOMPAT
;
2225 case TARGET_SO_PASSCRED
:
2226 optname
= SO_PASSCRED
;
2228 case TARGET_SO_PASSSEC
:
2229 optname
= SO_PASSSEC
;
2231 case TARGET_SO_TIMESTAMP
:
2232 optname
= SO_TIMESTAMP
;
2234 case TARGET_SO_RCVLOWAT
:
2235 optname
= SO_RCVLOWAT
;
2240 if (optlen
< sizeof(uint32_t))
2241 return -TARGET_EINVAL
;
2243 if (get_user_u32(val
, optval_addr
))
2244 return -TARGET_EFAULT
;
2245 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2249 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2250 ret
= -TARGET_ENOPROTOOPT
;
2255 /* do_getsockopt() Must return target values and target errnos. */
2256 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2257 abi_ulong optval_addr
, abi_ulong optlen
)
2264 case TARGET_SOL_SOCKET
:
2267 /* These don't just return a single integer */
2268 case TARGET_SO_RCVTIMEO
:
2269 case TARGET_SO_SNDTIMEO
:
2270 case TARGET_SO_PEERNAME
:
2272 case TARGET_SO_PEERCRED
: {
2275 struct target_ucred
*tcr
;
2277 if (get_user_u32(len
, optlen
)) {
2278 return -TARGET_EFAULT
;
2281 return -TARGET_EINVAL
;
2285 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2293 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2294 return -TARGET_EFAULT
;
2296 __put_user(cr
.pid
, &tcr
->pid
);
2297 __put_user(cr
.uid
, &tcr
->uid
);
2298 __put_user(cr
.gid
, &tcr
->gid
);
2299 unlock_user_struct(tcr
, optval_addr
, 1);
2300 if (put_user_u32(len
, optlen
)) {
2301 return -TARGET_EFAULT
;
2305 case TARGET_SO_LINGER
:
2309 struct target_linger
*tlg
;
2311 if (get_user_u32(len
, optlen
)) {
2312 return -TARGET_EFAULT
;
2315 return -TARGET_EINVAL
;
2319 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2327 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2328 return -TARGET_EFAULT
;
2330 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2331 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2332 unlock_user_struct(tlg
, optval_addr
, 1);
2333 if (put_user_u32(len
, optlen
)) {
2334 return -TARGET_EFAULT
;
2338 /* Options with 'int' argument. */
2339 case TARGET_SO_DEBUG
:
2342 case TARGET_SO_REUSEADDR
:
2343 optname
= SO_REUSEADDR
;
2346 case TARGET_SO_REUSEPORT
:
2347 optname
= SO_REUSEPORT
;
2350 case TARGET_SO_TYPE
:
2353 case TARGET_SO_ERROR
:
2356 case TARGET_SO_DONTROUTE
:
2357 optname
= SO_DONTROUTE
;
2359 case TARGET_SO_BROADCAST
:
2360 optname
= SO_BROADCAST
;
2362 case TARGET_SO_SNDBUF
:
2363 optname
= SO_SNDBUF
;
2365 case TARGET_SO_RCVBUF
:
2366 optname
= SO_RCVBUF
;
2368 case TARGET_SO_KEEPALIVE
:
2369 optname
= SO_KEEPALIVE
;
2371 case TARGET_SO_OOBINLINE
:
2372 optname
= SO_OOBINLINE
;
2374 case TARGET_SO_NO_CHECK
:
2375 optname
= SO_NO_CHECK
;
2377 case TARGET_SO_PRIORITY
:
2378 optname
= SO_PRIORITY
;
2381 case TARGET_SO_BSDCOMPAT
:
2382 optname
= SO_BSDCOMPAT
;
2385 case TARGET_SO_PASSCRED
:
2386 optname
= SO_PASSCRED
;
2388 case TARGET_SO_TIMESTAMP
:
2389 optname
= SO_TIMESTAMP
;
2391 case TARGET_SO_RCVLOWAT
:
2392 optname
= SO_RCVLOWAT
;
2394 case TARGET_SO_ACCEPTCONN
:
2395 optname
= SO_ACCEPTCONN
;
2402 /* TCP options all take an 'int' value. */
2404 if (get_user_u32(len
, optlen
))
2405 return -TARGET_EFAULT
;
2407 return -TARGET_EINVAL
;
2409 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2412 if (optname
== SO_TYPE
) {
2413 val
= host_to_target_sock_type(val
);
2418 if (put_user_u32(val
, optval_addr
))
2419 return -TARGET_EFAULT
;
2421 if (put_user_u8(val
, optval_addr
))
2422 return -TARGET_EFAULT
;
2424 if (put_user_u32(len
, optlen
))
2425 return -TARGET_EFAULT
;
2432 case IP_ROUTER_ALERT
:
2436 case IP_MTU_DISCOVER
:
2442 case IP_MULTICAST_TTL
:
2443 case IP_MULTICAST_LOOP
:
2444 if (get_user_u32(len
, optlen
))
2445 return -TARGET_EFAULT
;
2447 return -TARGET_EINVAL
;
2449 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2452 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2454 if (put_user_u32(len
, optlen
)
2455 || put_user_u8(val
, optval_addr
))
2456 return -TARGET_EFAULT
;
2458 if (len
> sizeof(int))
2460 if (put_user_u32(len
, optlen
)
2461 || put_user_u32(val
, optval_addr
))
2462 return -TARGET_EFAULT
;
2466 ret
= -TARGET_ENOPROTOOPT
;
2472 case IPV6_MTU_DISCOVER
:
2475 case IPV6_RECVPKTINFO
:
2476 case IPV6_UNICAST_HOPS
:
2477 case IPV6_MULTICAST_HOPS
:
2478 case IPV6_MULTICAST_LOOP
:
2480 case IPV6_RECVHOPLIMIT
:
2481 case IPV6_2292HOPLIMIT
:
2484 case IPV6_2292PKTINFO
:
2485 case IPV6_RECVTCLASS
:
2486 case IPV6_RECVRTHDR
:
2487 case IPV6_2292RTHDR
:
2488 case IPV6_RECVHOPOPTS
:
2489 case IPV6_2292HOPOPTS
:
2490 case IPV6_RECVDSTOPTS
:
2491 case IPV6_2292DSTOPTS
:
2493 #ifdef IPV6_RECVPATHMTU
2494 case IPV6_RECVPATHMTU
:
2496 #ifdef IPV6_TRANSPARENT
2497 case IPV6_TRANSPARENT
:
2499 #ifdef IPV6_FREEBIND
2502 #ifdef IPV6_RECVORIGDSTADDR
2503 case IPV6_RECVORIGDSTADDR
:
2505 if (get_user_u32(len
, optlen
))
2506 return -TARGET_EFAULT
;
2508 return -TARGET_EINVAL
;
2510 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2513 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2515 if (put_user_u32(len
, optlen
)
2516 || put_user_u8(val
, optval_addr
))
2517 return -TARGET_EFAULT
;
2519 if (len
> sizeof(int))
2521 if (put_user_u32(len
, optlen
)
2522 || put_user_u32(val
, optval_addr
))
2523 return -TARGET_EFAULT
;
2527 ret
= -TARGET_ENOPROTOOPT
;
2533 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2535 ret
= -TARGET_EOPNOTSUPP
;
2541 /* Convert target low/high pair representing file offset into the host
2542 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2543 * as the kernel doesn't handle them either.
2545 static void target_to_host_low_high(abi_ulong tlow
,
2547 unsigned long *hlow
,
2548 unsigned long *hhigh
)
2550 uint64_t off
= tlow
|
2551 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2552 TARGET_LONG_BITS
/ 2;
2555 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2558 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2559 abi_ulong count
, int copy
)
2561 struct target_iovec
*target_vec
;
2563 abi_ulong total_len
, max_len
;
2566 bool bad_address
= false;
2572 if (count
> IOV_MAX
) {
2577 vec
= g_try_new0(struct iovec
, count
);
2583 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2584 count
* sizeof(struct target_iovec
), 1);
2585 if (target_vec
== NULL
) {
2590 /* ??? If host page size > target page size, this will result in a
2591 value larger than what we can actually support. */
2592 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2595 for (i
= 0; i
< count
; i
++) {
2596 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2597 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2602 } else if (len
== 0) {
2603 /* Zero length pointer is ignored. */
2604 vec
[i
].iov_base
= 0;
2606 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2607 /* If the first buffer pointer is bad, this is a fault. But
2608 * subsequent bad buffers will result in a partial write; this
2609 * is realized by filling the vector with null pointers and
2611 if (!vec
[i
].iov_base
) {
2622 if (len
> max_len
- total_len
) {
2623 len
= max_len
- total_len
;
2626 vec
[i
].iov_len
= len
;
2630 unlock_user(target_vec
, target_addr
, 0);
2635 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2636 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2639 unlock_user(target_vec
, target_addr
, 0);
2646 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2647 abi_ulong count
, int copy
)
2649 struct target_iovec
*target_vec
;
2652 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2653 count
* sizeof(struct target_iovec
), 1);
2655 for (i
= 0; i
< count
; i
++) {
2656 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2657 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2661 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
2663 unlock_user(target_vec
, target_addr
, 0);
2669 static inline int target_to_host_sock_type(int *type
)
2672 int target_type
= *type
;
2674 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2675 case TARGET_SOCK_DGRAM
:
2676 host_type
= SOCK_DGRAM
;
2678 case TARGET_SOCK_STREAM
:
2679 host_type
= SOCK_STREAM
;
2682 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2685 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2686 #if defined(SOCK_CLOEXEC)
2687 host_type
|= SOCK_CLOEXEC
;
2689 return -TARGET_EINVAL
;
2692 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2693 #if defined(SOCK_NONBLOCK)
2694 host_type
|= SOCK_NONBLOCK
;
2695 #elif !defined(O_NONBLOCK)
2696 return -TARGET_EINVAL
;
2703 /* Try to emulate socket type flags after socket creation. */
2704 static int sock_flags_fixup(int fd
, int target_type
)
2706 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2707 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2708 int flags
= fcntl(fd
, F_GETFL
);
2709 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2711 return -TARGET_EINVAL
;
2718 /* do_socket() Must return target values and target errnos. */
2719 static abi_long
do_socket(int domain
, int type
, int protocol
)
2721 int target_type
= type
;
2724 ret
= target_to_host_sock_type(&type
);
2729 if (domain
== PF_NETLINK
&& !(
2730 #ifdef CONFIG_RTNETLINK
2731 protocol
== NETLINK_ROUTE
||
2733 protocol
== NETLINK_KOBJECT_UEVENT
||
2734 protocol
== NETLINK_AUDIT
)) {
2735 return -EPFNOSUPPORT
;
2738 if (domain
== AF_PACKET
||
2739 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2740 protocol
= tswap16(protocol
);
2743 ret
= get_errno(socket(domain
, type
, protocol
));
2745 ret
= sock_flags_fixup(ret
, target_type
);
2746 if (type
== SOCK_PACKET
) {
2747 /* Manage an obsolete case :
2748 * if socket type is SOCK_PACKET, bind by name
2750 fd_trans_register(ret
, &target_packet_trans
);
2751 } else if (domain
== PF_NETLINK
) {
2753 #ifdef CONFIG_RTNETLINK
2755 fd_trans_register(ret
, &target_netlink_route_trans
);
2758 case NETLINK_KOBJECT_UEVENT
:
2759 /* nothing to do: messages are strings */
2762 fd_trans_register(ret
, &target_netlink_audit_trans
);
2765 g_assert_not_reached();
2772 /* do_bind() Must return target values and target errnos. */
2773 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2779 if ((int)addrlen
< 0) {
2780 return -TARGET_EINVAL
;
2783 addr
= alloca(addrlen
+1);
2785 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2789 return get_errno(bind(sockfd
, addr
, addrlen
));
2792 /* do_connect() Must return target values and target errnos. */
2793 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2799 if ((int)addrlen
< 0) {
2800 return -TARGET_EINVAL
;
2803 addr
= alloca(addrlen
+1);
2805 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2809 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2812 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2813 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2814 int flags
, int send
)
2820 abi_ulong target_vec
;
2822 if (msgp
->msg_name
) {
2823 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2824 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2825 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2826 tswapal(msgp
->msg_name
),
2828 if (ret
== -TARGET_EFAULT
) {
2829 /* For connected sockets msg_name and msg_namelen must
2830 * be ignored, so returning EFAULT immediately is wrong.
2831 * Instead, pass a bad msg_name to the host kernel, and
2832 * let it decide whether to return EFAULT or not.
2834 msg
.msg_name
= (void *)-1;
2839 msg
.msg_name
= NULL
;
2840 msg
.msg_namelen
= 0;
2842 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2843 msg
.msg_control
= alloca(msg
.msg_controllen
);
2844 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2846 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2848 count
= tswapal(msgp
->msg_iovlen
);
2849 target_vec
= tswapal(msgp
->msg_iov
);
2851 if (count
> IOV_MAX
) {
2852 /* sendrcvmsg returns a different errno for this condition than
2853 * readv/writev, so we must catch it here before lock_iovec() does.
2855 ret
= -TARGET_EMSGSIZE
;
2859 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
2860 target_vec
, count
, send
);
2862 ret
= -host_to_target_errno(errno
);
2865 msg
.msg_iovlen
= count
;
2869 if (fd_trans_target_to_host_data(fd
)) {
2872 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2873 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2874 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2875 msg
.msg_iov
->iov_len
);
2877 msg
.msg_iov
->iov_base
= host_msg
;
2878 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2882 ret
= target_to_host_cmsg(&msg
, msgp
);
2884 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2888 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
2889 if (!is_error(ret
)) {
2891 if (fd_trans_host_to_target_data(fd
)) {
2892 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
2893 MIN(msg
.msg_iov
->iov_len
, len
));
2895 ret
= host_to_target_cmsg(msgp
, &msg
);
2897 if (!is_error(ret
)) {
2898 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
2899 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
2900 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
2901 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
2902 msg
.msg_name
, msg
.msg_namelen
);
2914 unlock_iovec(vec
, target_vec
, count
, !send
);
2919 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
2920 int flags
, int send
)
2923 struct target_msghdr
*msgp
;
2925 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
2929 return -TARGET_EFAULT
;
2931 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
2932 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
2936 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2937 * so it might not have this *mmsg-specific flag either.
2939 #ifndef MSG_WAITFORONE
2940 #define MSG_WAITFORONE 0x10000
2943 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
2944 unsigned int vlen
, unsigned int flags
,
2947 struct target_mmsghdr
*mmsgp
;
2951 if (vlen
> UIO_MAXIOV
) {
2955 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
2957 return -TARGET_EFAULT
;
2960 for (i
= 0; i
< vlen
; i
++) {
2961 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
2962 if (is_error(ret
)) {
2965 mmsgp
[i
].msg_len
= tswap32(ret
);
2966 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2967 if (flags
& MSG_WAITFORONE
) {
2968 flags
|= MSG_DONTWAIT
;
2972 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
2974 /* Return number of datagrams sent if we sent any at all;
2975 * otherwise return the error.
2983 /* do_accept4() Must return target values and target errnos. */
2984 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
2985 abi_ulong target_addrlen_addr
, int flags
)
2987 socklen_t addrlen
, ret_addrlen
;
2992 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
2994 if (target_addr
== 0) {
2995 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
2998 /* linux returns EINVAL if addrlen pointer is invalid */
2999 if (get_user_u32(addrlen
, target_addrlen_addr
))
3000 return -TARGET_EINVAL
;
3002 if ((int)addrlen
< 0) {
3003 return -TARGET_EINVAL
;
3006 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3007 return -TARGET_EINVAL
;
3009 addr
= alloca(addrlen
);
3011 ret_addrlen
= addrlen
;
3012 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3013 if (!is_error(ret
)) {
3014 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3015 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3016 ret
= -TARGET_EFAULT
;
3022 /* do_getpeername() Must return target values and target errnos. */
3023 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3024 abi_ulong target_addrlen_addr
)
3026 socklen_t addrlen
, ret_addrlen
;
3030 if (get_user_u32(addrlen
, target_addrlen_addr
))
3031 return -TARGET_EFAULT
;
3033 if ((int)addrlen
< 0) {
3034 return -TARGET_EINVAL
;
3037 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3038 return -TARGET_EFAULT
;
3040 addr
= alloca(addrlen
);
3042 ret_addrlen
= addrlen
;
3043 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3044 if (!is_error(ret
)) {
3045 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3046 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3047 ret
= -TARGET_EFAULT
;
3053 /* do_getsockname() Must return target values and target errnos. */
3054 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3055 abi_ulong target_addrlen_addr
)
3057 socklen_t addrlen
, ret_addrlen
;
3061 if (get_user_u32(addrlen
, target_addrlen_addr
))
3062 return -TARGET_EFAULT
;
3064 if ((int)addrlen
< 0) {
3065 return -TARGET_EINVAL
;
3068 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3069 return -TARGET_EFAULT
;
3071 addr
= alloca(addrlen
);
3073 ret_addrlen
= addrlen
;
3074 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3075 if (!is_error(ret
)) {
3076 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3077 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3078 ret
= -TARGET_EFAULT
;
3084 /* do_socketpair() Must return target values and target errnos. */
3085 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3086 abi_ulong target_tab_addr
)
3091 target_to_host_sock_type(&type
);
3093 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3094 if (!is_error(ret
)) {
3095 if (put_user_s32(tab
[0], target_tab_addr
)
3096 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3097 ret
= -TARGET_EFAULT
;
3102 /* do_sendto() Must return target values and target errnos. */
3103 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3104 abi_ulong target_addr
, socklen_t addrlen
)
3108 void *copy_msg
= NULL
;
3111 if ((int)addrlen
< 0) {
3112 return -TARGET_EINVAL
;
3115 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3117 return -TARGET_EFAULT
;
3118 if (fd_trans_target_to_host_data(fd
)) {
3119 copy_msg
= host_msg
;
3120 host_msg
= g_malloc(len
);
3121 memcpy(host_msg
, copy_msg
, len
);
3122 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3128 addr
= alloca(addrlen
+1);
3129 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3133 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3135 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3140 host_msg
= copy_msg
;
3142 unlock_user(host_msg
, msg
, 0);
3146 /* do_recvfrom() Must return target values and target errnos. */
3147 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3148 abi_ulong target_addr
,
3149 abi_ulong target_addrlen
)
3151 socklen_t addrlen
, ret_addrlen
;
3156 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3158 return -TARGET_EFAULT
;
3160 if (get_user_u32(addrlen
, target_addrlen
)) {
3161 ret
= -TARGET_EFAULT
;
3164 if ((int)addrlen
< 0) {
3165 ret
= -TARGET_EINVAL
;
3168 addr
= alloca(addrlen
);
3169 ret_addrlen
= addrlen
;
3170 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3171 addr
, &ret_addrlen
));
3173 addr
= NULL
; /* To keep compiler quiet. */
3174 addrlen
= 0; /* To keep compiler quiet. */
3175 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3177 if (!is_error(ret
)) {
3178 if (fd_trans_host_to_target_data(fd
)) {
3180 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3181 if (is_error(trans
)) {
3187 host_to_target_sockaddr(target_addr
, addr
,
3188 MIN(addrlen
, ret_addrlen
));
3189 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3190 ret
= -TARGET_EFAULT
;
3194 unlock_user(host_msg
, msg
, len
);
3197 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3295 #define N_SHM_REGIONS 32
3297 static struct shm_region
{
3301 } shm_regions
[N_SHM_REGIONS
];
3303 #ifndef TARGET_SEMID64_DS
3304 /* asm-generic version of this struct */
3305 struct target_semid64_ds
3307 struct target_ipc_perm sem_perm
;
3308 abi_ulong sem_otime
;
3309 #if TARGET_ABI_BITS == 32
3310 abi_ulong __unused1
;
3312 abi_ulong sem_ctime
;
3313 #if TARGET_ABI_BITS == 32
3314 abi_ulong __unused2
;
3316 abi_ulong sem_nsems
;
3317 abi_ulong __unused3
;
3318 abi_ulong __unused4
;
3322 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3323 abi_ulong target_addr
)
3325 struct target_ipc_perm
*target_ip
;
3326 struct target_semid64_ds
*target_sd
;
3328 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3329 return -TARGET_EFAULT
;
3330 target_ip
= &(target_sd
->sem_perm
);
3331 host_ip
->__key
= tswap32(target_ip
->__key
);
3332 host_ip
->uid
= tswap32(target_ip
->uid
);
3333 host_ip
->gid
= tswap32(target_ip
->gid
);
3334 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3335 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3336 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3337 host_ip
->mode
= tswap32(target_ip
->mode
);
3339 host_ip
->mode
= tswap16(target_ip
->mode
);
3341 #if defined(TARGET_PPC)
3342 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3344 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3346 unlock_user_struct(target_sd
, target_addr
, 0);
3350 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3351 struct ipc_perm
*host_ip
)
3353 struct target_ipc_perm
*target_ip
;
3354 struct target_semid64_ds
*target_sd
;
3356 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3357 return -TARGET_EFAULT
;
3358 target_ip
= &(target_sd
->sem_perm
);
3359 target_ip
->__key
= tswap32(host_ip
->__key
);
3360 target_ip
->uid
= tswap32(host_ip
->uid
);
3361 target_ip
->gid
= tswap32(host_ip
->gid
);
3362 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3363 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3364 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3365 target_ip
->mode
= tswap32(host_ip
->mode
);
3367 target_ip
->mode
= tswap16(host_ip
->mode
);
3369 #if defined(TARGET_PPC)
3370 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3372 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3374 unlock_user_struct(target_sd
, target_addr
, 1);
3378 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3379 abi_ulong target_addr
)
3381 struct target_semid64_ds
*target_sd
;
3383 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3384 return -TARGET_EFAULT
;
3385 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3386 return -TARGET_EFAULT
;
3387 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3388 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3389 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3390 unlock_user_struct(target_sd
, target_addr
, 0);
3394 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3395 struct semid_ds
*host_sd
)
3397 struct target_semid64_ds
*target_sd
;
3399 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3400 return -TARGET_EFAULT
;
3401 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3402 return -TARGET_EFAULT
;
3403 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3404 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3405 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3406 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (IPC_INFO/SEM_INFO result); field set
 * mirrors the host's seminfo, copied one-by-one in
 * host_to_target_seminfo() below. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3423 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3424 struct seminfo
*host_seminfo
)
3426 struct target_seminfo
*target_seminfo
;
3427 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3428 return -TARGET_EFAULT
;
3429 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3430 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3431 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3432 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3433 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3434 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3435 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3436 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3437 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3438 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3439 unlock_user_struct(target_seminfo
, target_addr
, 1);
3445 struct semid_ds
*buf
;
3446 unsigned short *array
;
3447 struct seminfo
*__buf
;
3450 union target_semun
{
3457 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3458 abi_ulong target_addr
)
3461 unsigned short *array
;
3463 struct semid_ds semid_ds
;
3466 semun
.buf
= &semid_ds
;
3468 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3470 return get_errno(ret
);
3472 nsems
= semid_ds
.sem_nsems
;
3474 *host_array
= g_try_new(unsigned short, nsems
);
3476 return -TARGET_ENOMEM
;
3478 array
= lock_user(VERIFY_READ
, target_addr
,
3479 nsems
*sizeof(unsigned short), 1);
3481 g_free(*host_array
);
3482 return -TARGET_EFAULT
;
3485 for(i
=0; i
<nsems
; i
++) {
3486 __get_user((*host_array
)[i
], &array
[i
]);
3488 unlock_user(array
, target_addr
, 0);
3493 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3494 unsigned short **host_array
)
3497 unsigned short *array
;
3499 struct semid_ds semid_ds
;
3502 semun
.buf
= &semid_ds
;
3504 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3506 return get_errno(ret
);
3508 nsems
= semid_ds
.sem_nsems
;
3510 array
= lock_user(VERIFY_WRITE
, target_addr
,
3511 nsems
*sizeof(unsigned short), 0);
3513 return -TARGET_EFAULT
;
3515 for(i
=0; i
<nsems
; i
++) {
3516 __put_user((*host_array
)[i
], &array
[i
]);
3518 g_free(*host_array
);
3519 unlock_user(array
, target_addr
, 1);
3524 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3525 abi_ulong target_arg
)
3527 union target_semun target_su
= { .buf
= target_arg
};
3529 struct semid_ds dsarg
;
3530 unsigned short *array
= NULL
;
3531 struct seminfo seminfo
;
3532 abi_long ret
= -TARGET_EINVAL
;
3539 /* In 64 bit cross-endian situations, we will erroneously pick up
3540 * the wrong half of the union for the "val" element. To rectify
3541 * this, the entire 8-byte structure is byteswapped, followed by
3542 * a swap of the 4 byte val field. In other cases, the data is
3543 * already in proper host byte order. */
3544 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3545 target_su
.buf
= tswapal(target_su
.buf
);
3546 arg
.val
= tswap32(target_su
.val
);
3548 arg
.val
= target_su
.val
;
3550 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3554 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3558 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3559 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3566 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3570 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3571 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3577 arg
.__buf
= &seminfo
;
3578 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3579 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
3587 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (one semop() operation); mirrors the
 * host sembuf fields copied in target_to_host_sembuf() below. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3600 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
3601 abi_ulong target_addr
,
3604 struct target_sembuf
*target_sembuf
;
3607 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
3608 nsops
*sizeof(struct target_sembuf
), 1);
3610 return -TARGET_EFAULT
;
3612 for(i
=0; i
<nsops
; i
++) {
3613 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
3614 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
3615 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
3618 unlock_user(target_sembuf
, target_addr
, 0);
3623 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
3625 struct sembuf sops
[nsops
];
3628 if (target_to_host_sembuf(sops
, ptr
, nsops
))
3629 return -TARGET_EFAULT
;
3631 ret
= -TARGET_ENOSYS
;
3632 #ifdef __NR_semtimedop
3633 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
3636 if (ret
== -TARGET_ENOSYS
) {
3637 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
, nsops
, 0, sops
, 0));
3643 struct target_msqid_ds
3645 struct target_ipc_perm msg_perm
;
3646 abi_ulong msg_stime
;
3647 #if TARGET_ABI_BITS == 32
3648 abi_ulong __unused1
;
3650 abi_ulong msg_rtime
;
3651 #if TARGET_ABI_BITS == 32
3652 abi_ulong __unused2
;
3654 abi_ulong msg_ctime
;
3655 #if TARGET_ABI_BITS == 32
3656 abi_ulong __unused3
;
3658 abi_ulong __msg_cbytes
;
3660 abi_ulong msg_qbytes
;
3661 abi_ulong msg_lspid
;
3662 abi_ulong msg_lrpid
;
3663 abi_ulong __unused4
;
3664 abi_ulong __unused5
;
3667 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
3668 abi_ulong target_addr
)
3670 struct target_msqid_ds
*target_md
;
3672 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
3673 return -TARGET_EFAULT
;
3674 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
3675 return -TARGET_EFAULT
;
3676 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
3677 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
3678 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
3679 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
3680 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
3681 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
3682 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
3683 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
3684 unlock_user_struct(target_md
, target_addr
, 0);
3688 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
3689 struct msqid_ds
*host_md
)
3691 struct target_msqid_ds
*target_md
;
3693 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
3694 return -TARGET_EFAULT
;
3695 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
3696 return -TARGET_EFAULT
;
3697 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
3698 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
3699 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
3700 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
3701 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
3702 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
3703 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
3704 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
3705 unlock_user_struct(target_md
, target_addr
, 1);
3709 struct target_msginfo
{
3717 unsigned short int msgseg
;
3720 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
3721 struct msginfo
*host_msginfo
)
3723 struct target_msginfo
*target_msginfo
;
3724 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
3725 return -TARGET_EFAULT
;
3726 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
3727 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
3728 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
3729 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
3730 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
3731 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
3732 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
3733 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
3734 unlock_user_struct(target_msginfo
, target_addr
, 1);
3738 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
3740 struct msqid_ds dsarg
;
3741 struct msginfo msginfo
;
3742 abi_long ret
= -TARGET_EINVAL
;
3750 if (target_to_host_msqid_ds(&dsarg
,ptr
))
3751 return -TARGET_EFAULT
;
3752 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
3753 if (host_to_target_msqid_ds(ptr
,&dsarg
))
3754 return -TARGET_EFAULT
;
3757 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
3761 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
3762 if (host_to_target_msginfo(ptr
, &msginfo
))
3763 return -TARGET_EFAULT
;
3770 struct target_msgbuf
{
3775 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
3776 ssize_t msgsz
, int msgflg
)
3778 struct target_msgbuf
*target_mb
;
3779 struct msgbuf
*host_mb
;
3783 return -TARGET_EINVAL
;
3786 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
3787 return -TARGET_EFAULT
;
3788 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3790 unlock_user_struct(target_mb
, msgp
, 0);
3791 return -TARGET_ENOMEM
;
3793 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
3794 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
3795 ret
= -TARGET_ENOSYS
;
3797 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
3800 if (ret
== -TARGET_ENOSYS
) {
3801 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
3806 unlock_user_struct(target_mb
, msgp
, 0);
3811 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
3812 ssize_t msgsz
, abi_long msgtyp
,
3815 struct target_msgbuf
*target_mb
;
3817 struct msgbuf
*host_mb
;
3821 return -TARGET_EINVAL
;
3824 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
3825 return -TARGET_EFAULT
;
3827 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
3829 ret
= -TARGET_ENOMEM
;
3832 ret
= -TARGET_ENOSYS
;
3834 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
3837 if (ret
== -TARGET_ENOSYS
) {
3838 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
3839 msgflg
, host_mb
, msgtyp
));
3844 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
3845 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
3846 if (!target_mtext
) {
3847 ret
= -TARGET_EFAULT
;
3850 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
3851 unlock_user(target_mtext
, target_mtext_addr
, ret
);
3854 target_mb
->mtype
= tswapal(host_mb
->mtype
);
3858 unlock_user_struct(target_mb
, msgp
, 1);
3863 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
3864 abi_ulong target_addr
)
3866 struct target_shmid_ds
*target_sd
;
3868 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3869 return -TARGET_EFAULT
;
3870 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
3871 return -TARGET_EFAULT
;
3872 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3873 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3874 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3875 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3876 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3877 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3878 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3879 unlock_user_struct(target_sd
, target_addr
, 0);
3883 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
3884 struct shmid_ds
*host_sd
)
3886 struct target_shmid_ds
*target_sd
;
3888 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3889 return -TARGET_EFAULT
;
3890 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
3891 return -TARGET_EFAULT
;
3892 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
3893 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
3894 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
3895 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
3896 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
3897 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
3898 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
3899 unlock_user_struct(target_sd
, target_addr
, 1);
3903 struct target_shminfo
{
3911 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
3912 struct shminfo
*host_shminfo
)
3914 struct target_shminfo
*target_shminfo
;
3915 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
3916 return -TARGET_EFAULT
;
3917 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
3918 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
3919 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
3920 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
3921 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
3922 unlock_user_struct(target_shminfo
, target_addr
, 1);
3926 struct target_shm_info
{
3931 abi_ulong swap_attempts
;
3932 abi_ulong swap_successes
;
3935 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
3936 struct shm_info
*host_shm_info
)
3938 struct target_shm_info
*target_shm_info
;
3939 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
3940 return -TARGET_EFAULT
;
3941 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
3942 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
3943 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
3944 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
3945 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
3946 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
3947 unlock_user_struct(target_shm_info
, target_addr
, 1);
3951 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
3953 struct shmid_ds dsarg
;
3954 struct shminfo shminfo
;
3955 struct shm_info shm_info
;
3956 abi_long ret
= -TARGET_EINVAL
;
3964 if (target_to_host_shmid_ds(&dsarg
, buf
))
3965 return -TARGET_EFAULT
;
3966 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
3967 if (host_to_target_shmid_ds(buf
, &dsarg
))
3968 return -TARGET_EFAULT
;
3971 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
3972 if (host_to_target_shminfo(buf
, &shminfo
))
3973 return -TARGET_EFAULT
;
3976 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
3977 if (host_to_target_shm_info(buf
, &shm_info
))
3978 return -TARGET_EFAULT
;
3983 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
3990 #ifndef TARGET_FORCE_SHMLBA
3991 /* For most architectures, SHMLBA is the same as the page size;
3992 * some architectures have larger values, in which case they should
3993 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3994 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3995 * and defining its own value for SHMLBA.
3997 * The kernel also permits SHMLBA to be set by the architecture to a
3998 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3999 * this means that addresses are rounded to the large size if
4000 * SHM_RND is set but addresses not aligned to that size are not rejected
4001 * as long as they are at least page-aligned. Since the only architecture
4002 * which uses this is ia64 this code doesn't provide for that oddity.
4004 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4006 return TARGET_PAGE_SIZE
;
4010 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4011 int shmid
, abi_ulong shmaddr
, int shmflg
)
4015 struct shmid_ds shm_info
;
4019 /* find out the length of the shared memory segment */
4020 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4021 if (is_error(ret
)) {
4022 /* can't get length, bail out */
4026 shmlba
= target_shmlba(cpu_env
);
4028 if (shmaddr
& (shmlba
- 1)) {
4029 if (shmflg
& SHM_RND
) {
4030 shmaddr
&= ~(shmlba
- 1);
4032 return -TARGET_EINVAL
;
4035 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4036 return -TARGET_EINVAL
;
4042 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4044 abi_ulong mmap_start
;
4046 /* In order to use the host shmat, we need to honor host SHMLBA. */
4047 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4049 if (mmap_start
== -1) {
4051 host_raddr
= (void *)-1;
4053 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4056 if (host_raddr
== (void *)-1) {
4058 return get_errno((long)host_raddr
);
4060 raddr
=h2g((unsigned long)host_raddr
);
4062 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4063 PAGE_VALID
| PAGE_READ
|
4064 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4066 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4067 if (!shm_regions
[i
].in_use
) {
4068 shm_regions
[i
].in_use
= true;
4069 shm_regions
[i
].start
= raddr
;
4070 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4080 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4087 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4088 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4089 shm_regions
[i
].in_use
= false;
4090 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4094 rv
= get_errno(shmdt(g2h(shmaddr
)));
4101 #ifdef TARGET_NR_ipc
4102 /* ??? This only works with linear mappings. */
4103 /* do_ipc() must return target values and target errnos. */
4104 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4105 unsigned int call
, abi_long first
,
4106 abi_long second
, abi_long third
,
4107 abi_long ptr
, abi_long fifth
)
4112 version
= call
>> 16;
4117 ret
= do_semop(first
, ptr
, second
);
4121 ret
= get_errno(semget(first
, second
, third
));
4124 case IPCOP_semctl
: {
4125 /* The semun argument to semctl is passed by value, so dereference the
4128 get_user_ual(atptr
, ptr
);
4129 ret
= do_semctl(first
, second
, third
, atptr
);
4134 ret
= get_errno(msgget(first
, second
));
4138 ret
= do_msgsnd(first
, ptr
, second
, third
);
4142 ret
= do_msgctl(first
, second
, ptr
);
4149 struct target_ipc_kludge
{
4154 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4155 ret
= -TARGET_EFAULT
;
4159 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4161 unlock_user_struct(tmp
, ptr
, 0);
4165 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4174 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4175 if (is_error(raddr
))
4176 return get_errno(raddr
);
4177 if (put_user_ual(raddr
, third
))
4178 return -TARGET_EFAULT
;
4182 ret
= -TARGET_EINVAL
;
4187 ret
= do_shmdt(ptr
);
4191 /* IPC_* flag values are the same on all linux platforms */
4192 ret
= get_errno(shmget(first
, second
, third
));
4195 /* IPC_* and SHM_* command values are the same on all linux platforms */
4197 ret
= do_shmctl(first
, second
, ptr
);
4200 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4201 ret
= -TARGET_ENOSYS
;
4208 /* kernel structure types definitions */
4210 #define STRUCT(name, ...) STRUCT_ ## name,
4211 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4213 #include "syscall_types.h"
4217 #undef STRUCT_SPECIAL
4219 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4220 #define STRUCT_SPECIAL(name)
4221 #include "syscall_types.h"
4223 #undef STRUCT_SPECIAL
4225 typedef struct IOCTLEntry IOCTLEntry
;
4227 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4228 int fd
, int cmd
, abi_long arg
);
4232 unsigned int host_cmd
;
4235 do_ioctl_fn
*do_ioctl
;
4236 const argtype arg_type
[5];
4239 #define IOC_R 0x0001
4240 #define IOC_W 0x0002
4241 #define IOC_RW (IOC_R | IOC_W)
4243 #define MAX_STRUCT_SIZE 4096
4245 #ifdef CONFIG_FIEMAP
4246 /* So fiemap access checks don't overflow on 32 bit systems.
4247 * This is very slightly smaller than the limit imposed by
4248 * the underlying kernel.
4250 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4251 / sizeof(struct fiemap_extent))
4253 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4254 int fd
, int cmd
, abi_long arg
)
4256 /* The parameter for this ioctl is a struct fiemap followed
4257 * by an array of struct fiemap_extent whose size is set
4258 * in fiemap->fm_extent_count. The array is filled in by the
4261 int target_size_in
, target_size_out
;
4263 const argtype
*arg_type
= ie
->arg_type
;
4264 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4267 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4271 assert(arg_type
[0] == TYPE_PTR
);
4272 assert(ie
->access
== IOC_RW
);
4274 target_size_in
= thunk_type_size(arg_type
, 0);
4275 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4277 return -TARGET_EFAULT
;
4279 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4280 unlock_user(argptr
, arg
, 0);
4281 fm
= (struct fiemap
*)buf_temp
;
4282 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4283 return -TARGET_EINVAL
;
4286 outbufsz
= sizeof (*fm
) +
4287 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4289 if (outbufsz
> MAX_STRUCT_SIZE
) {
4290 /* We can't fit all the extents into the fixed size buffer.
4291 * Allocate one that is large enough and use it instead.
4293 fm
= g_try_malloc(outbufsz
);
4295 return -TARGET_ENOMEM
;
4297 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4300 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4301 if (!is_error(ret
)) {
4302 target_size_out
= target_size_in
;
4303 /* An extent_count of 0 means we were only counting the extents
4304 * so there are no structs to copy
4306 if (fm
->fm_extent_count
!= 0) {
4307 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4309 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4311 ret
= -TARGET_EFAULT
;
4313 /* Convert the struct fiemap */
4314 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4315 if (fm
->fm_extent_count
!= 0) {
4316 p
= argptr
+ target_size_in
;
4317 /* ...and then all the struct fiemap_extents */
4318 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4319 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4324 unlock_user(argptr
, arg
, target_size_out
);
4334 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4335 int fd
, int cmd
, abi_long arg
)
4337 const argtype
*arg_type
= ie
->arg_type
;
4341 struct ifconf
*host_ifconf
;
4343 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4344 int target_ifreq_size
;
4349 abi_long target_ifc_buf
;
4353 assert(arg_type
[0] == TYPE_PTR
);
4354 assert(ie
->access
== IOC_RW
);
4357 target_size
= thunk_type_size(arg_type
, 0);
4359 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4361 return -TARGET_EFAULT
;
4362 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4363 unlock_user(argptr
, arg
, 0);
4365 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4366 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4367 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4369 if (target_ifc_buf
!= 0) {
4370 target_ifc_len
= host_ifconf
->ifc_len
;
4371 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4372 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4374 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4375 if (outbufsz
> MAX_STRUCT_SIZE
) {
4377 * We can't fit all the extents into the fixed size buffer.
4378 * Allocate one that is large enough and use it instead.
4380 host_ifconf
= malloc(outbufsz
);
4382 return -TARGET_ENOMEM
;
4384 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4387 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4389 host_ifconf
->ifc_len
= host_ifc_len
;
4391 host_ifc_buf
= NULL
;
4393 host_ifconf
->ifc_buf
= host_ifc_buf
;
4395 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4396 if (!is_error(ret
)) {
4397 /* convert host ifc_len to target ifc_len */
4399 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4400 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4401 host_ifconf
->ifc_len
= target_ifc_len
;
4403 /* restore target ifc_buf */
4405 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4407 /* copy struct ifconf to target user */
4409 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4411 return -TARGET_EFAULT
;
4412 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4413 unlock_user(argptr
, arg
, target_size
);
4415 if (target_ifc_buf
!= 0) {
4416 /* copy ifreq[] to target user */
4417 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4418 for (i
= 0; i
< nb_ifreq
; i
++) {
4419 thunk_convert(argptr
+ i
* target_ifreq_size
,
4420 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4421 ifreq_arg_type
, THUNK_TARGET
);
4423 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4434 #if defined(CONFIG_USBFS)
4435 #if HOST_LONG_BITS > 64
4436 #error USBDEVFS thunks do not support >64 bit hosts yet.
4439 uint64_t target_urb_adr
;
4440 uint64_t target_buf_adr
;
4441 char *target_buf_ptr
;
4442 struct usbdevfs_urb host_urb
;
4445 static GHashTable
*usbdevfs_urb_hashtable(void)
4447 static GHashTable
*urb_hashtable
;
4449 if (!urb_hashtable
) {
4450 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4452 return urb_hashtable
;
4455 static void urb_hashtable_insert(struct live_urb
*urb
)
4457 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4458 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4461 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4463 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4464 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4467 static void urb_hashtable_remove(struct live_urb
*urb
)
4469 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4470 g_hash_table_remove(urb_hashtable
, urb
);
4474 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4475 int fd
, int cmd
, abi_long arg
)
4477 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4478 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4479 struct live_urb
*lurb
;
4483 uintptr_t target_urb_adr
;
4486 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4488 memset(buf_temp
, 0, sizeof(uint64_t));
4489 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4490 if (is_error(ret
)) {
4494 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4495 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4496 if (!lurb
->target_urb_adr
) {
4497 return -TARGET_EFAULT
;
4499 urb_hashtable_remove(lurb
);
4500 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4501 lurb
->host_urb
.buffer_length
);
4502 lurb
->target_buf_ptr
= NULL
;
4504 /* restore the guest buffer pointer */
4505 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4507 /* update the guest urb struct */
4508 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
4511 return -TARGET_EFAULT
;
4513 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
4514 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
4516 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
4517 /* write back the urb handle */
4518 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4521 return -TARGET_EFAULT
;
4524 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4525 target_urb_adr
= lurb
->target_urb_adr
;
4526 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
4527 unlock_user(argptr
, arg
, target_size
);
4534 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
4535 uint8_t *buf_temp
__attribute__((unused
)),
4536 int fd
, int cmd
, abi_long arg
)
4538 struct live_urb
*lurb
;
4540 /* map target address back to host URB with metadata. */
4541 lurb
= urb_hashtable_lookup(arg
);
4543 return -TARGET_EFAULT
;
4545 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4549 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4550 int fd
, int cmd
, abi_long arg
)
4552 const argtype
*arg_type
= ie
->arg_type
;
4557 struct live_urb
*lurb
;
4560 * each submitted URB needs to map to a unique ID for the
4561 * kernel, and that unique ID needs to be a pointer to
4562 * host memory. hence, we need to malloc for each URB.
4563 * isochronous transfers have a variable length struct.
4566 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
4568 /* construct host copy of urb and metadata */
4569 lurb
= g_try_malloc0(sizeof(struct live_urb
));
4571 return -TARGET_ENOMEM
;
4574 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4577 return -TARGET_EFAULT
;
4579 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
4580 unlock_user(argptr
, arg
, 0);
4582 lurb
->target_urb_adr
= arg
;
4583 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
4585 /* buffer space used depends on endpoint type so lock the entire buffer */
4586 /* control type urbs should check the buffer contents for true direction */
4587 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
4588 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
4589 lurb
->host_urb
.buffer_length
, 1);
4590 if (lurb
->target_buf_ptr
== NULL
) {
4592 return -TARGET_EFAULT
;
4595 /* update buffer pointer in host copy */
4596 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
4598 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
4599 if (is_error(ret
)) {
4600 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
4603 urb_hashtable_insert(lurb
);
4608 #endif /* CONFIG_USBFS */
4610 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4611 int cmd
, abi_long arg
)
4614 struct dm_ioctl
*host_dm
;
4615 abi_long guest_data
;
4616 uint32_t guest_data_size
;
4618 const argtype
*arg_type
= ie
->arg_type
;
4620 void *big_buf
= NULL
;
4624 target_size
= thunk_type_size(arg_type
, 0);
4625 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4627 ret
= -TARGET_EFAULT
;
4630 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4631 unlock_user(argptr
, arg
, 0);
4633 /* buf_temp is too small, so fetch things into a bigger buffer */
4634 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4635 memcpy(big_buf
, buf_temp
, target_size
);
4639 guest_data
= arg
+ host_dm
->data_start
;
4640 if ((guest_data
- arg
) < 0) {
4641 ret
= -TARGET_EINVAL
;
4644 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4645 host_data
= (char*)host_dm
+ host_dm
->data_start
;
4647 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
4649 ret
= -TARGET_EFAULT
;
4653 switch (ie
->host_cmd
) {
4655 case DM_LIST_DEVICES
:
4658 case DM_DEV_SUSPEND
:
4661 case DM_TABLE_STATUS
:
4662 case DM_TABLE_CLEAR
:
4664 case DM_LIST_VERSIONS
:
4668 case DM_DEV_SET_GEOMETRY
:
4669 /* data contains only strings */
4670 memcpy(host_data
, argptr
, guest_data_size
);
4673 memcpy(host_data
, argptr
, guest_data_size
);
4674 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
4678 void *gspec
= argptr
;
4679 void *cur_data
= host_data
;
4680 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4681 int spec_size
= thunk_type_size(arg_type
, 0);
4684 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4685 struct dm_target_spec
*spec
= cur_data
;
4689 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
4690 slen
= strlen((char*)gspec
+ spec_size
) + 1;
4692 spec
->next
= sizeof(*spec
) + slen
;
4693 strcpy((char*)&spec
[1], gspec
+ spec_size
);
4695 cur_data
+= spec
->next
;
4700 ret
= -TARGET_EINVAL
;
4701 unlock_user(argptr
, guest_data
, 0);
4704 unlock_user(argptr
, guest_data
, 0);
4706 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4707 if (!is_error(ret
)) {
4708 guest_data
= arg
+ host_dm
->data_start
;
4709 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
4710 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
4711 switch (ie
->host_cmd
) {
4716 case DM_DEV_SUSPEND
:
4719 case DM_TABLE_CLEAR
:
4721 case DM_DEV_SET_GEOMETRY
:
4722 /* no return data */
4724 case DM_LIST_DEVICES
:
4726 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
4727 uint32_t remaining_data
= guest_data_size
;
4728 void *cur_data
= argptr
;
4729 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
4730 int nl_size
= 12; /* can't use thunk_size due to alignment */
4733 uint32_t next
= nl
->next
;
4735 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
4737 if (remaining_data
< nl
->next
) {
4738 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4741 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
4742 strcpy(cur_data
+ nl_size
, nl
->name
);
4743 cur_data
+= nl
->next
;
4744 remaining_data
-= nl
->next
;
4748 nl
= (void*)nl
+ next
;
4753 case DM_TABLE_STATUS
:
4755 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
4756 void *cur_data
= argptr
;
4757 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
4758 int spec_size
= thunk_type_size(arg_type
, 0);
4761 for (i
= 0; i
< host_dm
->target_count
; i
++) {
4762 uint32_t next
= spec
->next
;
4763 int slen
= strlen((char*)&spec
[1]) + 1;
4764 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
4765 if (guest_data_size
< spec
->next
) {
4766 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4769 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
4770 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
4771 cur_data
= argptr
+ spec
->next
;
4772 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
4778 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
4779 int count
= *(uint32_t*)hdata
;
4780 uint64_t *hdev
= hdata
+ 8;
4781 uint64_t *gdev
= argptr
+ 8;
4784 *(uint32_t*)argptr
= tswap32(count
);
4785 for (i
= 0; i
< count
; i
++) {
4786 *gdev
= tswap64(*hdev
);
4792 case DM_LIST_VERSIONS
:
4794 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
4795 uint32_t remaining_data
= guest_data_size
;
4796 void *cur_data
= argptr
;
4797 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
4798 int vers_size
= thunk_type_size(arg_type
, 0);
4801 uint32_t next
= vers
->next
;
4803 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
4805 if (remaining_data
< vers
->next
) {
4806 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
4809 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
4810 strcpy(cur_data
+ vers_size
, vers
->name
);
4811 cur_data
+= vers
->next
;
4812 remaining_data
-= vers
->next
;
4816 vers
= (void*)vers
+ next
;
4821 unlock_user(argptr
, guest_data
, 0);
4822 ret
= -TARGET_EINVAL
;
4825 unlock_user(argptr
, guest_data
, guest_data_size
);
4827 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4829 ret
= -TARGET_EFAULT
;
4832 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
4833 unlock_user(argptr
, arg
, target_size
);
4840 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4841 int cmd
, abi_long arg
)
4845 const argtype
*arg_type
= ie
->arg_type
;
4846 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
4849 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
4850 struct blkpg_partition host_part
;
4852 /* Read and convert blkpg */
4854 target_size
= thunk_type_size(arg_type
, 0);
4855 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4857 ret
= -TARGET_EFAULT
;
4860 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4861 unlock_user(argptr
, arg
, 0);
4863 switch (host_blkpg
->op
) {
4864 case BLKPG_ADD_PARTITION
:
4865 case BLKPG_DEL_PARTITION
:
4866 /* payload is struct blkpg_partition */
4869 /* Unknown opcode */
4870 ret
= -TARGET_EINVAL
;
4874 /* Read and convert blkpg->data */
4875 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
4876 target_size
= thunk_type_size(part_arg_type
, 0);
4877 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4879 ret
= -TARGET_EFAULT
;
4882 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
4883 unlock_user(argptr
, arg
, 0);
4885 /* Swizzle the data pointer to our local copy and call! */
4886 host_blkpg
->data
= &host_part
;
4887 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
4893 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4894 int fd
, int cmd
, abi_long arg
)
4896 const argtype
*arg_type
= ie
->arg_type
;
4897 const StructEntry
*se
;
4898 const argtype
*field_types
;
4899 const int *dst_offsets
, *src_offsets
;
4902 abi_ulong
*target_rt_dev_ptr
= NULL
;
4903 unsigned long *host_rt_dev_ptr
= NULL
;
4907 assert(ie
->access
== IOC_W
);
4908 assert(*arg_type
== TYPE_PTR
);
4910 assert(*arg_type
== TYPE_STRUCT
);
4911 target_size
= thunk_type_size(arg_type
, 0);
4912 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4914 return -TARGET_EFAULT
;
4917 assert(*arg_type
== (int)STRUCT_rtentry
);
4918 se
= struct_entries
+ *arg_type
++;
4919 assert(se
->convert
[0] == NULL
);
4920 /* convert struct here to be able to catch rt_dev string */
4921 field_types
= se
->field_types
;
4922 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
4923 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
4924 for (i
= 0; i
< se
->nb_fields
; i
++) {
4925 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
4926 assert(*field_types
== TYPE_PTRVOID
);
4927 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
4928 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
4929 if (*target_rt_dev_ptr
!= 0) {
4930 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
4931 tswapal(*target_rt_dev_ptr
));
4932 if (!*host_rt_dev_ptr
) {
4933 unlock_user(argptr
, arg
, 0);
4934 return -TARGET_EFAULT
;
4937 *host_rt_dev_ptr
= 0;
4942 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
4943 argptr
+ src_offsets
[i
],
4944 field_types
, THUNK_HOST
);
4946 unlock_user(argptr
, arg
, 0);
4948 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4950 assert(host_rt_dev_ptr
!= NULL
);
4951 assert(target_rt_dev_ptr
!= NULL
);
4952 if (*host_rt_dev_ptr
!= 0) {
4953 unlock_user((void *)*host_rt_dev_ptr
,
4954 *target_rt_dev_ptr
, 0);
4959 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4960 int fd
, int cmd
, abi_long arg
)
4962 int sig
= target_to_host_signal(arg
);
4963 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
4966 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4967 int fd
, int cmd
, abi_long arg
)
4972 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
4973 if (is_error(ret
)) {
4977 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
4978 if (copy_to_user_timeval(arg
, &tv
)) {
4979 return -TARGET_EFAULT
;
4982 if (copy_to_user_timeval64(arg
, &tv
)) {
4983 return -TARGET_EFAULT
;
4990 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4991 int fd
, int cmd
, abi_long arg
)
4996 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
4997 if (is_error(ret
)) {
5001 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5002 if (host_to_target_timespec(arg
, &ts
)) {
5003 return -TARGET_EFAULT
;
5006 if (host_to_target_timespec64(arg
, &ts
)) {
5007 return -TARGET_EFAULT
;
5015 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5016 int fd
, int cmd
, abi_long arg
)
5018 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5019 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5023 static IOCTLEntry ioctl_entries
[] = {
5024 #define IOCTL(cmd, access, ...) \
5025 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5026 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5027 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5028 #define IOCTL_IGNORE(cmd) \
5029 { TARGET_ ## cmd, 0, #cmd },
5034 /* ??? Implement proper locking for ioctls. */
5035 /* do_ioctl() Must return target values and target errnos. */
5036 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5038 const IOCTLEntry
*ie
;
5039 const argtype
*arg_type
;
5041 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5047 if (ie
->target_cmd
== 0) {
5048 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5049 return -TARGET_ENOSYS
;
5051 if (ie
->target_cmd
== cmd
)
5055 arg_type
= ie
->arg_type
;
5057 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5058 } else if (!ie
->host_cmd
) {
5059 /* Some architectures define BSD ioctls in their headers
5060 that are not implemented in Linux. */
5061 return -TARGET_ENOSYS
;
5064 switch(arg_type
[0]) {
5067 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5071 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5075 target_size
= thunk_type_size(arg_type
, 0);
5076 switch(ie
->access
) {
5078 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5079 if (!is_error(ret
)) {
5080 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5082 return -TARGET_EFAULT
;
5083 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5084 unlock_user(argptr
, arg
, target_size
);
5088 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5090 return -TARGET_EFAULT
;
5091 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5092 unlock_user(argptr
, arg
, 0);
5093 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5097 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5099 return -TARGET_EFAULT
;
5100 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5101 unlock_user(argptr
, arg
, 0);
5102 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5103 if (!is_error(ret
)) {
5104 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5106 return -TARGET_EFAULT
;
5107 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5108 unlock_user(argptr
, arg
, target_size
);
5114 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5115 (long)cmd
, arg_type
[0]);
5116 ret
= -TARGET_ENOSYS
;
5122 static const bitmask_transtbl iflag_tbl
[] = {
5123 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5124 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5125 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5126 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5127 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5128 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5129 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5130 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5131 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5132 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5133 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5134 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5135 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5136 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5140 static const bitmask_transtbl oflag_tbl
[] = {
5141 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5142 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5143 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5144 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5145 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5146 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5147 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5148 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5149 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5150 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5151 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5152 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5153 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5154 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5155 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5156 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5157 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5158 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5159 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5160 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5161 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5162 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5163 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5164 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5168 static const bitmask_transtbl cflag_tbl
[] = {
5169 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5170 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5171 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5172 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5173 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5174 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5175 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5176 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5177 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5178 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5179 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5180 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5181 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5182 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5183 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5184 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5185 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5186 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5187 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5188 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5189 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5190 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5191 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5192 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5193 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5194 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5195 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5196 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5197 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5198 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5199 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5203 static const bitmask_transtbl lflag_tbl
[] = {
5204 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5205 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5206 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5207 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5208 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5209 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5210 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5211 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5212 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5213 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5214 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5215 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5216 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5217 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5218 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5222 static void target_to_host_termios (void *dst
, const void *src
)
5224 struct host_termios
*host
= dst
;
5225 const struct target_termios
*target
= src
;
5228 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5230 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5232 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5234 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5235 host
->c_line
= target
->c_line
;
5237 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5238 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5239 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5240 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5241 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5242 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5243 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5244 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5245 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5246 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5247 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5248 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5249 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5250 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5251 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5252 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5253 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5254 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5257 static void host_to_target_termios (void *dst
, const void *src
)
5259 struct target_termios
*target
= dst
;
5260 const struct host_termios
*host
= src
;
5263 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5265 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5267 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5269 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5270 target
->c_line
= host
->c_line
;
5272 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5273 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5274 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5275 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5276 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5277 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5278 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5279 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5280 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5281 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5282 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5283 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5284 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5285 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5286 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5287 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5288 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5289 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5292 static const StructEntry struct_termios_def
= {
5293 .convert
= { host_to_target_termios
, target_to_host_termios
},
5294 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5295 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5298 static bitmask_transtbl mmap_flags_tbl
[] = {
5299 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5300 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5301 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5302 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5303 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5304 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5305 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5306 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5307 MAP_DENYWRITE
, MAP_DENYWRITE
},
5308 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5309 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5310 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5311 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5312 MAP_NORESERVE
, MAP_NORESERVE
},
5313 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5314 /* MAP_STACK had been ignored by the kernel for quite some time.
5315 Recognize it for the target insofar as we do not want to pass
5316 it through to the host. */
5317 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5321 #if defined(TARGET_I386)
5323 /* NOTE: there is really one LDT for all the threads */
5324 static uint8_t *ldt_table
;
5326 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5333 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5334 if (size
> bytecount
)
5336 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5338 return -TARGET_EFAULT
;
5339 /* ??? Should this by byteswapped? */
5340 memcpy(p
, ldt_table
, size
);
5341 unlock_user(p
, ptr
, size
);
5345 /* XXX: add locking support */
5346 static abi_long
write_ldt(CPUX86State
*env
,
5347 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5349 struct target_modify_ldt_ldt_s ldt_info
;
5350 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5351 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5352 int seg_not_present
, useable
, lm
;
5353 uint32_t *lp
, entry_1
, entry_2
;
5355 if (bytecount
!= sizeof(ldt_info
))
5356 return -TARGET_EINVAL
;
5357 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5358 return -TARGET_EFAULT
;
5359 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5360 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5361 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5362 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5363 unlock_user_struct(target_ldt_info
, ptr
, 0);
5365 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5366 return -TARGET_EINVAL
;
5367 seg_32bit
= ldt_info
.flags
& 1;
5368 contents
= (ldt_info
.flags
>> 1) & 3;
5369 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5370 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5371 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5372 useable
= (ldt_info
.flags
>> 6) & 1;
5376 lm
= (ldt_info
.flags
>> 7) & 1;
5378 if (contents
== 3) {
5380 return -TARGET_EINVAL
;
5381 if (seg_not_present
== 0)
5382 return -TARGET_EINVAL
;
5384 /* allocate the LDT */
5386 env
->ldt
.base
= target_mmap(0,
5387 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5388 PROT_READ
|PROT_WRITE
,
5389 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5390 if (env
->ldt
.base
== -1)
5391 return -TARGET_ENOMEM
;
5392 memset(g2h(env
->ldt
.base
), 0,
5393 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5394 env
->ldt
.limit
= 0xffff;
5395 ldt_table
= g2h(env
->ldt
.base
);
5398 /* NOTE: same code as Linux kernel */
5399 /* Allow LDTs to be cleared by the user. */
5400 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5403 read_exec_only
== 1 &&
5405 limit_in_pages
== 0 &&
5406 seg_not_present
== 1 &&
5414 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5415 (ldt_info
.limit
& 0x0ffff);
5416 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5417 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5418 (ldt_info
.limit
& 0xf0000) |
5419 ((read_exec_only
^ 1) << 9) |
5421 ((seg_not_present
^ 1) << 15) |
5423 (limit_in_pages
<< 23) |
5427 entry_2
|= (useable
<< 20);
5429 /* Install the new entry ... */
5431 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5432 lp
[0] = tswap32(entry_1
);
5433 lp
[1] = tswap32(entry_2
);
5437 /* specific and weird i386 syscalls */
5438 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5439 unsigned long bytecount
)
5445 ret
= read_ldt(ptr
, bytecount
);
5448 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5451 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5454 ret
= -TARGET_ENOSYS
;
5460 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5461 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5463 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5464 struct target_modify_ldt_ldt_s ldt_info
;
5465 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5466 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5467 int seg_not_present
, useable
, lm
;
5468 uint32_t *lp
, entry_1
, entry_2
;
5471 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5472 if (!target_ldt_info
)
5473 return -TARGET_EFAULT
;
5474 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5475 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5476 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5477 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5478 if (ldt_info
.entry_number
== -1) {
5479 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5480 if (gdt_table
[i
] == 0) {
5481 ldt_info
.entry_number
= i
;
5482 target_ldt_info
->entry_number
= tswap32(i
);
5487 unlock_user_struct(target_ldt_info
, ptr
, 1);
5489 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5490 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5491 return -TARGET_EINVAL
;
5492 seg_32bit
= ldt_info
.flags
& 1;
5493 contents
= (ldt_info
.flags
>> 1) & 3;
5494 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5495 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5496 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5497 useable
= (ldt_info
.flags
>> 6) & 1;
5501 lm
= (ldt_info
.flags
>> 7) & 1;
5504 if (contents
== 3) {
5505 if (seg_not_present
== 0)
5506 return -TARGET_EINVAL
;
5509 /* NOTE: same code as Linux kernel */
5510 /* Allow LDTs to be cleared by the user. */
5511 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5512 if ((contents
== 0 &&
5513 read_exec_only
== 1 &&
5515 limit_in_pages
== 0 &&
5516 seg_not_present
== 1 &&
5524 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5525 (ldt_info
.limit
& 0x0ffff);
5526 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5527 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5528 (ldt_info
.limit
& 0xf0000) |
5529 ((read_exec_only
^ 1) << 9) |
5531 ((seg_not_present
^ 1) << 15) |
5533 (limit_in_pages
<< 23) |
5538 /* Install the new entry ... */
5540 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5541 lp
[0] = tswap32(entry_1
);
5542 lp
[1] = tswap32(entry_2
);
5546 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5548 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5549 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5550 uint32_t base_addr
, limit
, flags
;
5551 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5552 int seg_not_present
, useable
, lm
;
5553 uint32_t *lp
, entry_1
, entry_2
;
5555 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5556 if (!target_ldt_info
)
5557 return -TARGET_EFAULT
;
5558 idx
= tswap32(target_ldt_info
->entry_number
);
5559 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5560 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5561 unlock_user_struct(target_ldt_info
, ptr
, 1);
5562 return -TARGET_EINVAL
;
5564 lp
= (uint32_t *)(gdt_table
+ idx
);
5565 entry_1
= tswap32(lp
[0]);
5566 entry_2
= tswap32(lp
[1]);
5568 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5569 contents
= (entry_2
>> 10) & 3;
5570 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5571 seg_32bit
= (entry_2
>> 22) & 1;
5572 limit_in_pages
= (entry_2
>> 23) & 1;
5573 useable
= (entry_2
>> 20) & 1;
5577 lm
= (entry_2
>> 21) & 1;
5579 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5580 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5581 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5582 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5583 base_addr
= (entry_1
>> 16) |
5584 (entry_2
& 0xff000000) |
5585 ((entry_2
& 0xff) << 16);
5586 target_ldt_info
->base_addr
= tswapal(base_addr
);
5587 target_ldt_info
->limit
= tswap32(limit
);
5588 target_ldt_info
->flags
= tswap32(flags
);
5589 unlock_user_struct(target_ldt_info
, ptr
, 1);
5592 #endif /* TARGET_I386 && TARGET_ABI32 */
5594 #ifndef TARGET_ABI32
5595 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5602 case TARGET_ARCH_SET_GS
:
5603 case TARGET_ARCH_SET_FS
:
5604 if (code
== TARGET_ARCH_SET_GS
)
5608 cpu_x86_load_seg(env
, idx
, 0);
5609 env
->segs
[idx
].base
= addr
;
5611 case TARGET_ARCH_GET_GS
:
5612 case TARGET_ARCH_GET_FS
:
5613 if (code
== TARGET_ARCH_GET_GS
)
5617 val
= env
->segs
[idx
].base
;
5618 if (put_user(val
, addr
, abi_ulong
))
5619 ret
= -TARGET_EFAULT
;
5622 ret
= -TARGET_EINVAL
;
5629 #endif /* defined(TARGET_I386) */
5631 #define NEW_STACK_SIZE 0x40000
5634 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5637 pthread_mutex_t mutex
;
5638 pthread_cond_t cond
;
5641 abi_ulong child_tidptr
;
5642 abi_ulong parent_tidptr
;
5646 static void *clone_func(void *arg
)
5648 new_thread_info
*info
= arg
;
5653 rcu_register_thread();
5654 tcg_register_thread();
5658 ts
= (TaskState
*)cpu
->opaque
;
5659 info
->tid
= sys_gettid();
5661 if (info
->child_tidptr
)
5662 put_user_u32(info
->tid
, info
->child_tidptr
);
5663 if (info
->parent_tidptr
)
5664 put_user_u32(info
->tid
, info
->parent_tidptr
);
5665 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
5666 /* Enable signals. */
5667 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5668 /* Signal to the parent that we're ready. */
5669 pthread_mutex_lock(&info
->mutex
);
5670 pthread_cond_broadcast(&info
->cond
);
5671 pthread_mutex_unlock(&info
->mutex
);
5672 /* Wait until the parent has finished initializing the tls state. */
5673 pthread_mutex_lock(&clone_lock
);
5674 pthread_mutex_unlock(&clone_lock
);
5680 /* do_fork() Must return host values and target errnos (unlike most
5681 do_*() functions). */
5682 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5683 abi_ulong parent_tidptr
, target_ulong newtls
,
5684 abi_ulong child_tidptr
)
5686 CPUState
*cpu
= env_cpu(env
);
5690 CPUArchState
*new_env
;
5693 flags
&= ~CLONE_IGNORED_FLAGS
;
5695 /* Emulate vfork() with fork() */
5696 if (flags
& CLONE_VFORK
)
5697 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5699 if (flags
& CLONE_VM
) {
5700 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5701 new_thread_info info
;
5702 pthread_attr_t attr
;
5704 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
5705 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
5706 return -TARGET_EINVAL
;
5709 ts
= g_new0(TaskState
, 1);
5710 init_task_state(ts
);
5712 /* Grab a mutex so that thread setup appears atomic. */
5713 pthread_mutex_lock(&clone_lock
);
5715 /* we create a new CPU instance. */
5716 new_env
= cpu_copy(env
);
5717 /* Init regs that differ from the parent. */
5718 cpu_clone_regs(new_env
, newsp
);
5719 new_cpu
= env_cpu(new_env
);
5720 new_cpu
->opaque
= ts
;
5721 ts
->bprm
= parent_ts
->bprm
;
5722 ts
->info
= parent_ts
->info
;
5723 ts
->signal_mask
= parent_ts
->signal_mask
;
5725 if (flags
& CLONE_CHILD_CLEARTID
) {
5726 ts
->child_tidptr
= child_tidptr
;
5729 if (flags
& CLONE_SETTLS
) {
5730 cpu_set_tls (new_env
, newtls
);
5733 memset(&info
, 0, sizeof(info
));
5734 pthread_mutex_init(&info
.mutex
, NULL
);
5735 pthread_mutex_lock(&info
.mutex
);
5736 pthread_cond_init(&info
.cond
, NULL
);
5738 if (flags
& CLONE_CHILD_SETTID
) {
5739 info
.child_tidptr
= child_tidptr
;
5741 if (flags
& CLONE_PARENT_SETTID
) {
5742 info
.parent_tidptr
= parent_tidptr
;
5745 ret
= pthread_attr_init(&attr
);
5746 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
5747 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
5748 /* It is not safe to deliver signals until the child has finished
5749 initializing, so temporarily block all signals. */
5750 sigfillset(&sigmask
);
5751 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
5752 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
5754 /* If this is our first additional thread, we need to ensure we
5755 * generate code for parallel execution and flush old translations.
5757 if (!parallel_cpus
) {
5758 parallel_cpus
= true;
5762 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
5763 /* TODO: Free new CPU state if thread creation failed. */
5765 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
5766 pthread_attr_destroy(&attr
);
5768 /* Wait for the child to initialize. */
5769 pthread_cond_wait(&info
.cond
, &info
.mutex
);
5774 pthread_mutex_unlock(&info
.mutex
);
5775 pthread_cond_destroy(&info
.cond
);
5776 pthread_mutex_destroy(&info
.mutex
);
5777 pthread_mutex_unlock(&clone_lock
);
5779 /* if no CLONE_VM, we consider it is a fork */
5780 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
5781 return -TARGET_EINVAL
;
5784 /* We can't support custom termination signals */
5785 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
5786 return -TARGET_EINVAL
;
5789 if (block_signals()) {
5790 return -TARGET_ERESTARTSYS
;
5796 /* Child Process. */
5797 cpu_clone_regs(env
, newsp
);
5799 /* There is a race condition here. The parent process could
5800 theoretically read the TID in the child process before the child
5801 tid is set. This would require using either ptrace
5802 (not implemented) or having *_tidptr to point at a shared memory
5803 mapping. We can't repeat the spinlock hack used above because
5804 the child process gets its own copy of the lock. */
5805 if (flags
& CLONE_CHILD_SETTID
)
5806 put_user_u32(sys_gettid(), child_tidptr
);
5807 if (flags
& CLONE_PARENT_SETTID
)
5808 put_user_u32(sys_gettid(), parent_tidptr
);
5809 ts
= (TaskState
*)cpu
->opaque
;
5810 if (flags
& CLONE_SETTLS
)
5811 cpu_set_tls (env
, newtls
);
5812 if (flags
& CLONE_CHILD_CLEARTID
)
5813 ts
->child_tidptr
= child_tidptr
;
5821 /* warning : doesn't handle linux specific flags... */
5822 static int target_to_host_fcntl_cmd(int cmd
)
5827 case TARGET_F_DUPFD
:
5828 case TARGET_F_GETFD
:
5829 case TARGET_F_SETFD
:
5830 case TARGET_F_GETFL
:
5831 case TARGET_F_SETFL
:
5834 case TARGET_F_GETLK
:
5837 case TARGET_F_SETLK
:
5840 case TARGET_F_SETLKW
:
5843 case TARGET_F_GETOWN
:
5846 case TARGET_F_SETOWN
:
5849 case TARGET_F_GETSIG
:
5852 case TARGET_F_SETSIG
:
5855 #if TARGET_ABI_BITS == 32
5856 case TARGET_F_GETLK64
:
5859 case TARGET_F_SETLK64
:
5862 case TARGET_F_SETLKW64
:
5866 case TARGET_F_SETLEASE
:
5869 case TARGET_F_GETLEASE
:
5872 #ifdef F_DUPFD_CLOEXEC
5873 case TARGET_F_DUPFD_CLOEXEC
:
5874 ret
= F_DUPFD_CLOEXEC
;
5877 case TARGET_F_NOTIFY
:
5881 case TARGET_F_GETOWN_EX
:
5886 case TARGET_F_SETOWN_EX
:
5891 case TARGET_F_SETPIPE_SZ
:
5894 case TARGET_F_GETPIPE_SZ
:
5899 ret
= -TARGET_EINVAL
;
5903 #if defined(__powerpc64__)
5904 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5905 * is not supported by kernel. The glibc fcntl call actually adjusts
5906 * them to 5, 6 and 7 before making the syscall(). Since we make the
5907 * syscall directly, adjust to what is supported by the kernel.
5909 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
5910 ret
-= F_GETLK64
- 5;
5917 #define FLOCK_TRANSTBL \
5919 TRANSTBL_CONVERT(F_RDLCK); \
5920 TRANSTBL_CONVERT(F_WRLCK); \
5921 TRANSTBL_CONVERT(F_UNLCK); \
5922 TRANSTBL_CONVERT(F_EXLCK); \
5923 TRANSTBL_CONVERT(F_SHLCK); \
5926 static int target_to_host_flock(int type
)
5928 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5930 #undef TRANSTBL_CONVERT
5931 return -TARGET_EINVAL
;
5934 static int host_to_target_flock(int type
)
5936 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5938 #undef TRANSTBL_CONVERT
5939 /* if we don't know how to convert the value coming
5940 * from the host we copy to the target field as-is
5945 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
5946 abi_ulong target_flock_addr
)
5948 struct target_flock
*target_fl
;
5951 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
5952 return -TARGET_EFAULT
;
5955 __get_user(l_type
, &target_fl
->l_type
);
5956 l_type
= target_to_host_flock(l_type
);
5960 fl
->l_type
= l_type
;
5961 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
5962 __get_user(fl
->l_start
, &target_fl
->l_start
);
5963 __get_user(fl
->l_len
, &target_fl
->l_len
);
5964 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
5965 unlock_user_struct(target_fl
, target_flock_addr
, 0);
5969 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
5970 const struct flock64
*fl
)
5972 struct target_flock
*target_fl
;
5975 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
5976 return -TARGET_EFAULT
;
5979 l_type
= host_to_target_flock(fl
->l_type
);
5980 __put_user(l_type
, &target_fl
->l_type
);
5981 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
5982 __put_user(fl
->l_start
, &target_fl
->l_start
);
5983 __put_user(fl
->l_len
, &target_fl
->l_len
);
5984 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
5985 unlock_user_struct(target_fl
, target_flock_addr
, 1);
5989 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
5990 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM OABI uses a differently-packed flock64 layout; these two helpers
 * mirror copy_{from,to}_user_flock64 for that layout.
 * NOTE(review): l_type declarations, error checks, returns and the
 * closing #endif were elided by the mangling; restored from upstream.
 */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6038 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6039 abi_ulong target_flock_addr
)
6041 struct target_flock64
*target_fl
;
6044 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6045 return -TARGET_EFAULT
;
6048 __get_user(l_type
, &target_fl
->l_type
);
6049 l_type
= target_to_host_flock(l_type
);
6053 fl
->l_type
= l_type
;
6054 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6055 __get_user(fl
->l_start
, &target_fl
->l_start
);
6056 __get_user(fl
->l_len
, &target_fl
->l_len
);
6057 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6058 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6062 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6063 const struct flock64
*fl
)
6065 struct target_flock64
*target_fl
;
6068 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6069 return -TARGET_EFAULT
;
6072 l_type
= host_to_target_flock(fl
->l_type
);
6073 __put_user(l_type
, &target_fl
->l_type
);
6074 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6075 __put_user(fl
->l_start
, &target_fl
->l_start
);
6076 __put_user(fl
->l_len
, &target_fl
->l_len
);
6077 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6078 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6082 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6084 struct flock64 fl64
;
6086 struct f_owner_ex fox
;
6087 struct target_f_owner_ex
*target_fox
;
6090 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6092 if (host_cmd
== -TARGET_EINVAL
)
6096 case TARGET_F_GETLK
:
6097 ret
= copy_from_user_flock(&fl64
, arg
);
6101 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6103 ret
= copy_to_user_flock(arg
, &fl64
);
6107 case TARGET_F_SETLK
:
6108 case TARGET_F_SETLKW
:
6109 ret
= copy_from_user_flock(&fl64
, arg
);
6113 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6116 case TARGET_F_GETLK64
:
6117 ret
= copy_from_user_flock64(&fl64
, arg
);
6121 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6123 ret
= copy_to_user_flock64(arg
, &fl64
);
6126 case TARGET_F_SETLK64
:
6127 case TARGET_F_SETLKW64
:
6128 ret
= copy_from_user_flock64(&fl64
, arg
);
6132 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6135 case TARGET_F_GETFL
:
6136 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6138 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6142 case TARGET_F_SETFL
:
6143 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6144 target_to_host_bitmask(arg
,
6149 case TARGET_F_GETOWN_EX
:
6150 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6152 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6153 return -TARGET_EFAULT
;
6154 target_fox
->type
= tswap32(fox
.type
);
6155 target_fox
->pid
= tswap32(fox
.pid
);
6156 unlock_user_struct(target_fox
, arg
, 1);
6162 case TARGET_F_SETOWN_EX
:
6163 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6164 return -TARGET_EFAULT
;
6165 fox
.type
= tswap32(target_fox
->type
);
6166 fox
.pid
= tswap32(target_fox
->pid
);
6167 unlock_user_struct(target_fox
, arg
, 0);
6168 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6172 case TARGET_F_SETOWN
:
6173 case TARGET_F_GETOWN
:
6174 case TARGET_F_SETSIG
:
6175 case TARGET_F_GETSIG
:
6176 case TARGET_F_SETLEASE
:
6177 case TARGET_F_GETLEASE
:
6178 case TARGET_F_SETPIPE_SZ
:
6179 case TARGET_F_GETPIPE_SZ
:
6180 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6184 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6192 static inline int high2lowuid(int uid
)
6200 static inline int high2lowgid(int gid
)
6208 static inline int low2highuid(int uid
)
6210 if ((int16_t)uid
== -1)
6216 static inline int low2highgid(int gid
)
6218 if ((int16_t)gid
== -1)
6223 static inline int tswapid(int id
)
6228 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6230 #else /* !USE_UID16 */
6231 static inline int high2lowuid(int uid
)
6235 static inline int high2lowgid(int gid
)
6239 static inline int low2highuid(int uid
)
6243 static inline int low2highgid(int gid
)
6247 static inline int tswapid(int id
)
6252 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6254 #endif /* USE_UID16 */
6256 /* We must do direct syscalls for setting UID/GID, because we want to
6257 * implement the Linux system call semantics of "change only for this thread",
6258 * not the libc/POSIX semantics of "change for all threads in process".
6259 * (See http://ewontfix.com/17/ for more details.)
6260 * We use the 32-bit version of the syscalls if present; if it is not
6261 * then either the host architecture supports 32-bit UIDs natively with
6262 * the standard syscall, or the 16-bit UID is the best we can do.
6264 #ifdef __NR_setuid32
6265 #define __NR_sys_setuid __NR_setuid32
6267 #define __NR_sys_setuid __NR_setuid
6269 #ifdef __NR_setgid32
6270 #define __NR_sys_setgid __NR_setgid32
6272 #define __NR_sys_setgid __NR_setgid
6274 #ifdef __NR_setresuid32
6275 #define __NR_sys_setresuid __NR_setresuid32
6277 #define __NR_sys_setresuid __NR_setresuid
6279 #ifdef __NR_setresgid32
6280 #define __NR_sys_setresgid __NR_setresgid32
6282 #define __NR_sys_setresgid __NR_setresgid
6285 _syscall1(int, sys_setuid
, uid_t
, uid
)
6286 _syscall1(int, sys_setgid
, gid_t
, gid
)
6287 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6288 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6290 void syscall_init(void)
6293 const argtype
*arg_type
;
6297 thunk_init(STRUCT_MAX
);
6299 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6300 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6301 #include "syscall_types.h"
6303 #undef STRUCT_SPECIAL
6305 /* Build target_to_host_errno_table[] table from
6306 * host_to_target_errno_table[]. */
6307 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6308 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6311 /* we patch the ioctl size if necessary. We rely on the fact that
6312 no ioctl has all the bits at '1' in the size field */
6314 while (ie
->target_cmd
!= 0) {
6315 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6316 TARGET_IOC_SIZEMASK
) {
6317 arg_type
= ie
->arg_type
;
6318 if (arg_type
[0] != TYPE_PTR
) {
6319 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6324 size
= thunk_type_size(arg_type
, 0);
6325 ie
->target_cmd
= (ie
->target_cmd
&
6326 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6327 (size
<< TARGET_IOC_SIZESHIFT
);
6330 /* automatic consistency check if same arch */
6331 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6332 (defined(__x86_64__) && defined(TARGET_X86_64))
6333 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6334 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6335 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6342 #if TARGET_ABI_BITS == 32
6343 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6345 #ifdef TARGET_WORDS_BIGENDIAN
6346 return ((uint64_t)word0
<< 32) | word1
;
6348 return ((uint64_t)word1
<< 32) | word0
;
6351 #else /* TARGET_ABI_BITS == 32 */
6352 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6356 #endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Emulate truncate64: some ABIs align 64-bit register pairs, shifting
 * the offset words up one argument slot.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Emulate ftruncate64; same register-pair alignment handling as
 * target_truncate64() above.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6386 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6387 abi_ulong target_addr
)
6389 struct target_itimerspec
*target_itspec
;
6391 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6392 return -TARGET_EFAULT
;
6395 host_itspec
->it_interval
.tv_sec
=
6396 tswapal(target_itspec
->it_interval
.tv_sec
);
6397 host_itspec
->it_interval
.tv_nsec
=
6398 tswapal(target_itspec
->it_interval
.tv_nsec
);
6399 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6400 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6402 unlock_user_struct(target_itspec
, target_addr
, 1);
6406 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6407 struct itimerspec
*host_its
)
6409 struct target_itimerspec
*target_itspec
;
6411 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6412 return -TARGET_EFAULT
;
6415 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6416 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6418 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6419 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6421 unlock_user_struct(target_itspec
, target_addr
, 0);
6425 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6426 abi_long target_addr
)
6428 struct target_timex
*target_tx
;
6430 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6431 return -TARGET_EFAULT
;
6434 __get_user(host_tx
->modes
, &target_tx
->modes
);
6435 __get_user(host_tx
->offset
, &target_tx
->offset
);
6436 __get_user(host_tx
->freq
, &target_tx
->freq
);
6437 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6438 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6439 __get_user(host_tx
->status
, &target_tx
->status
);
6440 __get_user(host_tx
->constant
, &target_tx
->constant
);
6441 __get_user(host_tx
->precision
, &target_tx
->precision
);
6442 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6443 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6444 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6445 __get_user(host_tx
->tick
, &target_tx
->tick
);
6446 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6447 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6448 __get_user(host_tx
->shift
, &target_tx
->shift
);
6449 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6450 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6451 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6452 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6453 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6454 __get_user(host_tx
->tai
, &target_tx
->tai
);
6456 unlock_user_struct(target_tx
, target_addr
, 0);
6460 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6461 struct timex
*host_tx
)
6463 struct target_timex
*target_tx
;
6465 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6466 return -TARGET_EFAULT
;
6469 __put_user(host_tx
->modes
, &target_tx
->modes
);
6470 __put_user(host_tx
->offset
, &target_tx
->offset
);
6471 __put_user(host_tx
->freq
, &target_tx
->freq
);
6472 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6473 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6474 __put_user(host_tx
->status
, &target_tx
->status
);
6475 __put_user(host_tx
->constant
, &target_tx
->constant
);
6476 __put_user(host_tx
->precision
, &target_tx
->precision
);
6477 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6478 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6479 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6480 __put_user(host_tx
->tick
, &target_tx
->tick
);
6481 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6482 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6483 __put_user(host_tx
->shift
, &target_tx
->shift
);
6484 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6485 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6486 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6487 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6488 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6489 __put_user(host_tx
->tai
, &target_tx
->tai
);
6491 unlock_user_struct(target_tx
, target_addr
, 1);
6496 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6497 abi_ulong target_addr
)
6499 struct target_sigevent
*target_sevp
;
6501 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6502 return -TARGET_EFAULT
;
6505 /* This union is awkward on 64 bit systems because it has a 32 bit
6506 * integer and a pointer in it; we follow the conversion approach
6507 * used for handling sigval types in signal.c so the guest should get
6508 * the correct value back even if we did a 64 bit byteswap and it's
6509 * using the 32 bit integer.
6511 host_sevp
->sigev_value
.sival_ptr
=
6512 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6513 host_sevp
->sigev_signo
=
6514 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6515 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6516 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6518 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits to the host's MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/* Write a host struct stat out to the guest's 64-bit stat layout,
 * handling the ARM EABI variant separately.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/* Write a statx result (host side already uses the target_statx layout)
 * out to guest memory, byte-swapping every field.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    /* Bug fix: btime/ctime/mtime were previously all written into
     * target_stx->stx_atime (copy-paste error), clobbering atime and
     * leaving the guest's btime/ctime/mtime zeroed.  Each timestamp now
     * goes to its own destination field.
     */
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
6656 /* ??? Using host futex calls even when target atomic operations
6657 are not really atomic probably breaks things. However implementing
6658 futexes locally would make futexes shared between multiple processes
6659 tricky. However they're probably useless because guest atomic
6660 operations won't work either. */
6661 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6662 target_ulong uaddr2
, int val3
)
6664 struct timespec ts
, *pts
;
6667 /* ??? We assume FUTEX_* constants are the same on both host
6669 #ifdef FUTEX_CMD_MASK
6670 base_op
= op
& FUTEX_CMD_MASK
;
6676 case FUTEX_WAIT_BITSET
:
6679 target_to_host_timespec(pts
, timeout
);
6683 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6686 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6688 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6690 case FUTEX_CMP_REQUEUE
:
6692 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6693 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6694 But the prototype takes a `struct timespec *'; insert casts
6695 to satisfy the compiler. We do not need to tswap TIMEOUT
6696 since it's not compared to guest memory. */
6697 pts
= (struct timespec
*)(uintptr_t) timeout
;
6698 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6700 (base_op
== FUTEX_CMP_REQUEUE
6704 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): marshal the variable-sized file_handle
 * between guest and host memory.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;
    struct file_handle *target_fh;
    struct file_handle *fh;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle in,
 * translate the open flags, and forward to the host.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* Emulate signalfd4(2): validate flags, convert the guest signal mask,
 * and register the resulting fd for siginfo translation.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
6840 static int open_self_cmdline(void *cpu_env
, int fd
)
6842 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6843 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
6846 for (i
= 0; i
< bprm
->argc
; i
++) {
6847 size_t len
= strlen(bprm
->argv
[i
]) + 1;
6849 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
6857 static int open_self_maps(void *cpu_env
, int fd
)
6859 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6860 TaskState
*ts
= cpu
->opaque
;
6866 fp
= fopen("/proc/self/maps", "r");
6871 while ((read
= getline(&line
, &len
, fp
)) != -1) {
6872 int fields
, dev_maj
, dev_min
, inode
;
6873 uint64_t min
, max
, offset
;
6874 char flag_r
, flag_w
, flag_x
, flag_p
;
6875 char path
[512] = "";
6876 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
6877 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
6878 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
6880 if ((fields
< 10) || (fields
> 11)) {
6883 if (h2g_valid(min
)) {
6884 int flags
= page_get_flags(h2g(min
));
6885 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
6886 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
6889 if (h2g(min
) == ts
->info
->stack_limit
) {
6890 pstrcpy(path
, sizeof(path
), " [stack]");
6892 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
6893 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
6894 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
6895 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
6896 path
[0] ? " " : "", path
);
6906 static int open_self_stat(void *cpu_env
, int fd
)
6908 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6909 TaskState
*ts
= cpu
->opaque
;
6910 abi_ulong start_stack
= ts
->info
->start_stack
;
6913 for (i
= 0; i
< 44; i
++) {
6921 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6922 } else if (i
== 1) {
6924 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
6925 } else if (i
== 27) {
6928 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
6930 /* for the rest, there is MasterCard */
6931 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
6935 if (write(fd
, buf
, len
) != len
) {
6943 static int open_self_auxv(void *cpu_env
, int fd
)
6945 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
6946 TaskState
*ts
= cpu
->opaque
;
6947 abi_ulong auxv
= ts
->info
->saved_auxv
;
6948 abi_ulong len
= ts
->info
->auxv_len
;
6952 * Auxiliary vector is stored in target process stack.
6953 * read in whole auxv vector and copy it to file
6955 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
6959 r
= write(fd
, ptr
, len
);
6966 lseek(fd
, 0, SEEK_SET
);
6967 unlock_user(ptr
, auxv
, len
);
/* Return nonzero when FILENAME names ENTRY inside this process's /proc
 * directory, i.e. "/proc/self/ENTRY" or "/proc/<own-pid>/ENTRY".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
6997 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
6998 defined(TARGET_SPARC) || defined(TARGET_M68K)
/* Exact-match predicate used for fake /proc entries with full paths. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Synthesise /proc/net/route for a guest whose endianness differs from
 * the host's: byte-swap the address/mask columns on each data row.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Minimal fake /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Minimal fake /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
7066 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7069 const char *filename
;
7070 int (*fill
)(void *cpu_env
, int fd
);
7071 int (*cmp
)(const char *s1
, const char *s2
);
7073 const struct fake_open
*fake_open
;
7074 static const struct fake_open fakes
[] = {
7075 { "maps", open_self_maps
, is_proc_myself
},
7076 { "stat", open_self_stat
, is_proc_myself
},
7077 { "auxv", open_self_auxv
, is_proc_myself
},
7078 { "cmdline", open_self_cmdline
, is_proc_myself
},
7079 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7080 { "/proc/net/route", open_net_route
, is_proc
},
7082 #if defined(TARGET_SPARC)
7083 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
7085 #if defined(TARGET_M68K)
7086 { "/proc/hardware", open_hardware
, is_proc
},
7088 { NULL
, NULL
, NULL
}
7091 if (is_proc_myself(pathname
, "exe")) {
7092 int execfd
= qemu_getauxval(AT_EXECFD
);
7093 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7096 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7097 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7102 if (fake_open
->filename
) {
7104 char filename
[PATH_MAX
];
7107 /* create temporary file to map stat to */
7108 tmpdir
= getenv("TMPDIR");
7111 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7112 fd
= mkstemp(filename
);
7118 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7124 lseek(fd
, 0, SEEK_SET
);
7129 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7132 #define TIMER_MAGIC 0x0caf0000
7133 #define TIMER_MAGIC_MASK 0xffff0000
7135 /* Convert QEMU provided timer ID back to internal 16bit index format */
7136 static target_timer_t
get_timer_id(abi_long arg
)
7138 target_timer_t timerid
= arg
;
7140 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7141 return -TARGET_EINVAL
;
7146 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7147 return -TARGET_EINVAL
;
7153 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7155 abi_ulong target_addr
,
7158 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7159 unsigned host_bits
= sizeof(*host_mask
) * 8;
7160 abi_ulong
*target_mask
;
7163 assert(host_size
>= target_size
);
7165 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7167 return -TARGET_EFAULT
;
7169 memset(host_mask
, 0, host_size
);
7171 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7172 unsigned bit
= i
* target_bits
;
7175 __get_user(val
, &target_mask
[i
]);
7176 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7177 if (val
& (1UL << j
)) {
7178 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7183 unlock_user(target_mask
, target_addr
, 0);
7187 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7189 abi_ulong target_addr
,
7192 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7193 unsigned host_bits
= sizeof(*host_mask
) * 8;
7194 abi_ulong
*target_mask
;
7197 assert(host_size
>= target_size
);
7199 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7201 return -TARGET_EFAULT
;
7204 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7205 unsigned bit
= i
* target_bits
;
7208 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7209 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7213 __put_user(val
, &target_mask
[i
]);
7216 unlock_user(target_mask
, target_addr
, target_size
);
7220 /* This is an internal helper for do_syscall so that it is easier
7221 * to have a single return point, so that actions, such as logging
7222 * of syscall results, can be performed.
7223 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7225 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
7226 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7227 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7230 CPUState
*cpu
= env_cpu(cpu_env
);
7232 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7233 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7234 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7235 || defined(TARGET_NR_statx)
7238 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7239 || defined(TARGET_NR_fstatfs)
7245 case TARGET_NR_exit
:
7246 /* In old applications this may be used to implement _exit(2).
7247 However in threaded applictions it is used for thread termination,
7248 and _exit_group is used for application termination.
7249 Do thread termination if we have more then one thread. */
7251 if (block_signals()) {
7252 return -TARGET_ERESTARTSYS
;
7257 if (CPU_NEXT(first_cpu
)) {
7260 /* Remove the CPU from the list. */
7261 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
7266 if (ts
->child_tidptr
) {
7267 put_user_u32(0, ts
->child_tidptr
);
7268 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7272 object_unref(OBJECT(cpu
));
7274 rcu_unregister_thread();
7279 preexit_cleanup(cpu_env
, arg1
);
7281 return 0; /* avoid warning */
7282 case TARGET_NR_read
:
7283 if (arg2
== 0 && arg3
== 0) {
7284 return get_errno(safe_read(arg1
, 0, 0));
7286 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7287 return -TARGET_EFAULT
;
7288 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7290 fd_trans_host_to_target_data(arg1
)) {
7291 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7293 unlock_user(p
, arg2
, ret
);
7296 case TARGET_NR_write
:
7297 if (arg2
== 0 && arg3
== 0) {
7298 return get_errno(safe_write(arg1
, 0, 0));
7300 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7301 return -TARGET_EFAULT
;
7302 if (fd_trans_target_to_host_data(arg1
)) {
7303 void *copy
= g_malloc(arg3
);
7304 memcpy(copy
, p
, arg3
);
7305 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7307 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7311 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7313 unlock_user(p
, arg2
, 0);
7316 #ifdef TARGET_NR_open
7317 case TARGET_NR_open
:
7318 if (!(p
= lock_user_string(arg1
)))
7319 return -TARGET_EFAULT
;
7320 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7321 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7323 fd_trans_unregister(ret
);
7324 unlock_user(p
, arg1
, 0);
7327 case TARGET_NR_openat
:
7328 if (!(p
= lock_user_string(arg2
)))
7329 return -TARGET_EFAULT
;
7330 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7331 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7333 fd_trans_unregister(ret
);
7334 unlock_user(p
, arg2
, 0);
7336 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7337 case TARGET_NR_name_to_handle_at
:
7338 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7341 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7342 case TARGET_NR_open_by_handle_at
:
7343 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7344 fd_trans_unregister(ret
);
7347 case TARGET_NR_close
:
7348 fd_trans_unregister(arg1
);
7349 return get_errno(close(arg1
));
7352 return do_brk(arg1
);
7353 #ifdef TARGET_NR_fork
7354 case TARGET_NR_fork
:
7355 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7357 #ifdef TARGET_NR_waitpid
7358 case TARGET_NR_waitpid
:
7361 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7362 if (!is_error(ret
) && arg2
&& ret
7363 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7364 return -TARGET_EFAULT
;
7368 #ifdef TARGET_NR_waitid
7369 case TARGET_NR_waitid
:
7373 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7374 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7375 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7376 return -TARGET_EFAULT
;
7377 host_to_target_siginfo(p
, &info
);
7378 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7383 #ifdef TARGET_NR_creat /* not on alpha */
7384 case TARGET_NR_creat
:
7385 if (!(p
= lock_user_string(arg1
)))
7386 return -TARGET_EFAULT
;
7387 ret
= get_errno(creat(p
, arg2
));
7388 fd_trans_unregister(ret
);
7389 unlock_user(p
, arg1
, 0);
7392 #ifdef TARGET_NR_link
7393 case TARGET_NR_link
:
7396 p
= lock_user_string(arg1
);
7397 p2
= lock_user_string(arg2
);
7399 ret
= -TARGET_EFAULT
;
7401 ret
= get_errno(link(p
, p2
));
7402 unlock_user(p2
, arg2
, 0);
7403 unlock_user(p
, arg1
, 0);
7407 #if defined(TARGET_NR_linkat)
7408 case TARGET_NR_linkat
:
7412 return -TARGET_EFAULT
;
7413 p
= lock_user_string(arg2
);
7414 p2
= lock_user_string(arg4
);
7416 ret
= -TARGET_EFAULT
;
7418 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7419 unlock_user(p
, arg2
, 0);
7420 unlock_user(p2
, arg4
, 0);
7424 #ifdef TARGET_NR_unlink
7425 case TARGET_NR_unlink
:
7426 if (!(p
= lock_user_string(arg1
)))
7427 return -TARGET_EFAULT
;
7428 ret
= get_errno(unlink(p
));
7429 unlock_user(p
, arg1
, 0);
7432 #if defined(TARGET_NR_unlinkat)
7433 case TARGET_NR_unlinkat
:
7434 if (!(p
= lock_user_string(arg2
)))
7435 return -TARGET_EFAULT
;
7436 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7437 unlock_user(p
, arg2
, 0);
7440 case TARGET_NR_execve
:
7442 char **argp
, **envp
;
7445 abi_ulong guest_argp
;
7446 abi_ulong guest_envp
;
7453 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7454 if (get_user_ual(addr
, gp
))
7455 return -TARGET_EFAULT
;
7462 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7463 if (get_user_ual(addr
, gp
))
7464 return -TARGET_EFAULT
;
7470 argp
= g_new0(char *, argc
+ 1);
7471 envp
= g_new0(char *, envc
+ 1);
7473 for (gp
= guest_argp
, q
= argp
; gp
;
7474 gp
+= sizeof(abi_ulong
), q
++) {
7475 if (get_user_ual(addr
, gp
))
7479 if (!(*q
= lock_user_string(addr
)))
7481 total_size
+= strlen(*q
) + 1;
7485 for (gp
= guest_envp
, q
= envp
; gp
;
7486 gp
+= sizeof(abi_ulong
), q
++) {
7487 if (get_user_ual(addr
, gp
))
7491 if (!(*q
= lock_user_string(addr
)))
7493 total_size
+= strlen(*q
) + 1;
7497 if (!(p
= lock_user_string(arg1
)))
7499 /* Although execve() is not an interruptible syscall it is
7500 * a special case where we must use the safe_syscall wrapper:
7501 * if we allow a signal to happen before we make the host
7502 * syscall then we will 'lose' it, because at the point of
7503 * execve the process leaves QEMU's control. So we use the
7504 * safe syscall wrapper to ensure that we either take the
7505 * signal as a guest signal, or else it does not happen
7506 * before the execve completes and makes it the other
7507 * program's problem.
7509 ret
= get_errno(safe_execve(p
, argp
, envp
));
7510 unlock_user(p
, arg1
, 0);
7515 ret
= -TARGET_EFAULT
;
7518 for (gp
= guest_argp
, q
= argp
; *q
;
7519 gp
+= sizeof(abi_ulong
), q
++) {
7520 if (get_user_ual(addr
, gp
)
7523 unlock_user(*q
, addr
, 0);
7525 for (gp
= guest_envp
, q
= envp
; *q
;
7526 gp
+= sizeof(abi_ulong
), q
++) {
7527 if (get_user_ual(addr
, gp
)
7530 unlock_user(*q
, addr
, 0);
7537 case TARGET_NR_chdir
:
7538 if (!(p
= lock_user_string(arg1
)))
7539 return -TARGET_EFAULT
;
7540 ret
= get_errno(chdir(p
));
7541 unlock_user(p
, arg1
, 0);
7543 #ifdef TARGET_NR_time
7544 case TARGET_NR_time
:
7547 ret
= get_errno(time(&host_time
));
7550 && put_user_sal(host_time
, arg1
))
7551 return -TARGET_EFAULT
;
7555 #ifdef TARGET_NR_mknod
7556 case TARGET_NR_mknod
:
7557 if (!(p
= lock_user_string(arg1
)))
7558 return -TARGET_EFAULT
;
7559 ret
= get_errno(mknod(p
, arg2
, arg3
));
7560 unlock_user(p
, arg1
, 0);
7563 #if defined(TARGET_NR_mknodat)
7564 case TARGET_NR_mknodat
:
7565 if (!(p
= lock_user_string(arg2
)))
7566 return -TARGET_EFAULT
;
7567 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7568 unlock_user(p
, arg2
, 0);
7571 #ifdef TARGET_NR_chmod
7572 case TARGET_NR_chmod
:
7573 if (!(p
= lock_user_string(arg1
)))
7574 return -TARGET_EFAULT
;
7575 ret
= get_errno(chmod(p
, arg2
));
7576 unlock_user(p
, arg1
, 0);
7579 #ifdef TARGET_NR_lseek
7580 case TARGET_NR_lseek
:
7581 return get_errno(lseek(arg1
, arg2
, arg3
));
7583 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7584 /* Alpha specific */
7585 case TARGET_NR_getxpid
:
7586 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7587 return get_errno(getpid());
7589 #ifdef TARGET_NR_getpid
7590 case TARGET_NR_getpid
:
7591 return get_errno(getpid());
7593 case TARGET_NR_mount
:
7595 /* need to look at the data field */
7599 p
= lock_user_string(arg1
);
7601 return -TARGET_EFAULT
;
7607 p2
= lock_user_string(arg2
);
7610 unlock_user(p
, arg1
, 0);
7612 return -TARGET_EFAULT
;
7616 p3
= lock_user_string(arg3
);
7619 unlock_user(p
, arg1
, 0);
7621 unlock_user(p2
, arg2
, 0);
7622 return -TARGET_EFAULT
;
7628 /* FIXME - arg5 should be locked, but it isn't clear how to
7629 * do that since it's not guaranteed to be a NULL-terminated
7633 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7635 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7637 ret
= get_errno(ret
);
7640 unlock_user(p
, arg1
, 0);
7642 unlock_user(p2
, arg2
, 0);
7644 unlock_user(p3
, arg3
, 0);
7648 #ifdef TARGET_NR_umount
7649 case TARGET_NR_umount
:
7650 if (!(p
= lock_user_string(arg1
)))
7651 return -TARGET_EFAULT
;
7652 ret
= get_errno(umount(p
));
7653 unlock_user(p
, arg1
, 0);
7656 #ifdef TARGET_NR_stime /* not on alpha */
7657 case TARGET_NR_stime
:
7660 if (get_user_sal(host_time
, arg1
))
7661 return -TARGET_EFAULT
;
7662 return get_errno(stime(&host_time
));
7665 #ifdef TARGET_NR_alarm /* not on alpha */
7666 case TARGET_NR_alarm
:
7669 #ifdef TARGET_NR_pause /* not on alpha */
7670 case TARGET_NR_pause
:
7671 if (!block_signals()) {
7672 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7674 return -TARGET_EINTR
;
7676 #ifdef TARGET_NR_utime
7677 case TARGET_NR_utime
:
7679 struct utimbuf tbuf
, *host_tbuf
;
7680 struct target_utimbuf
*target_tbuf
;
7682 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7683 return -TARGET_EFAULT
;
7684 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7685 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7686 unlock_user_struct(target_tbuf
, arg2
, 0);
7691 if (!(p
= lock_user_string(arg1
)))
7692 return -TARGET_EFAULT
;
7693 ret
= get_errno(utime(p
, host_tbuf
));
7694 unlock_user(p
, arg1
, 0);
7698 #ifdef TARGET_NR_utimes
7699 case TARGET_NR_utimes
:
7701 struct timeval
*tvp
, tv
[2];
7703 if (copy_from_user_timeval(&tv
[0], arg2
)
7704 || copy_from_user_timeval(&tv
[1],
7705 arg2
+ sizeof(struct target_timeval
)))
7706 return -TARGET_EFAULT
;
7711 if (!(p
= lock_user_string(arg1
)))
7712 return -TARGET_EFAULT
;
7713 ret
= get_errno(utimes(p
, tvp
));
7714 unlock_user(p
, arg1
, 0);
7718 #if defined(TARGET_NR_futimesat)
7719 case TARGET_NR_futimesat
:
7721 struct timeval
*tvp
, tv
[2];
7723 if (copy_from_user_timeval(&tv
[0], arg3
)
7724 || copy_from_user_timeval(&tv
[1],
7725 arg3
+ sizeof(struct target_timeval
)))
7726 return -TARGET_EFAULT
;
7731 if (!(p
= lock_user_string(arg2
))) {
7732 return -TARGET_EFAULT
;
7734 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7735 unlock_user(p
, arg2
, 0);
7739 #ifdef TARGET_NR_access
7740 case TARGET_NR_access
:
7741 if (!(p
= lock_user_string(arg1
))) {
7742 return -TARGET_EFAULT
;
7744 ret
= get_errno(access(path(p
), arg2
));
7745 unlock_user(p
, arg1
, 0);
7748 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7749 case TARGET_NR_faccessat
:
7750 if (!(p
= lock_user_string(arg2
))) {
7751 return -TARGET_EFAULT
;
7753 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7754 unlock_user(p
, arg2
, 0);
7757 #ifdef TARGET_NR_nice /* not on alpha */
7758 case TARGET_NR_nice
:
7759 return get_errno(nice(arg1
));
7761 case TARGET_NR_sync
:
7764 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7765 case TARGET_NR_syncfs
:
7766 return get_errno(syncfs(arg1
));
7768 case TARGET_NR_kill
:
7769 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7770 #ifdef TARGET_NR_rename
7771 case TARGET_NR_rename
:
7774 p
= lock_user_string(arg1
);
7775 p2
= lock_user_string(arg2
);
7777 ret
= -TARGET_EFAULT
;
7779 ret
= get_errno(rename(p
, p2
));
7780 unlock_user(p2
, arg2
, 0);
7781 unlock_user(p
, arg1
, 0);
7785 #if defined(TARGET_NR_renameat)
7786 case TARGET_NR_renameat
:
7789 p
= lock_user_string(arg2
);
7790 p2
= lock_user_string(arg4
);
7792 ret
= -TARGET_EFAULT
;
7794 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7795 unlock_user(p2
, arg4
, 0);
7796 unlock_user(p
, arg2
, 0);
7800 #if defined(TARGET_NR_renameat2)
7801 case TARGET_NR_renameat2
:
7804 p
= lock_user_string(arg2
);
7805 p2
= lock_user_string(arg4
);
7807 ret
= -TARGET_EFAULT
;
7809 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
7811 unlock_user(p2
, arg4
, 0);
7812 unlock_user(p
, arg2
, 0);
7816 #ifdef TARGET_NR_mkdir
7817 case TARGET_NR_mkdir
:
7818 if (!(p
= lock_user_string(arg1
)))
7819 return -TARGET_EFAULT
;
7820 ret
= get_errno(mkdir(p
, arg2
));
7821 unlock_user(p
, arg1
, 0);
7824 #if defined(TARGET_NR_mkdirat)
7825 case TARGET_NR_mkdirat
:
7826 if (!(p
= lock_user_string(arg2
)))
7827 return -TARGET_EFAULT
;
7828 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7829 unlock_user(p
, arg2
, 0);
7832 #ifdef TARGET_NR_rmdir
7833 case TARGET_NR_rmdir
:
7834 if (!(p
= lock_user_string(arg1
)))
7835 return -TARGET_EFAULT
;
7836 ret
= get_errno(rmdir(p
));
7837 unlock_user(p
, arg1
, 0);
7841 ret
= get_errno(dup(arg1
));
7843 fd_trans_dup(arg1
, ret
);
7846 #ifdef TARGET_NR_pipe
7847 case TARGET_NR_pipe
:
7848 return do_pipe(cpu_env
, arg1
, 0, 0);
7850 #ifdef TARGET_NR_pipe2
7851 case TARGET_NR_pipe2
:
7852 return do_pipe(cpu_env
, arg1
,
7853 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7855 case TARGET_NR_times
:
7857 struct target_tms
*tmsp
;
7859 ret
= get_errno(times(&tms
));
7861 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7863 return -TARGET_EFAULT
;
7864 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7865 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7866 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7867 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7870 ret
= host_to_target_clock_t(ret
);
7873 case TARGET_NR_acct
:
7875 ret
= get_errno(acct(NULL
));
7877 if (!(p
= lock_user_string(arg1
))) {
7878 return -TARGET_EFAULT
;
7880 ret
= get_errno(acct(path(p
)));
7881 unlock_user(p
, arg1
, 0);
7884 #ifdef TARGET_NR_umount2
7885 case TARGET_NR_umount2
:
7886 if (!(p
= lock_user_string(arg1
)))
7887 return -TARGET_EFAULT
;
7888 ret
= get_errno(umount2(p
, arg2
));
7889 unlock_user(p
, arg1
, 0);
7892 case TARGET_NR_ioctl
:
7893 return do_ioctl(arg1
, arg2
, arg3
);
7894 #ifdef TARGET_NR_fcntl
7895 case TARGET_NR_fcntl
:
7896 return do_fcntl(arg1
, arg2
, arg3
);
7898 case TARGET_NR_setpgid
:
7899 return get_errno(setpgid(arg1
, arg2
));
7900 case TARGET_NR_umask
:
7901 return get_errno(umask(arg1
));
7902 case TARGET_NR_chroot
:
7903 if (!(p
= lock_user_string(arg1
)))
7904 return -TARGET_EFAULT
;
7905 ret
= get_errno(chroot(p
));
7906 unlock_user(p
, arg1
, 0);
7908 #ifdef TARGET_NR_dup2
7909 case TARGET_NR_dup2
:
7910 ret
= get_errno(dup2(arg1
, arg2
));
7912 fd_trans_dup(arg1
, arg2
);
7916 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7917 case TARGET_NR_dup3
:
7921 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
7924 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
7925 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
7927 fd_trans_dup(arg1
, arg2
);
7932 #ifdef TARGET_NR_getppid /* not on alpha */
7933 case TARGET_NR_getppid
:
7934 return get_errno(getppid());
7936 #ifdef TARGET_NR_getpgrp
7937 case TARGET_NR_getpgrp
:
7938 return get_errno(getpgrp());
7940 case TARGET_NR_setsid
:
7941 return get_errno(setsid());
7942 #ifdef TARGET_NR_sigaction
7943 case TARGET_NR_sigaction
:
7945 #if defined(TARGET_ALPHA)
7946 struct target_sigaction act
, oact
, *pact
= 0;
7947 struct target_old_sigaction
*old_act
;
7949 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7950 return -TARGET_EFAULT
;
7951 act
._sa_handler
= old_act
->_sa_handler
;
7952 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
7953 act
.sa_flags
= old_act
->sa_flags
;
7954 act
.sa_restorer
= 0;
7955 unlock_user_struct(old_act
, arg2
, 0);
7958 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7959 if (!is_error(ret
) && arg3
) {
7960 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7961 return -TARGET_EFAULT
;
7962 old_act
->_sa_handler
= oact
._sa_handler
;
7963 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
7964 old_act
->sa_flags
= oact
.sa_flags
;
7965 unlock_user_struct(old_act
, arg3
, 1);
7967 #elif defined(TARGET_MIPS)
7968 struct target_sigaction act
, oact
, *pact
, *old_act
;
7971 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
7972 return -TARGET_EFAULT
;
7973 act
._sa_handler
= old_act
->_sa_handler
;
7974 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
7975 act
.sa_flags
= old_act
->sa_flags
;
7976 unlock_user_struct(old_act
, arg2
, 0);
7982 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
7984 if (!is_error(ret
) && arg3
) {
7985 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
7986 return -TARGET_EFAULT
;
7987 old_act
->_sa_handler
= oact
._sa_handler
;
7988 old_act
->sa_flags
= oact
.sa_flags
;
7989 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
7990 old_act
->sa_mask
.sig
[1] = 0;
7991 old_act
->sa_mask
.sig
[2] = 0;
7992 old_act
->sa_mask
.sig
[3] = 0;
7993 unlock_user_struct(old_act
, arg3
, 1);
7996 struct target_old_sigaction
*old_act
;
7997 struct target_sigaction act
, oact
, *pact
;
7999 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8000 return -TARGET_EFAULT
;
8001 act
._sa_handler
= old_act
->_sa_handler
;
8002 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8003 act
.sa_flags
= old_act
->sa_flags
;
8004 act
.sa_restorer
= old_act
->sa_restorer
;
8005 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8006 act
.ka_restorer
= 0;
8008 unlock_user_struct(old_act
, arg2
, 0);
8013 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8014 if (!is_error(ret
) && arg3
) {
8015 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8016 return -TARGET_EFAULT
;
8017 old_act
->_sa_handler
= oact
._sa_handler
;
8018 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8019 old_act
->sa_flags
= oact
.sa_flags
;
8020 old_act
->sa_restorer
= oact
.sa_restorer
;
8021 unlock_user_struct(old_act
, arg3
, 1);
8027 case TARGET_NR_rt_sigaction
:
8029 #if defined(TARGET_ALPHA)
8030 /* For Alpha and SPARC this is a 5 argument syscall, with
8031 * a 'restorer' parameter which must be copied into the
8032 * sa_restorer field of the sigaction struct.
8033 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8034 * and arg5 is the sigsetsize.
8035 * Alpha also has a separate rt_sigaction struct that it uses
8036 * here; SPARC uses the usual sigaction struct.
8038 struct target_rt_sigaction
*rt_act
;
8039 struct target_sigaction act
, oact
, *pact
= 0;
8041 if (arg4
!= sizeof(target_sigset_t
)) {
8042 return -TARGET_EINVAL
;
8045 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8046 return -TARGET_EFAULT
;
8047 act
._sa_handler
= rt_act
->_sa_handler
;
8048 act
.sa_mask
= rt_act
->sa_mask
;
8049 act
.sa_flags
= rt_act
->sa_flags
;
8050 act
.sa_restorer
= arg5
;
8051 unlock_user_struct(rt_act
, arg2
, 0);
8054 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8055 if (!is_error(ret
) && arg3
) {
8056 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8057 return -TARGET_EFAULT
;
8058 rt_act
->_sa_handler
= oact
._sa_handler
;
8059 rt_act
->sa_mask
= oact
.sa_mask
;
8060 rt_act
->sa_flags
= oact
.sa_flags
;
8061 unlock_user_struct(rt_act
, arg3
, 1);
8065 target_ulong restorer
= arg4
;
8066 target_ulong sigsetsize
= arg5
;
8068 target_ulong sigsetsize
= arg4
;
8070 struct target_sigaction
*act
;
8071 struct target_sigaction
*oact
;
8073 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8074 return -TARGET_EINVAL
;
8077 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8078 return -TARGET_EFAULT
;
8080 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8081 act
->ka_restorer
= restorer
;
8087 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8088 ret
= -TARGET_EFAULT
;
8089 goto rt_sigaction_fail
;
8093 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8096 unlock_user_struct(act
, arg2
, 0);
8098 unlock_user_struct(oact
, arg3
, 1);
8102 #ifdef TARGET_NR_sgetmask /* not on alpha */
8103 case TARGET_NR_sgetmask
:
8106 abi_ulong target_set
;
8107 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8109 host_to_target_old_sigset(&target_set
, &cur_set
);
8115 #ifdef TARGET_NR_ssetmask /* not on alpha */
8116 case TARGET_NR_ssetmask
:
8119 abi_ulong target_set
= arg1
;
8120 target_to_host_old_sigset(&set
, &target_set
);
8121 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8123 host_to_target_old_sigset(&target_set
, &oset
);
8129 #ifdef TARGET_NR_sigprocmask
8130 case TARGET_NR_sigprocmask
:
8132 #if defined(TARGET_ALPHA)
8133 sigset_t set
, oldset
;
8138 case TARGET_SIG_BLOCK
:
8141 case TARGET_SIG_UNBLOCK
:
8144 case TARGET_SIG_SETMASK
:
8148 return -TARGET_EINVAL
;
8151 target_to_host_old_sigset(&set
, &mask
);
8153 ret
= do_sigprocmask(how
, &set
, &oldset
);
8154 if (!is_error(ret
)) {
8155 host_to_target_old_sigset(&mask
, &oldset
);
8157 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8160 sigset_t set
, oldset
, *set_ptr
;
8165 case TARGET_SIG_BLOCK
:
8168 case TARGET_SIG_UNBLOCK
:
8171 case TARGET_SIG_SETMASK
:
8175 return -TARGET_EINVAL
;
8177 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8178 return -TARGET_EFAULT
;
8179 target_to_host_old_sigset(&set
, p
);
8180 unlock_user(p
, arg2
, 0);
8186 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8187 if (!is_error(ret
) && arg3
) {
8188 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8189 return -TARGET_EFAULT
;
8190 host_to_target_old_sigset(p
, &oldset
);
8191 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8197 case TARGET_NR_rt_sigprocmask
:
8200 sigset_t set
, oldset
, *set_ptr
;
8202 if (arg4
!= sizeof(target_sigset_t
)) {
8203 return -TARGET_EINVAL
;
8208 case TARGET_SIG_BLOCK
:
8211 case TARGET_SIG_UNBLOCK
:
8214 case TARGET_SIG_SETMASK
:
8218 return -TARGET_EINVAL
;
8220 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8221 return -TARGET_EFAULT
;
8222 target_to_host_sigset(&set
, p
);
8223 unlock_user(p
, arg2
, 0);
8229 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8230 if (!is_error(ret
) && arg3
) {
8231 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8232 return -TARGET_EFAULT
;
8233 host_to_target_sigset(p
, &oldset
);
8234 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8238 #ifdef TARGET_NR_sigpending
8239 case TARGET_NR_sigpending
:
8242 ret
= get_errno(sigpending(&set
));
8243 if (!is_error(ret
)) {
8244 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8245 return -TARGET_EFAULT
;
8246 host_to_target_old_sigset(p
, &set
);
8247 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8252 case TARGET_NR_rt_sigpending
:
8256 /* Yes, this check is >, not != like most. We follow the kernel's
8257 * logic and it does it like this because it implements
8258 * NR_sigpending through the same code path, and in that case
8259 * the old_sigset_t is smaller in size.
8261 if (arg2
> sizeof(target_sigset_t
)) {
8262 return -TARGET_EINVAL
;
8265 ret
= get_errno(sigpending(&set
));
8266 if (!is_error(ret
)) {
8267 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8268 return -TARGET_EFAULT
;
8269 host_to_target_sigset(p
, &set
);
8270 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8274 #ifdef TARGET_NR_sigsuspend
8275 case TARGET_NR_sigsuspend
:
8277 TaskState
*ts
= cpu
->opaque
;
8278 #if defined(TARGET_ALPHA)
8279 abi_ulong mask
= arg1
;
8280 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8282 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8283 return -TARGET_EFAULT
;
8284 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8285 unlock_user(p
, arg1
, 0);
8287 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8289 if (ret
!= -TARGET_ERESTARTSYS
) {
8290 ts
->in_sigsuspend
= 1;
8295 case TARGET_NR_rt_sigsuspend
:
8297 TaskState
*ts
= cpu
->opaque
;
8299 if (arg2
!= sizeof(target_sigset_t
)) {
8300 return -TARGET_EINVAL
;
8302 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8303 return -TARGET_EFAULT
;
8304 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8305 unlock_user(p
, arg1
, 0);
8306 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8308 if (ret
!= -TARGET_ERESTARTSYS
) {
8309 ts
->in_sigsuspend
= 1;
8313 case TARGET_NR_rt_sigtimedwait
:
8316 struct timespec uts
, *puts
;
8319 if (arg4
!= sizeof(target_sigset_t
)) {
8320 return -TARGET_EINVAL
;
8323 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8324 return -TARGET_EFAULT
;
8325 target_to_host_sigset(&set
, p
);
8326 unlock_user(p
, arg1
, 0);
8329 target_to_host_timespec(puts
, arg3
);
8333 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8335 if (!is_error(ret
)) {
8337 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8340 return -TARGET_EFAULT
;
8342 host_to_target_siginfo(p
, &uinfo
);
8343 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8345 ret
= host_to_target_signal(ret
);
8349 case TARGET_NR_rt_sigqueueinfo
:
8353 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8355 return -TARGET_EFAULT
;
8357 target_to_host_siginfo(&uinfo
, p
);
8358 unlock_user(p
, arg3
, 0);
8359 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8362 case TARGET_NR_rt_tgsigqueueinfo
:
8366 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8368 return -TARGET_EFAULT
;
8370 target_to_host_siginfo(&uinfo
, p
);
8371 unlock_user(p
, arg4
, 0);
8372 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8375 #ifdef TARGET_NR_sigreturn
8376 case TARGET_NR_sigreturn
:
8377 if (block_signals()) {
8378 return -TARGET_ERESTARTSYS
;
8380 return do_sigreturn(cpu_env
);
8382 case TARGET_NR_rt_sigreturn
:
8383 if (block_signals()) {
8384 return -TARGET_ERESTARTSYS
;
8386 return do_rt_sigreturn(cpu_env
);
8387 case TARGET_NR_sethostname
:
8388 if (!(p
= lock_user_string(arg1
)))
8389 return -TARGET_EFAULT
;
8390 ret
= get_errno(sethostname(p
, arg2
));
8391 unlock_user(p
, arg1
, 0);
8393 #ifdef TARGET_NR_setrlimit
8394 case TARGET_NR_setrlimit
:
8396 int resource
= target_to_host_resource(arg1
);
8397 struct target_rlimit
*target_rlim
;
8399 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8400 return -TARGET_EFAULT
;
8401 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8402 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8403 unlock_user_struct(target_rlim
, arg2
, 0);
8405 * If we just passed through resource limit settings for memory then
8406 * they would also apply to QEMU's own allocations, and QEMU will
8407 * crash or hang or die if its allocations fail. Ideally we would
8408 * track the guest allocations in QEMU and apply the limits ourselves.
8409 * For now, just tell the guest the call succeeded but don't actually
8412 if (resource
!= RLIMIT_AS
&&
8413 resource
!= RLIMIT_DATA
&&
8414 resource
!= RLIMIT_STACK
) {
8415 return get_errno(setrlimit(resource
, &rlim
));
8421 #ifdef TARGET_NR_getrlimit
8422 case TARGET_NR_getrlimit
:
8424 int resource
= target_to_host_resource(arg1
);
8425 struct target_rlimit
*target_rlim
;
8428 ret
= get_errno(getrlimit(resource
, &rlim
));
8429 if (!is_error(ret
)) {
8430 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8431 return -TARGET_EFAULT
;
8432 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8433 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8434 unlock_user_struct(target_rlim
, arg2
, 1);
8439 case TARGET_NR_getrusage
:
8441 struct rusage rusage
;
8442 ret
= get_errno(getrusage(arg1
, &rusage
));
8443 if (!is_error(ret
)) {
8444 ret
= host_to_target_rusage(arg2
, &rusage
);
8448 case TARGET_NR_gettimeofday
:
8451 ret
= get_errno(gettimeofday(&tv
, NULL
));
8452 if (!is_error(ret
)) {
8453 if (copy_to_user_timeval(arg1
, &tv
))
8454 return -TARGET_EFAULT
;
8458 case TARGET_NR_settimeofday
:
8460 struct timeval tv
, *ptv
= NULL
;
8461 struct timezone tz
, *ptz
= NULL
;
8464 if (copy_from_user_timeval(&tv
, arg1
)) {
8465 return -TARGET_EFAULT
;
8471 if (copy_from_user_timezone(&tz
, arg2
)) {
8472 return -TARGET_EFAULT
;
8477 return get_errno(settimeofday(ptv
, ptz
));
8479 #if defined(TARGET_NR_select)
8480 case TARGET_NR_select
:
8481 #if defined(TARGET_WANT_NI_OLD_SELECT)
8482 /* some architectures used to have old_select here
8483 * but now ENOSYS it.
8485 ret
= -TARGET_ENOSYS
;
8486 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8487 ret
= do_old_select(arg1
);
8489 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8493 #ifdef TARGET_NR_pselect6
8494 case TARGET_NR_pselect6
:
8496 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8497 fd_set rfds
, wfds
, efds
;
8498 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8499 struct timespec ts
, *ts_ptr
;
8502 * The 6th arg is actually two args smashed together,
8503 * so we cannot use the C library.
8511 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8512 target_sigset_t
*target_sigset
;
8520 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8524 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8528 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8534 * This takes a timespec, and not a timeval, so we cannot
8535 * use the do_select() helper ...
8538 if (target_to_host_timespec(&ts
, ts_addr
)) {
8539 return -TARGET_EFAULT
;
8546 /* Extract the two packed args for the sigset */
8549 sig
.size
= SIGSET_T_SIZE
;
8551 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8553 return -TARGET_EFAULT
;
8555 arg_sigset
= tswapal(arg7
[0]);
8556 arg_sigsize
= tswapal(arg7
[1]);
8557 unlock_user(arg7
, arg6
, 0);
8561 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8562 /* Like the kernel, we enforce correct size sigsets */
8563 return -TARGET_EINVAL
;
8565 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8566 sizeof(*target_sigset
), 1);
8567 if (!target_sigset
) {
8568 return -TARGET_EFAULT
;
8570 target_to_host_sigset(&set
, target_sigset
);
8571 unlock_user(target_sigset
, arg_sigset
, 0);
8579 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8582 if (!is_error(ret
)) {
8583 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8584 return -TARGET_EFAULT
;
8585 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8586 return -TARGET_EFAULT
;
8587 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8588 return -TARGET_EFAULT
;
8590 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8591 return -TARGET_EFAULT
;
8596 #ifdef TARGET_NR_symlink
8597 case TARGET_NR_symlink
:
8600 p
= lock_user_string(arg1
);
8601 p2
= lock_user_string(arg2
);
8603 ret
= -TARGET_EFAULT
;
8605 ret
= get_errno(symlink(p
, p2
));
8606 unlock_user(p2
, arg2
, 0);
8607 unlock_user(p
, arg1
, 0);
8611 #if defined(TARGET_NR_symlinkat)
8612 case TARGET_NR_symlinkat
:
8615 p
= lock_user_string(arg1
);
8616 p2
= lock_user_string(arg3
);
8618 ret
= -TARGET_EFAULT
;
8620 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8621 unlock_user(p2
, arg3
, 0);
8622 unlock_user(p
, arg1
, 0);
8626 #ifdef TARGET_NR_readlink
8627 case TARGET_NR_readlink
:
8630 p
= lock_user_string(arg1
);
8631 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8633 ret
= -TARGET_EFAULT
;
8635 /* Short circuit this for the magic exe check. */
8636 ret
= -TARGET_EINVAL
;
8637 } else if (is_proc_myself((const char *)p
, "exe")) {
8638 char real
[PATH_MAX
], *temp
;
8639 temp
= realpath(exec_path
, real
);
8640 /* Return value is # of bytes that we wrote to the buffer. */
8642 ret
= get_errno(-1);
8644 /* Don't worry about sign mismatch as earlier mapping
8645 * logic would have thrown a bad address error. */
8646 ret
= MIN(strlen(real
), arg3
);
8647 /* We cannot NUL terminate the string. */
8648 memcpy(p2
, real
, ret
);
8651 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8653 unlock_user(p2
, arg2
, ret
);
8654 unlock_user(p
, arg1
, 0);
8658 #if defined(TARGET_NR_readlinkat)
8659 case TARGET_NR_readlinkat
:
8662 p
= lock_user_string(arg2
);
8663 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8665 ret
= -TARGET_EFAULT
;
8666 } else if (is_proc_myself((const char *)p
, "exe")) {
8667 char real
[PATH_MAX
], *temp
;
8668 temp
= realpath(exec_path
, real
);
8669 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8670 snprintf((char *)p2
, arg4
, "%s", real
);
8672 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8674 unlock_user(p2
, arg3
, ret
);
8675 unlock_user(p
, arg2
, 0);
8679 #ifdef TARGET_NR_swapon
8680 case TARGET_NR_swapon
:
8681 if (!(p
= lock_user_string(arg1
)))
8682 return -TARGET_EFAULT
;
8683 ret
= get_errno(swapon(p
, arg2
));
8684 unlock_user(p
, arg1
, 0);
8687 case TARGET_NR_reboot
:
8688 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8689 /* arg4 must be ignored in all other cases */
8690 p
= lock_user_string(arg4
);
8692 return -TARGET_EFAULT
;
8694 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8695 unlock_user(p
, arg4
, 0);
8697 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8700 #ifdef TARGET_NR_mmap
8701 case TARGET_NR_mmap
:
8702 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8703 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8704 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8705 || defined(TARGET_S390X)
8708 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8709 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8710 return -TARGET_EFAULT
;
8717 unlock_user(v
, arg1
, 0);
8718 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8719 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8723 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8724 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8730 #ifdef TARGET_NR_mmap2
8731 case TARGET_NR_mmap2
:
8733 #define MMAP_SHIFT 12
8735 ret
= target_mmap(arg1
, arg2
, arg3
,
8736 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8737 arg5
, arg6
<< MMAP_SHIFT
);
8738 return get_errno(ret
);
8740 case TARGET_NR_munmap
:
8741 return get_errno(target_munmap(arg1
, arg2
));
8742 case TARGET_NR_mprotect
:
8744 TaskState
*ts
= cpu
->opaque
;
8745 /* Special hack to detect libc making the stack executable. */
8746 if ((arg3
& PROT_GROWSDOWN
)
8747 && arg1
>= ts
->info
->stack_limit
8748 && arg1
<= ts
->info
->start_stack
) {
8749 arg3
&= ~PROT_GROWSDOWN
;
8750 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8751 arg1
= ts
->info
->stack_limit
;
8754 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
8755 #ifdef TARGET_NR_mremap
8756 case TARGET_NR_mremap
:
8757 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8759 /* ??? msync/mlock/munlock are broken for softmmu. */
8760 #ifdef TARGET_NR_msync
8761 case TARGET_NR_msync
:
8762 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
8764 #ifdef TARGET_NR_mlock
8765 case TARGET_NR_mlock
:
8766 return get_errno(mlock(g2h(arg1
), arg2
));
8768 #ifdef TARGET_NR_munlock
8769 case TARGET_NR_munlock
:
8770 return get_errno(munlock(g2h(arg1
), arg2
));
8772 #ifdef TARGET_NR_mlockall
8773 case TARGET_NR_mlockall
:
8774 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8776 #ifdef TARGET_NR_munlockall
8777 case TARGET_NR_munlockall
:
8778 return get_errno(munlockall());
8780 #ifdef TARGET_NR_truncate
8781 case TARGET_NR_truncate
:
8782 if (!(p
= lock_user_string(arg1
)))
8783 return -TARGET_EFAULT
;
8784 ret
= get_errno(truncate(p
, arg2
));
8785 unlock_user(p
, arg1
, 0);
8788 #ifdef TARGET_NR_ftruncate
8789 case TARGET_NR_ftruncate
:
8790 return get_errno(ftruncate(arg1
, arg2
));
8792 case TARGET_NR_fchmod
:
8793 return get_errno(fchmod(arg1
, arg2
));
8794 #if defined(TARGET_NR_fchmodat)
8795 case TARGET_NR_fchmodat
:
8796 if (!(p
= lock_user_string(arg2
)))
8797 return -TARGET_EFAULT
;
8798 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8799 unlock_user(p
, arg2
, 0);
8802 case TARGET_NR_getpriority
:
8803 /* Note that negative values are valid for getpriority, so we must
8804 differentiate based on errno settings. */
8806 ret
= getpriority(arg1
, arg2
);
8807 if (ret
== -1 && errno
!= 0) {
8808 return -host_to_target_errno(errno
);
8811 /* Return value is the unbiased priority. Signal no error. */
8812 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8814 /* Return value is a biased priority to avoid negative numbers. */
8818 case TARGET_NR_setpriority
:
8819 return get_errno(setpriority(arg1
, arg2
, arg3
));
8820 #ifdef TARGET_NR_statfs
8821 case TARGET_NR_statfs
:
8822 if (!(p
= lock_user_string(arg1
))) {
8823 return -TARGET_EFAULT
;
8825 ret
= get_errno(statfs(path(p
), &stfs
));
8826 unlock_user(p
, arg1
, 0);
8828 if (!is_error(ret
)) {
8829 struct target_statfs
*target_stfs
;
8831 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8832 return -TARGET_EFAULT
;
8833 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8834 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8835 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8836 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8837 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8838 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8839 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8840 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8841 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8842 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8843 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8844 #ifdef _STATFS_F_FLAGS
8845 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
8847 __put_user(0, &target_stfs
->f_flags
);
8849 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8850 unlock_user_struct(target_stfs
, arg2
, 1);
8854 #ifdef TARGET_NR_fstatfs
8855 case TARGET_NR_fstatfs
:
8856 ret
= get_errno(fstatfs(arg1
, &stfs
));
8857 goto convert_statfs
;
8859 #ifdef TARGET_NR_statfs64
8860 case TARGET_NR_statfs64
:
8861 if (!(p
= lock_user_string(arg1
))) {
8862 return -TARGET_EFAULT
;
8864 ret
= get_errno(statfs(path(p
), &stfs
));
8865 unlock_user(p
, arg1
, 0);
8867 if (!is_error(ret
)) {
8868 struct target_statfs64
*target_stfs
;
8870 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8871 return -TARGET_EFAULT
;
8872 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8873 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8874 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8875 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8876 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8877 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8878 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8879 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8880 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8881 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8882 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8883 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8884 unlock_user_struct(target_stfs
, arg3
, 1);
8887 case TARGET_NR_fstatfs64
:
8888 ret
= get_errno(fstatfs(arg1
, &stfs
));
8889 goto convert_statfs64
;
8891 #ifdef TARGET_NR_socketcall
8892 case TARGET_NR_socketcall
:
8893 return do_socketcall(arg1
, arg2
);
8895 #ifdef TARGET_NR_accept
8896 case TARGET_NR_accept
:
8897 return do_accept4(arg1
, arg2
, arg3
, 0);
8899 #ifdef TARGET_NR_accept4
8900 case TARGET_NR_accept4
:
8901 return do_accept4(arg1
, arg2
, arg3
, arg4
);
8903 #ifdef TARGET_NR_bind
8904 case TARGET_NR_bind
:
8905 return do_bind(arg1
, arg2
, arg3
);
8907 #ifdef TARGET_NR_connect
8908 case TARGET_NR_connect
:
8909 return do_connect(arg1
, arg2
, arg3
);
8911 #ifdef TARGET_NR_getpeername
8912 case TARGET_NR_getpeername
:
8913 return do_getpeername(arg1
, arg2
, arg3
);
8915 #ifdef TARGET_NR_getsockname
8916 case TARGET_NR_getsockname
:
8917 return do_getsockname(arg1
, arg2
, arg3
);
8919 #ifdef TARGET_NR_getsockopt
8920 case TARGET_NR_getsockopt
:
8921 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
8923 #ifdef TARGET_NR_listen
8924 case TARGET_NR_listen
:
8925 return get_errno(listen(arg1
, arg2
));
8927 #ifdef TARGET_NR_recv
8928 case TARGET_NR_recv
:
8929 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
8931 #ifdef TARGET_NR_recvfrom
8932 case TARGET_NR_recvfrom
:
8933 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8935 #ifdef TARGET_NR_recvmsg
8936 case TARGET_NR_recvmsg
:
8937 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
8939 #ifdef TARGET_NR_send
8940 case TARGET_NR_send
:
8941 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
8943 #ifdef TARGET_NR_sendmsg
8944 case TARGET_NR_sendmsg
:
8945 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
8947 #ifdef TARGET_NR_sendmmsg
8948 case TARGET_NR_sendmmsg
:
8949 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
8950 case TARGET_NR_recvmmsg
:
8951 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
8953 #ifdef TARGET_NR_sendto
8954 case TARGET_NR_sendto
:
8955 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8957 #ifdef TARGET_NR_shutdown
8958 case TARGET_NR_shutdown
:
8959 return get_errno(shutdown(arg1
, arg2
));
8961 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8962 case TARGET_NR_getrandom
:
8963 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
8965 return -TARGET_EFAULT
;
8967 ret
= get_errno(getrandom(p
, arg2
, arg3
));
8968 unlock_user(p
, arg1
, ret
);
8971 #ifdef TARGET_NR_socket
8972 case TARGET_NR_socket
:
8973 return do_socket(arg1
, arg2
, arg3
);
8975 #ifdef TARGET_NR_socketpair
8976 case TARGET_NR_socketpair
:
8977 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
8979 #ifdef TARGET_NR_setsockopt
8980 case TARGET_NR_setsockopt
:
8981 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
8983 #if defined(TARGET_NR_syslog)
8984 case TARGET_NR_syslog
:
8989 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
8990 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
8991 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
8992 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
8993 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
8994 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
8995 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
8996 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
8997 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
8998 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
8999 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9000 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9003 return -TARGET_EINVAL
;
9008 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9010 return -TARGET_EFAULT
;
9012 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9013 unlock_user(p
, arg2
, arg3
);
9017 return -TARGET_EINVAL
;
9022 case TARGET_NR_setitimer
:
9024 struct itimerval value
, ovalue
, *pvalue
;
9028 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9029 || copy_from_user_timeval(&pvalue
->it_value
,
9030 arg2
+ sizeof(struct target_timeval
)))
9031 return -TARGET_EFAULT
;
9035 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9036 if (!is_error(ret
) && arg3
) {
9037 if (copy_to_user_timeval(arg3
,
9038 &ovalue
.it_interval
)
9039 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9041 return -TARGET_EFAULT
;
9045 case TARGET_NR_getitimer
:
9047 struct itimerval value
;
9049 ret
= get_errno(getitimer(arg1
, &value
));
9050 if (!is_error(ret
) && arg2
) {
9051 if (copy_to_user_timeval(arg2
,
9053 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9055 return -TARGET_EFAULT
;
9059 #ifdef TARGET_NR_stat
9060 case TARGET_NR_stat
:
9061 if (!(p
= lock_user_string(arg1
))) {
9062 return -TARGET_EFAULT
;
9064 ret
= get_errno(stat(path(p
), &st
));
9065 unlock_user(p
, arg1
, 0);
9068 #ifdef TARGET_NR_lstat
9069 case TARGET_NR_lstat
:
9070 if (!(p
= lock_user_string(arg1
))) {
9071 return -TARGET_EFAULT
;
9073 ret
= get_errno(lstat(path(p
), &st
));
9074 unlock_user(p
, arg1
, 0);
9077 #ifdef TARGET_NR_fstat
9078 case TARGET_NR_fstat
:
9080 ret
= get_errno(fstat(arg1
, &st
));
9081 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9084 if (!is_error(ret
)) {
9085 struct target_stat
*target_st
;
9087 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9088 return -TARGET_EFAULT
;
9089 memset(target_st
, 0, sizeof(*target_st
));
9090 __put_user(st
.st_dev
, &target_st
->st_dev
);
9091 __put_user(st
.st_ino
, &target_st
->st_ino
);
9092 __put_user(st
.st_mode
, &target_st
->st_mode
);
9093 __put_user(st
.st_uid
, &target_st
->st_uid
);
9094 __put_user(st
.st_gid
, &target_st
->st_gid
);
9095 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9096 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9097 __put_user(st
.st_size
, &target_st
->st_size
);
9098 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9099 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9100 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9101 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9102 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9103 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9104 defined(TARGET_STAT_HAVE_NSEC)
9105 __put_user(st
.st_atim
.tv_nsec
,
9106 &target_st
->target_st_atime_nsec
);
9107 __put_user(st
.st_mtim
.tv_nsec
,
9108 &target_st
->target_st_mtime_nsec
);
9109 __put_user(st
.st_ctim
.tv_nsec
,
9110 &target_st
->target_st_ctime_nsec
);
9112 unlock_user_struct(target_st
, arg2
, 1);
9117 case TARGET_NR_vhangup
:
9118 return get_errno(vhangup());
9119 #ifdef TARGET_NR_syscall
9120 case TARGET_NR_syscall
:
9121 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9122 arg6
, arg7
, arg8
, 0);
9124 case TARGET_NR_wait4
:
9127 abi_long status_ptr
= arg2
;
9128 struct rusage rusage
, *rusage_ptr
;
9129 abi_ulong target_rusage
= arg4
;
9130 abi_long rusage_err
;
9132 rusage_ptr
= &rusage
;
9135 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9136 if (!is_error(ret
)) {
9137 if (status_ptr
&& ret
) {
9138 status
= host_to_target_waitstatus(status
);
9139 if (put_user_s32(status
, status_ptr
))
9140 return -TARGET_EFAULT
;
9142 if (target_rusage
) {
9143 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9151 #ifdef TARGET_NR_swapoff
9152 case TARGET_NR_swapoff
:
9153 if (!(p
= lock_user_string(arg1
)))
9154 return -TARGET_EFAULT
;
9155 ret
= get_errno(swapoff(p
));
9156 unlock_user(p
, arg1
, 0);
9159 case TARGET_NR_sysinfo
:
9161 struct target_sysinfo
*target_value
;
9162 struct sysinfo value
;
9163 ret
= get_errno(sysinfo(&value
));
9164 if (!is_error(ret
) && arg1
)
9166 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9167 return -TARGET_EFAULT
;
9168 __put_user(value
.uptime
, &target_value
->uptime
);
9169 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9170 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9171 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9172 __put_user(value
.totalram
, &target_value
->totalram
);
9173 __put_user(value
.freeram
, &target_value
->freeram
);
9174 __put_user(value
.sharedram
, &target_value
->sharedram
);
9175 __put_user(value
.bufferram
, &target_value
->bufferram
);
9176 __put_user(value
.totalswap
, &target_value
->totalswap
);
9177 __put_user(value
.freeswap
, &target_value
->freeswap
);
9178 __put_user(value
.procs
, &target_value
->procs
);
9179 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9180 __put_user(value
.freehigh
, &target_value
->freehigh
);
9181 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9182 unlock_user_struct(target_value
, arg1
, 1);
9186 #ifdef TARGET_NR_ipc
9188 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9190 #ifdef TARGET_NR_semget
9191 case TARGET_NR_semget
:
9192 return get_errno(semget(arg1
, arg2
, arg3
));
9194 #ifdef TARGET_NR_semop
9195 case TARGET_NR_semop
:
9196 return do_semop(arg1
, arg2
, arg3
);
9198 #ifdef TARGET_NR_semctl
9199 case TARGET_NR_semctl
:
9200 return do_semctl(arg1
, arg2
, arg3
, arg4
);
9202 #ifdef TARGET_NR_msgctl
9203 case TARGET_NR_msgctl
:
9204 return do_msgctl(arg1
, arg2
, arg3
);
9206 #ifdef TARGET_NR_msgget
9207 case TARGET_NR_msgget
:
9208 return get_errno(msgget(arg1
, arg2
));
9210 #ifdef TARGET_NR_msgrcv
9211 case TARGET_NR_msgrcv
:
9212 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9214 #ifdef TARGET_NR_msgsnd
9215 case TARGET_NR_msgsnd
:
9216 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9218 #ifdef TARGET_NR_shmget
9219 case TARGET_NR_shmget
:
9220 return get_errno(shmget(arg1
, arg2
, arg3
));
9222 #ifdef TARGET_NR_shmctl
9223 case TARGET_NR_shmctl
:
9224 return do_shmctl(arg1
, arg2
, arg3
);
9226 #ifdef TARGET_NR_shmat
9227 case TARGET_NR_shmat
:
9228 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9230 #ifdef TARGET_NR_shmdt
9231 case TARGET_NR_shmdt
:
9232 return do_shmdt(arg1
);
9234 case TARGET_NR_fsync
:
9235 return get_errno(fsync(arg1
));
9236 case TARGET_NR_clone
:
9237 /* Linux manages to have three different orderings for its
9238 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9239 * match the kernel's CONFIG_CLONE_* settings.
9240 * Microblaze is further special in that it uses a sixth
9241 * implicit argument to clone for the TLS pointer.
9243 #if defined(TARGET_MICROBLAZE)
9244 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9245 #elif defined(TARGET_CLONE_BACKWARDS)
9246 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9247 #elif defined(TARGET_CLONE_BACKWARDS2)
9248 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9250 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9253 #ifdef __NR_exit_group
9254 /* new thread calls */
9255 case TARGET_NR_exit_group
:
9256 preexit_cleanup(cpu_env
, arg1
);
9257 return get_errno(exit_group(arg1
));
9259 case TARGET_NR_setdomainname
:
9260 if (!(p
= lock_user_string(arg1
)))
9261 return -TARGET_EFAULT
;
9262 ret
= get_errno(setdomainname(p
, arg2
));
9263 unlock_user(p
, arg1
, 0);
9265 case TARGET_NR_uname
:
9266 /* no need to transcode because we use the linux syscall */
9268 struct new_utsname
* buf
;
9270 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9271 return -TARGET_EFAULT
;
9272 ret
= get_errno(sys_uname(buf
));
9273 if (!is_error(ret
)) {
9274 /* Overwrite the native machine name with whatever is being
9276 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
9277 sizeof(buf
->machine
));
9278 /* Allow the user to override the reported release. */
9279 if (qemu_uname_release
&& *qemu_uname_release
) {
9280 g_strlcpy(buf
->release
, qemu_uname_release
,
9281 sizeof(buf
->release
));
9284 unlock_user_struct(buf
, arg1
, 1);
9288 case TARGET_NR_modify_ldt
:
9289 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9290 #if !defined(TARGET_X86_64)
9291 case TARGET_NR_vm86
:
9292 return do_vm86(cpu_env
, arg1
, arg2
);
9295 case TARGET_NR_adjtimex
:
9297 struct timex host_buf
;
9299 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9300 return -TARGET_EFAULT
;
9302 ret
= get_errno(adjtimex(&host_buf
));
9303 if (!is_error(ret
)) {
9304 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9305 return -TARGET_EFAULT
;
9310 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9311 case TARGET_NR_clock_adjtime
:
9313 struct timex htx
, *phtx
= &htx
;
9315 if (target_to_host_timex(phtx
, arg2
) != 0) {
9316 return -TARGET_EFAULT
;
9318 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9319 if (!is_error(ret
) && phtx
) {
9320 if (host_to_target_timex(arg2
, phtx
) != 0) {
9321 return -TARGET_EFAULT
;
9327 case TARGET_NR_getpgid
:
9328 return get_errno(getpgid(arg1
));
9329 case TARGET_NR_fchdir
:
9330 return get_errno(fchdir(arg1
));
9331 case TARGET_NR_personality
:
9332 return get_errno(personality(arg1
));
9333 #ifdef TARGET_NR__llseek /* Not on alpha */
9334 case TARGET_NR__llseek
:
9337 #if !defined(__NR_llseek)
9338 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9340 ret
= get_errno(res
);
9345 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9347 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9348 return -TARGET_EFAULT
;
9353 #ifdef TARGET_NR_getdents
9354 case TARGET_NR_getdents
:
9355 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9356 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9358 struct target_dirent
*target_dirp
;
9359 struct linux_dirent
*dirp
;
9360 abi_long count
= arg3
;
9362 dirp
= g_try_malloc(count
);
9364 return -TARGET_ENOMEM
;
9367 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9368 if (!is_error(ret
)) {
9369 struct linux_dirent
*de
;
9370 struct target_dirent
*tde
;
9372 int reclen
, treclen
;
9373 int count1
, tnamelen
;
9377 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9378 return -TARGET_EFAULT
;
9381 reclen
= de
->d_reclen
;
9382 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9383 assert(tnamelen
>= 0);
9384 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9385 assert(count1
+ treclen
<= count
);
9386 tde
->d_reclen
= tswap16(treclen
);
9387 tde
->d_ino
= tswapal(de
->d_ino
);
9388 tde
->d_off
= tswapal(de
->d_off
);
9389 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9390 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9392 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9396 unlock_user(target_dirp
, arg2
, ret
);
9402 struct linux_dirent
*dirp
;
9403 abi_long count
= arg3
;
9405 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9406 return -TARGET_EFAULT
;
9407 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9408 if (!is_error(ret
)) {
9409 struct linux_dirent
*de
;
9414 reclen
= de
->d_reclen
;
9417 de
->d_reclen
= tswap16(reclen
);
9418 tswapls(&de
->d_ino
);
9419 tswapls(&de
->d_off
);
9420 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9424 unlock_user(dirp
, arg2
, ret
);
9428 /* Implement getdents in terms of getdents64 */
9430 struct linux_dirent64
*dirp
;
9431 abi_long count
= arg3
;
9433 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9435 return -TARGET_EFAULT
;
9437 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9438 if (!is_error(ret
)) {
9439 /* Convert the dirent64 structs to target dirent. We do this
9440 * in-place, since we can guarantee that a target_dirent is no
9441 * larger than a dirent64; however this means we have to be
9442 * careful to read everything before writing in the new format.
9444 struct linux_dirent64
*de
;
9445 struct target_dirent
*tde
;
9450 tde
= (struct target_dirent
*)dirp
;
9452 int namelen
, treclen
;
9453 int reclen
= de
->d_reclen
;
9454 uint64_t ino
= de
->d_ino
;
9455 int64_t off
= de
->d_off
;
9456 uint8_t type
= de
->d_type
;
9458 namelen
= strlen(de
->d_name
);
9459 treclen
= offsetof(struct target_dirent
, d_name
)
9461 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9463 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9464 tde
->d_ino
= tswapal(ino
);
9465 tde
->d_off
= tswapal(off
);
9466 tde
->d_reclen
= tswap16(treclen
);
9467 /* The target_dirent type is in what was formerly a padding
9468 * byte at the end of the structure:
9470 *(((char *)tde
) + treclen
- 1) = type
;
9472 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9473 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9479 unlock_user(dirp
, arg2
, ret
);
9483 #endif /* TARGET_NR_getdents */
9484 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9485 case TARGET_NR_getdents64
:
9487 struct linux_dirent64
*dirp
;
9488 abi_long count
= arg3
;
9489 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9490 return -TARGET_EFAULT
;
9491 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9492 if (!is_error(ret
)) {
9493 struct linux_dirent64
*de
;
9498 reclen
= de
->d_reclen
;
9501 de
->d_reclen
= tswap16(reclen
);
9502 tswap64s((uint64_t *)&de
->d_ino
);
9503 tswap64s((uint64_t *)&de
->d_off
);
9504 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9508 unlock_user(dirp
, arg2
, ret
);
9511 #endif /* TARGET_NR_getdents64 */
9512 #if defined(TARGET_NR__newselect)
9513 case TARGET_NR__newselect
:
9514 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9516 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9517 # ifdef TARGET_NR_poll
9518 case TARGET_NR_poll
:
9520 # ifdef TARGET_NR_ppoll
9521 case TARGET_NR_ppoll
:
9524 struct target_pollfd
*target_pfd
;
9525 unsigned int nfds
= arg2
;
9532 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9533 return -TARGET_EINVAL
;
9536 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9537 sizeof(struct target_pollfd
) * nfds
, 1);
9539 return -TARGET_EFAULT
;
9542 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9543 for (i
= 0; i
< nfds
; i
++) {
9544 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9545 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9550 # ifdef TARGET_NR_ppoll
9551 case TARGET_NR_ppoll
:
9553 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9554 target_sigset_t
*target_set
;
9555 sigset_t _set
, *set
= &_set
;
9558 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9559 unlock_user(target_pfd
, arg1
, 0);
9560 return -TARGET_EFAULT
;
9567 if (arg5
!= sizeof(target_sigset_t
)) {
9568 unlock_user(target_pfd
, arg1
, 0);
9569 return -TARGET_EINVAL
;
9572 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9574 unlock_user(target_pfd
, arg1
, 0);
9575 return -TARGET_EFAULT
;
9577 target_to_host_sigset(set
, target_set
);
9582 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9583 set
, SIGSET_T_SIZE
));
9585 if (!is_error(ret
) && arg3
) {
9586 host_to_target_timespec(arg3
, timeout_ts
);
9589 unlock_user(target_set
, arg4
, 0);
9594 # ifdef TARGET_NR_poll
9595 case TARGET_NR_poll
:
9597 struct timespec ts
, *pts
;
9600 /* Convert ms to secs, ns */
9601 ts
.tv_sec
= arg3
/ 1000;
9602 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9605 /* -ve poll() timeout means "infinite" */
9608 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9613 g_assert_not_reached();
9616 if (!is_error(ret
)) {
9617 for(i
= 0; i
< nfds
; i
++) {
9618 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9621 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9625 case TARGET_NR_flock
:
9626 /* NOTE: the flock constant seems to be the same for every
9628 return get_errno(safe_flock(arg1
, arg2
));
9629 case TARGET_NR_readv
:
9631 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9633 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9634 unlock_iovec(vec
, arg2
, arg3
, 1);
9636 ret
= -host_to_target_errno(errno
);
9640 case TARGET_NR_writev
:
9642 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9644 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9645 unlock_iovec(vec
, arg2
, arg3
, 0);
9647 ret
= -host_to_target_errno(errno
);
9651 #if defined(TARGET_NR_preadv)
9652 case TARGET_NR_preadv
:
9654 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9656 unsigned long low
, high
;
9658 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9659 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
9660 unlock_iovec(vec
, arg2
, arg3
, 1);
9662 ret
= -host_to_target_errno(errno
);
9667 #if defined(TARGET_NR_pwritev)
9668 case TARGET_NR_pwritev
:
9670 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9672 unsigned long low
, high
;
9674 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
9675 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
9676 unlock_iovec(vec
, arg2
, arg3
, 0);
9678 ret
= -host_to_target_errno(errno
);
9683 case TARGET_NR_getsid
:
9684 return get_errno(getsid(arg1
));
9685 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9686 case TARGET_NR_fdatasync
:
9687 return get_errno(fdatasync(arg1
));
9689 #ifdef TARGET_NR__sysctl
9690 case TARGET_NR__sysctl
:
9691 /* We don't implement this, but ENOTDIR is always a safe
9693 return -TARGET_ENOTDIR
;
9695 case TARGET_NR_sched_getaffinity
:
9697 unsigned int mask_size
;
9698 unsigned long *mask
;
9701 * sched_getaffinity needs multiples of ulong, so need to take
9702 * care of mismatches between target ulong and host ulong sizes.
9704 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9705 return -TARGET_EINVAL
;
9707 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9709 mask
= alloca(mask_size
);
9710 memset(mask
, 0, mask_size
);
9711 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9713 if (!is_error(ret
)) {
9715 /* More data returned than the caller's buffer will fit.
9716 * This only happens if sizeof(abi_long) < sizeof(long)
9717 * and the caller passed us a buffer holding an odd number
9718 * of abi_longs. If the host kernel is actually using the
9719 * extra 4 bytes then fail EINVAL; otherwise we can just
9720 * ignore them and only copy the interesting part.
9722 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9723 if (numcpus
> arg2
* 8) {
9724 return -TARGET_EINVAL
;
9729 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
9730 return -TARGET_EFAULT
;
9735 case TARGET_NR_sched_setaffinity
:
9737 unsigned int mask_size
;
9738 unsigned long *mask
;
9741 * sched_setaffinity needs multiples of ulong, so need to take
9742 * care of mismatches between target ulong and host ulong sizes.
9744 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9745 return -TARGET_EINVAL
;
9747 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9748 mask
= alloca(mask_size
);
9750 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
9755 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9757 case TARGET_NR_getcpu
:
9760 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
9761 arg2
? &node
: NULL
,
9763 if (is_error(ret
)) {
9766 if (arg1
&& put_user_u32(cpu
, arg1
)) {
9767 return -TARGET_EFAULT
;
9769 if (arg2
&& put_user_u32(node
, arg2
)) {
9770 return -TARGET_EFAULT
;
9774 case TARGET_NR_sched_setparam
:
9776 struct sched_param
*target_schp
;
9777 struct sched_param schp
;
9780 return -TARGET_EINVAL
;
9782 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9783 return -TARGET_EFAULT
;
9784 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9785 unlock_user_struct(target_schp
, arg2
, 0);
9786 return get_errno(sched_setparam(arg1
, &schp
));
9788 case TARGET_NR_sched_getparam
:
9790 struct sched_param
*target_schp
;
9791 struct sched_param schp
;
9794 return -TARGET_EINVAL
;
9796 ret
= get_errno(sched_getparam(arg1
, &schp
));
9797 if (!is_error(ret
)) {
9798 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9799 return -TARGET_EFAULT
;
9800 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9801 unlock_user_struct(target_schp
, arg2
, 1);
9805 case TARGET_NR_sched_setscheduler
:
9807 struct sched_param
*target_schp
;
9808 struct sched_param schp
;
9810 return -TARGET_EINVAL
;
9812 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9813 return -TARGET_EFAULT
;
9814 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9815 unlock_user_struct(target_schp
, arg3
, 0);
9816 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9818 case TARGET_NR_sched_getscheduler
:
9819 return get_errno(sched_getscheduler(arg1
));
9820 case TARGET_NR_sched_yield
:
9821 return get_errno(sched_yield());
9822 case TARGET_NR_sched_get_priority_max
:
9823 return get_errno(sched_get_priority_max(arg1
));
9824 case TARGET_NR_sched_get_priority_min
:
9825 return get_errno(sched_get_priority_min(arg1
));
9826 case TARGET_NR_sched_rr_get_interval
:
9829 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9830 if (!is_error(ret
)) {
9831 ret
= host_to_target_timespec(arg2
, &ts
);
9835 case TARGET_NR_nanosleep
:
9837 struct timespec req
, rem
;
9838 target_to_host_timespec(&req
, arg1
);
9839 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9840 if (is_error(ret
) && arg2
) {
9841 host_to_target_timespec(arg2
, &rem
);
9845 case TARGET_NR_prctl
:
9847 case PR_GET_PDEATHSIG
:
9850 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9851 if (!is_error(ret
) && arg2
9852 && put_user_ual(deathsig
, arg2
)) {
9853 return -TARGET_EFAULT
;
9860 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9862 return -TARGET_EFAULT
;
9864 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9866 unlock_user(name
, arg2
, 16);
9871 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9873 return -TARGET_EFAULT
;
9875 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9877 unlock_user(name
, arg2
, 0);
9882 case TARGET_PR_GET_FP_MODE
:
9884 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9886 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
9887 ret
|= TARGET_PR_FP_MODE_FR
;
9889 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
9890 ret
|= TARGET_PR_FP_MODE_FRE
;
9894 case TARGET_PR_SET_FP_MODE
:
9896 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
9897 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
9898 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
9899 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
9900 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
9902 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
9903 TARGET_PR_FP_MODE_FRE
;
9905 /* If nothing to change, return right away, successfully. */
9906 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
9909 /* Check the value is valid */
9910 if (arg2
& ~known_bits
) {
9911 return -TARGET_EOPNOTSUPP
;
9913 /* Setting FRE without FR is not supported. */
9914 if (new_fre
&& !new_fr
) {
9915 return -TARGET_EOPNOTSUPP
;
9917 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
9918 /* FR1 is not supported */
9919 return -TARGET_EOPNOTSUPP
;
9921 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
9922 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
9923 /* cannot set FR=0 */
9924 return -TARGET_EOPNOTSUPP
;
9926 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
9927 /* Cannot set FRE=1 */
9928 return -TARGET_EOPNOTSUPP
;
9932 fpr_t
*fpr
= env
->active_fpu
.fpr
;
9933 for (i
= 0; i
< 32 ; i
+= 2) {
9934 if (!old_fr
&& new_fr
) {
9935 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
9936 } else if (old_fr
&& !new_fr
) {
9937 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
9942 env
->CP0_Status
|= (1 << CP0St_FR
);
9943 env
->hflags
|= MIPS_HFLAG_F64
;
9945 env
->CP0_Status
&= ~(1 << CP0St_FR
);
9946 env
->hflags
&= ~MIPS_HFLAG_F64
;
9949 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
9950 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
9951 env
->hflags
|= MIPS_HFLAG_FRE
;
9954 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
9955 env
->hflags
&= ~MIPS_HFLAG_FRE
;
9961 #ifdef TARGET_AARCH64
9962 case TARGET_PR_SVE_SET_VL
:
9964 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9965 * PR_SVE_VL_INHERIT. Note the kernel definition
9966 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9967 * even though the current architectural maximum is VQ=16.
9969 ret
= -TARGET_EINVAL
;
9970 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
9971 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
9972 CPUARMState
*env
= cpu_env
;
9973 ARMCPU
*cpu
= env_archcpu(env
);
9974 uint32_t vq
, old_vq
;
9976 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
9977 vq
= MAX(arg2
/ 16, 1);
9978 vq
= MIN(vq
, cpu
->sve_max_vq
);
9981 aarch64_sve_narrow_vq(env
, vq
);
9983 env
->vfp
.zcr_el
[1] = vq
- 1;
9987 case TARGET_PR_SVE_GET_VL
:
9988 ret
= -TARGET_EINVAL
;
9990 ARMCPU
*cpu
= env_archcpu(cpu_env
);
9991 if (cpu_isar_feature(aa64_sve
, cpu
)) {
9992 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
9996 case TARGET_PR_PAC_RESET_KEYS
:
9998 CPUARMState
*env
= cpu_env
;
9999 ARMCPU
*cpu
= env_archcpu(env
);
10001 if (arg3
|| arg4
|| arg5
) {
10002 return -TARGET_EINVAL
;
10004 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10005 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10006 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10007 TARGET_PR_PAC_APGAKEY
);
10013 } else if (arg2
& ~all
) {
10014 return -TARGET_EINVAL
;
10016 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10017 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10018 sizeof(ARMPACKey
), &err
);
10020 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10021 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10022 sizeof(ARMPACKey
), &err
);
10024 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10025 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10026 sizeof(ARMPACKey
), &err
);
10028 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10029 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10030 sizeof(ARMPACKey
), &err
);
10032 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10033 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10034 sizeof(ARMPACKey
), &err
);
10038 * Some unknown failure in the crypto. The best
10039 * we can do is log it and fail the syscall.
10040 * The real syscall cannot fail this way.
10042 qemu_log_mask(LOG_UNIMP
,
10043 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10044 error_get_pretty(err
));
10046 return -TARGET_EIO
;
10051 return -TARGET_EINVAL
;
10052 #endif /* AARCH64 */
10053 case PR_GET_SECCOMP
:
10054 case PR_SET_SECCOMP
:
10055 /* Disable seccomp to prevent the target disabling syscalls we
10057 return -TARGET_EINVAL
;
10059 /* Most prctl options have no pointer arguments */
10060 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10063 #ifdef TARGET_NR_arch_prctl
10064 case TARGET_NR_arch_prctl
:
10065 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10066 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10071 #ifdef TARGET_NR_pread64
10072 case TARGET_NR_pread64
:
10073 if (regpairs_aligned(cpu_env
, num
)) {
10077 if (arg2
== 0 && arg3
== 0) {
10078 /* Special-case NULL buffer and zero length, which should succeed */
10081 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10083 return -TARGET_EFAULT
;
10086 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10087 unlock_user(p
, arg2
, ret
);
10089 case TARGET_NR_pwrite64
:
10090 if (regpairs_aligned(cpu_env
, num
)) {
10094 if (arg2
== 0 && arg3
== 0) {
10095 /* Special-case NULL buffer and zero length, which should succeed */
10098 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10100 return -TARGET_EFAULT
;
10103 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10104 unlock_user(p
, arg2
, 0);
10107 case TARGET_NR_getcwd
:
10108 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10109 return -TARGET_EFAULT
;
10110 ret
= get_errno(sys_getcwd1(p
, arg2
));
10111 unlock_user(p
, arg1
, ret
);
10113 case TARGET_NR_capget
:
10114 case TARGET_NR_capset
:
10116 struct target_user_cap_header
*target_header
;
10117 struct target_user_cap_data
*target_data
= NULL
;
10118 struct __user_cap_header_struct header
;
10119 struct __user_cap_data_struct data
[2];
10120 struct __user_cap_data_struct
*dataptr
= NULL
;
10121 int i
, target_datalen
;
10122 int data_items
= 1;
10124 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10125 return -TARGET_EFAULT
;
10127 header
.version
= tswap32(target_header
->version
);
10128 header
.pid
= tswap32(target_header
->pid
);
10130 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10131 /* Version 2 and up takes pointer to two user_data structs */
10135 target_datalen
= sizeof(*target_data
) * data_items
;
10138 if (num
== TARGET_NR_capget
) {
10139 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10141 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10143 if (!target_data
) {
10144 unlock_user_struct(target_header
, arg1
, 0);
10145 return -TARGET_EFAULT
;
10148 if (num
== TARGET_NR_capset
) {
10149 for (i
= 0; i
< data_items
; i
++) {
10150 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10151 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10152 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10159 if (num
== TARGET_NR_capget
) {
10160 ret
= get_errno(capget(&header
, dataptr
));
10162 ret
= get_errno(capset(&header
, dataptr
));
10165 /* The kernel always updates version for both capget and capset */
10166 target_header
->version
= tswap32(header
.version
);
10167 unlock_user_struct(target_header
, arg1
, 1);
10170 if (num
== TARGET_NR_capget
) {
10171 for (i
= 0; i
< data_items
; i
++) {
10172 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10173 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10174 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10176 unlock_user(target_data
, arg2
, target_datalen
);
10178 unlock_user(target_data
, arg2
, 0);
10183 case TARGET_NR_sigaltstack
:
10184 return do_sigaltstack(arg1
, arg2
,
10185 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10187 #ifdef CONFIG_SENDFILE
10188 #ifdef TARGET_NR_sendfile
10189 case TARGET_NR_sendfile
:
10191 off_t
*offp
= NULL
;
10194 ret
= get_user_sal(off
, arg3
);
10195 if (is_error(ret
)) {
10200 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10201 if (!is_error(ret
) && arg3
) {
10202 abi_long ret2
= put_user_sal(off
, arg3
);
10203 if (is_error(ret2
)) {
10210 #ifdef TARGET_NR_sendfile64
10211 case TARGET_NR_sendfile64
:
10213 off_t
*offp
= NULL
;
10216 ret
= get_user_s64(off
, arg3
);
10217 if (is_error(ret
)) {
10222 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10223 if (!is_error(ret
) && arg3
) {
10224 abi_long ret2
= put_user_s64(off
, arg3
);
10225 if (is_error(ret2
)) {
10233 #ifdef TARGET_NR_vfork
10234 case TARGET_NR_vfork
:
10235 return get_errno(do_fork(cpu_env
,
10236 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10239 #ifdef TARGET_NR_ugetrlimit
10240 case TARGET_NR_ugetrlimit
:
10242 struct rlimit rlim
;
10243 int resource
= target_to_host_resource(arg1
);
10244 ret
= get_errno(getrlimit(resource
, &rlim
));
10245 if (!is_error(ret
)) {
10246 struct target_rlimit
*target_rlim
;
10247 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10248 return -TARGET_EFAULT
;
10249 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10250 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10251 unlock_user_struct(target_rlim
, arg2
, 1);
10256 #ifdef TARGET_NR_truncate64
10257 case TARGET_NR_truncate64
:
10258 if (!(p
= lock_user_string(arg1
)))
10259 return -TARGET_EFAULT
;
10260 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10261 unlock_user(p
, arg1
, 0);
10264 #ifdef TARGET_NR_ftruncate64
10265 case TARGET_NR_ftruncate64
:
10266 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10268 #ifdef TARGET_NR_stat64
10269 case TARGET_NR_stat64
:
10270 if (!(p
= lock_user_string(arg1
))) {
10271 return -TARGET_EFAULT
;
10273 ret
= get_errno(stat(path(p
), &st
));
10274 unlock_user(p
, arg1
, 0);
10275 if (!is_error(ret
))
10276 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10279 #ifdef TARGET_NR_lstat64
10280 case TARGET_NR_lstat64
:
10281 if (!(p
= lock_user_string(arg1
))) {
10282 return -TARGET_EFAULT
;
10284 ret
= get_errno(lstat(path(p
), &st
));
10285 unlock_user(p
, arg1
, 0);
10286 if (!is_error(ret
))
10287 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10290 #ifdef TARGET_NR_fstat64
10291 case TARGET_NR_fstat64
:
10292 ret
= get_errno(fstat(arg1
, &st
));
10293 if (!is_error(ret
))
10294 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10297 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10298 #ifdef TARGET_NR_fstatat64
10299 case TARGET_NR_fstatat64
:
10301 #ifdef TARGET_NR_newfstatat
10302 case TARGET_NR_newfstatat
:
10304 if (!(p
= lock_user_string(arg2
))) {
10305 return -TARGET_EFAULT
;
10307 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10308 unlock_user(p
, arg2
, 0);
10309 if (!is_error(ret
))
10310 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10313 #if defined(TARGET_NR_statx)
10314 case TARGET_NR_statx
:
10316 struct target_statx
*target_stx
;
10320 p
= lock_user_string(arg2
);
10322 return -TARGET_EFAULT
;
10324 #if defined(__NR_statx)
10327 * It is assumed that struct statx is architecture independent.
10329 struct target_statx host_stx
;
10332 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
10333 if (!is_error(ret
)) {
10334 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
10335 unlock_user(p
, arg2
, 0);
10336 return -TARGET_EFAULT
;
10340 if (ret
!= -TARGET_ENOSYS
) {
10341 unlock_user(p
, arg2
, 0);
10346 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
10347 unlock_user(p
, arg2
, 0);
10349 if (!is_error(ret
)) {
10350 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
10351 return -TARGET_EFAULT
;
10353 memset(target_stx
, 0, sizeof(*target_stx
));
10354 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
10355 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
10356 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
10357 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
10358 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
10359 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
10360 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
10361 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
10362 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
10363 __put_user(st
.st_size
, &target_stx
->stx_size
);
10364 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
10365 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
10366 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
10367 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
10368 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
10369 unlock_user_struct(target_stx
, arg5
, 1);
10374 #ifdef TARGET_NR_lchown
10375 case TARGET_NR_lchown
:
10376 if (!(p
= lock_user_string(arg1
)))
10377 return -TARGET_EFAULT
;
10378 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10379 unlock_user(p
, arg1
, 0);
10382 #ifdef TARGET_NR_getuid
10383 case TARGET_NR_getuid
:
10384 return get_errno(high2lowuid(getuid()));
10386 #ifdef TARGET_NR_getgid
10387 case TARGET_NR_getgid
:
10388 return get_errno(high2lowgid(getgid()));
10390 #ifdef TARGET_NR_geteuid
10391 case TARGET_NR_geteuid
:
10392 return get_errno(high2lowuid(geteuid()));
10394 #ifdef TARGET_NR_getegid
10395 case TARGET_NR_getegid
:
10396 return get_errno(high2lowgid(getegid()));
10398 case TARGET_NR_setreuid
:
10399 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10400 case TARGET_NR_setregid
:
10401 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10402 case TARGET_NR_getgroups
:
10404 int gidsetsize
= arg1
;
10405 target_id
*target_grouplist
;
10409 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10410 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10411 if (gidsetsize
== 0)
10413 if (!is_error(ret
)) {
10414 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10415 if (!target_grouplist
)
10416 return -TARGET_EFAULT
;
10417 for(i
= 0;i
< ret
; i
++)
10418 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10419 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10423 case TARGET_NR_setgroups
:
10425 int gidsetsize
= arg1
;
10426 target_id
*target_grouplist
;
10427 gid_t
*grouplist
= NULL
;
10430 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10431 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10432 if (!target_grouplist
) {
10433 return -TARGET_EFAULT
;
10435 for (i
= 0; i
< gidsetsize
; i
++) {
10436 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10438 unlock_user(target_grouplist
, arg2
, 0);
10440 return get_errno(setgroups(gidsetsize
, grouplist
));
10442 case TARGET_NR_fchown
:
10443 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10444 #if defined(TARGET_NR_fchownat)
10445 case TARGET_NR_fchownat
:
10446 if (!(p
= lock_user_string(arg2
)))
10447 return -TARGET_EFAULT
;
10448 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10449 low2highgid(arg4
), arg5
));
10450 unlock_user(p
, arg2
, 0);
10453 #ifdef TARGET_NR_setresuid
10454 case TARGET_NR_setresuid
:
10455 return get_errno(sys_setresuid(low2highuid(arg1
),
10457 low2highuid(arg3
)));
10459 #ifdef TARGET_NR_getresuid
10460 case TARGET_NR_getresuid
:
10462 uid_t ruid
, euid
, suid
;
10463 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10464 if (!is_error(ret
)) {
10465 if (put_user_id(high2lowuid(ruid
), arg1
)
10466 || put_user_id(high2lowuid(euid
), arg2
)
10467 || put_user_id(high2lowuid(suid
), arg3
))
10468 return -TARGET_EFAULT
;
10473 #ifdef TARGET_NR_getresgid
10474 case TARGET_NR_setresgid
:
10475 return get_errno(sys_setresgid(low2highgid(arg1
),
10477 low2highgid(arg3
)));
10479 #ifdef TARGET_NR_getresgid
10480 case TARGET_NR_getresgid
:
10482 gid_t rgid
, egid
, sgid
;
10483 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10484 if (!is_error(ret
)) {
10485 if (put_user_id(high2lowgid(rgid
), arg1
)
10486 || put_user_id(high2lowgid(egid
), arg2
)
10487 || put_user_id(high2lowgid(sgid
), arg3
))
10488 return -TARGET_EFAULT
;
10493 #ifdef TARGET_NR_chown
10494 case TARGET_NR_chown
:
10495 if (!(p
= lock_user_string(arg1
)))
10496 return -TARGET_EFAULT
;
10497 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10498 unlock_user(p
, arg1
, 0);
10501 case TARGET_NR_setuid
:
10502 return get_errno(sys_setuid(low2highuid(arg1
)));
10503 case TARGET_NR_setgid
:
10504 return get_errno(sys_setgid(low2highgid(arg1
)));
10505 case TARGET_NR_setfsuid
:
10506 return get_errno(setfsuid(arg1
));
10507 case TARGET_NR_setfsgid
:
10508 return get_errno(setfsgid(arg1
));
10510 #ifdef TARGET_NR_lchown32
10511 case TARGET_NR_lchown32
:
10512 if (!(p
= lock_user_string(arg1
)))
10513 return -TARGET_EFAULT
;
10514 ret
= get_errno(lchown(p
, arg2
, arg3
));
10515 unlock_user(p
, arg1
, 0);
10518 #ifdef TARGET_NR_getuid32
10519 case TARGET_NR_getuid32
:
10520 return get_errno(getuid());
10523 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10524 /* Alpha specific */
10525 case TARGET_NR_getxuid
:
10529 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10531 return get_errno(getuid());
10533 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10534 /* Alpha specific */
10535 case TARGET_NR_getxgid
:
10539 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10541 return get_errno(getgid());
10543 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10544 /* Alpha specific */
10545 case TARGET_NR_osf_getsysinfo
:
10546 ret
= -TARGET_EOPNOTSUPP
;
10548 case TARGET_GSI_IEEE_FP_CONTROL
:
10550 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10551 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
10553 swcr
&= ~SWCR_STATUS_MASK
;
10554 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10556 if (put_user_u64 (swcr
, arg2
))
10557 return -TARGET_EFAULT
;
10562 /* case GSI_IEEE_STATE_AT_SIGNAL:
10563 -- Not implemented in linux kernel.
10565 -- Retrieves current unaligned access state; not much used.
10566 case GSI_PROC_TYPE:
10567 -- Retrieves implver information; surely not used.
10568 case GSI_GET_HWRPB:
10569 -- Grabs a copy of the HWRPB; surely not used.
10574 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10575 /* Alpha specific */
10576 case TARGET_NR_osf_setsysinfo
:
10577 ret
= -TARGET_EOPNOTSUPP
;
10579 case TARGET_SSI_IEEE_FP_CONTROL
:
10581 uint64_t swcr
, fpcr
;
10583 if (get_user_u64 (swcr
, arg2
)) {
10584 return -TARGET_EFAULT
;
10588 * The kernel calls swcr_update_status to update the
10589 * status bits from the fpcr at every point that it
10590 * could be queried. Therefore, we store the status
10591 * bits only in FPCR.
10593 ((CPUAlphaState
*)cpu_env
)->swcr
10594 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
10596 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10597 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
10598 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
10599 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10604 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10606 uint64_t exc
, fpcr
, fex
;
10608 if (get_user_u64(exc
, arg2
)) {
10609 return -TARGET_EFAULT
;
10611 exc
&= SWCR_STATUS_MASK
;
10612 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10614 /* Old exceptions are not signaled. */
10615 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
10617 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
10618 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
10620 /* Update the hardware fpcr. */
10621 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
10622 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10625 int si_code
= TARGET_FPE_FLTUNK
;
10626 target_siginfo_t info
;
10628 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
10629 si_code
= TARGET_FPE_FLTUND
;
10631 if (fex
& SWCR_TRAP_ENABLE_INE
) {
10632 si_code
= TARGET_FPE_FLTRES
;
10634 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
10635 si_code
= TARGET_FPE_FLTUND
;
10637 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
10638 si_code
= TARGET_FPE_FLTOVF
;
10640 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
10641 si_code
= TARGET_FPE_FLTDIV
;
10643 if (fex
& SWCR_TRAP_ENABLE_INV
) {
10644 si_code
= TARGET_FPE_FLTINV
;
10647 info
.si_signo
= SIGFPE
;
10649 info
.si_code
= si_code
;
10650 info
._sifields
._sigfault
._addr
10651 = ((CPUArchState
*)cpu_env
)->pc
;
10652 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10653 QEMU_SI_FAULT
, &info
);
10659 /* case SSI_NVPAIRS:
10660 -- Used with SSIN_UACPROC to enable unaligned accesses.
10661 case SSI_IEEE_STATE_AT_SIGNAL:
10662 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10663 -- Not implemented in linux kernel
10668 #ifdef TARGET_NR_osf_sigprocmask
10669 /* Alpha specific. */
10670 case TARGET_NR_osf_sigprocmask
:
10674 sigset_t set
, oldset
;
10677 case TARGET_SIG_BLOCK
:
10680 case TARGET_SIG_UNBLOCK
:
10683 case TARGET_SIG_SETMASK
:
10687 return -TARGET_EINVAL
;
10690 target_to_host_old_sigset(&set
, &mask
);
10691 ret
= do_sigprocmask(how
, &set
, &oldset
);
10693 host_to_target_old_sigset(&mask
, &oldset
);
10700 #ifdef TARGET_NR_getgid32
10701 case TARGET_NR_getgid32
:
10702 return get_errno(getgid());
10704 #ifdef TARGET_NR_geteuid32
10705 case TARGET_NR_geteuid32
:
10706 return get_errno(geteuid());
10708 #ifdef TARGET_NR_getegid32
10709 case TARGET_NR_getegid32
:
10710 return get_errno(getegid());
10712 #ifdef TARGET_NR_setreuid32
10713 case TARGET_NR_setreuid32
:
10714 return get_errno(setreuid(arg1
, arg2
));
10716 #ifdef TARGET_NR_setregid32
10717 case TARGET_NR_setregid32
:
10718 return get_errno(setregid(arg1
, arg2
));
10720 #ifdef TARGET_NR_getgroups32
10721 case TARGET_NR_getgroups32
:
10723 int gidsetsize
= arg1
;
10724 uint32_t *target_grouplist
;
10728 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10729 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10730 if (gidsetsize
== 0)
10732 if (!is_error(ret
)) {
10733 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10734 if (!target_grouplist
) {
10735 return -TARGET_EFAULT
;
10737 for(i
= 0;i
< ret
; i
++)
10738 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10739 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10744 #ifdef TARGET_NR_setgroups32
10745 case TARGET_NR_setgroups32
:
10747 int gidsetsize
= arg1
;
10748 uint32_t *target_grouplist
;
10752 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10753 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10754 if (!target_grouplist
) {
10755 return -TARGET_EFAULT
;
10757 for(i
= 0;i
< gidsetsize
; i
++)
10758 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10759 unlock_user(target_grouplist
, arg2
, 0);
10760 return get_errno(setgroups(gidsetsize
, grouplist
));
10763 #ifdef TARGET_NR_fchown32
10764 case TARGET_NR_fchown32
:
10765 return get_errno(fchown(arg1
, arg2
, arg3
));
10767 #ifdef TARGET_NR_setresuid32
10768 case TARGET_NR_setresuid32
:
10769 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10771 #ifdef TARGET_NR_getresuid32
10772 case TARGET_NR_getresuid32
:
10774 uid_t ruid
, euid
, suid
;
10775 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10776 if (!is_error(ret
)) {
10777 if (put_user_u32(ruid
, arg1
)
10778 || put_user_u32(euid
, arg2
)
10779 || put_user_u32(suid
, arg3
))
10780 return -TARGET_EFAULT
;
10785 #ifdef TARGET_NR_setresgid32
10786 case TARGET_NR_setresgid32
:
10787 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10789 #ifdef TARGET_NR_getresgid32
10790 case TARGET_NR_getresgid32
:
10792 gid_t rgid
, egid
, sgid
;
10793 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10794 if (!is_error(ret
)) {
10795 if (put_user_u32(rgid
, arg1
)
10796 || put_user_u32(egid
, arg2
)
10797 || put_user_u32(sgid
, arg3
))
10798 return -TARGET_EFAULT
;
10803 #ifdef TARGET_NR_chown32
10804 case TARGET_NR_chown32
:
10805 if (!(p
= lock_user_string(arg1
)))
10806 return -TARGET_EFAULT
;
10807 ret
= get_errno(chown(p
, arg2
, arg3
));
10808 unlock_user(p
, arg1
, 0);
10811 #ifdef TARGET_NR_setuid32
10812 case TARGET_NR_setuid32
:
10813 return get_errno(sys_setuid(arg1
));
10815 #ifdef TARGET_NR_setgid32
10816 case TARGET_NR_setgid32
:
10817 return get_errno(sys_setgid(arg1
));
10819 #ifdef TARGET_NR_setfsuid32
10820 case TARGET_NR_setfsuid32
:
10821 return get_errno(setfsuid(arg1
));
10823 #ifdef TARGET_NR_setfsgid32
10824 case TARGET_NR_setfsgid32
:
10825 return get_errno(setfsgid(arg1
));
10827 #ifdef TARGET_NR_mincore
10828 case TARGET_NR_mincore
:
10830 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
10832 return -TARGET_ENOMEM
;
10834 p
= lock_user_string(arg3
);
10836 ret
= -TARGET_EFAULT
;
10838 ret
= get_errno(mincore(a
, arg2
, p
));
10839 unlock_user(p
, arg3
, ret
);
10841 unlock_user(a
, arg1
, 0);
10845 #ifdef TARGET_NR_arm_fadvise64_64
10846 case TARGET_NR_arm_fadvise64_64
:
10847 /* arm_fadvise64_64 looks like fadvise64_64 but
10848 * with different argument order: fd, advice, offset, len
10849 * rather than the usual fd, offset, len, advice.
10850 * Note that offset and len are both 64-bit so appear as
10851 * pairs of 32-bit registers.
10853 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10854 target_offset64(arg5
, arg6
), arg2
);
10855 return -host_to_target_errno(ret
);
10858 #if TARGET_ABI_BITS == 32
10860 #ifdef TARGET_NR_fadvise64_64
10861 case TARGET_NR_fadvise64_64
:
10862 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
10863 /* 6 args: fd, advice, offset (high, low), len (high, low) */
10871 /* 6 args: fd, offset (high, low), len (high, low), advice */
10872 if (regpairs_aligned(cpu_env
, num
)) {
10873 /* offset is in (3,4), len in (5,6) and advice in 7 */
10881 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
10882 target_offset64(arg4
, arg5
), arg6
);
10883 return -host_to_target_errno(ret
);
10886 #ifdef TARGET_NR_fadvise64
10887 case TARGET_NR_fadvise64
:
10888 /* 5 args: fd, offset (high, low), len, advice */
10889 if (regpairs_aligned(cpu_env
, num
)) {
10890 /* offset is in (3,4), len in 5 and advice in 6 */
10896 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
10897 return -host_to_target_errno(ret
);
10900 #else /* not a 32-bit ABI */
10901 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10902 #ifdef TARGET_NR_fadvise64_64
10903 case TARGET_NR_fadvise64_64
:
10905 #ifdef TARGET_NR_fadvise64
10906 case TARGET_NR_fadvise64
:
10908 #ifdef TARGET_S390X
10910 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10911 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10912 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10913 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10917 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10919 #endif /* end of 64-bit ABI fadvise handling */
10921 #ifdef TARGET_NR_madvise
10922 case TARGET_NR_madvise
:
10923 /* A straight passthrough may not be safe because qemu sometimes
10924 turns private file-backed mappings into anonymous mappings.
10925 This will break MADV_DONTNEED.
10926 This is a hint, so ignoring and returning success is ok. */
10929 #if TARGET_ABI_BITS == 32
10930 case TARGET_NR_fcntl64
:
10934 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10935 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10938 if (!((CPUARMState
*)cpu_env
)->eabi
) {
10939 copyfrom
= copy_from_user_oabi_flock64
;
10940 copyto
= copy_to_user_oabi_flock64
;
10944 cmd
= target_to_host_fcntl_cmd(arg2
);
10945 if (cmd
== -TARGET_EINVAL
) {
10950 case TARGET_F_GETLK64
:
10951 ret
= copyfrom(&fl
, arg3
);
10955 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10957 ret
= copyto(arg3
, &fl
);
10961 case TARGET_F_SETLK64
:
10962 case TARGET_F_SETLKW64
:
10963 ret
= copyfrom(&fl
, arg3
);
10967 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10970 ret
= do_fcntl(arg1
, arg2
, arg3
);
10976 #ifdef TARGET_NR_cacheflush
10977 case TARGET_NR_cacheflush
:
10978 /* self-modifying code is handled automatically, so nothing needed */
10981 #ifdef TARGET_NR_getpagesize
10982 case TARGET_NR_getpagesize
:
10983 return TARGET_PAGE_SIZE
;
10985 case TARGET_NR_gettid
:
10986 return get_errno(sys_gettid());
10987 #ifdef TARGET_NR_readahead
10988 case TARGET_NR_readahead
:
10989 #if TARGET_ABI_BITS == 32
10990 if (regpairs_aligned(cpu_env
, num
)) {
10995 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
10997 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11002 #ifdef TARGET_NR_setxattr
11003 case TARGET_NR_listxattr
:
11004 case TARGET_NR_llistxattr
:
11008 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11010 return -TARGET_EFAULT
;
11013 p
= lock_user_string(arg1
);
11015 if (num
== TARGET_NR_listxattr
) {
11016 ret
= get_errno(listxattr(p
, b
, arg3
));
11018 ret
= get_errno(llistxattr(p
, b
, arg3
));
11021 ret
= -TARGET_EFAULT
;
11023 unlock_user(p
, arg1
, 0);
11024 unlock_user(b
, arg2
, arg3
);
11027 case TARGET_NR_flistxattr
:
11031 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11033 return -TARGET_EFAULT
;
11036 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11037 unlock_user(b
, arg2
, arg3
);
11040 case TARGET_NR_setxattr
:
11041 case TARGET_NR_lsetxattr
:
11043 void *p
, *n
, *v
= 0;
11045 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11047 return -TARGET_EFAULT
;
11050 p
= lock_user_string(arg1
);
11051 n
= lock_user_string(arg2
);
11053 if (num
== TARGET_NR_setxattr
) {
11054 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11056 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11059 ret
= -TARGET_EFAULT
;
11061 unlock_user(p
, arg1
, 0);
11062 unlock_user(n
, arg2
, 0);
11063 unlock_user(v
, arg3
, 0);
11066 case TARGET_NR_fsetxattr
:
11070 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11072 return -TARGET_EFAULT
;
11075 n
= lock_user_string(arg2
);
11077 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11079 ret
= -TARGET_EFAULT
;
11081 unlock_user(n
, arg2
, 0);
11082 unlock_user(v
, arg3
, 0);
11085 case TARGET_NR_getxattr
:
11086 case TARGET_NR_lgetxattr
:
11088 void *p
, *n
, *v
= 0;
11090 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11092 return -TARGET_EFAULT
;
11095 p
= lock_user_string(arg1
);
11096 n
= lock_user_string(arg2
);
11098 if (num
== TARGET_NR_getxattr
) {
11099 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11101 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11104 ret
= -TARGET_EFAULT
;
11106 unlock_user(p
, arg1
, 0);
11107 unlock_user(n
, arg2
, 0);
11108 unlock_user(v
, arg3
, arg4
);
11111 case TARGET_NR_fgetxattr
:
11115 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11117 return -TARGET_EFAULT
;
11120 n
= lock_user_string(arg2
);
11122 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11124 ret
= -TARGET_EFAULT
;
11126 unlock_user(n
, arg2
, 0);
11127 unlock_user(v
, arg3
, arg4
);
11130 case TARGET_NR_removexattr
:
11131 case TARGET_NR_lremovexattr
:
11134 p
= lock_user_string(arg1
);
11135 n
= lock_user_string(arg2
);
11137 if (num
== TARGET_NR_removexattr
) {
11138 ret
= get_errno(removexattr(p
, n
));
11140 ret
= get_errno(lremovexattr(p
, n
));
11143 ret
= -TARGET_EFAULT
;
11145 unlock_user(p
, arg1
, 0);
11146 unlock_user(n
, arg2
, 0);
11149 case TARGET_NR_fremovexattr
:
11152 n
= lock_user_string(arg2
);
11154 ret
= get_errno(fremovexattr(arg1
, n
));
11156 ret
= -TARGET_EFAULT
;
11158 unlock_user(n
, arg2
, 0);
11162 #endif /* CONFIG_ATTR */
11163 #ifdef TARGET_NR_set_thread_area
11164 case TARGET_NR_set_thread_area
:
11165 #if defined(TARGET_MIPS)
11166 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11168 #elif defined(TARGET_CRIS)
11170 ret
= -TARGET_EINVAL
;
11172 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11176 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11177 return do_set_thread_area(cpu_env
, arg1
);
11178 #elif defined(TARGET_M68K)
11180 TaskState
*ts
= cpu
->opaque
;
11181 ts
->tp_value
= arg1
;
11185 return -TARGET_ENOSYS
;
11188 #ifdef TARGET_NR_get_thread_area
11189 case TARGET_NR_get_thread_area
:
11190 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11191 return do_get_thread_area(cpu_env
, arg1
);
11192 #elif defined(TARGET_M68K)
11194 TaskState
*ts
= cpu
->opaque
;
11195 return ts
->tp_value
;
11198 return -TARGET_ENOSYS
;
11201 #ifdef TARGET_NR_getdomainname
11202 case TARGET_NR_getdomainname
:
11203 return -TARGET_ENOSYS
;
11206 #ifdef TARGET_NR_clock_settime
11207 case TARGET_NR_clock_settime
:
11209 struct timespec ts
;
11211 ret
= target_to_host_timespec(&ts
, arg2
);
11212 if (!is_error(ret
)) {
11213 ret
= get_errno(clock_settime(arg1
, &ts
));
11218 #ifdef TARGET_NR_clock_gettime
11219 case TARGET_NR_clock_gettime
:
11221 struct timespec ts
;
11222 ret
= get_errno(clock_gettime(arg1
, &ts
));
11223 if (!is_error(ret
)) {
11224 ret
= host_to_target_timespec(arg2
, &ts
);
11229 #ifdef TARGET_NR_clock_getres
11230 case TARGET_NR_clock_getres
:
11232 struct timespec ts
;
11233 ret
= get_errno(clock_getres(arg1
, &ts
));
11234 if (!is_error(ret
)) {
11235 host_to_target_timespec(arg2
, &ts
);
11240 #ifdef TARGET_NR_clock_nanosleep
11241 case TARGET_NR_clock_nanosleep
:
11243 struct timespec ts
;
11244 target_to_host_timespec(&ts
, arg3
);
11245 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11246 &ts
, arg4
? &ts
: NULL
));
11248 host_to_target_timespec(arg4
, &ts
);
11250 #if defined(TARGET_PPC)
11251 /* clock_nanosleep is odd in that it returns positive errno values.
11252 * On PPC, CR0 bit 3 should be set in such a situation. */
11253 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11254 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11261 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11262 case TARGET_NR_set_tid_address
:
11263 return get_errno(set_tid_address((int *)g2h(arg1
)));
11266 case TARGET_NR_tkill
:
11267 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11269 case TARGET_NR_tgkill
:
11270 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11271 target_to_host_signal(arg3
)));
11273 #ifdef TARGET_NR_set_robust_list
11274 case TARGET_NR_set_robust_list
:
11275 case TARGET_NR_get_robust_list
:
11276 /* The ABI for supporting robust futexes has userspace pass
11277 * the kernel a pointer to a linked list which is updated by
11278 * userspace after the syscall; the list is walked by the kernel
11279 * when the thread exits. Since the linked list in QEMU guest
11280 * memory isn't a valid linked list for the host and we have
11281 * no way to reliably intercept the thread-death event, we can't
11282 * support these. Silently return ENOSYS so that guest userspace
11283 * falls back to a non-robust futex implementation (which should
11284 * be OK except in the corner case of the guest crashing while
11285 * holding a mutex that is shared with another process via
11288 return -TARGET_ENOSYS
;
11291 #if defined(TARGET_NR_utimensat)
11292 case TARGET_NR_utimensat
:
11294 struct timespec
*tsp
, ts
[2];
11298 target_to_host_timespec(ts
, arg3
);
11299 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11303 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11305 if (!(p
= lock_user_string(arg2
))) {
11306 return -TARGET_EFAULT
;
11308 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11309 unlock_user(p
, arg2
, 0);
11314 case TARGET_NR_futex
:
11315 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11316 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11317 case TARGET_NR_inotify_init
:
11318 ret
= get_errno(sys_inotify_init());
11320 fd_trans_register(ret
, &target_inotify_trans
);
11324 #ifdef CONFIG_INOTIFY1
11325 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11326 case TARGET_NR_inotify_init1
:
11327 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11328 fcntl_flags_tbl
)));
11330 fd_trans_register(ret
, &target_inotify_trans
);
11335 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11336 case TARGET_NR_inotify_add_watch
:
11337 p
= lock_user_string(arg2
);
11338 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11339 unlock_user(p
, arg2
, 0);
11342 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11343 case TARGET_NR_inotify_rm_watch
:
11344 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11347 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11348 case TARGET_NR_mq_open
:
11350 struct mq_attr posix_mq_attr
;
11351 struct mq_attr
*pposix_mq_attr
;
11354 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11355 pposix_mq_attr
= NULL
;
11357 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11358 return -TARGET_EFAULT
;
11360 pposix_mq_attr
= &posix_mq_attr
;
11362 p
= lock_user_string(arg1
- 1);
11364 return -TARGET_EFAULT
;
11366 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11367 unlock_user (p
, arg1
, 0);
11371 case TARGET_NR_mq_unlink
:
11372 p
= lock_user_string(arg1
- 1);
11374 return -TARGET_EFAULT
;
11376 ret
= get_errno(mq_unlink(p
));
11377 unlock_user (p
, arg1
, 0);
11380 case TARGET_NR_mq_timedsend
:
11382 struct timespec ts
;
11384 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11386 target_to_host_timespec(&ts
, arg5
);
11387 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11388 host_to_target_timespec(arg5
, &ts
);
11390 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11392 unlock_user (p
, arg2
, arg3
);
11396 case TARGET_NR_mq_timedreceive
:
11398 struct timespec ts
;
11401 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11403 target_to_host_timespec(&ts
, arg5
);
11404 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11406 host_to_target_timespec(arg5
, &ts
);
11408 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11411 unlock_user (p
, arg2
, arg3
);
11413 put_user_u32(prio
, arg4
);
11417 /* Not implemented for now... */
11418 /* case TARGET_NR_mq_notify: */
11421 case TARGET_NR_mq_getsetattr
:
11423 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11426 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11427 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
11428 &posix_mq_attr_out
));
11429 } else if (arg3
!= 0) {
11430 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
11432 if (ret
== 0 && arg3
!= 0) {
11433 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11439 #ifdef CONFIG_SPLICE
11440 #ifdef TARGET_NR_tee
11441 case TARGET_NR_tee
:
11443 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11447 #ifdef TARGET_NR_splice
11448 case TARGET_NR_splice
:
11450 loff_t loff_in
, loff_out
;
11451 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11453 if (get_user_u64(loff_in
, arg2
)) {
11454 return -TARGET_EFAULT
;
11456 ploff_in
= &loff_in
;
11459 if (get_user_u64(loff_out
, arg4
)) {
11460 return -TARGET_EFAULT
;
11462 ploff_out
= &loff_out
;
11464 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11466 if (put_user_u64(loff_in
, arg2
)) {
11467 return -TARGET_EFAULT
;
11471 if (put_user_u64(loff_out
, arg4
)) {
11472 return -TARGET_EFAULT
;
11478 #ifdef TARGET_NR_vmsplice
11479 case TARGET_NR_vmsplice
:
11481 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11483 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11484 unlock_iovec(vec
, arg2
, arg3
, 0);
11486 ret
= -host_to_target_errno(errno
);
11491 #endif /* CONFIG_SPLICE */
11492 #ifdef CONFIG_EVENTFD
11493 #if defined(TARGET_NR_eventfd)
11494 case TARGET_NR_eventfd
:
11495 ret
= get_errno(eventfd(arg1
, 0));
11497 fd_trans_register(ret
, &target_eventfd_trans
);
11501 #if defined(TARGET_NR_eventfd2)
11502 case TARGET_NR_eventfd2
:
11504 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11505 if (arg2
& TARGET_O_NONBLOCK
) {
11506 host_flags
|= O_NONBLOCK
;
11508 if (arg2
& TARGET_O_CLOEXEC
) {
11509 host_flags
|= O_CLOEXEC
;
11511 ret
= get_errno(eventfd(arg1
, host_flags
));
11513 fd_trans_register(ret
, &target_eventfd_trans
);
11518 #endif /* CONFIG_EVENTFD */
11519 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11520 case TARGET_NR_fallocate
:
11521 #if TARGET_ABI_BITS == 32
11522 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11523 target_offset64(arg5
, arg6
)));
11525 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11529 #if defined(CONFIG_SYNC_FILE_RANGE)
11530 #if defined(TARGET_NR_sync_file_range)
11531 case TARGET_NR_sync_file_range
:
11532 #if TARGET_ABI_BITS == 32
11533 #if defined(TARGET_MIPS)
11534 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11535 target_offset64(arg5
, arg6
), arg7
));
11537 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11538 target_offset64(arg4
, arg5
), arg6
));
11539 #endif /* !TARGET_MIPS */
11541 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11545 #if defined(TARGET_NR_sync_file_range2)
11546 case TARGET_NR_sync_file_range2
:
11547 /* This is like sync_file_range but the arguments are reordered */
11548 #if TARGET_ABI_BITS == 32
11549 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11550 target_offset64(arg5
, arg6
), arg2
));
11552 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11557 #if defined(TARGET_NR_signalfd4)
11558 case TARGET_NR_signalfd4
:
11559 return do_signalfd4(arg1
, arg2
, arg4
);
11561 #if defined(TARGET_NR_signalfd)
11562 case TARGET_NR_signalfd
:
11563 return do_signalfd4(arg1
, arg2
, 0);
11565 #if defined(CONFIG_EPOLL)
11566 #if defined(TARGET_NR_epoll_create)
11567 case TARGET_NR_epoll_create
:
11568 return get_errno(epoll_create(arg1
));
11570 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11571 case TARGET_NR_epoll_create1
:
11572 return get_errno(epoll_create1(arg1
));
11574 #if defined(TARGET_NR_epoll_ctl)
11575 case TARGET_NR_epoll_ctl
:
11577 struct epoll_event ep
;
11578 struct epoll_event
*epp
= 0;
11580 struct target_epoll_event
*target_ep
;
11581 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11582 return -TARGET_EFAULT
;
11584 ep
.events
= tswap32(target_ep
->events
);
11585 /* The epoll_data_t union is just opaque data to the kernel,
11586 * so we transfer all 64 bits across and need not worry what
11587 * actual data type it is.
11589 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11590 unlock_user_struct(target_ep
, arg4
, 0);
11593 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11597 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11598 #if defined(TARGET_NR_epoll_wait)
11599 case TARGET_NR_epoll_wait
:
11601 #if defined(TARGET_NR_epoll_pwait)
11602 case TARGET_NR_epoll_pwait
:
11605 struct target_epoll_event
*target_ep
;
11606 struct epoll_event
*ep
;
11608 int maxevents
= arg3
;
11609 int timeout
= arg4
;
11611 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11612 return -TARGET_EINVAL
;
11615 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11616 maxevents
* sizeof(struct target_epoll_event
), 1);
11618 return -TARGET_EFAULT
;
11621 ep
= g_try_new(struct epoll_event
, maxevents
);
11623 unlock_user(target_ep
, arg2
, 0);
11624 return -TARGET_ENOMEM
;
11628 #if defined(TARGET_NR_epoll_pwait)
11629 case TARGET_NR_epoll_pwait
:
11631 target_sigset_t
*target_set
;
11632 sigset_t _set
, *set
= &_set
;
11635 if (arg6
!= sizeof(target_sigset_t
)) {
11636 ret
= -TARGET_EINVAL
;
11640 target_set
= lock_user(VERIFY_READ
, arg5
,
11641 sizeof(target_sigset_t
), 1);
11643 ret
= -TARGET_EFAULT
;
11646 target_to_host_sigset(set
, target_set
);
11647 unlock_user(target_set
, arg5
, 0);
11652 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11653 set
, SIGSET_T_SIZE
));
11657 #if defined(TARGET_NR_epoll_wait)
11658 case TARGET_NR_epoll_wait
:
11659 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11664 ret
= -TARGET_ENOSYS
;
11666 if (!is_error(ret
)) {
11668 for (i
= 0; i
< ret
; i
++) {
11669 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11670 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11672 unlock_user(target_ep
, arg2
,
11673 ret
* sizeof(struct target_epoll_event
));
11675 unlock_user(target_ep
, arg2
, 0);
11682 #ifdef TARGET_NR_prlimit64
11683 case TARGET_NR_prlimit64
:
11685 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11686 struct target_rlimit64
*target_rnew
, *target_rold
;
11687 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11688 int resource
= target_to_host_resource(arg2
);
11690 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11691 return -TARGET_EFAULT
;
11693 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11694 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11695 unlock_user_struct(target_rnew
, arg3
, 0);
11699 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11700 if (!is_error(ret
) && arg4
) {
11701 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11702 return -TARGET_EFAULT
;
11704 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11705 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11706 unlock_user_struct(target_rold
, arg4
, 1);
11711 #ifdef TARGET_NR_gethostname
11712 case TARGET_NR_gethostname
:
11714 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11716 ret
= get_errno(gethostname(name
, arg2
));
11717 unlock_user(name
, arg1
, arg2
);
11719 ret
= -TARGET_EFAULT
;
11724 #ifdef TARGET_NR_atomic_cmpxchg_32
11725 case TARGET_NR_atomic_cmpxchg_32
:
11727 /* should use start_exclusive from main.c */
11728 abi_ulong mem_value
;
11729 if (get_user_u32(mem_value
, arg6
)) {
11730 target_siginfo_t info
;
11731 info
.si_signo
= SIGSEGV
;
11733 info
.si_code
= TARGET_SEGV_MAPERR
;
11734 info
._sifields
._sigfault
._addr
= arg6
;
11735 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11736 QEMU_SI_FAULT
, &info
);
11740 if (mem_value
== arg2
)
11741 put_user_u32(arg1
, arg6
);
11745 #ifdef TARGET_NR_atomic_barrier
11746 case TARGET_NR_atomic_barrier
:
11747 /* Like the kernel implementation and the
11748 qemu arm barrier, no-op this? */
11752 #ifdef TARGET_NR_timer_create
11753 case TARGET_NR_timer_create
:
11755 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11757 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11760 int timer_index
= next_free_host_timer();
11762 if (timer_index
< 0) {
11763 ret
= -TARGET_EAGAIN
;
11765 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11768 phost_sevp
= &host_sevp
;
11769 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11775 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11779 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11780 return -TARGET_EFAULT
;
11788 #ifdef TARGET_NR_timer_settime
11789 case TARGET_NR_timer_settime
:
11791 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11792 * struct itimerspec * old_value */
11793 target_timer_t timerid
= get_timer_id(arg1
);
11797 } else if (arg3
== 0) {
11798 ret
= -TARGET_EINVAL
;
11800 timer_t htimer
= g_posix_timers
[timerid
];
11801 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11803 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
11804 return -TARGET_EFAULT
;
11807 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11808 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
11809 return -TARGET_EFAULT
;
11816 #ifdef TARGET_NR_timer_gettime
11817 case TARGET_NR_timer_gettime
:
11819 /* args: timer_t timerid, struct itimerspec *curr_value */
11820 target_timer_t timerid
= get_timer_id(arg1
);
11824 } else if (!arg2
) {
11825 ret
= -TARGET_EFAULT
;
11827 timer_t htimer
= g_posix_timers
[timerid
];
11828 struct itimerspec hspec
;
11829 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11831 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11832 ret
= -TARGET_EFAULT
;
11839 #ifdef TARGET_NR_timer_getoverrun
11840 case TARGET_NR_timer_getoverrun
:
11842 /* args: timer_t timerid */
11843 target_timer_t timerid
= get_timer_id(arg1
);
11848 timer_t htimer
= g_posix_timers
[timerid
];
11849 ret
= get_errno(timer_getoverrun(htimer
));
11855 #ifdef TARGET_NR_timer_delete
11856 case TARGET_NR_timer_delete
:
11858 /* args: timer_t timerid */
11859 target_timer_t timerid
= get_timer_id(arg1
);
11864 timer_t htimer
= g_posix_timers
[timerid
];
11865 ret
= get_errno(timer_delete(htimer
));
11866 g_posix_timers
[timerid
] = 0;
11872 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11873 case TARGET_NR_timerfd_create
:
11874 return get_errno(timerfd_create(arg1
,
11875 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11878 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11879 case TARGET_NR_timerfd_gettime
:
11881 struct itimerspec its_curr
;
11883 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11885 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11886 return -TARGET_EFAULT
;
11892 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11893 case TARGET_NR_timerfd_settime
:
11895 struct itimerspec its_new
, its_old
, *p_new
;
11898 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11899 return -TARGET_EFAULT
;
11906 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11908 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11909 return -TARGET_EFAULT
;
11915 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11916 case TARGET_NR_ioprio_get
:
11917 return get_errno(ioprio_get(arg1
, arg2
));
11920 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11921 case TARGET_NR_ioprio_set
:
11922 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
11925 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11926 case TARGET_NR_setns
:
11927 return get_errno(setns(arg1
, arg2
));
11929 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11930 case TARGET_NR_unshare
:
11931 return get_errno(unshare(arg1
));
11933 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11934 case TARGET_NR_kcmp
:
11935 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
11937 #ifdef TARGET_NR_swapcontext
11938 case TARGET_NR_swapcontext
:
11939 /* PowerPC specific. */
11940 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
11942 #ifdef TARGET_NR_memfd_create
11943 case TARGET_NR_memfd_create
:
11944 p
= lock_user_string(arg1
);
11946 return -TARGET_EFAULT
;
11948 ret
= get_errno(memfd_create(p
, arg2
));
11949 fd_trans_unregister(ret
);
11950 unlock_user(p
, arg1
, 0);
11955 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
11956 return -TARGET_ENOSYS
;
11961 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
11962 abi_long arg2
, abi_long arg3
, abi_long arg4
,
11963 abi_long arg5
, abi_long arg6
, abi_long arg7
,
11966 CPUState
*cpu
= env_cpu(cpu_env
);
11969 #ifdef DEBUG_ERESTARTSYS
11970 /* Debug-only code for exercising the syscall-restart code paths
11971 * in the per-architecture cpu main loops: restart every syscall
11972 * the guest makes once before letting it through.
11978 return -TARGET_ERESTARTSYS
;
11983 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
11984 arg5
, arg6
, arg7
, arg8
);
11986 if (unlikely(do_strace
)) {
11987 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11988 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11989 arg5
, arg6
, arg7
, arg8
);
11990 print_syscall_ret(num
, ret
);
11992 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
11993 arg5
, arg6
, arg7
, arg8
);
11996 trace_guest_user_syscall_ret(cpu
, num
, ret
);