4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
101 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
102 * which in turn prevents use of linux/fs.h. So we have to
103 * define the constants ourselves for now.
105 #define FS_IOC_GETFLAGS _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION _IOW('v', 2, int)
115 #define BLKGETSIZE64 _IOR(0x12,114,size_t)
116 #define BLKDISCARD _IO(0x12,119)
117 #define BLKIOMIN _IO(0x12,120)
118 #define BLKIOOPT _IO(0x12,121)
119 #define BLKALIGNOFF _IO(0x12,122)
120 #define BLKPBSZGET _IO(0x12,123)
121 #define BLKDISCARDZEROES _IO(0x12,124)
122 #define BLKSECDISCARD _IO(0x12,125)
123 #define BLKROTATIONAL _IO(0x12,126)
124 #define BLKZEROOUT _IO(0x12,127)
126 #define FIBMAP _IO(0x00,1)
127 #define FIGETBSZ _IO(0x00,2)
129 struct file_clone_range
{
136 #define FICLONE _IOW(0x94, 9, int)
137 #define FICLONERANGE _IOW(0x94, 13, struct file_clone_range)
140 #include <linux/fs.h>
142 #include <linux/fd.h>
143 #if defined(CONFIG_FIEMAP)
144 #include <linux/fiemap.h>
146 #include <linux/fb.h>
147 #if defined(CONFIG_USBFS)
148 #include <linux/usbdevice_fs.h>
149 #include <linux/usb/ch9.h>
151 #include <linux/vt.h>
152 #include <linux/dm-ioctl.h>
153 #include <linux/reboot.h>
154 #include <linux/route.h>
155 #include <linux/filter.h>
156 #include <linux/blkpg.h>
157 #include <netpacket/packet.h>
158 #include <linux/netlink.h>
159 #include <linux/if_alg.h>
160 #include <linux/rtc.h>
161 #include <sound/asound.h>
163 #include <linux/btrfs.h>
166 #include <libdrm/drm.h>
167 #include <libdrm/i915_drm.h>
169 #include "linux_loop.h"
173 #include "user-internals.h"
175 #include "signal-common.h"
177 #include "user-mmap.h"
178 #include "user/safe-syscall.h"
179 #include "qemu/guest-random.h"
180 #include "qemu/selfmap.h"
181 #include "user/syscall-trace.h"
182 #include "special-errno.h"
183 #include "qapi/error.h"
184 #include "fd-trans.h"
186 #include "cpu_loop-common.h"
189 #define CLONE_IO 0x80000000 /* Clone io context */
192 /* We can't directly call the host clone syscall, because this will
193 * badly confuse libc (breaking mutexes, for example). So we must
194 * divide clone flags into:
195 * * flag combinations that look like pthread_create()
196 * * flag combinations that look like fork()
197 * * flags we can implement within QEMU itself
198 * * flags we can't support and will return an error for
200 /* For thread creation, all these flags must be present; for
201 * fork, none must be present.
203 #define CLONE_THREAD_FLAGS \
204 (CLONE_VM | CLONE_FS | CLONE_FILES | \
205 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
207 /* These flags are ignored:
208 * CLONE_DETACHED is now ignored by the kernel;
209 * CLONE_IO is just an optimisation hint to the I/O scheduler
211 #define CLONE_IGNORED_FLAGS \
212 (CLONE_DETACHED | CLONE_IO)
214 /* Flags for fork which we can implement within QEMU itself */
215 #define CLONE_OPTIONAL_FORK_FLAGS \
216 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
217 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
219 /* Flags for thread creation which we can implement within QEMU itself */
220 #define CLONE_OPTIONAL_THREAD_FLAGS \
221 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
222 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
224 #define CLONE_INVALID_FORK_FLAGS \
225 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
227 #define CLONE_INVALID_THREAD_FLAGS \
228 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
229 CLONE_IGNORED_FLAGS))
231 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
232 * have almost all been allocated. We cannot support any of
233 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
234 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
235 * The checks against the invalid thread masks above will catch these.
236 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
239 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
240 * once. This exercises the codepaths for restart.
242 //#define DEBUG_ERESTARTSYS
244 //#include <linux/msdos_fs.h>
245 #define VFAT_IOCTL_READDIR_BOTH \
246 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
247 #define VFAT_IOCTL_READDIR_SHORT \
248 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
258 #define _syscall0(type,name) \
259 static type name (void) \
261 return syscall(__NR_##name); \
264 #define _syscall1(type,name,type1,arg1) \
265 static type name (type1 arg1) \
267 return syscall(__NR_##name, arg1); \
270 #define _syscall2(type,name,type1,arg1,type2,arg2) \
271 static type name (type1 arg1,type2 arg2) \
273 return syscall(__NR_##name, arg1, arg2); \
276 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
277 static type name (type1 arg1,type2 arg2,type3 arg3) \
279 return syscall(__NR_##name, arg1, arg2, arg3); \
282 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
283 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
285 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
288 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
290 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
292 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
296 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
297 type5,arg5,type6,arg6) \
298 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
301 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
305 #define __NR_sys_uname __NR_uname
306 #define __NR_sys_getcwd1 __NR_getcwd
307 #define __NR_sys_getdents __NR_getdents
308 #define __NR_sys_getdents64 __NR_getdents64
309 #define __NR_sys_getpriority __NR_getpriority
310 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
311 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
312 #define __NR_sys_syslog __NR_syslog
313 #if defined(__NR_futex)
314 # define __NR_sys_futex __NR_futex
316 #if defined(__NR_futex_time64)
317 # define __NR_sys_futex_time64 __NR_futex_time64
319 #define __NR_sys_statx __NR_statx
321 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
322 #define __NR__llseek __NR_lseek
325 /* Newer kernel ports have llseek() instead of _llseek() */
326 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
327 #define TARGET_NR__llseek TARGET_NR_llseek
330 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
331 #ifndef TARGET_O_NONBLOCK_MASK
332 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
335 #define __NR_sys_gettid __NR_gettid
336 _syscall0(int, sys_gettid
)
338 /* For the 64-bit guest on 32-bit host case we must emulate
339 * getdents using getdents64, because otherwise the host
340 * might hand us back more dirent records than we can fit
341 * into the guest buffer after structure format conversion.
342 * Otherwise we emulate getdents with getdents if the host has it.
344 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
345 #define EMULATE_GETDENTS_WITH_GETDENTS
348 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
349 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
351 #if (defined(TARGET_NR_getdents) && \
352 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
353 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
354 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
356 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
357 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
358 loff_t
*, res
, uint
, wh
);
360 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
361 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
363 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
364 #ifdef __NR_exit_group
365 _syscall1(int,exit_group
,int,error_code
)
367 #if defined(__NR_futex)
368 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
369 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
371 #if defined(__NR_futex_time64)
372 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
373 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
375 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
376 _syscall2(int, pidfd_open
, pid_t
, pid
, unsigned int, flags
);
378 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
379 _syscall4(int, pidfd_send_signal
, int, pidfd
, int, sig
, siginfo_t
*, info
,
380 unsigned int, flags
);
382 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
383 _syscall3(int, pidfd_getfd
, int, pidfd
, int, targetfd
, unsigned int, flags
);
385 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
386 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
387 unsigned long *, user_mask_ptr
);
388 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
389 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
390 unsigned long *, user_mask_ptr
);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
404 #define __NR_sys_sched_getattr __NR_sched_getattr
405 _syscall4(int, sys_sched_getattr
, pid_t
, pid
, struct sched_attr
*, attr
,
406 unsigned int, size
, unsigned int, flags
);
407 #define __NR_sys_sched_setattr __NR_sched_setattr
408 _syscall3(int, sys_sched_setattr
, pid_t
, pid
, struct sched_attr
*, attr
,
409 unsigned int, flags
);
410 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
411 _syscall1(int, sys_sched_getscheduler
, pid_t
, pid
);
412 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
413 _syscall3(int, sys_sched_setscheduler
, pid_t
, pid
, int, policy
,
414 const struct sched_param
*, param
);
415 #define __NR_sys_sched_getparam __NR_sched_getparam
416 _syscall2(int, sys_sched_getparam
, pid_t
, pid
,
417 struct sched_param
*, param
);
418 #define __NR_sys_sched_setparam __NR_sched_setparam
419 _syscall2(int, sys_sched_setparam
, pid_t
, pid
,
420 const struct sched_param
*, param
);
421 #define __NR_sys_getcpu __NR_getcpu
422 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
423 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
425 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
426 struct __user_cap_data_struct
*, data
);
427 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
428 struct __user_cap_data_struct
*, data
);
429 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
430 _syscall2(int, ioprio_get
, int, which
, int, who
)
432 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
433 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
435 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
436 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
439 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
440 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
441 unsigned long, idx1
, unsigned long, idx2
)
445 * It is assumed that struct statx is architecture independent.
447 #if defined(TARGET_NR_statx) && defined(__NR_statx)
448 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
449 unsigned int, mask
, struct target_statx
*, statxbuf
)
451 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
452 _syscall2(int, membarrier
, int, cmd
, int, flags
)
455 static const bitmask_transtbl fcntl_flags_tbl
[] = {
456 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
457 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
458 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
459 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
460 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
461 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
462 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
463 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
464 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
465 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
466 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
467 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
468 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
469 #if defined(O_DIRECT)
470 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
472 #if defined(O_NOATIME)
473 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
475 #if defined(O_CLOEXEC)
476 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
479 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
481 #if defined(O_TMPFILE)
482 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
484 /* Don't terminate the list prematurely on 64-bit host+guest. */
485 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
486 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
491 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
493 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
494 #if defined(__NR_utimensat)
495 #define __NR_sys_utimensat __NR_utimensat
496 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
497 const struct timespec
*,tsp
,int,flags
)
/* Fallback when the host lacks __NR_utimensat: always fail with ENOSYS
 * so the caller reports the syscall as unimplemented.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
506 #endif /* TARGET_NR_utimensat */
508 #ifdef TARGET_NR_renameat2
509 #if defined(__NR_renameat2)
510 #define __NR_sys_renameat2 __NR_renameat2
511 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
512 const char *, new, unsigned int, flags
)
/* Fallback when the host lacks __NR_renameat2: plain renameat() covers
 * the flags == 0 case; any non-zero flags cannot be emulated, so fail
 * with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
524 #endif /* TARGET_NR_renameat2 */
526 #ifdef CONFIG_INOTIFY
527 #include <sys/inotify.h>
529 /* Userspace can usually survive runtime without inotify */
530 #undef TARGET_NR_inotify_init
531 #undef TARGET_NR_inotify_init1
532 #undef TARGET_NR_inotify_add_watch
533 #undef TARGET_NR_inotify_rm_watch
534 #endif /* CONFIG_INOTIFY */
536 #if defined(TARGET_NR_prlimit64)
537 #ifndef __NR_prlimit64
538 # define __NR_prlimit64 -1
540 #define __NR_sys_prlimit64 __NR_prlimit64
541 /* The glibc rlimit structure may not be that used by the underlying syscall */
542 struct host_rlimit64
{
546 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
547 const struct host_rlimit64
*, new_limit
,
548 struct host_rlimit64
*, old_limit
)
552 #if defined(TARGET_NR_timer_create)
553 /* Maximum of 32 active POSIX timers allowed at any one time. */
554 #define GUEST_TIMER_MAX 32
555 static timer_t g_posix_timers
[GUEST_TIMER_MAX
];
556 static int g_posix_timer_allocated
[GUEST_TIMER_MAX
];
558 static inline int next_free_host_timer(void)
561 for (k
= 0; k
< ARRAY_SIZE(g_posix_timer_allocated
); k
++) {
562 if (qatomic_xchg(g_posix_timer_allocated
+ k
, 1) == 0) {
569 static inline void free_host_timer_slot(int id
)
571 qatomic_store_release(g_posix_timer_allocated
+ id
, 0);
575 static inline int host_to_target_errno(int host_errno
)
577 switch (host_errno
) {
578 #define E(X) case X: return TARGET_##X;
579 #include "errnos.c.inc"
586 static inline int target_to_host_errno(int target_errno
)
588 switch (target_errno
) {
589 #define E(X) case TARGET_##X: return X;
590 #include "errnos.c.inc"
597 abi_long
get_errno(abi_long ret
)
600 return -host_to_target_errno(errno
);
605 const char *target_strerror(int err
)
607 if (err
== QEMU_ERESTARTSYS
) {
608 return "To be restarted";
610 if (err
== QEMU_ESIGRETURN
) {
611 return "Successful exit from sigreturn";
614 return strerror(target_to_host_errno(err
));
617 static int check_zeroed_user(abi_long addr
, size_t ksize
, size_t usize
)
621 if (usize
<= ksize
) {
624 for (i
= ksize
; i
< usize
; i
++) {
625 if (get_user_u8(b
, addr
+ i
)) {
626 return -TARGET_EFAULT
;
635 #define safe_syscall0(type, name) \
636 static type safe_##name(void) \
638 return safe_syscall(__NR_##name); \
641 #define safe_syscall1(type, name, type1, arg1) \
642 static type safe_##name(type1 arg1) \
644 return safe_syscall(__NR_##name, arg1); \
647 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
648 static type safe_##name(type1 arg1, type2 arg2) \
650 return safe_syscall(__NR_##name, arg1, arg2); \
653 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
654 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
656 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
659 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
666 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
667 type4, arg4, type5, arg5) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
671 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
674 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
675 type4, arg4, type5, arg5, type6, arg6) \
676 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
677 type5 arg5, type6 arg6) \
679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
682 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
683 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
684 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
685 int, flags
, mode_t
, mode
)
686 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
687 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
688 struct rusage
*, rusage
)
690 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
691 int, options
, struct rusage
*, rusage
)
692 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
693 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
694 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
695 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
696 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
698 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
699 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
700 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
703 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
704 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
706 #if defined(__NR_futex)
707 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
708 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
710 #if defined(__NR_futex_time64)
711 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
712 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
714 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
715 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
716 safe_syscall2(int, tkill
, int, tid
, int, sig
)
717 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
718 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
719 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
720 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
721 unsigned long, pos_l
, unsigned long, pos_h
)
722 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
723 unsigned long, pos_l
, unsigned long, pos_h
)
724 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
726 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
727 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
728 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
729 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
730 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
731 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
732 safe_syscall2(int, flock
, int, fd
, int, operation
)
733 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
734 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
735 const struct timespec
*, uts
, size_t, sigsetsize
)
737 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
739 #if defined(TARGET_NR_nanosleep)
740 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
741 struct timespec
*, rem
)
743 #if defined(TARGET_NR_clock_nanosleep) || \
744 defined(TARGET_NR_clock_nanosleep_time64)
745 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
746 const struct timespec
*, req
, struct timespec
*, rem
)
750 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
753 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
754 void *, ptr
, long, fifth
)
758 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
762 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
763 long, msgtype
, int, flags
)
765 #ifdef __NR_semtimedop
766 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
767 unsigned, nsops
, const struct timespec
*, timeout
)
769 #if defined(TARGET_NR_mq_timedsend) || \
770 defined(TARGET_NR_mq_timedsend_time64)
771 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
772 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
774 #if defined(TARGET_NR_mq_timedreceive) || \
775 defined(TARGET_NR_mq_timedreceive_time64)
776 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
777 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
779 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
780 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
781 int, outfd
, loff_t
*, poutoff
, size_t, length
,
785 /* We do ioctl like this rather than via safe_syscall3 to preserve the
786 * "third argument might be integer or pointer or not present" behaviour of
789 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
790 /* Similarly for fcntl. Note that callers must always:
791 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
792 * use the flock64 struct rather than unsuffixed flock
793 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
796 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
801 static inline int host_to_target_sock_type(int host_type
)
805 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
807 target_type
= TARGET_SOCK_DGRAM
;
810 target_type
= TARGET_SOCK_STREAM
;
813 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
817 #if defined(SOCK_CLOEXEC)
818 if (host_type
& SOCK_CLOEXEC
) {
819 target_type
|= TARGET_SOCK_CLOEXEC
;
823 #if defined(SOCK_NONBLOCK)
824 if (host_type
& SOCK_NONBLOCK
) {
825 target_type
|= TARGET_SOCK_NONBLOCK
;
832 static abi_ulong target_brk
;
833 static abi_ulong target_original_brk
;
834 static abi_ulong brk_page
;
836 void target_set_brk(abi_ulong new_brk
)
838 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
839 brk_page
= HOST_PAGE_ALIGN(target_brk
);
842 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
843 #define DEBUGF_BRK(message, args...)
845 /* do_brk() must return target values and target errnos. */
846 abi_long
do_brk(abi_ulong new_brk
)
848 abi_long mapped_addr
;
849 abi_ulong new_alloc_size
;
851 /* brk pointers are always untagged */
853 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
856 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
859 if (new_brk
< target_original_brk
) {
860 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
865 /* If the new brk is less than the highest page reserved to the
866 * target heap allocation, set it and we're almost done... */
867 if (new_brk
<= brk_page
) {
868 /* Heap contents are initialized to zero, as for anonymous
870 if (new_brk
> target_brk
) {
871 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
873 target_brk
= new_brk
;
874 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
878 /* We need to allocate more memory after the brk... Note that
879 * we don't use MAP_FIXED because that will map over the top of
880 * any existing mapping (like the one with the host libc or qemu
881 * itself); instead we treat "mapped but at wrong address" as
882 * a failure and unmap again.
884 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
885 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
886 PROT_READ
|PROT_WRITE
,
887 MAP_ANON
|MAP_PRIVATE
, 0, 0));
889 if (mapped_addr
== brk_page
) {
890 /* Heap contents are initialized to zero, as for anonymous
891 * mapped pages. Technically the new pages are already
892 * initialized to zero since they *are* anonymous mapped
893 * pages, however we have to take care with the contents that
894 * come from the remaining part of the previous page: it may
895 * contains garbage data due to a previous heap usage (grown
897 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
899 target_brk
= new_brk
;
900 brk_page
= HOST_PAGE_ALIGN(target_brk
);
901 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
904 } else if (mapped_addr
!= -1) {
905 /* Mapped but at wrong address, meaning there wasn't actually
906 * enough space for this brk.
908 target_munmap(mapped_addr
, new_alloc_size
);
910 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
913 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
916 #if defined(TARGET_ALPHA)
917 /* We (partially) emulate OSF/1 on Alpha, which requires we
918 return a proper errno, not an unchanged brk value. */
919 return -TARGET_ENOMEM
;
921 /* For everything else, return the previous break. */
925 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
926 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
927 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
928 abi_ulong target_fds_addr
,
932 abi_ulong b
, *target_fds
;
934 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
935 if (!(target_fds
= lock_user(VERIFY_READ
,
937 sizeof(abi_ulong
) * nw
,
939 return -TARGET_EFAULT
;
943 for (i
= 0; i
< nw
; i
++) {
944 /* grab the abi_ulong */
945 __get_user(b
, &target_fds
[i
]);
946 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
947 /* check the bit inside the abi_ulong */
954 unlock_user(target_fds
, target_fds_addr
, 0);
959 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
960 abi_ulong target_fds_addr
,
963 if (target_fds_addr
) {
964 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
965 return -TARGET_EFAULT
;
973 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
979 abi_ulong
*target_fds
;
981 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
982 if (!(target_fds
= lock_user(VERIFY_WRITE
,
984 sizeof(abi_ulong
) * nw
,
986 return -TARGET_EFAULT
;
989 for (i
= 0; i
< nw
; i
++) {
991 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
992 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
995 __put_user(v
, &target_fds
[i
]);
998 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1004 #if defined(__alpha__)
1005 #define HOST_HZ 1024
1010 static inline abi_long
host_to_target_clock_t(long ticks
)
1012 #if HOST_HZ == TARGET_HZ
1015 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1019 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1020 const struct rusage
*rusage
)
1022 struct target_rusage
*target_rusage
;
1024 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1025 return -TARGET_EFAULT
;
1026 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1027 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1028 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1029 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1030 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1031 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1032 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1033 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1034 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1035 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1036 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1037 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1038 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1039 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1040 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1041 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1042 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1043 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1044 unlock_user_struct(target_rusage
, target_addr
, 1);
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value to a host rlim_t.
 * The guest "infinity" sentinel maps to RLIM_INFINITY, and any value
 * that does not survive a round-trip through rlim_t (i.e. would be
 * truncated) is also treated as unlimited.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to a guest rlimit value (byte-swapped).
 * Host infinity, or any value too large to represent as a positive
 * abi_long in the guest, becomes TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
1083 static inline int target_to_host_resource(int code
)
1086 case TARGET_RLIMIT_AS
:
1088 case TARGET_RLIMIT_CORE
:
1090 case TARGET_RLIMIT_CPU
:
1092 case TARGET_RLIMIT_DATA
:
1094 case TARGET_RLIMIT_FSIZE
:
1095 return RLIMIT_FSIZE
;
1096 case TARGET_RLIMIT_LOCKS
:
1097 return RLIMIT_LOCKS
;
1098 case TARGET_RLIMIT_MEMLOCK
:
1099 return RLIMIT_MEMLOCK
;
1100 case TARGET_RLIMIT_MSGQUEUE
:
1101 return RLIMIT_MSGQUEUE
;
1102 case TARGET_RLIMIT_NICE
:
1104 case TARGET_RLIMIT_NOFILE
:
1105 return RLIMIT_NOFILE
;
1106 case TARGET_RLIMIT_NPROC
:
1107 return RLIMIT_NPROC
;
1108 case TARGET_RLIMIT_RSS
:
1110 case TARGET_RLIMIT_RTPRIO
:
1111 return RLIMIT_RTPRIO
;
1112 #ifdef RLIMIT_RTTIME
1113 case TARGET_RLIMIT_RTTIME
:
1114 return RLIMIT_RTTIME
;
1116 case TARGET_RLIMIT_SIGPENDING
:
1117 return RLIMIT_SIGPENDING
;
1118 case TARGET_RLIMIT_STACK
:
1119 return RLIMIT_STACK
;
1125 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1126 abi_ulong target_tv_addr
)
1128 struct target_timeval
*target_tv
;
1130 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1131 return -TARGET_EFAULT
;
1134 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1135 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1137 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1142 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1143 const struct timeval
*tv
)
1145 struct target_timeval
*target_tv
;
1147 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1148 return -TARGET_EFAULT
;
1151 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1152 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1154 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest 64-bit sock_timeval into a host struct timeval.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 * NOTE(review): tv_sec may be truncated on 32-bit hosts - same as
 * the pre-existing behaviour.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif
1178 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1179 const struct timeval
*tv
)
1181 struct target__kernel_sock_timeval
*target_tv
;
1183 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1184 return -TARGET_EFAULT
;
1187 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1188 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1190 unlock_user_struct(target_tv
, target_tv_addr
, 1);
/*
 * BUG FIX: the third guard line previously tested
 * defined(TARGET_NR_pselect6) twice; the second term must be
 * TARGET_NR_pselect6_time64 so this helper is compiled for targets
 * that only provide the time64 variant of pselect6.
 */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest (32/64-bit native) struct timespec into a host one.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a guest __kernel_timespec (64-bit time_t ABI) into a host
 * struct timespec. Returns 0 on success, -TARGET_EFAULT on fault.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1247 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1248 struct timespec
*host_ts
)
1250 struct target_timespec
*target_ts
;
1252 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1253 return -TARGET_EFAULT
;
1255 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1256 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1257 unlock_user_struct(target_ts
, target_addr
, 1);
1261 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1262 struct timespec
*host_ts
)
1264 struct target__kernel_timespec
*target_ts
;
1266 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1267 return -TARGET_EFAULT
;
1269 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1270 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1271 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_gettimeofday)
/*
 * Write a host struct timezone to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
/*
 * Read a guest struct timezone into a host struct timezone.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/*
 * Read a guest struct mq_attr into a host struct mq_attr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/*
 * Write a host struct mq_attr to a guest struct mq_attr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A NULL guest address yields a NULL fd_set pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* Convert the guest timeval (if any) to a timespec for pselect6. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the remaining timeout, Linux-style. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Handle the old single-argument select ABI: arg1 points at a
 * target_sel_arg_struct bundling all five select() arguments.
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Common implementation for pselect6 and pselect6_time64.
 * time64 selects which guest timespec layout is converted.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Common implementation for poll, ppoll and ppoll_time64.
 * ppoll selects the timespec+sigmask flavour; time64 selects the
 * guest timespec layout when ppoll is true.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1648 static abi_long
do_pipe(CPUArchState
*cpu_env
, abi_ulong pipedes
,
1649 int flags
, int is_pipe2
)
1653 ret
= pipe2(host_pipe
, flags
);
1656 return get_errno(ret
);
1658 /* Several targets have special calling conventions for the original
1659 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1661 #if defined(TARGET_ALPHA)
1662 cpu_env
->ir
[IR_A4
] = host_pipe
[1];
1663 return host_pipe
[0];
1664 #elif defined(TARGET_MIPS)
1665 cpu_env
->active_tc
.gpr
[3] = host_pipe
[1];
1666 return host_pipe
[0];
1667 #elif defined(TARGET_SH4)
1668 cpu_env
->gregs
[1] = host_pipe
[1];
1669 return host_pipe
[0];
1670 #elif defined(TARGET_SPARC)
1671 cpu_env
->regwptr
[1] = host_pipe
[1];
1672 return host_pipe
[0];
1676 if (put_user_s32(host_pipe
[0], pipedes
)
1677 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(abi_int
)))
1678 return -TARGET_EFAULT
;
1679 return get_errno(ret
);
1682 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1683 abi_ulong target_addr
,
1686 struct target_ip_mreqn
*target_smreqn
;
1688 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1690 return -TARGET_EFAULT
;
1691 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1692 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1693 if (len
== sizeof(struct target_ip_mreqn
))
1694 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1695 unlock_user(target_smreqn
, target_addr
, 0);
1700 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1701 abi_ulong target_addr
,
1704 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1705 sa_family_t sa_family
;
1706 struct target_sockaddr
*target_saddr
;
1708 if (fd_trans_target_to_host_addr(fd
)) {
1709 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1712 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1714 return -TARGET_EFAULT
;
1716 sa_family
= tswap16(target_saddr
->sa_family
);
1718 /* Oops. The caller might send a incomplete sun_path; sun_path
1719 * must be terminated by \0 (see the manual page), but
1720 * unfortunately it is quite common to specify sockaddr_un
1721 * length as "strlen(x->sun_path)" while it should be
1722 * "strlen(...) + 1". We'll fix that here if needed.
1723 * Linux kernel has a similar feature.
1726 if (sa_family
== AF_UNIX
) {
1727 if (len
< unix_maxlen
&& len
> 0) {
1728 char *cp
= (char*)target_saddr
;
1730 if ( cp
[len
-1] && !cp
[len
] )
1733 if (len
> unix_maxlen
)
1737 memcpy(addr
, target_saddr
, len
);
1738 addr
->sa_family
= sa_family
;
1739 if (sa_family
== AF_NETLINK
) {
1740 struct sockaddr_nl
*nladdr
;
1742 nladdr
= (struct sockaddr_nl
*)addr
;
1743 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1744 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1745 } else if (sa_family
== AF_PACKET
) {
1746 struct target_sockaddr_ll
*lladdr
;
1748 lladdr
= (struct target_sockaddr_ll
*)addr
;
1749 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1750 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1752 unlock_user(target_saddr
, target_addr
, 0);
1757 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1758 struct sockaddr
*addr
,
1761 struct target_sockaddr
*target_saddr
;
1768 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1770 return -TARGET_EFAULT
;
1771 memcpy(target_saddr
, addr
, len
);
1772 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1773 sizeof(target_saddr
->sa_family
)) {
1774 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1776 if (addr
->sa_family
== AF_NETLINK
&&
1777 len
>= sizeof(struct target_sockaddr_nl
)) {
1778 struct target_sockaddr_nl
*target_nl
=
1779 (struct target_sockaddr_nl
*)target_saddr
;
1780 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1781 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1782 } else if (addr
->sa_family
== AF_PACKET
) {
1783 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1784 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1785 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1786 } else if (addr
->sa_family
== AF_INET6
&&
1787 len
>= sizeof(struct target_sockaddr_in6
)) {
1788 struct target_sockaddr_in6
*target_in6
=
1789 (struct target_sockaddr_in6
*)target_saddr
;
1790 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1792 unlock_user(target_saddr
, target_addr
, len
);
1797 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1798 struct target_msghdr
*target_msgh
)
1800 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1801 abi_long msg_controllen
;
1802 abi_ulong target_cmsg_addr
;
1803 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1804 socklen_t space
= 0;
1806 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1807 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1809 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1810 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1811 target_cmsg_start
= target_cmsg
;
1813 return -TARGET_EFAULT
;
1815 while (cmsg
&& target_cmsg
) {
1816 void *data
= CMSG_DATA(cmsg
);
1817 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1819 int len
= tswapal(target_cmsg
->cmsg_len
)
1820 - sizeof(struct target_cmsghdr
);
1822 space
+= CMSG_SPACE(len
);
1823 if (space
> msgh
->msg_controllen
) {
1824 space
-= CMSG_SPACE(len
);
1825 /* This is a QEMU bug, since we allocated the payload
1826 * area ourselves (unlike overflow in host-to-target
1827 * conversion, which is just the guest giving us a buffer
1828 * that's too small). It can't happen for the payload types
1829 * we currently support; if it becomes an issue in future
1830 * we would need to improve our allocation strategy to
1831 * something more intelligent than "twice the size of the
1832 * target buffer we're reading from".
1834 qemu_log_mask(LOG_UNIMP
,
1835 ("Unsupported ancillary data %d/%d: "
1836 "unhandled msg size\n"),
1837 tswap32(target_cmsg
->cmsg_level
),
1838 tswap32(target_cmsg
->cmsg_type
));
1842 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1843 cmsg
->cmsg_level
= SOL_SOCKET
;
1845 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1847 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1848 cmsg
->cmsg_len
= CMSG_LEN(len
);
1850 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1851 int *fd
= (int *)data
;
1852 int *target_fd
= (int *)target_data
;
1853 int i
, numfds
= len
/ sizeof(int);
1855 for (i
= 0; i
< numfds
; i
++) {
1856 __get_user(fd
[i
], target_fd
+ i
);
1858 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1859 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1860 struct ucred
*cred
= (struct ucred
*)data
;
1861 struct target_ucred
*target_cred
=
1862 (struct target_ucred
*)target_data
;
1864 __get_user(cred
->pid
, &target_cred
->pid
);
1865 __get_user(cred
->uid
, &target_cred
->uid
);
1866 __get_user(cred
->gid
, &target_cred
->gid
);
1868 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1869 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1870 memcpy(data
, target_data
, len
);
1873 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1874 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1877 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1879 msgh
->msg_controllen
= space
;
1883 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1884 struct msghdr
*msgh
)
1886 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1887 abi_long msg_controllen
;
1888 abi_ulong target_cmsg_addr
;
1889 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1890 socklen_t space
= 0;
1892 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1893 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1895 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1896 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1897 target_cmsg_start
= target_cmsg
;
1899 return -TARGET_EFAULT
;
1901 while (cmsg
&& target_cmsg
) {
1902 void *data
= CMSG_DATA(cmsg
);
1903 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1905 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1906 int tgt_len
, tgt_space
;
1908 /* We never copy a half-header but may copy half-data;
1909 * this is Linux's behaviour in put_cmsg(). Note that
1910 * truncation here is a guest problem (which we report
1911 * to the guest via the CTRUNC bit), unlike truncation
1912 * in target_to_host_cmsg, which is a QEMU bug.
1914 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1915 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1919 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1920 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1922 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1924 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1926 /* Payload types which need a different size of payload on
1927 * the target must adjust tgt_len here.
1930 switch (cmsg
->cmsg_level
) {
1932 switch (cmsg
->cmsg_type
) {
1934 tgt_len
= sizeof(struct target_timeval
);
1944 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1945 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1946 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1949 /* We must now copy-and-convert len bytes of payload
1950 * into tgt_len bytes of destination space. Bear in mind
1951 * that in both source and destination we may be dealing
1952 * with a truncated value!
1954 switch (cmsg
->cmsg_level
) {
1956 switch (cmsg
->cmsg_type
) {
1959 int *fd
= (int *)data
;
1960 int *target_fd
= (int *)target_data
;
1961 int i
, numfds
= tgt_len
/ sizeof(int);
1963 for (i
= 0; i
< numfds
; i
++) {
1964 __put_user(fd
[i
], target_fd
+ i
);
1970 struct timeval
*tv
= (struct timeval
*)data
;
1971 struct target_timeval
*target_tv
=
1972 (struct target_timeval
*)target_data
;
1974 if (len
!= sizeof(struct timeval
) ||
1975 tgt_len
!= sizeof(struct target_timeval
)) {
1979 /* copy struct timeval to target */
1980 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1981 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1984 case SCM_CREDENTIALS
:
1986 struct ucred
*cred
= (struct ucred
*)data
;
1987 struct target_ucred
*target_cred
=
1988 (struct target_ucred
*)target_data
;
1990 __put_user(cred
->pid
, &target_cred
->pid
);
1991 __put_user(cred
->uid
, &target_cred
->uid
);
1992 __put_user(cred
->gid
, &target_cred
->gid
);
2001 switch (cmsg
->cmsg_type
) {
2004 uint32_t *v
= (uint32_t *)data
;
2005 uint32_t *t_int
= (uint32_t *)target_data
;
2007 if (len
!= sizeof(uint32_t) ||
2008 tgt_len
!= sizeof(uint32_t)) {
2011 __put_user(*v
, t_int
);
2017 struct sock_extended_err ee
;
2018 struct sockaddr_in offender
;
2020 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2021 struct errhdr_t
*target_errh
=
2022 (struct errhdr_t
*)target_data
;
2024 if (len
!= sizeof(struct errhdr_t
) ||
2025 tgt_len
!= sizeof(struct errhdr_t
)) {
2028 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2029 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2030 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2031 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2032 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2033 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2034 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2035 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2036 (void *) &errh
->offender
, sizeof(errh
->offender
));
2045 switch (cmsg
->cmsg_type
) {
2048 uint32_t *v
= (uint32_t *)data
;
2049 uint32_t *t_int
= (uint32_t *)target_data
;
2051 if (len
!= sizeof(uint32_t) ||
2052 tgt_len
!= sizeof(uint32_t)) {
2055 __put_user(*v
, t_int
);
2061 struct sock_extended_err ee
;
2062 struct sockaddr_in6 offender
;
2064 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2065 struct errhdr6_t
*target_errh
=
2066 (struct errhdr6_t
*)target_data
;
2068 if (len
!= sizeof(struct errhdr6_t
) ||
2069 tgt_len
!= sizeof(struct errhdr6_t
)) {
2072 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2073 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2074 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2075 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2076 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2077 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2078 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2079 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2080 (void *) &errh
->offender
, sizeof(errh
->offender
));
2090 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2091 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2092 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2093 if (tgt_len
> len
) {
2094 memset(target_data
+ len
, 0, tgt_len
- len
);
2098 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2099 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2100 if (msg_controllen
< tgt_space
) {
2101 tgt_space
= msg_controllen
;
2103 msg_controllen
-= tgt_space
;
2105 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2106 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2109 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2111 target_msgh
->msg_controllen
= tswapal(space
);
2115 /* do_setsockopt() Must return target values and target errnos. */
2116 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2117 abi_ulong optval_addr
, socklen_t optlen
)
2121 struct ip_mreqn
*ip_mreq
;
2122 struct ip_mreq_source
*ip_mreq_source
;
2127 /* TCP and UDP options all take an 'int' value. */
2128 if (optlen
< sizeof(uint32_t))
2129 return -TARGET_EINVAL
;
2131 if (get_user_u32(val
, optval_addr
))
2132 return -TARGET_EFAULT
;
2133 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2140 case IP_ROUTER_ALERT
:
2144 case IP_MTU_DISCOVER
:
2151 case IP_MULTICAST_TTL
:
2152 case IP_MULTICAST_LOOP
:
2154 if (optlen
>= sizeof(uint32_t)) {
2155 if (get_user_u32(val
, optval_addr
))
2156 return -TARGET_EFAULT
;
2157 } else if (optlen
>= 1) {
2158 if (get_user_u8(val
, optval_addr
))
2159 return -TARGET_EFAULT
;
2161 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2163 case IP_ADD_MEMBERSHIP
:
2164 case IP_DROP_MEMBERSHIP
:
2165 if (optlen
< sizeof (struct target_ip_mreq
) ||
2166 optlen
> sizeof (struct target_ip_mreqn
))
2167 return -TARGET_EINVAL
;
2169 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2170 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2171 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2174 case IP_BLOCK_SOURCE
:
2175 case IP_UNBLOCK_SOURCE
:
2176 case IP_ADD_SOURCE_MEMBERSHIP
:
2177 case IP_DROP_SOURCE_MEMBERSHIP
:
2178 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2179 return -TARGET_EINVAL
;
2181 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2182 if (!ip_mreq_source
) {
2183 return -TARGET_EFAULT
;
2185 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2186 unlock_user (ip_mreq_source
, optval_addr
, 0);
2195 case IPV6_MTU_DISCOVER
:
2198 case IPV6_RECVPKTINFO
:
2199 case IPV6_UNICAST_HOPS
:
2200 case IPV6_MULTICAST_HOPS
:
2201 case IPV6_MULTICAST_LOOP
:
2203 case IPV6_RECVHOPLIMIT
:
2204 case IPV6_2292HOPLIMIT
:
2207 case IPV6_2292PKTINFO
:
2208 case IPV6_RECVTCLASS
:
2209 case IPV6_RECVRTHDR
:
2210 case IPV6_2292RTHDR
:
2211 case IPV6_RECVHOPOPTS
:
2212 case IPV6_2292HOPOPTS
:
2213 case IPV6_RECVDSTOPTS
:
2214 case IPV6_2292DSTOPTS
:
2216 case IPV6_ADDR_PREFERENCES
:
2217 #ifdef IPV6_RECVPATHMTU
2218 case IPV6_RECVPATHMTU
:
2220 #ifdef IPV6_TRANSPARENT
2221 case IPV6_TRANSPARENT
:
2223 #ifdef IPV6_FREEBIND
2226 #ifdef IPV6_RECVORIGDSTADDR
2227 case IPV6_RECVORIGDSTADDR
:
2230 if (optlen
< sizeof(uint32_t)) {
2231 return -TARGET_EINVAL
;
2233 if (get_user_u32(val
, optval_addr
)) {
2234 return -TARGET_EFAULT
;
2236 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2237 &val
, sizeof(val
)));
2241 struct in6_pktinfo pki
;
2243 if (optlen
< sizeof(pki
)) {
2244 return -TARGET_EINVAL
;
2247 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2248 return -TARGET_EFAULT
;
2251 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2253 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2254 &pki
, sizeof(pki
)));
2257 case IPV6_ADD_MEMBERSHIP
:
2258 case IPV6_DROP_MEMBERSHIP
:
2260 struct ipv6_mreq ipv6mreq
;
2262 if (optlen
< sizeof(ipv6mreq
)) {
2263 return -TARGET_EINVAL
;
2266 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2267 return -TARGET_EFAULT
;
2270 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2272 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2273 &ipv6mreq
, sizeof(ipv6mreq
)));
2284 struct icmp6_filter icmp6f
;
2286 if (optlen
> sizeof(icmp6f
)) {
2287 optlen
= sizeof(icmp6f
);
2290 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2291 return -TARGET_EFAULT
;
2294 for (val
= 0; val
< 8; val
++) {
2295 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2298 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2310 /* those take an u32 value */
2311 if (optlen
< sizeof(uint32_t)) {
2312 return -TARGET_EINVAL
;
2315 if (get_user_u32(val
, optval_addr
)) {
2316 return -TARGET_EFAULT
;
2318 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2319 &val
, sizeof(val
)));
2326 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2331 char *alg_key
= g_malloc(optlen
);
2334 return -TARGET_ENOMEM
;
2336 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2338 return -TARGET_EFAULT
;
2340 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2345 case ALG_SET_AEAD_AUTHSIZE
:
2347 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2356 case TARGET_SOL_SOCKET
:
2358 case TARGET_SO_RCVTIMEO
:
2362 optname
= SO_RCVTIMEO
;
2365 if (optlen
!= sizeof(struct target_timeval
)) {
2366 return -TARGET_EINVAL
;
2369 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2370 return -TARGET_EFAULT
;
2373 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2377 case TARGET_SO_SNDTIMEO
:
2378 optname
= SO_SNDTIMEO
;
2380 case TARGET_SO_ATTACH_FILTER
:
2382 struct target_sock_fprog
*tfprog
;
2383 struct target_sock_filter
*tfilter
;
2384 struct sock_fprog fprog
;
2385 struct sock_filter
*filter
;
2388 if (optlen
!= sizeof(*tfprog
)) {
2389 return -TARGET_EINVAL
;
2391 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2392 return -TARGET_EFAULT
;
2394 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2395 tswapal(tfprog
->filter
), 0)) {
2396 unlock_user_struct(tfprog
, optval_addr
, 1);
2397 return -TARGET_EFAULT
;
2400 fprog
.len
= tswap16(tfprog
->len
);
2401 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2402 if (filter
== NULL
) {
2403 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2404 unlock_user_struct(tfprog
, optval_addr
, 1);
2405 return -TARGET_ENOMEM
;
2407 for (i
= 0; i
< fprog
.len
; i
++) {
2408 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2409 filter
[i
].jt
= tfilter
[i
].jt
;
2410 filter
[i
].jf
= tfilter
[i
].jf
;
2411 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2413 fprog
.filter
= filter
;
2415 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2416 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2419 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2420 unlock_user_struct(tfprog
, optval_addr
, 1);
2423 case TARGET_SO_BINDTODEVICE
:
2425 char *dev_ifname
, *addr_ifname
;
2427 if (optlen
> IFNAMSIZ
- 1) {
2428 optlen
= IFNAMSIZ
- 1;
2430 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2432 return -TARGET_EFAULT
;
2434 optname
= SO_BINDTODEVICE
;
2435 addr_ifname
= alloca(IFNAMSIZ
);
2436 memcpy(addr_ifname
, dev_ifname
, optlen
);
2437 addr_ifname
[optlen
] = 0;
2438 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2439 addr_ifname
, optlen
));
2440 unlock_user (dev_ifname
, optval_addr
, 0);
2443 case TARGET_SO_LINGER
:
2446 struct target_linger
*tlg
;
2448 if (optlen
!= sizeof(struct target_linger
)) {
2449 return -TARGET_EINVAL
;
2451 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2452 return -TARGET_EFAULT
;
2454 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2455 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2456 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2458 unlock_user_struct(tlg
, optval_addr
, 0);
2461 /* Options with 'int' argument. */
2462 case TARGET_SO_DEBUG
:
2465 case TARGET_SO_REUSEADDR
:
2466 optname
= SO_REUSEADDR
;
2469 case TARGET_SO_REUSEPORT
:
2470 optname
= SO_REUSEPORT
;
2473 case TARGET_SO_TYPE
:
2476 case TARGET_SO_ERROR
:
2479 case TARGET_SO_DONTROUTE
:
2480 optname
= SO_DONTROUTE
;
2482 case TARGET_SO_BROADCAST
:
2483 optname
= SO_BROADCAST
;
2485 case TARGET_SO_SNDBUF
:
2486 optname
= SO_SNDBUF
;
2488 case TARGET_SO_SNDBUFFORCE
:
2489 optname
= SO_SNDBUFFORCE
;
2491 case TARGET_SO_RCVBUF
:
2492 optname
= SO_RCVBUF
;
2494 case TARGET_SO_RCVBUFFORCE
:
2495 optname
= SO_RCVBUFFORCE
;
2497 case TARGET_SO_KEEPALIVE
:
2498 optname
= SO_KEEPALIVE
;
2500 case TARGET_SO_OOBINLINE
:
2501 optname
= SO_OOBINLINE
;
2503 case TARGET_SO_NO_CHECK
:
2504 optname
= SO_NO_CHECK
;
2506 case TARGET_SO_PRIORITY
:
2507 optname
= SO_PRIORITY
;
2510 case TARGET_SO_BSDCOMPAT
:
2511 optname
= SO_BSDCOMPAT
;
2514 case TARGET_SO_PASSCRED
:
2515 optname
= SO_PASSCRED
;
2517 case TARGET_SO_PASSSEC
:
2518 optname
= SO_PASSSEC
;
2520 case TARGET_SO_TIMESTAMP
:
2521 optname
= SO_TIMESTAMP
;
2523 case TARGET_SO_RCVLOWAT
:
2524 optname
= SO_RCVLOWAT
;
2529 if (optlen
< sizeof(uint32_t))
2530 return -TARGET_EINVAL
;
2532 if (get_user_u32(val
, optval_addr
))
2533 return -TARGET_EFAULT
;
2534 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2539 case NETLINK_PKTINFO
:
2540 case NETLINK_ADD_MEMBERSHIP
:
2541 case NETLINK_DROP_MEMBERSHIP
:
2542 case NETLINK_BROADCAST_ERROR
:
2543 case NETLINK_NO_ENOBUFS
:
2544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2545 case NETLINK_LISTEN_ALL_NSID
:
2546 case NETLINK_CAP_ACK
:
2547 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2548 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2549 case NETLINK_EXT_ACK
:
2550 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2551 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2552 case NETLINK_GET_STRICT_CHK
:
2553 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2559 if (optlen
< sizeof(uint32_t)) {
2560 return -TARGET_EINVAL
;
2562 if (get_user_u32(val
, optval_addr
)) {
2563 return -TARGET_EFAULT
;
2565 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2568 #endif /* SOL_NETLINK */
2571 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2573 ret
= -TARGET_ENOPROTOOPT
;
/* do_getsockopt() Must return target values and target errnos. */
/* NOTE(review): several interior lines were lost in extraction; the case
 * lists and fall-through labels below are reconstructed consistently with
 * the surviving fragments -- verify against the upstream file. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            /* Target supplies the user buffer length via *optlen. */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            /* Clamp to the target's timeval size before copying out. */
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* Byte-swap pid/uid/gid into the target's ucred layout. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            /* Security label is an opaque string; pass the guest buffer
             * straight through to the host kernel. */
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* Socket type values differ between host and target. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small values fit in a single byte for short buffers. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianess to target endianess. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* The double shift by TARGET_LONG_BITS / 2 avoids UB when
     * TARGET_LONG_BITS equals the width of unsigned long long. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
/* Lock a guest iovec array into host memory, translating each element's
 * base/len from target byte order.  On failure returns NULL with errno
 * set; on success the caller must release with unlock_iovec().
 * NOTE(review): the error paths below were reconstructed from surviving
 * fragments -- verify against the upstream file. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
        abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Undo the locks taken so far before reporting the error. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release an iovec previously locked with lock_iovec(); when copy is set
 * the host data is written back to guest memory. */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        int i;

        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped locking at the first bad length. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/* Translate a target socket type (including TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host's encoding in place.
 * Returns 0 on success or -TARGET_EINVAL if a flag cannot be expressed
 * on this host. */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* Without SOCK_NONBLOCK, sock_flags_fixup() emulates via fcntl;
         * if O_NONBLOCK is also missing there is no way to emulate. */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.  Returns the fd
 * on success; on fcntl failure the fd is closed and -TARGET_EINVAL is
 * returned. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only a whitelisted set of netlink protocols is emulated. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Packet sockets carry the ethertype in network byte order. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for the NUL appended to AF_UNIX paths during
     * sockaddr conversion. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* +1 leaves room for the NUL appended to AF_UNIX paths during
     * sockaddr conversion. */
    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    /* safe_connect is restartable across guest signals. */
    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Shared worker for sendmsg/recvmsg: converts the (already locked)
 * target msghdr to a host one, performs the call, and converts results
 * back for receives.
 * NOTE(review): some interior lines were lost in extraction and have been
 * reconstructed -- verify against the upstream file. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg data may be larger than the target's; double the space. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload through the fd's data hook before
             * handing it to the host kernel. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
/* Lock the target msghdr and dispatch to do_sendrecvmsg_locked().
 * Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    /* On receive the struct is written back (namelen/flags updated). */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * vector.  Must return target values and target errnos. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Write both fds back to the guest's two-element array. */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate payload via the fd's hook on a private copy so the
         * guest buffer is left untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 leaves room for the NUL appended to AF_UNIX paths. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Convert the received payload in place via the fd hook. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Bookkeeping for guest shmat() mappings.
 * NOTE(review): the field list was lost in extraction and is reconstructed
 * -- verify against the upstream file. */
static struct shm_region {
    abi_ulong start;   /* guest address of the mapping */
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3766 #ifndef TARGET_SEMID64_DS
3767 /* asm-generic version of this struct */
3768 struct target_semid64_ds
3770 struct target_ipc_perm sem_perm
;
3771 abi_ulong sem_otime
;
3772 #if TARGET_ABI_BITS == 32
3773 abi_ulong __unused1
;
3775 abi_ulong sem_ctime
;
3776 #if TARGET_ABI_BITS == 32
3777 abi_ulong __unused2
;
3779 abi_ulong sem_nsems
;
3780 abi_ulong __unused3
;
3781 abi_ulong __unused4
;
3785 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3786 abi_ulong target_addr
)
3788 struct target_ipc_perm
*target_ip
;
3789 struct target_semid64_ds
*target_sd
;
3791 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3792 return -TARGET_EFAULT
;
3793 target_ip
= &(target_sd
->sem_perm
);
3794 host_ip
->__key
= tswap32(target_ip
->__key
);
3795 host_ip
->uid
= tswap32(target_ip
->uid
);
3796 host_ip
->gid
= tswap32(target_ip
->gid
);
3797 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3798 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3799 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3800 host_ip
->mode
= tswap32(target_ip
->mode
);
3802 host_ip
->mode
= tswap16(target_ip
->mode
);
3804 #if defined(TARGET_PPC)
3805 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3807 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3809 unlock_user_struct(target_sd
, target_addr
, 0);
3813 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3814 struct ipc_perm
*host_ip
)
3816 struct target_ipc_perm
*target_ip
;
3817 struct target_semid64_ds
*target_sd
;
3819 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3820 return -TARGET_EFAULT
;
3821 target_ip
= &(target_sd
->sem_perm
);
3822 target_ip
->__key
= tswap32(host_ip
->__key
);
3823 target_ip
->uid
= tswap32(host_ip
->uid
);
3824 target_ip
->gid
= tswap32(host_ip
->gid
);
3825 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3826 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3827 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3828 target_ip
->mode
= tswap32(host_ip
->mode
);
3830 target_ip
->mode
= tswap16(host_ip
->mode
);
3832 #if defined(TARGET_PPC)
3833 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3835 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3837 unlock_user_struct(target_sd
, target_addr
, 1);
3841 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3842 abi_ulong target_addr
)
3844 struct target_semid64_ds
*target_sd
;
3846 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3847 return -TARGET_EFAULT
;
3848 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3849 return -TARGET_EFAULT
;
3850 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3851 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3852 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3853 unlock_user_struct(target_sd
, target_addr
, 0);
3857 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3858 struct semid_ds
*host_sd
)
3860 struct target_semid64_ds
*target_sd
;
3862 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3863 return -TARGET_EFAULT
;
3864 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3865 return -TARGET_EFAULT
;
3866 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3867 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3868 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3869 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout seminfo for IPC_INFO/SEM_INFO.
 * NOTE(review): field list reconstructed from host_to_target_seminfo()
 * below -- all ten fields are copied there; verify widths against the
 * kernel's struct seminfo (all int).
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3886 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3887 struct seminfo
*host_seminfo
)
3889 struct target_seminfo
*target_seminfo
;
3890 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3891 return -TARGET_EFAULT
;
3892 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3893 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3894 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3895 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3896 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3897 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3898 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3899 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3900 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3901 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3902 unlock_user_struct(target_seminfo
, target_addr
, 1);
3908 struct semid_ds
*buf
;
3909 unsigned short *array
;
3910 struct seminfo
*__buf
;
3913 union target_semun
{
3920 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3921 abi_ulong target_addr
)
3924 unsigned short *array
;
3926 struct semid_ds semid_ds
;
3929 semun
.buf
= &semid_ds
;
3931 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3933 return get_errno(ret
);
3935 nsems
= semid_ds
.sem_nsems
;
3937 *host_array
= g_try_new(unsigned short, nsems
);
3939 return -TARGET_ENOMEM
;
3941 array
= lock_user(VERIFY_READ
, target_addr
,
3942 nsems
*sizeof(unsigned short), 1);
3944 g_free(*host_array
);
3945 return -TARGET_EFAULT
;
3948 for(i
=0; i
<nsems
; i
++) {
3949 __get_user((*host_array
)[i
], &array
[i
]);
3951 unlock_user(array
, target_addr
, 0);
3956 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3957 unsigned short **host_array
)
3960 unsigned short *array
;
3962 struct semid_ds semid_ds
;
3965 semun
.buf
= &semid_ds
;
3967 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3969 return get_errno(ret
);
3971 nsems
= semid_ds
.sem_nsems
;
3973 array
= lock_user(VERIFY_WRITE
, target_addr
,
3974 nsems
*sizeof(unsigned short), 0);
3976 return -TARGET_EFAULT
;
3978 for(i
=0; i
<nsems
; i
++) {
3979 __put_user((*host_array
)[i
], &array
[i
]);
3981 g_free(*host_array
);
3982 unlock_user(array
, target_addr
, 1);
3987 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3988 abi_ulong target_arg
)
3990 union target_semun target_su
= { .buf
= target_arg
};
3992 struct semid_ds dsarg
;
3993 unsigned short *array
= NULL
;
3994 struct seminfo seminfo
;
3995 abi_long ret
= -TARGET_EINVAL
;
4002 /* In 64 bit cross-endian situations, we will erroneously pick up
4003 * the wrong half of the union for the "val" element. To rectify
4004 * this, the entire 8-byte structure is byteswapped, followed by
4005 * a swap of the 4 byte val field. In other cases, the data is
4006 * already in proper host byte order. */
4007 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4008 target_su
.buf
= tswapal(target_su
.buf
);
4009 arg
.val
= tswap32(target_su
.val
);
4011 arg
.val
= target_su
.val
;
4013 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4017 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4021 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4022 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4029 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4033 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4034 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4040 arg
.__buf
= &seminfo
;
4041 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4042 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4050 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf for semop/semtimedop.
 * NOTE(review): sem_op/sem_flg reconstructed from target_to_host_sembuf()
 * below -- verify widths (short) against upstream.
 */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4063 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4064 abi_ulong target_addr
,
4067 struct target_sembuf
*target_sembuf
;
4070 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4071 nsops
*sizeof(struct target_sembuf
), 1);
4073 return -TARGET_EFAULT
;
4075 for(i
=0; i
<nsops
; i
++) {
4076 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4077 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4078 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4081 unlock_user(target_sembuf
, target_addr
, 0);
4086 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4087 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4090 * This macro is required to handle the s390 variants, which passes the
4091 * arguments in a different order than default.
4094 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4095 (__nsops), (__timeout), (__sops)
4097 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4098 (__nsops), 0, (__sops), (__timeout)
4101 static inline abi_long
do_semtimedop(int semid
,
4104 abi_long timeout
, bool time64
)
4106 struct sembuf
*sops
;
4107 struct timespec ts
, *pts
= NULL
;
4113 if (target_to_host_timespec64(pts
, timeout
)) {
4114 return -TARGET_EFAULT
;
4117 if (target_to_host_timespec(pts
, timeout
)) {
4118 return -TARGET_EFAULT
;
4123 if (nsops
> TARGET_SEMOPM
) {
4124 return -TARGET_E2BIG
;
4127 sops
= g_new(struct sembuf
, nsops
);
4129 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4131 return -TARGET_EFAULT
;
4134 ret
= -TARGET_ENOSYS
;
4135 #ifdef __NR_semtimedop
4136 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4139 if (ret
== -TARGET_ENOSYS
) {
4140 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4141 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4149 struct target_msqid_ds
4151 struct target_ipc_perm msg_perm
;
4152 abi_ulong msg_stime
;
4153 #if TARGET_ABI_BITS == 32
4154 abi_ulong __unused1
;
4156 abi_ulong msg_rtime
;
4157 #if TARGET_ABI_BITS == 32
4158 abi_ulong __unused2
;
4160 abi_ulong msg_ctime
;
4161 #if TARGET_ABI_BITS == 32
4162 abi_ulong __unused3
;
4164 abi_ulong __msg_cbytes
;
4166 abi_ulong msg_qbytes
;
4167 abi_ulong msg_lspid
;
4168 abi_ulong msg_lrpid
;
4169 abi_ulong __unused4
;
4170 abi_ulong __unused5
;
4173 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4174 abi_ulong target_addr
)
4176 struct target_msqid_ds
*target_md
;
4178 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4179 return -TARGET_EFAULT
;
4180 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4181 return -TARGET_EFAULT
;
4182 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4183 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4184 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4185 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4186 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4187 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4188 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4189 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4190 unlock_user_struct(target_md
, target_addr
, 0);
4194 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4195 struct msqid_ds
*host_md
)
4197 struct target_msqid_ds
*target_md
;
4199 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4200 return -TARGET_EFAULT
;
4201 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4202 return -TARGET_EFAULT
;
4203 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4204 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4205 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4206 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4207 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4208 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4209 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4210 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4211 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout msginfo for IPC_INFO/MSG_INFO.
 * NOTE(review): fields reconstructed from host_to_target_msginfo() below;
 * only msgseg's declaration (unsigned short int) is directly visible --
 * verify the rest (int) against the kernel header.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4226 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4227 struct msginfo
*host_msginfo
)
4229 struct target_msginfo
*target_msginfo
;
4230 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4231 return -TARGET_EFAULT
;
4232 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4233 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4234 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4235 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4236 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4237 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4238 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4239 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4240 unlock_user_struct(target_msginfo
, target_addr
, 1);
4244 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4246 struct msqid_ds dsarg
;
4247 struct msginfo msginfo
;
4248 abi_long ret
= -TARGET_EINVAL
;
4256 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4257 return -TARGET_EFAULT
;
4258 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4259 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4260 return -TARGET_EFAULT
;
4263 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4267 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4268 if (host_to_target_msginfo(ptr
, &msginfo
))
4269 return -TARGET_EFAULT
;
4276 struct target_msgbuf
{
4281 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4282 ssize_t msgsz
, int msgflg
)
4284 struct target_msgbuf
*target_mb
;
4285 struct msgbuf
*host_mb
;
4289 return -TARGET_EINVAL
;
4292 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4293 return -TARGET_EFAULT
;
4294 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4296 unlock_user_struct(target_mb
, msgp
, 0);
4297 return -TARGET_ENOMEM
;
4299 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4300 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4301 ret
= -TARGET_ENOSYS
;
4303 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4306 if (ret
== -TARGET_ENOSYS
) {
4308 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4311 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4317 unlock_user_struct(target_mb
, msgp
, 0);
#ifdef __sparc__
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4336 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4337 ssize_t msgsz
, abi_long msgtyp
,
4340 struct target_msgbuf
*target_mb
;
4342 struct msgbuf
*host_mb
;
4346 return -TARGET_EINVAL
;
4349 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4350 return -TARGET_EFAULT
;
4352 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4354 ret
= -TARGET_ENOMEM
;
4357 ret
= -TARGET_ENOSYS
;
4359 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4362 if (ret
== -TARGET_ENOSYS
) {
4363 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4364 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4369 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4370 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4371 if (!target_mtext
) {
4372 ret
= -TARGET_EFAULT
;
4375 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4376 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4379 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4383 unlock_user_struct(target_mb
, msgp
, 1);
4388 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4389 abi_ulong target_addr
)
4391 struct target_shmid_ds
*target_sd
;
4393 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4394 return -TARGET_EFAULT
;
4395 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4396 return -TARGET_EFAULT
;
4397 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4398 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4399 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4400 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4401 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4402 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4403 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4404 unlock_user_struct(target_sd
, target_addr
, 0);
4408 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4409 struct shmid_ds
*host_sd
)
4411 struct target_shmid_ds
*target_sd
;
4413 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4414 return -TARGET_EFAULT
;
4415 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4416 return -TARGET_EFAULT
;
4417 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4418 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4419 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4420 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4421 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4422 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4423 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4424 unlock_user_struct(target_sd
, target_addr
, 1);
4428 struct target_shminfo
{
4436 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4437 struct shminfo
*host_shminfo
)
4439 struct target_shminfo
*target_shminfo
;
4440 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4441 return -TARGET_EFAULT
;
4442 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4443 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4444 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4445 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4446 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4447 unlock_user_struct(target_shminfo
, target_addr
, 1);
4451 struct target_shm_info
{
4456 abi_ulong swap_attempts
;
4457 abi_ulong swap_successes
;
4460 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4461 struct shm_info
*host_shm_info
)
4463 struct target_shm_info
*target_shm_info
;
4464 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4465 return -TARGET_EFAULT
;
4466 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4467 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4468 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4469 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4470 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4471 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4472 unlock_user_struct(target_shm_info
, target_addr
, 1);
4476 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4478 struct shmid_ds dsarg
;
4479 struct shminfo shminfo
;
4480 struct shm_info shm_info
;
4481 abi_long ret
= -TARGET_EINVAL
;
4489 if (target_to_host_shmid_ds(&dsarg
, buf
))
4490 return -TARGET_EFAULT
;
4491 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4492 if (host_to_target_shmid_ds(buf
, &dsarg
))
4493 return -TARGET_EFAULT
;
4496 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4497 if (host_to_target_shminfo(buf
, &shminfo
))
4498 return -TARGET_EFAULT
;
4501 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4502 if (host_to_target_shm_info(buf
, &shm_info
))
4503 return -TARGET_EFAULT
;
4508 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4515 #ifndef TARGET_FORCE_SHMLBA
4516 /* For most architectures, SHMLBA is the same as the page size;
4517 * some architectures have larger values, in which case they should
4518 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4519 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4520 * and defining its own value for SHMLBA.
4522 * The kernel also permits SHMLBA to be set by the architecture to a
4523 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4524 * this means that addresses are rounded to the large size if
4525 * SHM_RND is set but addresses not aligned to that size are not rejected
4526 * as long as they are at least page-aligned. Since the only architecture
4527 * which uses this is ia64 this code doesn't provide for that oddity.
4529 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4531 return TARGET_PAGE_SIZE
;
4535 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4536 int shmid
, abi_ulong shmaddr
, int shmflg
)
4538 CPUState
*cpu
= env_cpu(cpu_env
);
4541 struct shmid_ds shm_info
;
4545 /* shmat pointers are always untagged */
4547 /* find out the length of the shared memory segment */
4548 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4549 if (is_error(ret
)) {
4550 /* can't get length, bail out */
4554 shmlba
= target_shmlba(cpu_env
);
4556 if (shmaddr
& (shmlba
- 1)) {
4557 if (shmflg
& SHM_RND
) {
4558 shmaddr
&= ~(shmlba
- 1);
4560 return -TARGET_EINVAL
;
4563 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4564 return -TARGET_EINVAL
;
4570 * We're mapping shared memory, so ensure we generate code for parallel
4571 * execution and flush old translations. This will work up to the level
4572 * supported by the host -- anything that requires EXCP_ATOMIC will not
4573 * be atomic with respect to an external process.
4575 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4576 cpu
->tcg_cflags
|= CF_PARALLEL
;
4581 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4583 abi_ulong mmap_start
;
4585 /* In order to use the host shmat, we need to honor host SHMLBA. */
4586 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4588 if (mmap_start
== -1) {
4590 host_raddr
= (void *)-1;
4592 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4593 shmflg
| SHM_REMAP
);
4596 if (host_raddr
== (void *)-1) {
4598 return get_errno((long)host_raddr
);
4600 raddr
=h2g((unsigned long)host_raddr
);
4602 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4603 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4604 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4606 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4607 if (!shm_regions
[i
].in_use
) {
4608 shm_regions
[i
].in_use
= true;
4609 shm_regions
[i
].start
= raddr
;
4610 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4620 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4625 /* shmdt pointers are always untagged */
4629 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4630 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4631 shm_regions
[i
].in_use
= false;
4632 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4636 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4764 /* kernel structure types definitions */
4766 #define STRUCT(name, ...) STRUCT_ ## name,
4767 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4769 #include "syscall_types.h"
4773 #undef STRUCT_SPECIAL
4775 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4776 #define STRUCT_SPECIAL(name)
4777 #include "syscall_types.h"
4779 #undef STRUCT_SPECIAL
4781 #define MAX_STRUCT_SIZE 4096
4783 #ifdef CONFIG_FIEMAP
4784 /* So fiemap access checks don't overflow on 32 bit systems.
4785 * This is very slightly smaller than the limit imposed by
4786 * the underlying kernel.
4788 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4789 / sizeof(struct fiemap_extent))
4791 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4792 int fd
, int cmd
, abi_long arg
)
4794 /* The parameter for this ioctl is a struct fiemap followed
4795 * by an array of struct fiemap_extent whose size is set
4796 * in fiemap->fm_extent_count. The array is filled in by the
4799 int target_size_in
, target_size_out
;
4801 const argtype
*arg_type
= ie
->arg_type
;
4802 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4805 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4809 assert(arg_type
[0] == TYPE_PTR
);
4810 assert(ie
->access
== IOC_RW
);
4812 target_size_in
= thunk_type_size(arg_type
, 0);
4813 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4815 return -TARGET_EFAULT
;
4817 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4818 unlock_user(argptr
, arg
, 0);
4819 fm
= (struct fiemap
*)buf_temp
;
4820 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4821 return -TARGET_EINVAL
;
4824 outbufsz
= sizeof (*fm
) +
4825 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4827 if (outbufsz
> MAX_STRUCT_SIZE
) {
4828 /* We can't fit all the extents into the fixed size buffer.
4829 * Allocate one that is large enough and use it instead.
4831 fm
= g_try_malloc(outbufsz
);
4833 return -TARGET_ENOMEM
;
4835 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4838 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4839 if (!is_error(ret
)) {
4840 target_size_out
= target_size_in
;
4841 /* An extent_count of 0 means we were only counting the extents
4842 * so there are no structs to copy
4844 if (fm
->fm_extent_count
!= 0) {
4845 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4847 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4849 ret
= -TARGET_EFAULT
;
4851 /* Convert the struct fiemap */
4852 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4853 if (fm
->fm_extent_count
!= 0) {
4854 p
= argptr
+ target_size_in
;
4855 /* ...and then all the struct fiemap_extents */
4856 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4857 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4862 unlock_user(argptr
, arg
, target_size_out
);
4872 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4873 int fd
, int cmd
, abi_long arg
)
4875 const argtype
*arg_type
= ie
->arg_type
;
4879 struct ifconf
*host_ifconf
;
4881 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4882 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4883 int target_ifreq_size
;
4888 abi_long target_ifc_buf
;
4892 assert(arg_type
[0] == TYPE_PTR
);
4893 assert(ie
->access
== IOC_RW
);
4896 target_size
= thunk_type_size(arg_type
, 0);
4898 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4900 return -TARGET_EFAULT
;
4901 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4902 unlock_user(argptr
, arg
, 0);
4904 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4905 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4906 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4908 if (target_ifc_buf
!= 0) {
4909 target_ifc_len
= host_ifconf
->ifc_len
;
4910 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4911 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4913 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4914 if (outbufsz
> MAX_STRUCT_SIZE
) {
4916 * We can't fit all the extents into the fixed size buffer.
4917 * Allocate one that is large enough and use it instead.
4919 host_ifconf
= g_try_malloc(outbufsz
);
4921 return -TARGET_ENOMEM
;
4923 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4926 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4928 host_ifconf
->ifc_len
= host_ifc_len
;
4930 host_ifc_buf
= NULL
;
4932 host_ifconf
->ifc_buf
= host_ifc_buf
;
4934 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4935 if (!is_error(ret
)) {
4936 /* convert host ifc_len to target ifc_len */
4938 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4939 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4940 host_ifconf
->ifc_len
= target_ifc_len
;
4942 /* restore target ifc_buf */
4944 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4946 /* copy struct ifconf to target user */
4948 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4950 return -TARGET_EFAULT
;
4951 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4952 unlock_user(argptr
, arg
, target_size
);
4954 if (target_ifc_buf
!= 0) {
4955 /* copy ifreq[] to target user */
4956 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4957 for (i
= 0; i
< nb_ifreq
; i
++) {
4958 thunk_convert(argptr
+ i
* target_ifreq_size
,
4959 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4960 ifreq_arg_type
, THUNK_TARGET
);
4962 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4967 g_free(host_ifconf
);
4973 #if defined(CONFIG_USBFS)
4974 #if HOST_LONG_BITS > 64
4975 #error USBDEVFS thunks do not support >64 bit hosts yet.
4978 uint64_t target_urb_adr
;
4979 uint64_t target_buf_adr
;
4980 char *target_buf_ptr
;
4981 struct usbdevfs_urb host_urb
;
4984 static GHashTable
*usbdevfs_urb_hashtable(void)
4986 static GHashTable
*urb_hashtable
;
4988 if (!urb_hashtable
) {
4989 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4991 return urb_hashtable
;
4994 static void urb_hashtable_insert(struct live_urb
*urb
)
4996 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4997 g_hash_table_insert(urb_hashtable
, urb
, urb
);
5000 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
5002 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5003 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
5006 static void urb_hashtable_remove(struct live_urb
*urb
)
5008 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5009 g_hash_table_remove(urb_hashtable
, urb
);
5013 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5014 int fd
, int cmd
, abi_long arg
)
5016 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5017 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5018 struct live_urb
*lurb
;
5022 uintptr_t target_urb_adr
;
5025 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5027 memset(buf_temp
, 0, sizeof(uint64_t));
5028 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5029 if (is_error(ret
)) {
5033 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5034 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5035 if (!lurb
->target_urb_adr
) {
5036 return -TARGET_EFAULT
;
5038 urb_hashtable_remove(lurb
);
5039 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5040 lurb
->host_urb
.buffer_length
);
5041 lurb
->target_buf_ptr
= NULL
;
5043 /* restore the guest buffer pointer */
5044 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5046 /* update the guest urb struct */
5047 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5050 return -TARGET_EFAULT
;
5052 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5053 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5055 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5056 /* write back the urb handle */
5057 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5060 return -TARGET_EFAULT
;
5063 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5064 target_urb_adr
= lurb
->target_urb_adr
;
5065 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5066 unlock_user(argptr
, arg
, target_size
);
5073 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5074 uint8_t *buf_temp
__attribute__((unused
)),
5075 int fd
, int cmd
, abi_long arg
)
5077 struct live_urb
*lurb
;
5079 /* map target address back to host URB with metadata. */
5080 lurb
= urb_hashtable_lookup(arg
);
5082 return -TARGET_EFAULT
;
5084 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5088 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5089 int fd
, int cmd
, abi_long arg
)
5091 const argtype
*arg_type
= ie
->arg_type
;
5096 struct live_urb
*lurb
;
5099 * each submitted URB needs to map to a unique ID for the
5100 * kernel, and that unique ID needs to be a pointer to
5101 * host memory. hence, we need to malloc for each URB.
5102 * isochronous transfers have a variable length struct.
5105 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5107 /* construct host copy of urb and metadata */
5108 lurb
= g_try_new0(struct live_urb
, 1);
5110 return -TARGET_ENOMEM
;
5113 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5116 return -TARGET_EFAULT
;
5118 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5119 unlock_user(argptr
, arg
, 0);
5121 lurb
->target_urb_adr
= arg
;
5122 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5124 /* buffer space used depends on endpoint type so lock the entire buffer */
5125 /* control type urbs should check the buffer contents for true direction */
5126 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5127 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5128 lurb
->host_urb
.buffer_length
, 1);
5129 if (lurb
->target_buf_ptr
== NULL
) {
5131 return -TARGET_EFAULT
;
5134 /* update buffer pointer in host copy */
5135 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5137 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5138 if (is_error(ret
)) {
5139 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5142 urb_hashtable_insert(lurb
);
5147 #endif /* CONFIG_USBFS */
5149 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5150 int cmd
, abi_long arg
)
5153 struct dm_ioctl
*host_dm
;
5154 abi_long guest_data
;
5155 uint32_t guest_data_size
;
5157 const argtype
*arg_type
= ie
->arg_type
;
5159 void *big_buf
= NULL
;
5163 target_size
= thunk_type_size(arg_type
, 0);
5164 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5166 ret
= -TARGET_EFAULT
;
5169 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5170 unlock_user(argptr
, arg
, 0);
5172 /* buf_temp is too small, so fetch things into a bigger buffer */
5173 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5174 memcpy(big_buf
, buf_temp
, target_size
);
5178 guest_data
= arg
+ host_dm
->data_start
;
5179 if ((guest_data
- arg
) < 0) {
5180 ret
= -TARGET_EINVAL
;
5183 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5184 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5186 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5188 ret
= -TARGET_EFAULT
;
5192 switch (ie
->host_cmd
) {
5194 case DM_LIST_DEVICES
:
5197 case DM_DEV_SUSPEND
:
5200 case DM_TABLE_STATUS
:
5201 case DM_TABLE_CLEAR
:
5203 case DM_LIST_VERSIONS
:
5207 case DM_DEV_SET_GEOMETRY
:
5208 /* data contains only strings */
5209 memcpy(host_data
, argptr
, guest_data_size
);
5212 memcpy(host_data
, argptr
, guest_data_size
);
5213 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5217 void *gspec
= argptr
;
5218 void *cur_data
= host_data
;
5219 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5220 int spec_size
= thunk_type_size(arg_type
, 0);
5223 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5224 struct dm_target_spec
*spec
= cur_data
;
5228 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5229 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5231 spec
->next
= sizeof(*spec
) + slen
;
5232 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5234 cur_data
+= spec
->next
;
5239 ret
= -TARGET_EINVAL
;
5240 unlock_user(argptr
, guest_data
, 0);
5243 unlock_user(argptr
, guest_data
, 0);
5245 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5246 if (!is_error(ret
)) {
5247 guest_data
= arg
+ host_dm
->data_start
;
5248 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5249 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5250 switch (ie
->host_cmd
) {
5255 case DM_DEV_SUSPEND
:
5258 case DM_TABLE_CLEAR
:
5260 case DM_DEV_SET_GEOMETRY
:
5261 /* no return data */
5263 case DM_LIST_DEVICES
:
5265 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5266 uint32_t remaining_data
= guest_data_size
;
5267 void *cur_data
= argptr
;
5268 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5269 int nl_size
= 12; /* can't use thunk_size due to alignment */
5272 uint32_t next
= nl
->next
;
5274 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5276 if (remaining_data
< nl
->next
) {
5277 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5280 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5281 strcpy(cur_data
+ nl_size
, nl
->name
);
5282 cur_data
+= nl
->next
;
5283 remaining_data
-= nl
->next
;
5287 nl
= (void*)nl
+ next
;
5292 case DM_TABLE_STATUS
:
5294 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5295 void *cur_data
= argptr
;
5296 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5297 int spec_size
= thunk_type_size(arg_type
, 0);
5300 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5301 uint32_t next
= spec
->next
;
5302 int slen
= strlen((char*)&spec
[1]) + 1;
5303 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5304 if (guest_data_size
< spec
->next
) {
5305 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5308 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5309 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5310 cur_data
= argptr
+ spec
->next
;
5311 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5317 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5318 int count
= *(uint32_t*)hdata
;
5319 uint64_t *hdev
= hdata
+ 8;
5320 uint64_t *gdev
= argptr
+ 8;
5323 *(uint32_t*)argptr
= tswap32(count
);
5324 for (i
= 0; i
< count
; i
++) {
5325 *gdev
= tswap64(*hdev
);
5331 case DM_LIST_VERSIONS
:
5333 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5334 uint32_t remaining_data
= guest_data_size
;
5335 void *cur_data
= argptr
;
5336 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5337 int vers_size
= thunk_type_size(arg_type
, 0);
5340 uint32_t next
= vers
->next
;
5342 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5344 if (remaining_data
< vers
->next
) {
5345 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5348 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5349 strcpy(cur_data
+ vers_size
, vers
->name
);
5350 cur_data
+= vers
->next
;
5351 remaining_data
-= vers
->next
;
5355 vers
= (void*)vers
+ next
;
5360 unlock_user(argptr
, guest_data
, 0);
5361 ret
= -TARGET_EINVAL
;
5364 unlock_user(argptr
, guest_data
, guest_data_size
);
5366 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5368 ret
= -TARGET_EFAULT
;
5371 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5372 unlock_user(argptr
, arg
, target_size
);
5379 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5380 int cmd
, abi_long arg
)
5384 const argtype
*arg_type
= ie
->arg_type
;
5385 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5388 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5389 struct blkpg_partition host_part
;
5391 /* Read and convert blkpg */
5393 target_size
= thunk_type_size(arg_type
, 0);
5394 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5396 ret
= -TARGET_EFAULT
;
5399 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5400 unlock_user(argptr
, arg
, 0);
5402 switch (host_blkpg
->op
) {
5403 case BLKPG_ADD_PARTITION
:
5404 case BLKPG_DEL_PARTITION
:
5405 /* payload is struct blkpg_partition */
5408 /* Unknown opcode */
5409 ret
= -TARGET_EINVAL
;
5413 /* Read and convert blkpg->data */
5414 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5415 target_size
= thunk_type_size(part_arg_type
, 0);
5416 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5418 ret
= -TARGET_EFAULT
;
5421 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5422 unlock_user(argptr
, arg
, 0);
5424 /* Swizzle the data pointer to our local copy and call! */
5425 host_blkpg
->data
= &host_part
;
5426 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5432 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5433 int fd
, int cmd
, abi_long arg
)
5435 const argtype
*arg_type
= ie
->arg_type
;
5436 const StructEntry
*se
;
5437 const argtype
*field_types
;
5438 const int *dst_offsets
, *src_offsets
;
5441 abi_ulong
*target_rt_dev_ptr
= NULL
;
5442 unsigned long *host_rt_dev_ptr
= NULL
;
5446 assert(ie
->access
== IOC_W
);
5447 assert(*arg_type
== TYPE_PTR
);
5449 assert(*arg_type
== TYPE_STRUCT
);
5450 target_size
= thunk_type_size(arg_type
, 0);
5451 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5453 return -TARGET_EFAULT
;
5456 assert(*arg_type
== (int)STRUCT_rtentry
);
5457 se
= struct_entries
+ *arg_type
++;
5458 assert(se
->convert
[0] == NULL
);
5459 /* convert struct here to be able to catch rt_dev string */
5460 field_types
= se
->field_types
;
5461 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5462 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5463 for (i
= 0; i
< se
->nb_fields
; i
++) {
5464 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5465 assert(*field_types
== TYPE_PTRVOID
);
5466 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5467 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5468 if (*target_rt_dev_ptr
!= 0) {
5469 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5470 tswapal(*target_rt_dev_ptr
));
5471 if (!*host_rt_dev_ptr
) {
5472 unlock_user(argptr
, arg
, 0);
5473 return -TARGET_EFAULT
;
5476 *host_rt_dev_ptr
= 0;
5481 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5482 argptr
+ src_offsets
[i
],
5483 field_types
, THUNK_HOST
);
5485 unlock_user(argptr
, arg
, 0);
5487 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5489 assert(host_rt_dev_ptr
!= NULL
);
5490 assert(target_rt_dev_ptr
!= NULL
);
5491 if (*host_rt_dev_ptr
!= 0) {
5492 unlock_user((void *)*host_rt_dev_ptr
,
5493 *target_rt_dev_ptr
, 0);
5498 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5499 int fd
, int cmd
, abi_long arg
)
5501 int sig
= target_to_host_signal(arg
);
5502 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5505 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5506 int fd
, int cmd
, abi_long arg
)
5511 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5512 if (is_error(ret
)) {
5516 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5517 if (copy_to_user_timeval(arg
, &tv
)) {
5518 return -TARGET_EFAULT
;
5521 if (copy_to_user_timeval64(arg
, &tv
)) {
5522 return -TARGET_EFAULT
;
5529 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5530 int fd
, int cmd
, abi_long arg
)
5535 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5536 if (is_error(ret
)) {
5540 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5541 if (host_to_target_timespec(arg
, &ts
)) {
5542 return -TARGET_EFAULT
;
5545 if (host_to_target_timespec64(arg
, &ts
)) {
5546 return -TARGET_EFAULT
;
5554 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5555 int fd
, int cmd
, abi_long arg
)
5557 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5558 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5564 static void unlock_drm_version(struct drm_version
*host_ver
,
5565 struct target_drm_version
*target_ver
,
5568 unlock_user(host_ver
->name
, target_ver
->name
,
5569 copy
? host_ver
->name_len
: 0);
5570 unlock_user(host_ver
->date
, target_ver
->date
,
5571 copy
? host_ver
->date_len
: 0);
5572 unlock_user(host_ver
->desc
, target_ver
->desc
,
5573 copy
? host_ver
->desc_len
: 0);
5576 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5577 struct target_drm_version
*target_ver
)
5579 memset(host_ver
, 0, sizeof(*host_ver
));
5581 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5582 if (host_ver
->name_len
) {
5583 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5584 target_ver
->name_len
, 0);
5585 if (!host_ver
->name
) {
5590 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5591 if (host_ver
->date_len
) {
5592 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5593 target_ver
->date_len
, 0);
5594 if (!host_ver
->date
) {
5599 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5600 if (host_ver
->desc_len
) {
5601 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5602 target_ver
->desc_len
, 0);
5603 if (!host_ver
->desc
) {
5610 unlock_drm_version(host_ver
, target_ver
, false);
5614 static inline void host_to_target_drmversion(
5615 struct target_drm_version
*target_ver
,
5616 struct drm_version
*host_ver
)
5618 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5619 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5620 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5621 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5622 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5623 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5624 unlock_drm_version(host_ver
, target_ver
, true);
5627 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5628 int fd
, int cmd
, abi_long arg
)
5630 struct drm_version
*ver
;
5631 struct target_drm_version
*target_ver
;
5634 switch (ie
->host_cmd
) {
5635 case DRM_IOCTL_VERSION
:
5636 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5637 return -TARGET_EFAULT
;
5639 ver
= (struct drm_version
*)buf_temp
;
5640 ret
= target_to_host_drmversion(ver
, target_ver
);
5641 if (!is_error(ret
)) {
5642 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5643 if (is_error(ret
)) {
5644 unlock_drm_version(ver
, target_ver
, false);
5646 host_to_target_drmversion(target_ver
, ver
);
5649 unlock_user_struct(target_ver
, arg
, 0);
5652 return -TARGET_ENOSYS
;
5655 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5656 struct drm_i915_getparam
*gparam
,
5657 int fd
, abi_long arg
)
5661 struct target_drm_i915_getparam
*target_gparam
;
5663 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5664 return -TARGET_EFAULT
;
5667 __get_user(gparam
->param
, &target_gparam
->param
);
5668 gparam
->value
= &value
;
5669 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5670 put_user_s32(value
, target_gparam
->value
);
5672 unlock_user_struct(target_gparam
, arg
, 0);
5676 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5677 int fd
, int cmd
, abi_long arg
)
5679 switch (ie
->host_cmd
) {
5680 case DRM_IOCTL_I915_GETPARAM
:
5681 return do_ioctl_drm_i915_getparam(ie
,
5682 (struct drm_i915_getparam
*)buf_temp
,
5685 return -TARGET_ENOSYS
;
5691 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5692 int fd
, int cmd
, abi_long arg
)
5694 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5695 struct tun_filter
*target_filter
;
5698 assert(ie
->access
== IOC_W
);
5700 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5701 if (!target_filter
) {
5702 return -TARGET_EFAULT
;
5704 filter
->flags
= tswap16(target_filter
->flags
);
5705 filter
->count
= tswap16(target_filter
->count
);
5706 unlock_user(target_filter
, arg
, 0);
5708 if (filter
->count
) {
5709 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5711 return -TARGET_EFAULT
;
5714 target_addr
= lock_user(VERIFY_READ
,
5715 arg
+ offsetof(struct tun_filter
, addr
),
5716 filter
->count
* ETH_ALEN
, 1);
5718 return -TARGET_EFAULT
;
5720 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5721 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5724 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5727 IOCTLEntry ioctl_entries
[] = {
5728 #define IOCTL(cmd, access, ...) \
5729 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5730 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5731 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5732 #define IOCTL_IGNORE(cmd) \
5733 { TARGET_ ## cmd, 0, #cmd },
5738 /* ??? Implement proper locking for ioctls. */
5739 /* do_ioctl() Must return target values and target errnos. */
5740 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5742 const IOCTLEntry
*ie
;
5743 const argtype
*arg_type
;
5745 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5751 if (ie
->target_cmd
== 0) {
5753 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5754 return -TARGET_ENOSYS
;
5756 if (ie
->target_cmd
== cmd
)
5760 arg_type
= ie
->arg_type
;
5762 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5763 } else if (!ie
->host_cmd
) {
5764 /* Some architectures define BSD ioctls in their headers
5765 that are not implemented in Linux. */
5766 return -TARGET_ENOSYS
;
5769 switch(arg_type
[0]) {
5772 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5778 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5782 target_size
= thunk_type_size(arg_type
, 0);
5783 switch(ie
->access
) {
5785 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5786 if (!is_error(ret
)) {
5787 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5789 return -TARGET_EFAULT
;
5790 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5791 unlock_user(argptr
, arg
, target_size
);
5795 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5797 return -TARGET_EFAULT
;
5798 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5799 unlock_user(argptr
, arg
, 0);
5800 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5804 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5806 return -TARGET_EFAULT
;
5807 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5808 unlock_user(argptr
, arg
, 0);
5809 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5810 if (!is_error(ret
)) {
5811 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5813 return -TARGET_EFAULT
;
5814 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5815 unlock_user(argptr
, arg
, target_size
);
5821 qemu_log_mask(LOG_UNIMP
,
5822 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5823 (long)cmd
, arg_type
[0]);
5824 ret
= -TARGET_ENOSYS
;
5830 static const bitmask_transtbl iflag_tbl
[] = {
5831 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5832 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5833 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5834 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5835 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5836 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5837 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5838 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5839 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5840 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5841 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5842 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5843 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5844 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5845 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5849 static const bitmask_transtbl oflag_tbl
[] = {
5850 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5851 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5852 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5853 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5854 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5855 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5856 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5857 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5858 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5859 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5860 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5861 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5862 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5863 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5864 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5865 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5866 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5867 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5868 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5869 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5870 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5871 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5872 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5873 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5877 static const bitmask_transtbl cflag_tbl
[] = {
5878 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5879 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5880 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5881 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5882 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5883 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5884 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5885 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5886 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5887 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5888 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5889 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5890 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5891 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5892 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5893 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5894 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5895 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5896 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5897 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5898 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5899 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5900 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5901 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5902 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5903 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5904 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5905 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5906 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5907 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5908 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5912 static const bitmask_transtbl lflag_tbl
[] = {
5913 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5914 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5915 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5916 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5917 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5918 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5919 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5920 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5921 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5922 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5923 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5924 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5925 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5926 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5927 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5928 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5932 static void target_to_host_termios (void *dst
, const void *src
)
5934 struct host_termios
*host
= dst
;
5935 const struct target_termios
*target
= src
;
5938 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5940 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5942 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5944 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5945 host
->c_line
= target
->c_line
;
5947 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5948 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5949 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5950 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5951 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5952 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5953 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5954 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5955 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5956 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5957 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5958 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5959 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5960 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5961 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5962 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5963 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5964 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5967 static void host_to_target_termios (void *dst
, const void *src
)
5969 struct target_termios
*target
= dst
;
5970 const struct host_termios
*host
= src
;
5973 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5975 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5977 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5979 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5980 target
->c_line
= host
->c_line
;
5982 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5983 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5984 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5985 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5986 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5987 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5988 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5989 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5990 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5991 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5992 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5993 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5994 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5995 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5996 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5997 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5998 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5999 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6002 static const StructEntry struct_termios_def
= {
6003 .convert
= { host_to_target_termios
, target_to_host_termios
},
6004 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6005 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6006 .print
= print_termios
,
6009 static const bitmask_transtbl mmap_flags_tbl
[] = {
6010 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6011 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6012 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6013 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6014 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6015 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6016 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6017 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6018 MAP_DENYWRITE
, MAP_DENYWRITE
},
6019 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6020 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6021 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6022 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6023 MAP_NORESERVE
, MAP_NORESERVE
},
6024 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6025 /* MAP_STACK had been ignored by the kernel for quite some time.
6026 Recognize it for the target insofar as we do not want to pass
6027 it through to the host. */
6028 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6033 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6034 * TARGET_I386 is defined if TARGET_X86_64 is defined
6036 #if defined(TARGET_I386)
6038 /* NOTE: there is really one LDT for all the threads */
6039 static uint8_t *ldt_table
;
6041 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6048 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6049 if (size
> bytecount
)
6051 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6053 return -TARGET_EFAULT
;
6054 /* ??? Should this by byteswapped? */
6055 memcpy(p
, ldt_table
, size
);
6056 unlock_user(p
, ptr
, size
);
6060 /* XXX: add locking support */
6061 static abi_long
write_ldt(CPUX86State
*env
,
6062 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6064 struct target_modify_ldt_ldt_s ldt_info
;
6065 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6066 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6067 int seg_not_present
, useable
, lm
;
6068 uint32_t *lp
, entry_1
, entry_2
;
6070 if (bytecount
!= sizeof(ldt_info
))
6071 return -TARGET_EINVAL
;
6072 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6073 return -TARGET_EFAULT
;
6074 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6075 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6076 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6077 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6078 unlock_user_struct(target_ldt_info
, ptr
, 0);
6080 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6081 return -TARGET_EINVAL
;
6082 seg_32bit
= ldt_info
.flags
& 1;
6083 contents
= (ldt_info
.flags
>> 1) & 3;
6084 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6085 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6086 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6087 useable
= (ldt_info
.flags
>> 6) & 1;
6091 lm
= (ldt_info
.flags
>> 7) & 1;
6093 if (contents
== 3) {
6095 return -TARGET_EINVAL
;
6096 if (seg_not_present
== 0)
6097 return -TARGET_EINVAL
;
6099 /* allocate the LDT */
6101 env
->ldt
.base
= target_mmap(0,
6102 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6103 PROT_READ
|PROT_WRITE
,
6104 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6105 if (env
->ldt
.base
== -1)
6106 return -TARGET_ENOMEM
;
6107 memset(g2h_untagged(env
->ldt
.base
), 0,
6108 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6109 env
->ldt
.limit
= 0xffff;
6110 ldt_table
= g2h_untagged(env
->ldt
.base
);
6113 /* NOTE: same code as Linux kernel */
6114 /* Allow LDTs to be cleared by the user. */
6115 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6118 read_exec_only
== 1 &&
6120 limit_in_pages
== 0 &&
6121 seg_not_present
== 1 &&
6129 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6130 (ldt_info
.limit
& 0x0ffff);
6131 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6132 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6133 (ldt_info
.limit
& 0xf0000) |
6134 ((read_exec_only
^ 1) << 9) |
6136 ((seg_not_present
^ 1) << 15) |
6138 (limit_in_pages
<< 23) |
6142 entry_2
|= (useable
<< 20);
6144 /* Install the new entry ... */
6146 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6147 lp
[0] = tswap32(entry_1
);
6148 lp
[1] = tswap32(entry_2
);
6152 /* specific and weird i386 syscalls */
6153 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6154 unsigned long bytecount
)
6160 ret
= read_ldt(ptr
, bytecount
);
6163 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6166 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6169 ret
= -TARGET_ENOSYS
;
6175 #if defined(TARGET_ABI32)
6176 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6178 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6179 struct target_modify_ldt_ldt_s ldt_info
;
6180 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6181 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6182 int seg_not_present
, useable
, lm
;
6183 uint32_t *lp
, entry_1
, entry_2
;
6186 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6187 if (!target_ldt_info
)
6188 return -TARGET_EFAULT
;
6189 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6190 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6191 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6192 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6193 if (ldt_info
.entry_number
== -1) {
6194 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6195 if (gdt_table
[i
] == 0) {
6196 ldt_info
.entry_number
= i
;
6197 target_ldt_info
->entry_number
= tswap32(i
);
6202 unlock_user_struct(target_ldt_info
, ptr
, 1);
6204 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6205 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6206 return -TARGET_EINVAL
;
6207 seg_32bit
= ldt_info
.flags
& 1;
6208 contents
= (ldt_info
.flags
>> 1) & 3;
6209 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6210 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6211 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6212 useable
= (ldt_info
.flags
>> 6) & 1;
6216 lm
= (ldt_info
.flags
>> 7) & 1;
6219 if (contents
== 3) {
6220 if (seg_not_present
== 0)
6221 return -TARGET_EINVAL
;
6224 /* NOTE: same code as Linux kernel */
6225 /* Allow LDTs to be cleared by the user. */
6226 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6227 if ((contents
== 0 &&
6228 read_exec_only
== 1 &&
6230 limit_in_pages
== 0 &&
6231 seg_not_present
== 1 &&
6239 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6240 (ldt_info
.limit
& 0x0ffff);
6241 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6242 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6243 (ldt_info
.limit
& 0xf0000) |
6244 ((read_exec_only
^ 1) << 9) |
6246 ((seg_not_present
^ 1) << 15) |
6248 (limit_in_pages
<< 23) |
6253 /* Install the new entry ... */
6255 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6256 lp
[0] = tswap32(entry_1
);
6257 lp
[1] = tswap32(entry_2
);
6261 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6263 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6264 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6265 uint32_t base_addr
, limit
, flags
;
6266 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6267 int seg_not_present
, useable
, lm
;
6268 uint32_t *lp
, entry_1
, entry_2
;
6270 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6271 if (!target_ldt_info
)
6272 return -TARGET_EFAULT
;
6273 idx
= tswap32(target_ldt_info
->entry_number
);
6274 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6275 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6276 unlock_user_struct(target_ldt_info
, ptr
, 1);
6277 return -TARGET_EINVAL
;
6279 lp
= (uint32_t *)(gdt_table
+ idx
);
6280 entry_1
= tswap32(lp
[0]);
6281 entry_2
= tswap32(lp
[1]);
6283 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6284 contents
= (entry_2
>> 10) & 3;
6285 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6286 seg_32bit
= (entry_2
>> 22) & 1;
6287 limit_in_pages
= (entry_2
>> 23) & 1;
6288 useable
= (entry_2
>> 20) & 1;
6292 lm
= (entry_2
>> 21) & 1;
6294 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6295 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6296 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6297 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6298 base_addr
= (entry_1
>> 16) |
6299 (entry_2
& 0xff000000) |
6300 ((entry_2
& 0xff) << 16);
6301 target_ldt_info
->base_addr
= tswapal(base_addr
);
6302 target_ldt_info
->limit
= tswap32(limit
);
6303 target_ldt_info
->flags
= tswap32(flags
);
6304 unlock_user_struct(target_ldt_info
, ptr
, 1);
6308 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6310 return -TARGET_ENOSYS
;
6313 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6320 case TARGET_ARCH_SET_GS
:
6321 case TARGET_ARCH_SET_FS
:
6322 if (code
== TARGET_ARCH_SET_GS
)
6326 cpu_x86_load_seg(env
, idx
, 0);
6327 env
->segs
[idx
].base
= addr
;
6329 case TARGET_ARCH_GET_GS
:
6330 case TARGET_ARCH_GET_FS
:
6331 if (code
== TARGET_ARCH_GET_GS
)
6335 val
= env
->segs
[idx
].base
;
6336 if (put_user(val
, addr
, abi_ulong
))
6337 ret
= -TARGET_EFAULT
;
6340 ret
= -TARGET_EINVAL
;
6345 #endif /* defined(TARGET_ABI32 */
6346 #endif /* defined(TARGET_I386) */
6349 * These constants are generic. Supply any that are missing from the host.
6352 # define PR_SET_NAME 15
6353 # define PR_GET_NAME 16
6355 #ifndef PR_SET_FP_MODE
6356 # define PR_SET_FP_MODE 45
6357 # define PR_GET_FP_MODE 46
6358 # define PR_FP_MODE_FR (1 << 0)
6359 # define PR_FP_MODE_FRE (1 << 1)
6361 #ifndef PR_SVE_SET_VL
6362 # define PR_SVE_SET_VL 50
6363 # define PR_SVE_GET_VL 51
6364 # define PR_SVE_VL_LEN_MASK 0xffff
6365 # define PR_SVE_VL_INHERIT (1 << 17)
6367 #ifndef PR_PAC_RESET_KEYS
6368 # define PR_PAC_RESET_KEYS 54
6369 # define PR_PAC_APIAKEY (1 << 0)
6370 # define PR_PAC_APIBKEY (1 << 1)
6371 # define PR_PAC_APDAKEY (1 << 2)
6372 # define PR_PAC_APDBKEY (1 << 3)
6373 # define PR_PAC_APGAKEY (1 << 4)
6375 #ifndef PR_SET_TAGGED_ADDR_CTRL
6376 # define PR_SET_TAGGED_ADDR_CTRL 55
6377 # define PR_GET_TAGGED_ADDR_CTRL 56
6378 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6380 #ifndef PR_MTE_TCF_SHIFT
6381 # define PR_MTE_TCF_SHIFT 1
6382 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6383 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6384 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6385 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6386 # define PR_MTE_TAG_SHIFT 3
6387 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6389 #ifndef PR_SET_IO_FLUSHER
6390 # define PR_SET_IO_FLUSHER 57
6391 # define PR_GET_IO_FLUSHER 58
6393 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6394 # define PR_SET_SYSCALL_USER_DISPATCH 59
6396 #ifndef PR_SME_SET_VL
6397 # define PR_SME_SET_VL 63
6398 # define PR_SME_GET_VL 64
6399 # define PR_SME_VL_LEN_MASK 0xffff
6400 # define PR_SME_VL_INHERIT (1 << 17)
6403 #include "target_prctl.h"
6405 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6407 return -TARGET_EINVAL
;
6410 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6412 return -TARGET_EINVAL
;
6415 #ifndef do_prctl_get_fp_mode
6416 #define do_prctl_get_fp_mode do_prctl_inval0
6418 #ifndef do_prctl_set_fp_mode
6419 #define do_prctl_set_fp_mode do_prctl_inval1
6421 #ifndef do_prctl_sve_get_vl
6422 #define do_prctl_sve_get_vl do_prctl_inval0
6424 #ifndef do_prctl_sve_set_vl
6425 #define do_prctl_sve_set_vl do_prctl_inval1
6427 #ifndef do_prctl_reset_keys
6428 #define do_prctl_reset_keys do_prctl_inval1
6430 #ifndef do_prctl_set_tagged_addr_ctrl
6431 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6433 #ifndef do_prctl_get_tagged_addr_ctrl
6434 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6436 #ifndef do_prctl_get_unalign
6437 #define do_prctl_get_unalign do_prctl_inval1
6439 #ifndef do_prctl_set_unalign
6440 #define do_prctl_set_unalign do_prctl_inval1
6442 #ifndef do_prctl_sme_get_vl
6443 #define do_prctl_sme_get_vl do_prctl_inval0
6445 #ifndef do_prctl_sme_set_vl
6446 #define do_prctl_sme_set_vl do_prctl_inval1
6449 static abi_long
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6450 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6455 case PR_GET_PDEATHSIG
:
6458 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6460 if (!is_error(ret
) &&
6461 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6462 return -TARGET_EFAULT
;
6466 case PR_SET_PDEATHSIG
:
6467 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6471 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6473 return -TARGET_EFAULT
;
6475 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6477 unlock_user(name
, arg2
, 16);
6482 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6484 return -TARGET_EFAULT
;
6486 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6488 unlock_user(name
, arg2
, 0);
6491 case PR_GET_FP_MODE
:
6492 return do_prctl_get_fp_mode(env
);
6493 case PR_SET_FP_MODE
:
6494 return do_prctl_set_fp_mode(env
, arg2
);
6496 return do_prctl_sve_get_vl(env
);
6498 return do_prctl_sve_set_vl(env
, arg2
);
6500 return do_prctl_sme_get_vl(env
);
6502 return do_prctl_sme_set_vl(env
, arg2
);
6503 case PR_PAC_RESET_KEYS
:
6504 if (arg3
|| arg4
|| arg5
) {
6505 return -TARGET_EINVAL
;
6507 return do_prctl_reset_keys(env
, arg2
);
6508 case PR_SET_TAGGED_ADDR_CTRL
:
6509 if (arg3
|| arg4
|| arg5
) {
6510 return -TARGET_EINVAL
;
6512 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6513 case PR_GET_TAGGED_ADDR_CTRL
:
6514 if (arg2
|| arg3
|| arg4
|| arg5
) {
6515 return -TARGET_EINVAL
;
6517 return do_prctl_get_tagged_addr_ctrl(env
);
6519 case PR_GET_UNALIGN
:
6520 return do_prctl_get_unalign(env
, arg2
);
6521 case PR_SET_UNALIGN
:
6522 return do_prctl_set_unalign(env
, arg2
);
6524 case PR_CAP_AMBIENT
:
6525 case PR_CAPBSET_READ
:
6526 case PR_CAPBSET_DROP
:
6527 case PR_GET_DUMPABLE
:
6528 case PR_SET_DUMPABLE
:
6529 case PR_GET_KEEPCAPS
:
6530 case PR_SET_KEEPCAPS
:
6531 case PR_GET_SECUREBITS
:
6532 case PR_SET_SECUREBITS
:
6535 case PR_GET_TIMERSLACK
:
6536 case PR_SET_TIMERSLACK
:
6538 case PR_MCE_KILL_GET
:
6539 case PR_GET_NO_NEW_PRIVS
:
6540 case PR_SET_NO_NEW_PRIVS
:
6541 case PR_GET_IO_FLUSHER
:
6542 case PR_SET_IO_FLUSHER
:
6543 /* Some prctl options have no pointer arguments and we can pass on. */
6544 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6546 case PR_GET_CHILD_SUBREAPER
:
6547 case PR_SET_CHILD_SUBREAPER
:
6548 case PR_GET_SPECULATION_CTRL
:
6549 case PR_SET_SPECULATION_CTRL
:
6550 case PR_GET_TID_ADDRESS
:
6552 return -TARGET_EINVAL
;
6556 /* Was used for SPE on PowerPC. */
6557 return -TARGET_EINVAL
;
6564 case PR_GET_SECCOMP
:
6565 case PR_SET_SECCOMP
:
6566 case PR_SET_SYSCALL_USER_DISPATCH
:
6567 case PR_GET_THP_DISABLE
:
6568 case PR_SET_THP_DISABLE
:
6571 /* Disable to prevent the target disabling stuff we need. */
6572 return -TARGET_EINVAL
;
6575 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6577 return -TARGET_EINVAL
;
6581 #define NEW_STACK_SIZE 0x40000
6584 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6587 pthread_mutex_t mutex
;
6588 pthread_cond_t cond
;
6591 abi_ulong child_tidptr
;
6592 abi_ulong parent_tidptr
;
6596 static void *clone_func(void *arg
)
6598 new_thread_info
*info
= arg
;
6603 rcu_register_thread();
6604 tcg_register_thread();
6608 ts
= (TaskState
*)cpu
->opaque
;
6609 info
->tid
= sys_gettid();
6611 if (info
->child_tidptr
)
6612 put_user_u32(info
->tid
, info
->child_tidptr
);
6613 if (info
->parent_tidptr
)
6614 put_user_u32(info
->tid
, info
->parent_tidptr
);
6615 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6616 /* Enable signals. */
6617 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6618 /* Signal to the parent that we're ready. */
6619 pthread_mutex_lock(&info
->mutex
);
6620 pthread_cond_broadcast(&info
->cond
);
6621 pthread_mutex_unlock(&info
->mutex
);
6622 /* Wait until the parent has finished initializing the tls state. */
6623 pthread_mutex_lock(&clone_lock
);
6624 pthread_mutex_unlock(&clone_lock
);
6630 /* do_fork() Must return host values and target errnos (unlike most
6631 do_*() functions). */
6632 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6633 abi_ulong parent_tidptr
, target_ulong newtls
,
6634 abi_ulong child_tidptr
)
6636 CPUState
*cpu
= env_cpu(env
);
6640 CPUArchState
*new_env
;
6643 flags
&= ~CLONE_IGNORED_FLAGS
;
6645 /* Emulate vfork() with fork() */
6646 if (flags
& CLONE_VFORK
)
6647 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6649 if (flags
& CLONE_VM
) {
6650 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6651 new_thread_info info
;
6652 pthread_attr_t attr
;
6654 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6655 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6656 return -TARGET_EINVAL
;
6659 ts
= g_new0(TaskState
, 1);
6660 init_task_state(ts
);
6662 /* Grab a mutex so that thread setup appears atomic. */
6663 pthread_mutex_lock(&clone_lock
);
6666 * If this is our first additional thread, we need to ensure we
6667 * generate code for parallel execution and flush old translations.
6668 * Do this now so that the copy gets CF_PARALLEL too.
6670 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6671 cpu
->tcg_cflags
|= CF_PARALLEL
;
6675 /* we create a new CPU instance. */
6676 new_env
= cpu_copy(env
);
6677 /* Init regs that differ from the parent. */
6678 cpu_clone_regs_child(new_env
, newsp
, flags
);
6679 cpu_clone_regs_parent(env
, flags
);
6680 new_cpu
= env_cpu(new_env
);
6681 new_cpu
->opaque
= ts
;
6682 ts
->bprm
= parent_ts
->bprm
;
6683 ts
->info
= parent_ts
->info
;
6684 ts
->signal_mask
= parent_ts
->signal_mask
;
6686 if (flags
& CLONE_CHILD_CLEARTID
) {
6687 ts
->child_tidptr
= child_tidptr
;
6690 if (flags
& CLONE_SETTLS
) {
6691 cpu_set_tls (new_env
, newtls
);
6694 memset(&info
, 0, sizeof(info
));
6695 pthread_mutex_init(&info
.mutex
, NULL
);
6696 pthread_mutex_lock(&info
.mutex
);
6697 pthread_cond_init(&info
.cond
, NULL
);
6699 if (flags
& CLONE_CHILD_SETTID
) {
6700 info
.child_tidptr
= child_tidptr
;
6702 if (flags
& CLONE_PARENT_SETTID
) {
6703 info
.parent_tidptr
= parent_tidptr
;
6706 ret
= pthread_attr_init(&attr
);
6707 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6708 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6709 /* It is not safe to deliver signals until the child has finished
6710 initializing, so temporarily block all signals. */
6711 sigfillset(&sigmask
);
6712 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6713 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6715 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6716 /* TODO: Free new CPU state if thread creation failed. */
6718 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6719 pthread_attr_destroy(&attr
);
6721 /* Wait for the child to initialize. */
6722 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6727 pthread_mutex_unlock(&info
.mutex
);
6728 pthread_cond_destroy(&info
.cond
);
6729 pthread_mutex_destroy(&info
.mutex
);
6730 pthread_mutex_unlock(&clone_lock
);
6732 /* if no CLONE_VM, we consider it is a fork */
6733 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6734 return -TARGET_EINVAL
;
6737 /* We can't support custom termination signals */
6738 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6739 return -TARGET_EINVAL
;
6742 if (block_signals()) {
6743 return -QEMU_ERESTARTSYS
;
6749 /* Child Process. */
6750 cpu_clone_regs_child(env
, newsp
, flags
);
6752 /* There is a race condition here. The parent process could
6753 theoretically read the TID in the child process before the child
6754 tid is set. This would require using either ptrace
6755 (not implemented) or having *_tidptr to point at a shared memory
6756 mapping. We can't repeat the spinlock hack used above because
6757 the child process gets its own copy of the lock. */
6758 if (flags
& CLONE_CHILD_SETTID
)
6759 put_user_u32(sys_gettid(), child_tidptr
);
6760 if (flags
& CLONE_PARENT_SETTID
)
6761 put_user_u32(sys_gettid(), parent_tidptr
);
6762 ts
= (TaskState
*)cpu
->opaque
;
6763 if (flags
& CLONE_SETTLS
)
6764 cpu_set_tls (env
, newtls
);
6765 if (flags
& CLONE_CHILD_CLEARTID
)
6766 ts
->child_tidptr
= child_tidptr
;
6768 cpu_clone_regs_parent(env
, flags
);
6775 /* warning : doesn't handle linux specific flags... */
6776 static int target_to_host_fcntl_cmd(int cmd
)
6781 case TARGET_F_DUPFD
:
6782 case TARGET_F_GETFD
:
6783 case TARGET_F_SETFD
:
6784 case TARGET_F_GETFL
:
6785 case TARGET_F_SETFL
:
6786 case TARGET_F_OFD_GETLK
:
6787 case TARGET_F_OFD_SETLK
:
6788 case TARGET_F_OFD_SETLKW
:
6791 case TARGET_F_GETLK
:
6794 case TARGET_F_SETLK
:
6797 case TARGET_F_SETLKW
:
6800 case TARGET_F_GETOWN
:
6803 case TARGET_F_SETOWN
:
6806 case TARGET_F_GETSIG
:
6809 case TARGET_F_SETSIG
:
6812 #if TARGET_ABI_BITS == 32
6813 case TARGET_F_GETLK64
:
6816 case TARGET_F_SETLK64
:
6819 case TARGET_F_SETLKW64
:
6823 case TARGET_F_SETLEASE
:
6826 case TARGET_F_GETLEASE
:
6829 #ifdef F_DUPFD_CLOEXEC
6830 case TARGET_F_DUPFD_CLOEXEC
:
6831 ret
= F_DUPFD_CLOEXEC
;
6834 case TARGET_F_NOTIFY
:
6838 case TARGET_F_GETOWN_EX
:
6843 case TARGET_F_SETOWN_EX
:
6848 case TARGET_F_SETPIPE_SZ
:
6851 case TARGET_F_GETPIPE_SZ
:
6856 case TARGET_F_ADD_SEALS
:
6859 case TARGET_F_GET_SEALS
:
6864 ret
= -TARGET_EINVAL
;
6868 #if defined(__powerpc64__)
6869 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6870 * is not supported by kernel. The glibc fcntl call actually adjusts
6871 * them to 5, 6 and 7 before making the syscall(). Since we make the
6872 * syscall directly, adjust to what is supported by the kernel.
6874 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6875 ret
-= F_GETLK64
- 5;
6882 #define FLOCK_TRANSTBL \
6884 TRANSTBL_CONVERT(F_RDLCK); \
6885 TRANSTBL_CONVERT(F_WRLCK); \
6886 TRANSTBL_CONVERT(F_UNLCK); \
6889 static int target_to_host_flock(int type
)
6891 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6893 #undef TRANSTBL_CONVERT
6894 return -TARGET_EINVAL
;
6897 static int host_to_target_flock(int type
)
6899 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6901 #undef TRANSTBL_CONVERT
6902 /* if we don't know how to convert the value coming
6903 * from the host we copy to the target field as-is
6908 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6909 abi_ulong target_flock_addr
)
6911 struct target_flock
*target_fl
;
6914 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6915 return -TARGET_EFAULT
;
6918 __get_user(l_type
, &target_fl
->l_type
);
6919 l_type
= target_to_host_flock(l_type
);
6923 fl
->l_type
= l_type
;
6924 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6925 __get_user(fl
->l_start
, &target_fl
->l_start
);
6926 __get_user(fl
->l_len
, &target_fl
->l_len
);
6927 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6928 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6932 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6933 const struct flock64
*fl
)
6935 struct target_flock
*target_fl
;
6938 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6939 return -TARGET_EFAULT
;
6942 l_type
= host_to_target_flock(fl
->l_type
);
6943 __put_user(l_type
, &target_fl
->l_type
);
6944 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6945 __put_user(fl
->l_start
, &target_fl
->l_start
);
6946 __put_user(fl
->l_len
, &target_fl
->l_len
);
6947 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6948 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6952 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6953 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6955 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6956 struct target_oabi_flock64
{
6964 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6965 abi_ulong target_flock_addr
)
6967 struct target_oabi_flock64
*target_fl
;
6970 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6971 return -TARGET_EFAULT
;
6974 __get_user(l_type
, &target_fl
->l_type
);
6975 l_type
= target_to_host_flock(l_type
);
6979 fl
->l_type
= l_type
;
6980 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6981 __get_user(fl
->l_start
, &target_fl
->l_start
);
6982 __get_user(fl
->l_len
, &target_fl
->l_len
);
6983 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6984 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6988 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6989 const struct flock64
*fl
)
6991 struct target_oabi_flock64
*target_fl
;
6994 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6995 return -TARGET_EFAULT
;
6998 l_type
= host_to_target_flock(fl
->l_type
);
6999 __put_user(l_type
, &target_fl
->l_type
);
7000 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7001 __put_user(fl
->l_start
, &target_fl
->l_start
);
7002 __put_user(fl
->l_len
, &target_fl
->l_len
);
7003 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7004 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7009 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
7010 abi_ulong target_flock_addr
)
7012 struct target_flock64
*target_fl
;
7015 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
7016 return -TARGET_EFAULT
;
7019 __get_user(l_type
, &target_fl
->l_type
);
7020 l_type
= target_to_host_flock(l_type
);
7024 fl
->l_type
= l_type
;
7025 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
7026 __get_user(fl
->l_start
, &target_fl
->l_start
);
7027 __get_user(fl
->l_len
, &target_fl
->l_len
);
7028 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
7029 unlock_user_struct(target_fl
, target_flock_addr
, 0);
7033 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
7034 const struct flock64
*fl
)
7036 struct target_flock64
*target_fl
;
7039 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
7040 return -TARGET_EFAULT
;
7043 l_type
= host_to_target_flock(fl
->l_type
);
7044 __put_user(l_type
, &target_fl
->l_type
);
7045 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7046 __put_user(fl
->l_start
, &target_fl
->l_start
);
7047 __put_user(fl
->l_len
, &target_fl
->l_len
);
7048 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7049 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7053 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
7055 struct flock64 fl64
;
7057 struct f_owner_ex fox
;
7058 struct target_f_owner_ex
*target_fox
;
7061 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
7063 if (host_cmd
== -TARGET_EINVAL
)
7067 case TARGET_F_GETLK
:
7068 ret
= copy_from_user_flock(&fl64
, arg
);
7072 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7074 ret
= copy_to_user_flock(arg
, &fl64
);
7078 case TARGET_F_SETLK
:
7079 case TARGET_F_SETLKW
:
7080 ret
= copy_from_user_flock(&fl64
, arg
);
7084 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7087 case TARGET_F_GETLK64
:
7088 case TARGET_F_OFD_GETLK
:
7089 ret
= copy_from_user_flock64(&fl64
, arg
);
7093 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7095 ret
= copy_to_user_flock64(arg
, &fl64
);
7098 case TARGET_F_SETLK64
:
7099 case TARGET_F_SETLKW64
:
7100 case TARGET_F_OFD_SETLK
:
7101 case TARGET_F_OFD_SETLKW
:
7102 ret
= copy_from_user_flock64(&fl64
, arg
);
7106 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7109 case TARGET_F_GETFL
:
7110 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7112 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
7116 case TARGET_F_SETFL
:
7117 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
7118 target_to_host_bitmask(arg
,
7123 case TARGET_F_GETOWN_EX
:
7124 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7126 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
7127 return -TARGET_EFAULT
;
7128 target_fox
->type
= tswap32(fox
.type
);
7129 target_fox
->pid
= tswap32(fox
.pid
);
7130 unlock_user_struct(target_fox
, arg
, 1);
7136 case TARGET_F_SETOWN_EX
:
7137 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
7138 return -TARGET_EFAULT
;
7139 fox
.type
= tswap32(target_fox
->type
);
7140 fox
.pid
= tswap32(target_fox
->pid
);
7141 unlock_user_struct(target_fox
, arg
, 0);
7142 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7146 case TARGET_F_SETSIG
:
7147 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
7150 case TARGET_F_GETSIG
:
7151 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
7154 case TARGET_F_SETOWN
:
7155 case TARGET_F_GETOWN
:
7156 case TARGET_F_SETLEASE
:
7157 case TARGET_F_GETLEASE
:
7158 case TARGET_F_SETPIPE_SZ
:
7159 case TARGET_F_GETPIPE_SZ
:
7160 case TARGET_F_ADD_SEALS
:
7161 case TARGET_F_GET_SEALS
:
7162 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7166 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID/GID ABI helpers: clamp 32-bit host ids into the legacy
 * 16-bit range (65534 = overflow id), widen 16-bit guest ids preserving
 * the -1 "unchanged" sentinel, and byte-swap at the right width. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID/GID ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
7238 /* We must do direct syscalls for setting UID/GID, because we want to
7239 * implement the Linux system call semantics of "change only for this thread",
7240 * not the libc/POSIX semantics of "change for all threads in process".
7241 * (See http://ewontfix.com/17/ for more details.)
7242 * We use the 32-bit version of the syscalls if present; if it is not
7243 * then either the host architecture supports 32-bit UIDs natively with
7244 * the standard syscall, or the 16-bit UID is the best we can do.
7246 #ifdef __NR_setuid32
7247 #define __NR_sys_setuid __NR_setuid32
7249 #define __NR_sys_setuid __NR_setuid
7251 #ifdef __NR_setgid32
7252 #define __NR_sys_setgid __NR_setgid32
7254 #define __NR_sys_setgid __NR_setgid
7256 #ifdef __NR_setresuid32
7257 #define __NR_sys_setresuid __NR_setresuid32
7259 #define __NR_sys_setresuid __NR_setresuid
7261 #ifdef __NR_setresgid32
7262 #define __NR_sys_setresgid __NR_setresgid32
7264 #define __NR_sys_setresgid __NR_setresgid
7267 _syscall1(int, sys_setuid
, uid_t
, uid
)
7268 _syscall1(int, sys_setgid
, gid_t
, gid
)
7269 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7270 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7272 void syscall_init(void)
7275 const argtype
*arg_type
;
7278 thunk_init(STRUCT_MAX
);
7280 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7281 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7282 #include "syscall_types.h"
7284 #undef STRUCT_SPECIAL
7286 /* we patch the ioctl size if necessary. We rely on the fact that
7287 no ioctl has all the bits at '1' in the size field */
7289 while (ie
->target_cmd
!= 0) {
7290 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7291 TARGET_IOC_SIZEMASK
) {
7292 arg_type
= ie
->arg_type
;
7293 if (arg_type
[0] != TYPE_PTR
) {
7294 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7299 size
= thunk_type_size(arg_type
, 0);
7300 ie
->target_cmd
= (ie
->target_cmd
&
7301 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7302 (size
<< TARGET_IOC_SIZESHIFT
);
7305 /* automatic consistency check if same arch */
7306 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7307 (defined(__x86_64__) && defined(TARGET_X86_64))
7308 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7309 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7310 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#ifdef TARGET_NR_truncate64
/*
 * truncate64(2): the 64-bit length arrives as a register pair; on ABIs
 * that align register pairs, the pair is shifted up by one argument slot.
 */
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64(2): same register-pair alignment handling as
 * target_truncate64, but operating on a file descriptor.
 */
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Copy a target itimerspec (32-bit time_t layout) from guest memory
 * into a host struct itimerspec.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * Copy a target __kernel_itimerspec (64-bit time_t layout) from guest
 * memory into a host struct itimerspec.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Copy a host struct itimerspec out to a target itimerspec (32-bit
 * time_t layout) in guest memory.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * Copy a host struct itimerspec out to a target __kernel_itimerspec
 * (64-bit time_t layout) in guest memory.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
7419 #if defined(TARGET_NR_adjtimex) || \
7420 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7421 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7422 abi_long target_addr
)
7424 struct target_timex
*target_tx
;
7426 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7427 return -TARGET_EFAULT
;
7430 __get_user(host_tx
->modes
, &target_tx
->modes
);
7431 __get_user(host_tx
->offset
, &target_tx
->offset
);
7432 __get_user(host_tx
->freq
, &target_tx
->freq
);
7433 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7434 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7435 __get_user(host_tx
->status
, &target_tx
->status
);
7436 __get_user(host_tx
->constant
, &target_tx
->constant
);
7437 __get_user(host_tx
->precision
, &target_tx
->precision
);
7438 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7439 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7440 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7441 __get_user(host_tx
->tick
, &target_tx
->tick
);
7442 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7443 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7444 __get_user(host_tx
->shift
, &target_tx
->shift
);
7445 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7446 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7447 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7448 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7449 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7450 __get_user(host_tx
->tai
, &target_tx
->tai
);
7452 unlock_user_struct(target_tx
, target_addr
, 0);
7456 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7457 struct timex
*host_tx
)
7459 struct target_timex
*target_tx
;
7461 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7462 return -TARGET_EFAULT
;
7465 __put_user(host_tx
->modes
, &target_tx
->modes
);
7466 __put_user(host_tx
->offset
, &target_tx
->offset
);
7467 __put_user(host_tx
->freq
, &target_tx
->freq
);
7468 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7469 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7470 __put_user(host_tx
->status
, &target_tx
->status
);
7471 __put_user(host_tx
->constant
, &target_tx
->constant
);
7472 __put_user(host_tx
->precision
, &target_tx
->precision
);
7473 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7474 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7475 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7476 __put_user(host_tx
->tick
, &target_tx
->tick
);
7477 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7478 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7479 __put_user(host_tx
->shift
, &target_tx
->shift
);
7480 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7481 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7482 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7483 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7484 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7485 __put_user(host_tx
->tai
, &target_tx
->tai
);
7487 unlock_user_struct(target_tx
, target_addr
, 1);
7493 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7494 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7495 abi_long target_addr
)
7497 struct target__kernel_timex
*target_tx
;
7499 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7500 offsetof(struct target__kernel_timex
,
7502 return -TARGET_EFAULT
;
7505 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7506 return -TARGET_EFAULT
;
7509 __get_user(host_tx
->modes
, &target_tx
->modes
);
7510 __get_user(host_tx
->offset
, &target_tx
->offset
);
7511 __get_user(host_tx
->freq
, &target_tx
->freq
);
7512 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7513 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7514 __get_user(host_tx
->status
, &target_tx
->status
);
7515 __get_user(host_tx
->constant
, &target_tx
->constant
);
7516 __get_user(host_tx
->precision
, &target_tx
->precision
);
7517 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7518 __get_user(host_tx
->tick
, &target_tx
->tick
);
7519 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7520 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7521 __get_user(host_tx
->shift
, &target_tx
->shift
);
7522 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7523 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7524 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7525 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7526 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7527 __get_user(host_tx
->tai
, &target_tx
->tai
);
7529 unlock_user_struct(target_tx
, target_addr
, 0);
7533 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7534 struct timex
*host_tx
)
7536 struct target__kernel_timex
*target_tx
;
7538 if (copy_to_user_timeval64(target_addr
+
7539 offsetof(struct target__kernel_timex
, time
),
7541 return -TARGET_EFAULT
;
7544 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7545 return -TARGET_EFAULT
;
7548 __put_user(host_tx
->modes
, &target_tx
->modes
);
7549 __put_user(host_tx
->offset
, &target_tx
->offset
);
7550 __put_user(host_tx
->freq
, &target_tx
->freq
);
7551 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7552 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7553 __put_user(host_tx
->status
, &target_tx
->status
);
7554 __put_user(host_tx
->constant
, &target_tx
->constant
);
7555 __put_user(host_tx
->precision
, &target_tx
->precision
);
7556 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7557 __put_user(host_tx
->tick
, &target_tx
->tick
);
7558 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7559 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7560 __put_user(host_tx
->shift
, &target_tx
->shift
);
7561 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7562 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7563 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7564 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7565 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7566 __put_user(host_tx
->tai
, &target_tx
->tai
);
7568 unlock_user_struct(target_tx
, target_addr
, 1);
7573 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7574 #define sigev_notify_thread_id _sigev_un._tid
7577 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7578 abi_ulong target_addr
)
7580 struct target_sigevent
*target_sevp
;
7582 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7583 return -TARGET_EFAULT
;
7586 /* This union is awkward on 64 bit systems because it has a 32 bit
7587 * integer and a pointer in it; we follow the conversion approach
7588 * used for handling sigval types in signal.c so the guest should get
7589 * the correct value back even if we did a 64 bit byteswap and it's
7590 * using the 32 bit integer.
7592 host_sevp
->sigev_value
.sival_ptr
=
7593 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7594 host_sevp
->sigev_signo
=
7595 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7596 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7597 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7599 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/*
 * Translate a guest mlockall() flags argument into the host's MCL_* bits.
 * Unknown guest bits are silently dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    /* MCL_ONFAULT is not available on all host kernels/libcs */
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to a guest stat64-family structure at
 * target_addr.  On 32-bit ARM the layout depends on whether the guest uses
 * the EABI variant; otherwise target_stat64 (or target_stat when the target
 * has no separate 64-bit layout) is used.  Returns 0 on success,
 * -TARGET_EFAULT on an unwritable guest address.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result (already in target_statx layout with host byte
 * order) out to guest memory at target_addr, byte-swapping each field.
 * Returns 0 on success, -TARGET_EFAULT on an unwritable guest address.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/*
 * Invoke the raw host futex syscall, choosing between __NR_futex and
 * __NR_futex_time64 depending on the host's time_t width.  Unreachable
 * if neither syscall exists on the host.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7767 static int do_safe_futex(int *uaddr
, int op
, int val
,
7768 const struct timespec
*timeout
, int *uaddr2
,
7771 #if HOST_LONG_BITS == 64
7772 #if defined(__NR_futex)
7773 /* always a 64-bit time_t, it doesn't define _time64 version */
7774 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7776 #else /* HOST_LONG_BITS == 64 */
7777 #if defined(__NR_futex_time64)
7778 if (sizeof(timeout
->tv_sec
) == 8) {
7779 /* _time64 function on 32bit arch */
7780 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7784 #if defined(__NR_futex)
7785 /* old function on 32bit arch */
7786 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7788 #endif /* HOST_LONG_BITS == 64 */
7789 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex/futex_time64 syscall.  Per-operation argument
 * conversion: VAL is byte-swapped where it is compared against guest
 * memory, TIMEOUT is either a guest timespec pointer or (for REQUEUE-type
 * ops) a plain VAL2 integer smuggled through the pointer argument, and
 * UADDR/UADDR2 are translated to host addresses.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): resolve the guest pathname, call the host
 * syscall into a scratch host file_handle, then copy the (opaque) handle
 * back to the guest with the two header fields byte-swapped, and store the
 * mount id.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest file_handle */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest file_handle into a host
 * buffer (byte-swapping the header fields) and invoke the host syscall
 * with translated open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest file_handle */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Emulate signalfd/signalfd4: validate the guest flags, convert the guest
 * signal mask and flag bits to host values, create the host signalfd and
 * register the fd translator so siginfo reads are converted back.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stopping signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7998 static int open_self_cmdline(CPUArchState
*cpu_env
, int fd
)
8000 CPUState
*cpu
= env_cpu(cpu_env
);
8001 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
8004 for (i
= 0; i
< bprm
->argc
; i
++) {
8005 size_t len
= strlen(bprm
->argv
[i
]) + 1;
8007 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
8015 static int open_self_maps(CPUArchState
*cpu_env
, int fd
)
8017 CPUState
*cpu
= env_cpu(cpu_env
);
8018 TaskState
*ts
= cpu
->opaque
;
8019 GSList
*map_info
= read_self_maps();
8023 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
8024 MapInfo
*e
= (MapInfo
*) s
->data
;
8026 if (h2g_valid(e
->start
)) {
8027 unsigned long min
= e
->start
;
8028 unsigned long max
= e
->end
;
8029 int flags
= page_get_flags(h2g(min
));
8032 max
= h2g_valid(max
- 1) ?
8033 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
8035 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
8040 if (h2g(max
) == ts
->info
->stack_limit
) {
8042 if (h2g(min
) == ts
->info
->stack_limit
) {
8049 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8050 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
8051 h2g(min
), h2g(max
- 1) + 1,
8052 (flags
& PAGE_READ
) ? 'r' : '-',
8053 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8054 (flags
& PAGE_EXEC
) ? 'x' : '-',
8055 e
->is_priv
? 'p' : 's',
8056 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
8058 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8065 free_self_maps(map_info
);
8067 #ifdef TARGET_VSYSCALL_PAGE
8069 * We only support execution from the vsyscall page.
8070 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8072 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
8073 " --xp 00000000 00:00 0",
8074 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
8075 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
8081 static int open_self_stat(CPUArchState
*cpu_env
, int fd
)
8083 CPUState
*cpu
= env_cpu(cpu_env
);
8084 TaskState
*ts
= cpu
->opaque
;
8085 g_autoptr(GString
) buf
= g_string_new(NULL
);
8088 for (i
= 0; i
< 44; i
++) {
8091 g_string_printf(buf
, FMT_pid
" ", getpid());
8092 } else if (i
== 1) {
8094 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8095 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8096 g_string_printf(buf
, "(%.15s) ", bin
);
8097 } else if (i
== 3) {
8099 g_string_printf(buf
, FMT_pid
" ", getppid());
8100 } else if (i
== 21) {
8102 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8103 } else if (i
== 27) {
8105 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8107 /* for the rest, there is MasterCard */
8108 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8111 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
8119 static int open_self_auxv(CPUArchState
*cpu_env
, int fd
)
8121 CPUState
*cpu
= env_cpu(cpu_env
);
8122 TaskState
*ts
= cpu
->opaque
;
8123 abi_ulong auxv
= ts
->info
->saved_auxv
;
8124 abi_ulong len
= ts
->info
->auxv_len
;
8128 * Auxiliary vector is stored in target process stack.
8129 * read in whole auxv vector and copy it to file
8131 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
8135 r
= write(fd
, ptr
, len
);
8142 lseek(fd
, 0, SEEK_SET
);
8143 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if filename names ENTRY inside this process's own /proc
 * directory: "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY".
 * Returns 0 for anything else (including other pids).
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
8173 static void excp_dump_file(FILE *logfile
, CPUArchState
*env
,
8174 const char *fmt
, int code
)
8177 CPUState
*cs
= env_cpu(env
);
8179 fprintf(logfile
, fmt
, code
);
8180 fprintf(logfile
, "Failing executable: %s\n", exec_path
);
8181 cpu_dump_state(cs
, logfile
, 0);
8182 open_self_maps(env
, fileno(logfile
));
8186 void target_exception_dump(CPUArchState
*env
, const char *fmt
, int code
)
8188 /* dump to console */
8189 excp_dump_file(stderr
, env
, fmt
, code
);
8191 /* dump to log file */
8192 if (qemu_log_separate()) {
8193 FILE *logfile
= qemu_log_trylock();
8195 excp_dump_file(logfile
, env
, fmt
, code
);
8196 qemu_log_unlock(logfile
);
8200 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8201 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Return nonzero if filename exactly matches the fake /proc entry name. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Back /proc/net/route for a cross-endian guest: copy the host file
 * through, byte-swapping the hex-encoded destination, gateway and mask
 * columns so the guest sees them in its own byte order.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Back a minimal /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_HPPA)
/* Back a minimal /proc/cpuinfo for HPPA guests. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Back a minimal /proc/hardware for m68k guests. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
8281 static int do_openat(CPUArchState
*cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8284 const char *filename
;
8285 int (*fill
)(CPUArchState
*cpu_env
, int fd
);
8286 int (*cmp
)(const char *s1
, const char *s2
);
8288 const struct fake_open
*fake_open
;
8289 static const struct fake_open fakes
[] = {
8290 { "maps", open_self_maps
, is_proc_myself
},
8291 { "stat", open_self_stat
, is_proc_myself
},
8292 { "auxv", open_self_auxv
, is_proc_myself
},
8293 { "cmdline", open_self_cmdline
, is_proc_myself
},
8294 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8295 { "/proc/net/route", open_net_route
, is_proc
},
8297 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8298 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8300 #if defined(TARGET_M68K)
8301 { "/proc/hardware", open_hardware
, is_proc
},
8303 { NULL
, NULL
, NULL
}
8306 if (is_proc_myself(pathname
, "exe")) {
8307 return safe_openat(dirfd
, exec_path
, flags
, mode
);
8310 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8311 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8316 if (fake_open
->filename
) {
8318 char filename
[PATH_MAX
];
8321 fd
= memfd_create("qemu-open", 0);
8323 if (errno
!= ENOSYS
) {
8326 /* create temporary file to map stat to */
8327 tmpdir
= getenv("TMPDIR");
8330 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8331 fd
= mkstemp(filename
);
8338 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8344 lseek(fd
, 0, SEEK_SET
);
8349 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8352 #define TIMER_MAGIC 0x0caf0000
8353 #define TIMER_MAGIC_MASK 0xffff0000
8355 /* Convert QEMU provided timer ID back to internal 16bit index format */
8356 static target_timer_t
get_timer_id(abi_long arg
)
8358 target_timer_t timerid
= arg
;
8360 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8361 return -TARGET_EINVAL
;
8366 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8367 return -TARGET_EINVAL
;
8373 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8375 abi_ulong target_addr
,
8378 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8379 unsigned host_bits
= sizeof(*host_mask
) * 8;
8380 abi_ulong
*target_mask
;
8383 assert(host_size
>= target_size
);
8385 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8387 return -TARGET_EFAULT
;
8389 memset(host_mask
, 0, host_size
);
8391 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8392 unsigned bit
= i
* target_bits
;
8395 __get_user(val
, &target_mask
[i
]);
8396 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8397 if (val
& (1UL << j
)) {
8398 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8403 unlock_user(target_mask
, target_addr
, 0);
8407 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8409 abi_ulong target_addr
,
8412 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8413 unsigned host_bits
= sizeof(*host_mask
) * 8;
8414 abi_ulong
*target_mask
;
8417 assert(host_size
>= target_size
);
8419 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8421 return -TARGET_EFAULT
;
8424 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8425 unsigned bit
= i
* target_bits
;
8428 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8429 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8433 __put_user(val
, &target_mask
[i
]);
8436 unlock_user(target_mask
, target_addr
, target_size
);
8440 #ifdef TARGET_NR_getdents
8441 static int do_getdents(abi_long dirfd
, abi_long arg2
, abi_long count
)
8443 g_autofree
void *hdirp
= NULL
;
8445 int hlen
, hoff
, toff
;
8446 int hreclen
, treclen
;
8447 off64_t prev_diroff
= 0;
8449 hdirp
= g_try_malloc(count
);
8451 return -TARGET_ENOMEM
;
8454 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8455 hlen
= sys_getdents(dirfd
, hdirp
, count
);
8457 hlen
= sys_getdents64(dirfd
, hdirp
, count
);
8460 hlen
= get_errno(hlen
);
8461 if (is_error(hlen
)) {
8465 tdirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8467 return -TARGET_EFAULT
;
8470 for (hoff
= toff
= 0; hoff
< hlen
; hoff
+= hreclen
, toff
+= treclen
) {
8471 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8472 struct linux_dirent
*hde
= hdirp
+ hoff
;
8474 struct linux_dirent64
*hde
= hdirp
+ hoff
;
8476 struct target_dirent
*tde
= tdirp
+ toff
;
8480 namelen
= strlen(hde
->d_name
);
8481 hreclen
= hde
->d_reclen
;
8482 treclen
= offsetof(struct target_dirent
, d_name
) + namelen
+ 2;
8483 treclen
= QEMU_ALIGN_UP(treclen
, __alignof(struct target_dirent
));
8485 if (toff
+ treclen
> count
) {
8487 * If the host struct is smaller than the target struct, or
8488 * requires less alignment and thus packs into less space,
8489 * then the host can return more entries than we can pass
8493 toff
= -TARGET_EINVAL
; /* result buffer is too small */
8497 * Return what we have, resetting the file pointer to the
8498 * location of the first record not returned.
8500 lseek64(dirfd
, prev_diroff
, SEEK_SET
);
8504 prev_diroff
= hde
->d_off
;
8505 tde
->d_ino
= tswapal(hde
->d_ino
);
8506 tde
->d_off
= tswapal(hde
->d_off
);
8507 tde
->d_reclen
= tswap16(treclen
);
8508 memcpy(tde
->d_name
, hde
->d_name
, namelen
+ 1);
8511 * The getdents type is in what was formerly a padding byte at the
8512 * end of the structure.
8514 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8515 type
= *((uint8_t *)hde
+ hreclen
- 1);
8519 *((uint8_t *)tde
+ treclen
- 1) = type
;
8522 unlock_user(tdirp
, arg2
, toff
);
8525 #endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): read host linux_dirent64 entries and repack them
 * into the guest's target_dirent64 layout at arg2.  If the repacked
 * records outgrow the guest buffer the directory offset is rewound so the
 * remaining entries are returned on the next call.  Returns the number of
 * bytes written or a -TARGET_xxx errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* pivot_root(2) has no glibc wrapper; generate a direct host-syscall
 * stub with the _syscall2 helper macro. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8597 /* This is an internal helper for do_syscall so that it is easier
8598 * to have a single return point, so that actions, such as logging
8599 * of syscall results, can be performed.
8600 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8602 static abi_long
do_syscall1(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
8603 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8604 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8607 CPUState
*cpu
= env_cpu(cpu_env
);
8609 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8610 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8611 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8612 || defined(TARGET_NR_statx)
8615 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8616 || defined(TARGET_NR_fstatfs)
8622 case TARGET_NR_exit
:
8623 /* In old applications this may be used to implement _exit(2).
8624 However in threaded applications it is used for thread termination,
8625 and _exit_group is used for application termination.
8626 Do thread termination if we have more then one thread. */
8628 if (block_signals()) {
8629 return -QEMU_ERESTARTSYS
;
8632 pthread_mutex_lock(&clone_lock
);
8634 if (CPU_NEXT(first_cpu
)) {
8635 TaskState
*ts
= cpu
->opaque
;
8637 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8638 object_unref(OBJECT(cpu
));
8640 * At this point the CPU should be unrealized and removed
8641 * from cpu lists. We can clean-up the rest of the thread
8642 * data without the lock held.
8645 pthread_mutex_unlock(&clone_lock
);
8647 if (ts
->child_tidptr
) {
8648 put_user_u32(0, ts
->child_tidptr
);
8649 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8650 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8654 rcu_unregister_thread();
8658 pthread_mutex_unlock(&clone_lock
);
8659 preexit_cleanup(cpu_env
, arg1
);
8661 return 0; /* avoid warning */
8662 case TARGET_NR_read
:
8663 if (arg2
== 0 && arg3
== 0) {
8664 return get_errno(safe_read(arg1
, 0, 0));
8666 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8667 return -TARGET_EFAULT
;
8668 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8670 fd_trans_host_to_target_data(arg1
)) {
8671 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8673 unlock_user(p
, arg2
, ret
);
8676 case TARGET_NR_write
:
8677 if (arg2
== 0 && arg3
== 0) {
8678 return get_errno(safe_write(arg1
, 0, 0));
8680 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8681 return -TARGET_EFAULT
;
8682 if (fd_trans_target_to_host_data(arg1
)) {
8683 void *copy
= g_malloc(arg3
);
8684 memcpy(copy
, p
, arg3
);
8685 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8687 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8691 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8693 unlock_user(p
, arg2
, 0);
8696 #ifdef TARGET_NR_open
8697 case TARGET_NR_open
:
8698 if (!(p
= lock_user_string(arg1
)))
8699 return -TARGET_EFAULT
;
8700 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8701 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8703 fd_trans_unregister(ret
);
8704 unlock_user(p
, arg1
, 0);
8707 case TARGET_NR_openat
:
8708 if (!(p
= lock_user_string(arg2
)))
8709 return -TARGET_EFAULT
;
8710 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8711 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8713 fd_trans_unregister(ret
);
8714 unlock_user(p
, arg2
, 0);
8716 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8717 case TARGET_NR_name_to_handle_at
:
8718 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8721 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8722 case TARGET_NR_open_by_handle_at
:
8723 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8724 fd_trans_unregister(ret
);
8727 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8728 case TARGET_NR_pidfd_open
:
8729 return get_errno(pidfd_open(arg1
, arg2
));
8731 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8732 case TARGET_NR_pidfd_send_signal
:
8734 siginfo_t uinfo
, *puinfo
;
8737 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8739 return -TARGET_EFAULT
;
8741 target_to_host_siginfo(&uinfo
, p
);
8742 unlock_user(p
, arg3
, 0);
8747 ret
= get_errno(pidfd_send_signal(arg1
, target_to_host_signal(arg2
),
8752 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8753 case TARGET_NR_pidfd_getfd
:
8754 return get_errno(pidfd_getfd(arg1
, arg2
, arg3
));
8756 case TARGET_NR_close
:
8757 fd_trans_unregister(arg1
);
8758 return get_errno(close(arg1
));
8761 return do_brk(arg1
);
8762 #ifdef TARGET_NR_fork
8763 case TARGET_NR_fork
:
8764 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8766 #ifdef TARGET_NR_waitpid
8767 case TARGET_NR_waitpid
:
8770 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8771 if (!is_error(ret
) && arg2
&& ret
8772 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8773 return -TARGET_EFAULT
;
8777 #ifdef TARGET_NR_waitid
8778 case TARGET_NR_waitid
:
8782 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8783 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8784 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8785 return -TARGET_EFAULT
;
8786 host_to_target_siginfo(p
, &info
);
8787 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8792 #ifdef TARGET_NR_creat /* not on alpha */
8793 case TARGET_NR_creat
:
8794 if (!(p
= lock_user_string(arg1
)))
8795 return -TARGET_EFAULT
;
8796 ret
= get_errno(creat(p
, arg2
));
8797 fd_trans_unregister(ret
);
8798 unlock_user(p
, arg1
, 0);
8801 #ifdef TARGET_NR_link
8802 case TARGET_NR_link
:
8805 p
= lock_user_string(arg1
);
8806 p2
= lock_user_string(arg2
);
8808 ret
= -TARGET_EFAULT
;
8810 ret
= get_errno(link(p
, p2
));
8811 unlock_user(p2
, arg2
, 0);
8812 unlock_user(p
, arg1
, 0);
8816 #if defined(TARGET_NR_linkat)
8817 case TARGET_NR_linkat
:
8821 return -TARGET_EFAULT
;
8822 p
= lock_user_string(arg2
);
8823 p2
= lock_user_string(arg4
);
8825 ret
= -TARGET_EFAULT
;
8827 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8828 unlock_user(p
, arg2
, 0);
8829 unlock_user(p2
, arg4
, 0);
8833 #ifdef TARGET_NR_unlink
8834 case TARGET_NR_unlink
:
8835 if (!(p
= lock_user_string(arg1
)))
8836 return -TARGET_EFAULT
;
8837 ret
= get_errno(unlink(p
));
8838 unlock_user(p
, arg1
, 0);
8841 #if defined(TARGET_NR_unlinkat)
8842 case TARGET_NR_unlinkat
:
8843 if (!(p
= lock_user_string(arg2
)))
8844 return -TARGET_EFAULT
;
8845 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8846 unlock_user(p
, arg2
, 0);
8849 case TARGET_NR_execve
:
8851 char **argp
, **envp
;
8854 abi_ulong guest_argp
;
8855 abi_ulong guest_envp
;
8861 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8862 if (get_user_ual(addr
, gp
))
8863 return -TARGET_EFAULT
;
8870 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8871 if (get_user_ual(addr
, gp
))
8872 return -TARGET_EFAULT
;
8878 argp
= g_new0(char *, argc
+ 1);
8879 envp
= g_new0(char *, envc
+ 1);
8881 for (gp
= guest_argp
, q
= argp
; gp
;
8882 gp
+= sizeof(abi_ulong
), q
++) {
8883 if (get_user_ual(addr
, gp
))
8887 if (!(*q
= lock_user_string(addr
)))
8892 for (gp
= guest_envp
, q
= envp
; gp
;
8893 gp
+= sizeof(abi_ulong
), q
++) {
8894 if (get_user_ual(addr
, gp
))
8898 if (!(*q
= lock_user_string(addr
)))
8903 if (!(p
= lock_user_string(arg1
)))
8905 /* Although execve() is not an interruptible syscall it is
8906 * a special case where we must use the safe_syscall wrapper:
8907 * if we allow a signal to happen before we make the host
8908 * syscall then we will 'lose' it, because at the point of
8909 * execve the process leaves QEMU's control. So we use the
8910 * safe syscall wrapper to ensure that we either take the
8911 * signal as a guest signal, or else it does not happen
8912 * before the execve completes and makes it the other
8913 * program's problem.
8915 if (is_proc_myself(p
, "exe")) {
8916 ret
= get_errno(safe_execve(exec_path
, argp
, envp
));
8918 ret
= get_errno(safe_execve(p
, argp
, envp
));
8920 unlock_user(p
, arg1
, 0);
8925 ret
= -TARGET_EFAULT
;
8928 for (gp
= guest_argp
, q
= argp
; *q
;
8929 gp
+= sizeof(abi_ulong
), q
++) {
8930 if (get_user_ual(addr
, gp
)
8933 unlock_user(*q
, addr
, 0);
8935 for (gp
= guest_envp
, q
= envp
; *q
;
8936 gp
+= sizeof(abi_ulong
), q
++) {
8937 if (get_user_ual(addr
, gp
)
8940 unlock_user(*q
, addr
, 0);
8947 case TARGET_NR_chdir
:
8948 if (!(p
= lock_user_string(arg1
)))
8949 return -TARGET_EFAULT
;
8950 ret
= get_errno(chdir(p
));
8951 unlock_user(p
, arg1
, 0);
8953 #ifdef TARGET_NR_time
8954 case TARGET_NR_time
:
8957 ret
= get_errno(time(&host_time
));
8960 && put_user_sal(host_time
, arg1
))
8961 return -TARGET_EFAULT
;
8965 #ifdef TARGET_NR_mknod
8966 case TARGET_NR_mknod
:
8967 if (!(p
= lock_user_string(arg1
)))
8968 return -TARGET_EFAULT
;
8969 ret
= get_errno(mknod(p
, arg2
, arg3
));
8970 unlock_user(p
, arg1
, 0);
8973 #if defined(TARGET_NR_mknodat)
8974 case TARGET_NR_mknodat
:
8975 if (!(p
= lock_user_string(arg2
)))
8976 return -TARGET_EFAULT
;
8977 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8978 unlock_user(p
, arg2
, 0);
8981 #ifdef TARGET_NR_chmod
8982 case TARGET_NR_chmod
:
8983 if (!(p
= lock_user_string(arg1
)))
8984 return -TARGET_EFAULT
;
8985 ret
= get_errno(chmod(p
, arg2
));
8986 unlock_user(p
, arg1
, 0);
8989 #ifdef TARGET_NR_lseek
8990 case TARGET_NR_lseek
:
8991 return get_errno(lseek(arg1
, arg2
, arg3
));
8993 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8994 /* Alpha specific */
8995 case TARGET_NR_getxpid
:
8996 cpu_env
->ir
[IR_A4
] = getppid();
8997 return get_errno(getpid());
8999 #ifdef TARGET_NR_getpid
9000 case TARGET_NR_getpid
:
9001 return get_errno(getpid());
9003 case TARGET_NR_mount
:
9005 /* need to look at the data field */
9009 p
= lock_user_string(arg1
);
9011 return -TARGET_EFAULT
;
9017 p2
= lock_user_string(arg2
);
9020 unlock_user(p
, arg1
, 0);
9022 return -TARGET_EFAULT
;
9026 p3
= lock_user_string(arg3
);
9029 unlock_user(p
, arg1
, 0);
9031 unlock_user(p2
, arg2
, 0);
9032 return -TARGET_EFAULT
;
9038 /* FIXME - arg5 should be locked, but it isn't clear how to
9039 * do that since it's not guaranteed to be a NULL-terminated
9043 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
9045 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
9047 ret
= get_errno(ret
);
9050 unlock_user(p
, arg1
, 0);
9052 unlock_user(p2
, arg2
, 0);
9054 unlock_user(p3
, arg3
, 0);
9058 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9059 #if defined(TARGET_NR_umount)
9060 case TARGET_NR_umount
:
9062 #if defined(TARGET_NR_oldumount)
9063 case TARGET_NR_oldumount
:
9065 if (!(p
= lock_user_string(arg1
)))
9066 return -TARGET_EFAULT
;
9067 ret
= get_errno(umount(p
));
9068 unlock_user(p
, arg1
, 0);
9071 #ifdef TARGET_NR_stime /* not on alpha */
9072 case TARGET_NR_stime
:
9076 if (get_user_sal(ts
.tv_sec
, arg1
)) {
9077 return -TARGET_EFAULT
;
9079 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
9082 #ifdef TARGET_NR_alarm /* not on alpha */
9083 case TARGET_NR_alarm
:
9086 #ifdef TARGET_NR_pause /* not on alpha */
9087 case TARGET_NR_pause
:
9088 if (!block_signals()) {
9089 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
9091 return -TARGET_EINTR
;
9093 #ifdef TARGET_NR_utime
9094 case TARGET_NR_utime
:
9096 struct utimbuf tbuf
, *host_tbuf
;
9097 struct target_utimbuf
*target_tbuf
;
9099 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9100 return -TARGET_EFAULT
;
9101 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9102 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9103 unlock_user_struct(target_tbuf
, arg2
, 0);
9108 if (!(p
= lock_user_string(arg1
)))
9109 return -TARGET_EFAULT
;
9110 ret
= get_errno(utime(p
, host_tbuf
));
9111 unlock_user(p
, arg1
, 0);
9115 #ifdef TARGET_NR_utimes
9116 case TARGET_NR_utimes
:
9118 struct timeval
*tvp
, tv
[2];
9120 if (copy_from_user_timeval(&tv
[0], arg2
)
9121 || copy_from_user_timeval(&tv
[1],
9122 arg2
+ sizeof(struct target_timeval
)))
9123 return -TARGET_EFAULT
;
9128 if (!(p
= lock_user_string(arg1
)))
9129 return -TARGET_EFAULT
;
9130 ret
= get_errno(utimes(p
, tvp
));
9131 unlock_user(p
, arg1
, 0);
9135 #if defined(TARGET_NR_futimesat)
9136 case TARGET_NR_futimesat
:
9138 struct timeval
*tvp
, tv
[2];
9140 if (copy_from_user_timeval(&tv
[0], arg3
)
9141 || copy_from_user_timeval(&tv
[1],
9142 arg3
+ sizeof(struct target_timeval
)))
9143 return -TARGET_EFAULT
;
9148 if (!(p
= lock_user_string(arg2
))) {
9149 return -TARGET_EFAULT
;
9151 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9152 unlock_user(p
, arg2
, 0);
9156 #ifdef TARGET_NR_access
9157 case TARGET_NR_access
:
9158 if (!(p
= lock_user_string(arg1
))) {
9159 return -TARGET_EFAULT
;
9161 ret
= get_errno(access(path(p
), arg2
));
9162 unlock_user(p
, arg1
, 0);
9165 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9166 case TARGET_NR_faccessat
:
9167 if (!(p
= lock_user_string(arg2
))) {
9168 return -TARGET_EFAULT
;
9170 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9171 unlock_user(p
, arg2
, 0);
9174 #if defined(TARGET_NR_faccessat2)
9175 case TARGET_NR_faccessat2
:
9176 if (!(p
= lock_user_string(arg2
))) {
9177 return -TARGET_EFAULT
;
9179 ret
= get_errno(faccessat(arg1
, p
, arg3
, arg4
));
9180 unlock_user(p
, arg2
, 0);
9183 #ifdef TARGET_NR_nice /* not on alpha */
9184 case TARGET_NR_nice
:
9185 return get_errno(nice(arg1
));
9187 case TARGET_NR_sync
:
9190 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9191 case TARGET_NR_syncfs
:
9192 return get_errno(syncfs(arg1
));
9194 case TARGET_NR_kill
:
9195 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9196 #ifdef TARGET_NR_rename
9197 case TARGET_NR_rename
:
9200 p
= lock_user_string(arg1
);
9201 p2
= lock_user_string(arg2
);
9203 ret
= -TARGET_EFAULT
;
9205 ret
= get_errno(rename(p
, p2
));
9206 unlock_user(p2
, arg2
, 0);
9207 unlock_user(p
, arg1
, 0);
9211 #if defined(TARGET_NR_renameat)
9212 case TARGET_NR_renameat
:
9215 p
= lock_user_string(arg2
);
9216 p2
= lock_user_string(arg4
);
9218 ret
= -TARGET_EFAULT
;
9220 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9221 unlock_user(p2
, arg4
, 0);
9222 unlock_user(p
, arg2
, 0);
9226 #if defined(TARGET_NR_renameat2)
9227 case TARGET_NR_renameat2
:
9230 p
= lock_user_string(arg2
);
9231 p2
= lock_user_string(arg4
);
9233 ret
= -TARGET_EFAULT
;
9235 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9237 unlock_user(p2
, arg4
, 0);
9238 unlock_user(p
, arg2
, 0);
9242 #ifdef TARGET_NR_mkdir
9243 case TARGET_NR_mkdir
:
9244 if (!(p
= lock_user_string(arg1
)))
9245 return -TARGET_EFAULT
;
9246 ret
= get_errno(mkdir(p
, arg2
));
9247 unlock_user(p
, arg1
, 0);
9250 #if defined(TARGET_NR_mkdirat)
9251 case TARGET_NR_mkdirat
:
9252 if (!(p
= lock_user_string(arg2
)))
9253 return -TARGET_EFAULT
;
9254 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9255 unlock_user(p
, arg2
, 0);
9258 #ifdef TARGET_NR_rmdir
9259 case TARGET_NR_rmdir
:
9260 if (!(p
= lock_user_string(arg1
)))
9261 return -TARGET_EFAULT
;
9262 ret
= get_errno(rmdir(p
));
9263 unlock_user(p
, arg1
, 0);
9267 ret
= get_errno(dup(arg1
));
9269 fd_trans_dup(arg1
, ret
);
9272 #ifdef TARGET_NR_pipe
9273 case TARGET_NR_pipe
:
9274 return do_pipe(cpu_env
, arg1
, 0, 0);
9276 #ifdef TARGET_NR_pipe2
9277 case TARGET_NR_pipe2
:
9278 return do_pipe(cpu_env
, arg1
,
9279 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9281 case TARGET_NR_times
:
9283 struct target_tms
*tmsp
;
9285 ret
= get_errno(times(&tms
));
9287 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9289 return -TARGET_EFAULT
;
9290 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9291 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9292 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9293 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9296 ret
= host_to_target_clock_t(ret
);
9299 case TARGET_NR_acct
:
9301 ret
= get_errno(acct(NULL
));
9303 if (!(p
= lock_user_string(arg1
))) {
9304 return -TARGET_EFAULT
;
9306 ret
= get_errno(acct(path(p
)));
9307 unlock_user(p
, arg1
, 0);
9310 #ifdef TARGET_NR_umount2
9311 case TARGET_NR_umount2
:
9312 if (!(p
= lock_user_string(arg1
)))
9313 return -TARGET_EFAULT
;
9314 ret
= get_errno(umount2(p
, arg2
));
9315 unlock_user(p
, arg1
, 0);
9318 case TARGET_NR_ioctl
:
9319 return do_ioctl(arg1
, arg2
, arg3
);
9320 #ifdef TARGET_NR_fcntl
9321 case TARGET_NR_fcntl
:
9322 return do_fcntl(arg1
, arg2
, arg3
);
9324 case TARGET_NR_setpgid
:
9325 return get_errno(setpgid(arg1
, arg2
));
9326 case TARGET_NR_umask
:
9327 return get_errno(umask(arg1
));
9328 case TARGET_NR_chroot
:
9329 if (!(p
= lock_user_string(arg1
)))
9330 return -TARGET_EFAULT
;
9331 ret
= get_errno(chroot(p
));
9332 unlock_user(p
, arg1
, 0);
9334 #ifdef TARGET_NR_dup2
9335 case TARGET_NR_dup2
:
9336 ret
= get_errno(dup2(arg1
, arg2
));
9338 fd_trans_dup(arg1
, arg2
);
9342 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9343 case TARGET_NR_dup3
:
9347 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9350 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9351 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9353 fd_trans_dup(arg1
, arg2
);
9358 #ifdef TARGET_NR_getppid /* not on alpha */
9359 case TARGET_NR_getppid
:
9360 return get_errno(getppid());
9362 #ifdef TARGET_NR_getpgrp
9363 case TARGET_NR_getpgrp
:
9364 return get_errno(getpgrp());
9366 case TARGET_NR_setsid
:
9367 return get_errno(setsid());
9368 #ifdef TARGET_NR_sigaction
9369 case TARGET_NR_sigaction
:
9371 #if defined(TARGET_MIPS)
9372 struct target_sigaction act
, oact
, *pact
, *old_act
;
9375 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9376 return -TARGET_EFAULT
;
9377 act
._sa_handler
= old_act
->_sa_handler
;
9378 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9379 act
.sa_flags
= old_act
->sa_flags
;
9380 unlock_user_struct(old_act
, arg2
, 0);
9386 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9388 if (!is_error(ret
) && arg3
) {
9389 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9390 return -TARGET_EFAULT
;
9391 old_act
->_sa_handler
= oact
._sa_handler
;
9392 old_act
->sa_flags
= oact
.sa_flags
;
9393 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9394 old_act
->sa_mask
.sig
[1] = 0;
9395 old_act
->sa_mask
.sig
[2] = 0;
9396 old_act
->sa_mask
.sig
[3] = 0;
9397 unlock_user_struct(old_act
, arg3
, 1);
9400 struct target_old_sigaction
*old_act
;
9401 struct target_sigaction act
, oact
, *pact
;
9403 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9404 return -TARGET_EFAULT
;
9405 act
._sa_handler
= old_act
->_sa_handler
;
9406 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9407 act
.sa_flags
= old_act
->sa_flags
;
9408 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9409 act
.sa_restorer
= old_act
->sa_restorer
;
9411 unlock_user_struct(old_act
, arg2
, 0);
9416 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9417 if (!is_error(ret
) && arg3
) {
9418 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9419 return -TARGET_EFAULT
;
9420 old_act
->_sa_handler
= oact
._sa_handler
;
9421 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9422 old_act
->sa_flags
= oact
.sa_flags
;
9423 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9424 old_act
->sa_restorer
= oact
.sa_restorer
;
9426 unlock_user_struct(old_act
, arg3
, 1);
9432 case TARGET_NR_rt_sigaction
:
9435 * For Alpha and SPARC this is a 5 argument syscall, with
9436 * a 'restorer' parameter which must be copied into the
9437 * sa_restorer field of the sigaction struct.
9438 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9439 * and arg5 is the sigsetsize.
9441 #if defined(TARGET_ALPHA)
9442 target_ulong sigsetsize
= arg4
;
9443 target_ulong restorer
= arg5
;
9444 #elif defined(TARGET_SPARC)
9445 target_ulong restorer
= arg4
;
9446 target_ulong sigsetsize
= arg5
;
9448 target_ulong sigsetsize
= arg4
;
9449 target_ulong restorer
= 0;
9451 struct target_sigaction
*act
= NULL
;
9452 struct target_sigaction
*oact
= NULL
;
9454 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9455 return -TARGET_EINVAL
;
9457 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9458 return -TARGET_EFAULT
;
9460 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9461 ret
= -TARGET_EFAULT
;
9463 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9465 unlock_user_struct(oact
, arg3
, 1);
9469 unlock_user_struct(act
, arg2
, 0);
9473 #ifdef TARGET_NR_sgetmask /* not on alpha */
9474 case TARGET_NR_sgetmask
:
9477 abi_ulong target_set
;
9478 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9480 host_to_target_old_sigset(&target_set
, &cur_set
);
9486 #ifdef TARGET_NR_ssetmask /* not on alpha */
9487 case TARGET_NR_ssetmask
:
9490 abi_ulong target_set
= arg1
;
9491 target_to_host_old_sigset(&set
, &target_set
);
9492 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9494 host_to_target_old_sigset(&target_set
, &oset
);
9500 #ifdef TARGET_NR_sigprocmask
9501 case TARGET_NR_sigprocmask
:
9503 #if defined(TARGET_ALPHA)
9504 sigset_t set
, oldset
;
9509 case TARGET_SIG_BLOCK
:
9512 case TARGET_SIG_UNBLOCK
:
9515 case TARGET_SIG_SETMASK
:
9519 return -TARGET_EINVAL
;
9522 target_to_host_old_sigset(&set
, &mask
);
9524 ret
= do_sigprocmask(how
, &set
, &oldset
);
9525 if (!is_error(ret
)) {
9526 host_to_target_old_sigset(&mask
, &oldset
);
9528 cpu_env
->ir
[IR_V0
] = 0; /* force no error */
9531 sigset_t set
, oldset
, *set_ptr
;
9535 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9537 return -TARGET_EFAULT
;
9539 target_to_host_old_sigset(&set
, p
);
9540 unlock_user(p
, arg2
, 0);
9543 case TARGET_SIG_BLOCK
:
9546 case TARGET_SIG_UNBLOCK
:
9549 case TARGET_SIG_SETMASK
:
9553 return -TARGET_EINVAL
;
9559 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9560 if (!is_error(ret
) && arg3
) {
9561 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9562 return -TARGET_EFAULT
;
9563 host_to_target_old_sigset(p
, &oldset
);
9564 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9570 case TARGET_NR_rt_sigprocmask
:
9573 sigset_t set
, oldset
, *set_ptr
;
9575 if (arg4
!= sizeof(target_sigset_t
)) {
9576 return -TARGET_EINVAL
;
9580 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9582 return -TARGET_EFAULT
;
9584 target_to_host_sigset(&set
, p
);
9585 unlock_user(p
, arg2
, 0);
9588 case TARGET_SIG_BLOCK
:
9591 case TARGET_SIG_UNBLOCK
:
9594 case TARGET_SIG_SETMASK
:
9598 return -TARGET_EINVAL
;
9604 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9605 if (!is_error(ret
) && arg3
) {
9606 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9607 return -TARGET_EFAULT
;
9608 host_to_target_sigset(p
, &oldset
);
9609 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9613 #ifdef TARGET_NR_sigpending
9614 case TARGET_NR_sigpending
:
9617 ret
= get_errno(sigpending(&set
));
9618 if (!is_error(ret
)) {
9619 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9620 return -TARGET_EFAULT
;
9621 host_to_target_old_sigset(p
, &set
);
9622 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9627 case TARGET_NR_rt_sigpending
:
9631 /* Yes, this check is >, not != like most. We follow the kernel's
9632 * logic and it does it like this because it implements
9633 * NR_sigpending through the same code path, and in that case
9634 * the old_sigset_t is smaller in size.
9636 if (arg2
> sizeof(target_sigset_t
)) {
9637 return -TARGET_EINVAL
;
9640 ret
= get_errno(sigpending(&set
));
9641 if (!is_error(ret
)) {
9642 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9643 return -TARGET_EFAULT
;
9644 host_to_target_sigset(p
, &set
);
9645 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9649 #ifdef TARGET_NR_sigsuspend
9650 case TARGET_NR_sigsuspend
:
9654 #if defined(TARGET_ALPHA)
9655 TaskState
*ts
= cpu
->opaque
;
9656 /* target_to_host_old_sigset will bswap back */
9657 abi_ulong mask
= tswapal(arg1
);
9658 set
= &ts
->sigsuspend_mask
;
9659 target_to_host_old_sigset(set
, &mask
);
9661 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
9666 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9667 finish_sigsuspend_mask(ret
);
9671 case TARGET_NR_rt_sigsuspend
:
9675 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
9679 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9680 finish_sigsuspend_mask(ret
);
9683 #ifdef TARGET_NR_rt_sigtimedwait
9684 case TARGET_NR_rt_sigtimedwait
:
9687 struct timespec uts
, *puts
;
9690 if (arg4
!= sizeof(target_sigset_t
)) {
9691 return -TARGET_EINVAL
;
9694 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9695 return -TARGET_EFAULT
;
9696 target_to_host_sigset(&set
, p
);
9697 unlock_user(p
, arg1
, 0);
9700 if (target_to_host_timespec(puts
, arg3
)) {
9701 return -TARGET_EFAULT
;
9706 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9708 if (!is_error(ret
)) {
9710 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9713 return -TARGET_EFAULT
;
9715 host_to_target_siginfo(p
, &uinfo
);
9716 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9718 ret
= host_to_target_signal(ret
);
9723 #ifdef TARGET_NR_rt_sigtimedwait_time64
9724 case TARGET_NR_rt_sigtimedwait_time64
:
9727 struct timespec uts
, *puts
;
9730 if (arg4
!= sizeof(target_sigset_t
)) {
9731 return -TARGET_EINVAL
;
9734 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9736 return -TARGET_EFAULT
;
9738 target_to_host_sigset(&set
, p
);
9739 unlock_user(p
, arg1
, 0);
9742 if (target_to_host_timespec64(puts
, arg3
)) {
9743 return -TARGET_EFAULT
;
9748 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9750 if (!is_error(ret
)) {
9752 p
= lock_user(VERIFY_WRITE
, arg2
,
9753 sizeof(target_siginfo_t
), 0);
9755 return -TARGET_EFAULT
;
9757 host_to_target_siginfo(p
, &uinfo
);
9758 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9760 ret
= host_to_target_signal(ret
);
9765 case TARGET_NR_rt_sigqueueinfo
:
9769 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9771 return -TARGET_EFAULT
;
9773 target_to_host_siginfo(&uinfo
, p
);
9774 unlock_user(p
, arg3
, 0);
9775 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, target_to_host_signal(arg2
), &uinfo
));
9778 case TARGET_NR_rt_tgsigqueueinfo
:
9782 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9784 return -TARGET_EFAULT
;
9786 target_to_host_siginfo(&uinfo
, p
);
9787 unlock_user(p
, arg4
, 0);
9788 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, target_to_host_signal(arg3
), &uinfo
));
9791 #ifdef TARGET_NR_sigreturn
9792 case TARGET_NR_sigreturn
:
9793 if (block_signals()) {
9794 return -QEMU_ERESTARTSYS
;
9796 return do_sigreturn(cpu_env
);
9798 case TARGET_NR_rt_sigreturn
:
9799 if (block_signals()) {
9800 return -QEMU_ERESTARTSYS
;
9802 return do_rt_sigreturn(cpu_env
);
9803 case TARGET_NR_sethostname
:
9804 if (!(p
= lock_user_string(arg1
)))
9805 return -TARGET_EFAULT
;
9806 ret
= get_errno(sethostname(p
, arg2
));
9807 unlock_user(p
, arg1
, 0);
9809 #ifdef TARGET_NR_setrlimit
9810 case TARGET_NR_setrlimit
:
9812 int resource
= target_to_host_resource(arg1
);
9813 struct target_rlimit
*target_rlim
;
9815 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9816 return -TARGET_EFAULT
;
9817 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9818 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9819 unlock_user_struct(target_rlim
, arg2
, 0);
9821 * If we just passed through resource limit settings for memory then
9822 * they would also apply to QEMU's own allocations, and QEMU will
9823 * crash or hang or die if its allocations fail. Ideally we would
9824 * track the guest allocations in QEMU and apply the limits ourselves.
9825 * For now, just tell the guest the call succeeded but don't actually
9828 if (resource
!= RLIMIT_AS
&&
9829 resource
!= RLIMIT_DATA
&&
9830 resource
!= RLIMIT_STACK
) {
9831 return get_errno(setrlimit(resource
, &rlim
));
9837 #ifdef TARGET_NR_getrlimit
9838 case TARGET_NR_getrlimit
:
9840 int resource
= target_to_host_resource(arg1
);
9841 struct target_rlimit
*target_rlim
;
9844 ret
= get_errno(getrlimit(resource
, &rlim
));
9845 if (!is_error(ret
)) {
9846 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9847 return -TARGET_EFAULT
;
9848 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9849 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9850 unlock_user_struct(target_rlim
, arg2
, 1);
9855 case TARGET_NR_getrusage
:
9857 struct rusage rusage
;
9858 ret
= get_errno(getrusage(arg1
, &rusage
));
9859 if (!is_error(ret
)) {
9860 ret
= host_to_target_rusage(arg2
, &rusage
);
9864 #if defined(TARGET_NR_gettimeofday)
9865 case TARGET_NR_gettimeofday
:
9870 ret
= get_errno(gettimeofday(&tv
, &tz
));
9871 if (!is_error(ret
)) {
9872 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9873 return -TARGET_EFAULT
;
9875 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9876 return -TARGET_EFAULT
;
9882 #if defined(TARGET_NR_settimeofday)
9883 case TARGET_NR_settimeofday
:
9885 struct timeval tv
, *ptv
= NULL
;
9886 struct timezone tz
, *ptz
= NULL
;
9889 if (copy_from_user_timeval(&tv
, arg1
)) {
9890 return -TARGET_EFAULT
;
9896 if (copy_from_user_timezone(&tz
, arg2
)) {
9897 return -TARGET_EFAULT
;
9902 return get_errno(settimeofday(ptv
, ptz
));
9905 #if defined(TARGET_NR_select)
9906 case TARGET_NR_select
:
9907 #if defined(TARGET_WANT_NI_OLD_SELECT)
9908 /* some architectures used to have old_select here
9909 * but now ENOSYS it.
9911 ret
= -TARGET_ENOSYS
;
9912 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9913 ret
= do_old_select(arg1
);
9915 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9919 #ifdef TARGET_NR_pselect6
9920 case TARGET_NR_pselect6
:
9921 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9923 #ifdef TARGET_NR_pselect6_time64
9924 case TARGET_NR_pselect6_time64
:
9925 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9927 #ifdef TARGET_NR_symlink
9928 case TARGET_NR_symlink
:
9931 p
= lock_user_string(arg1
);
9932 p2
= lock_user_string(arg2
);
9934 ret
= -TARGET_EFAULT
;
9936 ret
= get_errno(symlink(p
, p2
));
9937 unlock_user(p2
, arg2
, 0);
9938 unlock_user(p
, arg1
, 0);
9942 #if defined(TARGET_NR_symlinkat)
9943 case TARGET_NR_symlinkat
:
9946 p
= lock_user_string(arg1
);
9947 p2
= lock_user_string(arg3
);
9949 ret
= -TARGET_EFAULT
;
9951 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9952 unlock_user(p2
, arg3
, 0);
9953 unlock_user(p
, arg1
, 0);
9957 #ifdef TARGET_NR_readlink
9958 case TARGET_NR_readlink
:
9961 p
= lock_user_string(arg1
);
9962 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9964 ret
= -TARGET_EFAULT
;
9966 /* Short circuit this for the magic exe check. */
9967 ret
= -TARGET_EINVAL
;
9968 } else if (is_proc_myself((const char *)p
, "exe")) {
9969 char real
[PATH_MAX
], *temp
;
9970 temp
= realpath(exec_path
, real
);
9971 /* Return value is # of bytes that we wrote to the buffer. */
9973 ret
= get_errno(-1);
9975 /* Don't worry about sign mismatch as earlier mapping
9976 * logic would have thrown a bad address error. */
9977 ret
= MIN(strlen(real
), arg3
);
9978 /* We cannot NUL terminate the string. */
9979 memcpy(p2
, real
, ret
);
9982 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9984 unlock_user(p2
, arg2
, ret
);
9985 unlock_user(p
, arg1
, 0);
9989 #if defined(TARGET_NR_readlinkat)
9990 case TARGET_NR_readlinkat
:
9993 p
= lock_user_string(arg2
);
9994 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9996 ret
= -TARGET_EFAULT
;
9998 /* Short circuit this for the magic exe check. */
9999 ret
= -TARGET_EINVAL
;
10000 } else if (is_proc_myself((const char *)p
, "exe")) {
10001 char real
[PATH_MAX
], *temp
;
10002 temp
= realpath(exec_path
, real
);
10003 /* Return value is # of bytes that we wrote to the buffer. */
10004 if (temp
== NULL
) {
10005 ret
= get_errno(-1);
10007 /* Don't worry about sign mismatch as earlier mapping
10008 * logic would have thrown a bad address error. */
10009 ret
= MIN(strlen(real
), arg4
);
10010 /* We cannot NUL terminate the string. */
10011 memcpy(p2
, real
, ret
);
10014 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
10016 unlock_user(p2
, arg3
, ret
);
10017 unlock_user(p
, arg2
, 0);
10021 #ifdef TARGET_NR_swapon
10022 case TARGET_NR_swapon
:
10023 if (!(p
= lock_user_string(arg1
)))
10024 return -TARGET_EFAULT
;
10025 ret
= get_errno(swapon(p
, arg2
));
10026 unlock_user(p
, arg1
, 0);
10029 case TARGET_NR_reboot
:
10030 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
10031 /* arg4 must be ignored in all other cases */
10032 p
= lock_user_string(arg4
);
10034 return -TARGET_EFAULT
;
10036 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
10037 unlock_user(p
, arg4
, 0);
10039 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
10042 #ifdef TARGET_NR_mmap
10043 case TARGET_NR_mmap
:
10044 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10045 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10046 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10047 || defined(TARGET_S390X)
10050 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
10051 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
10052 return -TARGET_EFAULT
;
10053 v1
= tswapal(v
[0]);
10054 v2
= tswapal(v
[1]);
10055 v3
= tswapal(v
[2]);
10056 v4
= tswapal(v
[3]);
10057 v5
= tswapal(v
[4]);
10058 v6
= tswapal(v
[5]);
10059 unlock_user(v
, arg1
, 0);
10060 ret
= get_errno(target_mmap(v1
, v2
, v3
,
10061 target_to_host_bitmask(v4
, mmap_flags_tbl
),
10065 /* mmap pointers are always untagged */
10066 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
10067 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10073 #ifdef TARGET_NR_mmap2
10074 case TARGET_NR_mmap2
:
10076 #define MMAP_SHIFT 12
10078 ret
= target_mmap(arg1
, arg2
, arg3
,
10079 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10080 arg5
, arg6
<< MMAP_SHIFT
);
10081 return get_errno(ret
);
10083 case TARGET_NR_munmap
:
10084 arg1
= cpu_untagged_addr(cpu
, arg1
);
10085 return get_errno(target_munmap(arg1
, arg2
));
10086 case TARGET_NR_mprotect
:
10087 arg1
= cpu_untagged_addr(cpu
, arg1
);
10089 TaskState
*ts
= cpu
->opaque
;
10090 /* Special hack to detect libc making the stack executable. */
10091 if ((arg3
& PROT_GROWSDOWN
)
10092 && arg1
>= ts
->info
->stack_limit
10093 && arg1
<= ts
->info
->start_stack
) {
10094 arg3
&= ~PROT_GROWSDOWN
;
10095 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
10096 arg1
= ts
->info
->stack_limit
;
10099 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
10100 #ifdef TARGET_NR_mremap
10101 case TARGET_NR_mremap
:
10102 arg1
= cpu_untagged_addr(cpu
, arg1
);
10103 /* mremap new_addr (arg5) is always untagged */
10104 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
10106 /* ??? msync/mlock/munlock are broken for softmmu. */
10107 #ifdef TARGET_NR_msync
10108 case TARGET_NR_msync
:
10109 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
10111 #ifdef TARGET_NR_mlock
10112 case TARGET_NR_mlock
:
10113 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
10115 #ifdef TARGET_NR_munlock
10116 case TARGET_NR_munlock
:
10117 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10119 #ifdef TARGET_NR_mlockall
10120 case TARGET_NR_mlockall
:
10121 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10123 #ifdef TARGET_NR_munlockall
10124 case TARGET_NR_munlockall
:
10125 return get_errno(munlockall());
10127 #ifdef TARGET_NR_truncate
10128 case TARGET_NR_truncate
:
10129 if (!(p
= lock_user_string(arg1
)))
10130 return -TARGET_EFAULT
;
10131 ret
= get_errno(truncate(p
, arg2
));
10132 unlock_user(p
, arg1
, 0);
10135 #ifdef TARGET_NR_ftruncate
10136 case TARGET_NR_ftruncate
:
10137 return get_errno(ftruncate(arg1
, arg2
));
10139 case TARGET_NR_fchmod
:
10140 return get_errno(fchmod(arg1
, arg2
));
10141 #if defined(TARGET_NR_fchmodat)
10142 case TARGET_NR_fchmodat
:
10143 if (!(p
= lock_user_string(arg2
)))
10144 return -TARGET_EFAULT
;
10145 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10146 unlock_user(p
, arg2
, 0);
10149 case TARGET_NR_getpriority
:
10150 /* Note that negative values are valid for getpriority, so we must
10151 differentiate based on errno settings. */
10153 ret
= getpriority(arg1
, arg2
);
10154 if (ret
== -1 && errno
!= 0) {
10155 return -host_to_target_errno(errno
);
10157 #ifdef TARGET_ALPHA
10158 /* Return value is the unbiased priority. Signal no error. */
10159 cpu_env
->ir
[IR_V0
] = 0;
10161 /* Return value is a biased priority to avoid negative numbers. */
10165 case TARGET_NR_setpriority
:
10166 return get_errno(setpriority(arg1
, arg2
, arg3
));
10167 #ifdef TARGET_NR_statfs
10168 case TARGET_NR_statfs
:
10169 if (!(p
= lock_user_string(arg1
))) {
10170 return -TARGET_EFAULT
;
10172 ret
= get_errno(statfs(path(p
), &stfs
));
10173 unlock_user(p
, arg1
, 0);
10175 if (!is_error(ret
)) {
10176 struct target_statfs
*target_stfs
;
10178 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10179 return -TARGET_EFAULT
;
10180 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10181 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10182 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10183 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10184 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10185 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10186 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10187 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10188 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10189 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10190 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10191 #ifdef _STATFS_F_FLAGS
10192 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10194 __put_user(0, &target_stfs
->f_flags
);
10196 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10197 unlock_user_struct(target_stfs
, arg2
, 1);
10201 #ifdef TARGET_NR_fstatfs
10202 case TARGET_NR_fstatfs
:
10203 ret
= get_errno(fstatfs(arg1
, &stfs
));
10204 goto convert_statfs
;
10206 #ifdef TARGET_NR_statfs64
10207 case TARGET_NR_statfs64
:
10208 if (!(p
= lock_user_string(arg1
))) {
10209 return -TARGET_EFAULT
;
10211 ret
= get_errno(statfs(path(p
), &stfs
));
10212 unlock_user(p
, arg1
, 0);
10214 if (!is_error(ret
)) {
10215 struct target_statfs64
*target_stfs
;
10217 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10218 return -TARGET_EFAULT
;
10219 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10220 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10221 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10222 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10223 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10224 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10225 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10226 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10227 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10228 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10229 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10230 #ifdef _STATFS_F_FLAGS
10231 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10233 __put_user(0, &target_stfs
->f_flags
);
10235 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10236 unlock_user_struct(target_stfs
, arg3
, 1);
10239 case TARGET_NR_fstatfs64
:
10240 ret
= get_errno(fstatfs(arg1
, &stfs
));
10241 goto convert_statfs64
;
10243 #ifdef TARGET_NR_socketcall
10244 case TARGET_NR_socketcall
:
10245 return do_socketcall(arg1
, arg2
);
10247 #ifdef TARGET_NR_accept
10248 case TARGET_NR_accept
:
10249 return do_accept4(arg1
, arg2
, arg3
, 0);
10251 #ifdef TARGET_NR_accept4
10252 case TARGET_NR_accept4
:
10253 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10255 #ifdef TARGET_NR_bind
10256 case TARGET_NR_bind
:
10257 return do_bind(arg1
, arg2
, arg3
);
10259 #ifdef TARGET_NR_connect
10260 case TARGET_NR_connect
:
10261 return do_connect(arg1
, arg2
, arg3
);
10263 #ifdef TARGET_NR_getpeername
10264 case TARGET_NR_getpeername
:
10265 return do_getpeername(arg1
, arg2
, arg3
);
10267 #ifdef TARGET_NR_getsockname
10268 case TARGET_NR_getsockname
:
10269 return do_getsockname(arg1
, arg2
, arg3
);
10271 #ifdef TARGET_NR_getsockopt
10272 case TARGET_NR_getsockopt
:
10273 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10275 #ifdef TARGET_NR_listen
10276 case TARGET_NR_listen
:
10277 return get_errno(listen(arg1
, arg2
));
10279 #ifdef TARGET_NR_recv
10280 case TARGET_NR_recv
:
10281 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10283 #ifdef TARGET_NR_recvfrom
10284 case TARGET_NR_recvfrom
:
10285 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10287 #ifdef TARGET_NR_recvmsg
10288 case TARGET_NR_recvmsg
:
10289 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10291 #ifdef TARGET_NR_send
10292 case TARGET_NR_send
:
10293 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10295 #ifdef TARGET_NR_sendmsg
10296 case TARGET_NR_sendmsg
:
10297 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10299 #ifdef TARGET_NR_sendmmsg
10300 case TARGET_NR_sendmmsg
:
10301 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10303 #ifdef TARGET_NR_recvmmsg
10304 case TARGET_NR_recvmmsg
:
10305 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10307 #ifdef TARGET_NR_sendto
10308 case TARGET_NR_sendto
:
10309 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10311 #ifdef TARGET_NR_shutdown
10312 case TARGET_NR_shutdown
:
10313 return get_errno(shutdown(arg1
, arg2
));
10315 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10316 case TARGET_NR_getrandom
:
10317 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10319 return -TARGET_EFAULT
;
10321 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10322 unlock_user(p
, arg1
, ret
);
10325 #ifdef TARGET_NR_socket
10326 case TARGET_NR_socket
:
10327 return do_socket(arg1
, arg2
, arg3
);
10329 #ifdef TARGET_NR_socketpair
10330 case TARGET_NR_socketpair
:
10331 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10333 #ifdef TARGET_NR_setsockopt
10334 case TARGET_NR_setsockopt
:
10335 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10337 #if defined(TARGET_NR_syslog)
10338 case TARGET_NR_syslog
:
10343 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10344 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10345 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10346 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10347 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10348 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10349 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10350 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10351 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10352 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10353 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10354 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10357 return -TARGET_EINVAL
;
10362 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10364 return -TARGET_EFAULT
;
10366 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10367 unlock_user(p
, arg2
, arg3
);
10371 return -TARGET_EINVAL
;
10376 case TARGET_NR_setitimer
:
10378 struct itimerval value
, ovalue
, *pvalue
;
10382 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10383 || copy_from_user_timeval(&pvalue
->it_value
,
10384 arg2
+ sizeof(struct target_timeval
)))
10385 return -TARGET_EFAULT
;
10389 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10390 if (!is_error(ret
) && arg3
) {
10391 if (copy_to_user_timeval(arg3
,
10392 &ovalue
.it_interval
)
10393 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10395 return -TARGET_EFAULT
;
10399 case TARGET_NR_getitimer
:
10401 struct itimerval value
;
10403 ret
= get_errno(getitimer(arg1
, &value
));
10404 if (!is_error(ret
) && arg2
) {
10405 if (copy_to_user_timeval(arg2
,
10406 &value
.it_interval
)
10407 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10409 return -TARGET_EFAULT
;
10413 #ifdef TARGET_NR_stat
10414 case TARGET_NR_stat
:
10415 if (!(p
= lock_user_string(arg1
))) {
10416 return -TARGET_EFAULT
;
10418 ret
= get_errno(stat(path(p
), &st
));
10419 unlock_user(p
, arg1
, 0);
10422 #ifdef TARGET_NR_lstat
10423 case TARGET_NR_lstat
:
10424 if (!(p
= lock_user_string(arg1
))) {
10425 return -TARGET_EFAULT
;
10427 ret
= get_errno(lstat(path(p
), &st
));
10428 unlock_user(p
, arg1
, 0);
10431 #ifdef TARGET_NR_fstat
10432 case TARGET_NR_fstat
:
10434 ret
= get_errno(fstat(arg1
, &st
));
10435 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10438 if (!is_error(ret
)) {
10439 struct target_stat
*target_st
;
10441 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10442 return -TARGET_EFAULT
;
10443 memset(target_st
, 0, sizeof(*target_st
));
10444 __put_user(st
.st_dev
, &target_st
->st_dev
);
10445 __put_user(st
.st_ino
, &target_st
->st_ino
);
10446 __put_user(st
.st_mode
, &target_st
->st_mode
);
10447 __put_user(st
.st_uid
, &target_st
->st_uid
);
10448 __put_user(st
.st_gid
, &target_st
->st_gid
);
10449 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10450 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10451 __put_user(st
.st_size
, &target_st
->st_size
);
10452 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10453 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10454 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10455 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10456 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10457 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10458 __put_user(st
.st_atim
.tv_nsec
,
10459 &target_st
->target_st_atime_nsec
);
10460 __put_user(st
.st_mtim
.tv_nsec
,
10461 &target_st
->target_st_mtime_nsec
);
10462 __put_user(st
.st_ctim
.tv_nsec
,
10463 &target_st
->target_st_ctime_nsec
);
10465 unlock_user_struct(target_st
, arg2
, 1);
10470 case TARGET_NR_vhangup
:
10471 return get_errno(vhangup());
10472 #ifdef TARGET_NR_syscall
10473 case TARGET_NR_syscall
:
10474 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10475 arg6
, arg7
, arg8
, 0);
10477 #if defined(TARGET_NR_wait4)
10478 case TARGET_NR_wait4
:
10481 abi_long status_ptr
= arg2
;
10482 struct rusage rusage
, *rusage_ptr
;
10483 abi_ulong target_rusage
= arg4
;
10484 abi_long rusage_err
;
10486 rusage_ptr
= &rusage
;
10489 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10490 if (!is_error(ret
)) {
10491 if (status_ptr
&& ret
) {
10492 status
= host_to_target_waitstatus(status
);
10493 if (put_user_s32(status
, status_ptr
))
10494 return -TARGET_EFAULT
;
10496 if (target_rusage
) {
10497 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10506 #ifdef TARGET_NR_swapoff
10507 case TARGET_NR_swapoff
:
10508 if (!(p
= lock_user_string(arg1
)))
10509 return -TARGET_EFAULT
;
10510 ret
= get_errno(swapoff(p
));
10511 unlock_user(p
, arg1
, 0);
10514 case TARGET_NR_sysinfo
:
10516 struct target_sysinfo
*target_value
;
10517 struct sysinfo value
;
10518 ret
= get_errno(sysinfo(&value
));
10519 if (!is_error(ret
) && arg1
)
10521 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10522 return -TARGET_EFAULT
;
10523 __put_user(value
.uptime
, &target_value
->uptime
);
10524 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10525 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10526 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10527 __put_user(value
.totalram
, &target_value
->totalram
);
10528 __put_user(value
.freeram
, &target_value
->freeram
);
10529 __put_user(value
.sharedram
, &target_value
->sharedram
);
10530 __put_user(value
.bufferram
, &target_value
->bufferram
);
10531 __put_user(value
.totalswap
, &target_value
->totalswap
);
10532 __put_user(value
.freeswap
, &target_value
->freeswap
);
10533 __put_user(value
.procs
, &target_value
->procs
);
10534 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10535 __put_user(value
.freehigh
, &target_value
->freehigh
);
10536 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10537 unlock_user_struct(target_value
, arg1
, 1);
10541 #ifdef TARGET_NR_ipc
10542 case TARGET_NR_ipc
:
10543 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10545 #ifdef TARGET_NR_semget
10546 case TARGET_NR_semget
:
10547 return get_errno(semget(arg1
, arg2
, arg3
));
10549 #ifdef TARGET_NR_semop
10550 case TARGET_NR_semop
:
10551 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10553 #ifdef TARGET_NR_semtimedop
10554 case TARGET_NR_semtimedop
:
10555 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10557 #ifdef TARGET_NR_semtimedop_time64
10558 case TARGET_NR_semtimedop_time64
:
10559 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10561 #ifdef TARGET_NR_semctl
10562 case TARGET_NR_semctl
:
10563 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10565 #ifdef TARGET_NR_msgctl
10566 case TARGET_NR_msgctl
:
10567 return do_msgctl(arg1
, arg2
, arg3
);
10569 #ifdef TARGET_NR_msgget
10570 case TARGET_NR_msgget
:
10571 return get_errno(msgget(arg1
, arg2
));
10573 #ifdef TARGET_NR_msgrcv
10574 case TARGET_NR_msgrcv
:
10575 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10577 #ifdef TARGET_NR_msgsnd
10578 case TARGET_NR_msgsnd
:
10579 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10581 #ifdef TARGET_NR_shmget
10582 case TARGET_NR_shmget
:
10583 return get_errno(shmget(arg1
, arg2
, arg3
));
10585 #ifdef TARGET_NR_shmctl
10586 case TARGET_NR_shmctl
:
10587 return do_shmctl(arg1
, arg2
, arg3
);
10589 #ifdef TARGET_NR_shmat
10590 case TARGET_NR_shmat
:
10591 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10593 #ifdef TARGET_NR_shmdt
10594 case TARGET_NR_shmdt
:
10595 return do_shmdt(arg1
);
10597 case TARGET_NR_fsync
:
10598 return get_errno(fsync(arg1
));
10599 case TARGET_NR_clone
:
10600 /* Linux manages to have three different orderings for its
10601 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10602 * match the kernel's CONFIG_CLONE_* settings.
10603 * Microblaze is further special in that it uses a sixth
10604 * implicit argument to clone for the TLS pointer.
10606 #if defined(TARGET_MICROBLAZE)
10607 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10608 #elif defined(TARGET_CLONE_BACKWARDS)
10609 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10610 #elif defined(TARGET_CLONE_BACKWARDS2)
10611 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10613 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10616 #ifdef __NR_exit_group
10617 /* new thread calls */
10618 case TARGET_NR_exit_group
:
10619 preexit_cleanup(cpu_env
, arg1
);
10620 return get_errno(exit_group(arg1
));
10622 case TARGET_NR_setdomainname
:
10623 if (!(p
= lock_user_string(arg1
)))
10624 return -TARGET_EFAULT
;
10625 ret
= get_errno(setdomainname(p
, arg2
));
10626 unlock_user(p
, arg1
, 0);
10628 case TARGET_NR_uname
:
10629 /* no need to transcode because we use the linux syscall */
10631 struct new_utsname
* buf
;
10633 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10634 return -TARGET_EFAULT
;
10635 ret
= get_errno(sys_uname(buf
));
10636 if (!is_error(ret
)) {
10637 /* Overwrite the native machine name with whatever is being
10639 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10640 sizeof(buf
->machine
));
10641 /* Allow the user to override the reported release. */
10642 if (qemu_uname_release
&& *qemu_uname_release
) {
10643 g_strlcpy(buf
->release
, qemu_uname_release
,
10644 sizeof(buf
->release
));
10647 unlock_user_struct(buf
, arg1
, 1);
10651 case TARGET_NR_modify_ldt
:
10652 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10653 #if !defined(TARGET_X86_64)
10654 case TARGET_NR_vm86
:
10655 return do_vm86(cpu_env
, arg1
, arg2
);
10658 #if defined(TARGET_NR_adjtimex)
10659 case TARGET_NR_adjtimex
:
10661 struct timex host_buf
;
10663 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10664 return -TARGET_EFAULT
;
10666 ret
= get_errno(adjtimex(&host_buf
));
10667 if (!is_error(ret
)) {
10668 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10669 return -TARGET_EFAULT
;
10675 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10676 case TARGET_NR_clock_adjtime
:
10678 struct timex htx
, *phtx
= &htx
;
10680 if (target_to_host_timex(phtx
, arg2
) != 0) {
10681 return -TARGET_EFAULT
;
10683 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10684 if (!is_error(ret
) && phtx
) {
10685 if (host_to_target_timex(arg2
, phtx
) != 0) {
10686 return -TARGET_EFAULT
;
10692 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10693 case TARGET_NR_clock_adjtime64
:
10697 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10698 return -TARGET_EFAULT
;
10700 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10701 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10702 return -TARGET_EFAULT
;
10707 case TARGET_NR_getpgid
:
10708 return get_errno(getpgid(arg1
));
10709 case TARGET_NR_fchdir
:
10710 return get_errno(fchdir(arg1
));
10711 case TARGET_NR_personality
:
10712 return get_errno(personality(arg1
));
10713 #ifdef TARGET_NR__llseek /* Not on alpha */
10714 case TARGET_NR__llseek
:
10717 #if !defined(__NR_llseek)
10718 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10720 ret
= get_errno(res
);
10725 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10727 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10728 return -TARGET_EFAULT
;
10733 #ifdef TARGET_NR_getdents
10734 case TARGET_NR_getdents
:
10735 return do_getdents(arg1
, arg2
, arg3
);
10736 #endif /* TARGET_NR_getdents */
10737 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10738 case TARGET_NR_getdents64
:
10739 return do_getdents64(arg1
, arg2
, arg3
);
10740 #endif /* TARGET_NR_getdents64 */
10741 #if defined(TARGET_NR__newselect)
10742 case TARGET_NR__newselect
:
10743 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10745 #ifdef TARGET_NR_poll
10746 case TARGET_NR_poll
:
10747 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10749 #ifdef TARGET_NR_ppoll
10750 case TARGET_NR_ppoll
:
10751 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10753 #ifdef TARGET_NR_ppoll_time64
10754 case TARGET_NR_ppoll_time64
:
10755 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10757 case TARGET_NR_flock
:
10758 /* NOTE: the flock constant seems to be the same for every
10760 return get_errno(safe_flock(arg1
, arg2
));
10761 case TARGET_NR_readv
:
10763 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10765 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10766 unlock_iovec(vec
, arg2
, arg3
, 1);
10768 ret
= -host_to_target_errno(errno
);
10772 case TARGET_NR_writev
:
10774 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10776 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10777 unlock_iovec(vec
, arg2
, arg3
, 0);
10779 ret
= -host_to_target_errno(errno
);
10783 #if defined(TARGET_NR_preadv)
10784 case TARGET_NR_preadv
:
10786 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10788 unsigned long low
, high
;
10790 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10791 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10792 unlock_iovec(vec
, arg2
, arg3
, 1);
10794 ret
= -host_to_target_errno(errno
);
10799 #if defined(TARGET_NR_pwritev)
10800 case TARGET_NR_pwritev
:
10802 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10804 unsigned long low
, high
;
10806 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10807 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10808 unlock_iovec(vec
, arg2
, arg3
, 0);
10810 ret
= -host_to_target_errno(errno
);
10815 case TARGET_NR_getsid
:
10816 return get_errno(getsid(arg1
));
10817 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10818 case TARGET_NR_fdatasync
:
10819 return get_errno(fdatasync(arg1
));
10821 case TARGET_NR_sched_getaffinity
:
10823 unsigned int mask_size
;
10824 unsigned long *mask
;
10827 * sched_getaffinity needs multiples of ulong, so need to take
10828 * care of mismatches between target ulong and host ulong sizes.
10830 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10831 return -TARGET_EINVAL
;
10833 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10835 mask
= alloca(mask_size
);
10836 memset(mask
, 0, mask_size
);
10837 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10839 if (!is_error(ret
)) {
10841 /* More data returned than the caller's buffer will fit.
10842 * This only happens if sizeof(abi_long) < sizeof(long)
10843 * and the caller passed us a buffer holding an odd number
10844 * of abi_longs. If the host kernel is actually using the
10845 * extra 4 bytes then fail EINVAL; otherwise we can just
10846 * ignore them and only copy the interesting part.
10848 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10849 if (numcpus
> arg2
* 8) {
10850 return -TARGET_EINVAL
;
10855 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10856 return -TARGET_EFAULT
;
10861 case TARGET_NR_sched_setaffinity
:
10863 unsigned int mask_size
;
10864 unsigned long *mask
;
10867 * sched_setaffinity needs multiples of ulong, so need to take
10868 * care of mismatches between target ulong and host ulong sizes.
10870 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10871 return -TARGET_EINVAL
;
10873 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10874 mask
= alloca(mask_size
);
10876 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10881 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10883 case TARGET_NR_getcpu
:
10885 unsigned cpu
, node
;
10886 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10887 arg2
? &node
: NULL
,
10889 if (is_error(ret
)) {
10892 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10893 return -TARGET_EFAULT
;
10895 if (arg2
&& put_user_u32(node
, arg2
)) {
10896 return -TARGET_EFAULT
;
10900 case TARGET_NR_sched_setparam
:
10902 struct target_sched_param
*target_schp
;
10903 struct sched_param schp
;
10906 return -TARGET_EINVAL
;
10908 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10909 return -TARGET_EFAULT
;
10911 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10912 unlock_user_struct(target_schp
, arg2
, 0);
10913 return get_errno(sys_sched_setparam(arg1
, &schp
));
10915 case TARGET_NR_sched_getparam
:
10917 struct target_sched_param
*target_schp
;
10918 struct sched_param schp
;
10921 return -TARGET_EINVAL
;
10923 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10924 if (!is_error(ret
)) {
10925 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10926 return -TARGET_EFAULT
;
10928 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10929 unlock_user_struct(target_schp
, arg2
, 1);
10933 case TARGET_NR_sched_setscheduler
:
10935 struct target_sched_param
*target_schp
;
10936 struct sched_param schp
;
10938 return -TARGET_EINVAL
;
10940 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10941 return -TARGET_EFAULT
;
10943 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10944 unlock_user_struct(target_schp
, arg3
, 0);
10945 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10947 case TARGET_NR_sched_getscheduler
:
10948 return get_errno(sys_sched_getscheduler(arg1
));
10949 case TARGET_NR_sched_getattr
:
10951 struct target_sched_attr
*target_scha
;
10952 struct sched_attr scha
;
10954 return -TARGET_EINVAL
;
10956 if (arg3
> sizeof(scha
)) {
10957 arg3
= sizeof(scha
);
10959 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
10960 if (!is_error(ret
)) {
10961 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10962 if (!target_scha
) {
10963 return -TARGET_EFAULT
;
10965 target_scha
->size
= tswap32(scha
.size
);
10966 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
10967 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
10968 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
10969 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
10970 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
10971 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
10972 target_scha
->sched_period
= tswap64(scha
.sched_period
);
10973 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
10974 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
10975 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
10977 unlock_user(target_scha
, arg2
, arg3
);
10981 case TARGET_NR_sched_setattr
:
10983 struct target_sched_attr
*target_scha
;
10984 struct sched_attr scha
;
10988 return -TARGET_EINVAL
;
10990 if (get_user_u32(size
, arg2
)) {
10991 return -TARGET_EFAULT
;
10994 size
= offsetof(struct target_sched_attr
, sched_util_min
);
10996 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
10997 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10998 return -TARGET_EFAULT
;
11000 return -TARGET_E2BIG
;
11003 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
11006 } else if (zeroed
== 0) {
11007 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
11008 return -TARGET_EFAULT
;
11010 return -TARGET_E2BIG
;
11012 if (size
> sizeof(struct target_sched_attr
)) {
11013 size
= sizeof(struct target_sched_attr
);
11016 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
11017 if (!target_scha
) {
11018 return -TARGET_EFAULT
;
11021 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
11022 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
11023 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
11024 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
11025 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
11026 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
11027 scha
.sched_period
= tswap64(target_scha
->sched_period
);
11028 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
11029 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
11030 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
11032 unlock_user(target_scha
, arg2
, 0);
11033 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
11035 case TARGET_NR_sched_yield
:
11036 return get_errno(sched_yield());
11037 case TARGET_NR_sched_get_priority_max
:
11038 return get_errno(sched_get_priority_max(arg1
));
11039 case TARGET_NR_sched_get_priority_min
:
11040 return get_errno(sched_get_priority_min(arg1
));
11041 #ifdef TARGET_NR_sched_rr_get_interval
11042 case TARGET_NR_sched_rr_get_interval
:
11044 struct timespec ts
;
11045 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11046 if (!is_error(ret
)) {
11047 ret
= host_to_target_timespec(arg2
, &ts
);
11052 #ifdef TARGET_NR_sched_rr_get_interval_time64
11053 case TARGET_NR_sched_rr_get_interval_time64
:
11055 struct timespec ts
;
11056 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11057 if (!is_error(ret
)) {
11058 ret
= host_to_target_timespec64(arg2
, &ts
);
11063 #if defined(TARGET_NR_nanosleep)
11064 case TARGET_NR_nanosleep
:
11066 struct timespec req
, rem
;
11067 target_to_host_timespec(&req
, arg1
);
11068 ret
= get_errno(safe_nanosleep(&req
, &rem
));
11069 if (is_error(ret
) && arg2
) {
11070 host_to_target_timespec(arg2
, &rem
);
11075 case TARGET_NR_prctl
:
11076 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
11078 #ifdef TARGET_NR_arch_prctl
11079 case TARGET_NR_arch_prctl
:
11080 return do_arch_prctl(cpu_env
, arg1
, arg2
);
11082 #ifdef TARGET_NR_pread64
11083 case TARGET_NR_pread64
:
11084 if (regpairs_aligned(cpu_env
, num
)) {
11088 if (arg2
== 0 && arg3
== 0) {
11089 /* Special-case NULL buffer and zero length, which should succeed */
11092 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11094 return -TARGET_EFAULT
;
11097 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11098 unlock_user(p
, arg2
, ret
);
11100 case TARGET_NR_pwrite64
:
11101 if (regpairs_aligned(cpu_env
, num
)) {
11105 if (arg2
== 0 && arg3
== 0) {
11106 /* Special-case NULL buffer and zero length, which should succeed */
11109 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11111 return -TARGET_EFAULT
;
11114 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11115 unlock_user(p
, arg2
, 0);
11118 case TARGET_NR_getcwd
:
11119 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11120 return -TARGET_EFAULT
;
11121 ret
= get_errno(sys_getcwd1(p
, arg2
));
11122 unlock_user(p
, arg1
, ret
);
11124 case TARGET_NR_capget
:
11125 case TARGET_NR_capset
:
11127 struct target_user_cap_header
*target_header
;
11128 struct target_user_cap_data
*target_data
= NULL
;
11129 struct __user_cap_header_struct header
;
11130 struct __user_cap_data_struct data
[2];
11131 struct __user_cap_data_struct
*dataptr
= NULL
;
11132 int i
, target_datalen
;
11133 int data_items
= 1;
11135 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11136 return -TARGET_EFAULT
;
11138 header
.version
= tswap32(target_header
->version
);
11139 header
.pid
= tswap32(target_header
->pid
);
11141 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11142 /* Version 2 and up takes pointer to two user_data structs */
11146 target_datalen
= sizeof(*target_data
) * data_items
;
11149 if (num
== TARGET_NR_capget
) {
11150 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11152 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11154 if (!target_data
) {
11155 unlock_user_struct(target_header
, arg1
, 0);
11156 return -TARGET_EFAULT
;
11159 if (num
== TARGET_NR_capset
) {
11160 for (i
= 0; i
< data_items
; i
++) {
11161 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11162 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11163 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11170 if (num
== TARGET_NR_capget
) {
11171 ret
= get_errno(capget(&header
, dataptr
));
11173 ret
= get_errno(capset(&header
, dataptr
));
11176 /* The kernel always updates version for both capget and capset */
11177 target_header
->version
= tswap32(header
.version
);
11178 unlock_user_struct(target_header
, arg1
, 1);
11181 if (num
== TARGET_NR_capget
) {
11182 for (i
= 0; i
< data_items
; i
++) {
11183 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11184 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11185 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11187 unlock_user(target_data
, arg2
, target_datalen
);
11189 unlock_user(target_data
, arg2
, 0);
11194 case TARGET_NR_sigaltstack
:
11195 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11197 #ifdef CONFIG_SENDFILE
11198 #ifdef TARGET_NR_sendfile
11199 case TARGET_NR_sendfile
:
11201 off_t
*offp
= NULL
;
11204 ret
= get_user_sal(off
, arg3
);
11205 if (is_error(ret
)) {
11210 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11211 if (!is_error(ret
) && arg3
) {
11212 abi_long ret2
= put_user_sal(off
, arg3
);
11213 if (is_error(ret2
)) {
11220 #ifdef TARGET_NR_sendfile64
11221 case TARGET_NR_sendfile64
:
11223 off_t
*offp
= NULL
;
11226 ret
= get_user_s64(off
, arg3
);
11227 if (is_error(ret
)) {
11232 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11233 if (!is_error(ret
) && arg3
) {
11234 abi_long ret2
= put_user_s64(off
, arg3
);
11235 if (is_error(ret2
)) {
11243 #ifdef TARGET_NR_vfork
11244 case TARGET_NR_vfork
:
11245 return get_errno(do_fork(cpu_env
,
11246 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11249 #ifdef TARGET_NR_ugetrlimit
11250 case TARGET_NR_ugetrlimit
:
11252 struct rlimit rlim
;
11253 int resource
= target_to_host_resource(arg1
);
11254 ret
= get_errno(getrlimit(resource
, &rlim
));
11255 if (!is_error(ret
)) {
11256 struct target_rlimit
*target_rlim
;
11257 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11258 return -TARGET_EFAULT
;
11259 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11260 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11261 unlock_user_struct(target_rlim
, arg2
, 1);
11266 #ifdef TARGET_NR_truncate64
11267 case TARGET_NR_truncate64
:
11268 if (!(p
= lock_user_string(arg1
)))
11269 return -TARGET_EFAULT
;
11270 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11271 unlock_user(p
, arg1
, 0);
11274 #ifdef TARGET_NR_ftruncate64
11275 case TARGET_NR_ftruncate64
:
11276 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11278 #ifdef TARGET_NR_stat64
11279 case TARGET_NR_stat64
:
11280 if (!(p
= lock_user_string(arg1
))) {
11281 return -TARGET_EFAULT
;
11283 ret
= get_errno(stat(path(p
), &st
));
11284 unlock_user(p
, arg1
, 0);
11285 if (!is_error(ret
))
11286 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11289 #ifdef TARGET_NR_lstat64
11290 case TARGET_NR_lstat64
:
11291 if (!(p
= lock_user_string(arg1
))) {
11292 return -TARGET_EFAULT
;
11294 ret
= get_errno(lstat(path(p
), &st
));
11295 unlock_user(p
, arg1
, 0);
11296 if (!is_error(ret
))
11297 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11300 #ifdef TARGET_NR_fstat64
11301 case TARGET_NR_fstat64
:
11302 ret
= get_errno(fstat(arg1
, &st
));
11303 if (!is_error(ret
))
11304 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11307 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11308 #ifdef TARGET_NR_fstatat64
11309 case TARGET_NR_fstatat64
:
11311 #ifdef TARGET_NR_newfstatat
11312 case TARGET_NR_newfstatat
:
11314 if (!(p
= lock_user_string(arg2
))) {
11315 return -TARGET_EFAULT
;
11317 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11318 unlock_user(p
, arg2
, 0);
11319 if (!is_error(ret
))
11320 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11323 #if defined(TARGET_NR_statx)
11324 case TARGET_NR_statx
:
11326 struct target_statx
*target_stx
;
11330 p
= lock_user_string(arg2
);
11332 return -TARGET_EFAULT
;
11334 #if defined(__NR_statx)
11337 * It is assumed that struct statx is architecture independent.
11339 struct target_statx host_stx
;
11342 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11343 if (!is_error(ret
)) {
11344 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11345 unlock_user(p
, arg2
, 0);
11346 return -TARGET_EFAULT
;
11350 if (ret
!= -TARGET_ENOSYS
) {
11351 unlock_user(p
, arg2
, 0);
11356 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11357 unlock_user(p
, arg2
, 0);
11359 if (!is_error(ret
)) {
11360 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11361 return -TARGET_EFAULT
;
11363 memset(target_stx
, 0, sizeof(*target_stx
));
11364 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11365 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11366 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11367 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11368 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11369 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11370 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11371 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11372 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11373 __put_user(st
.st_size
, &target_stx
->stx_size
);
11374 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11375 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11376 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11377 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11378 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11379 unlock_user_struct(target_stx
, arg5
, 1);
11384 #ifdef TARGET_NR_lchown
11385 case TARGET_NR_lchown
:
11386 if (!(p
= lock_user_string(arg1
)))
11387 return -TARGET_EFAULT
;
11388 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11389 unlock_user(p
, arg1
, 0);
11392 #ifdef TARGET_NR_getuid
11393 case TARGET_NR_getuid
:
11394 return get_errno(high2lowuid(getuid()));
11396 #ifdef TARGET_NR_getgid
11397 case TARGET_NR_getgid
:
11398 return get_errno(high2lowgid(getgid()));
11400 #ifdef TARGET_NR_geteuid
11401 case TARGET_NR_geteuid
:
11402 return get_errno(high2lowuid(geteuid()));
11404 #ifdef TARGET_NR_getegid
11405 case TARGET_NR_getegid
:
11406 return get_errno(high2lowgid(getegid()));
11408 case TARGET_NR_setreuid
:
11409 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11410 case TARGET_NR_setregid
:
11411 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11412 case TARGET_NR_getgroups
:
11414 int gidsetsize
= arg1
;
11415 target_id
*target_grouplist
;
11419 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11420 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11421 if (gidsetsize
== 0)
11423 if (!is_error(ret
)) {
11424 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11425 if (!target_grouplist
)
11426 return -TARGET_EFAULT
;
11427 for(i
= 0;i
< ret
; i
++)
11428 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11429 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11433 case TARGET_NR_setgroups
:
11435 int gidsetsize
= arg1
;
11436 target_id
*target_grouplist
;
11437 gid_t
*grouplist
= NULL
;
11440 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11441 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11442 if (!target_grouplist
) {
11443 return -TARGET_EFAULT
;
11445 for (i
= 0; i
< gidsetsize
; i
++) {
11446 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11448 unlock_user(target_grouplist
, arg2
, 0);
11450 return get_errno(setgroups(gidsetsize
, grouplist
));
11452 case TARGET_NR_fchown
:
11453 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11454 #if defined(TARGET_NR_fchownat)
11455 case TARGET_NR_fchownat
:
11456 if (!(p
= lock_user_string(arg2
)))
11457 return -TARGET_EFAULT
;
11458 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11459 low2highgid(arg4
), arg5
));
11460 unlock_user(p
, arg2
, 0);
11463 #ifdef TARGET_NR_setresuid
11464 case TARGET_NR_setresuid
:
11465 return get_errno(sys_setresuid(low2highuid(arg1
),
11467 low2highuid(arg3
)));
11469 #ifdef TARGET_NR_getresuid
11470 case TARGET_NR_getresuid
:
11472 uid_t ruid
, euid
, suid
;
11473 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11474 if (!is_error(ret
)) {
11475 if (put_user_id(high2lowuid(ruid
), arg1
)
11476 || put_user_id(high2lowuid(euid
), arg2
)
11477 || put_user_id(high2lowuid(suid
), arg3
))
11478 return -TARGET_EFAULT
;
11483 #ifdef TARGET_NR_getresgid
11484 case TARGET_NR_setresgid
:
11485 return get_errno(sys_setresgid(low2highgid(arg1
),
11487 low2highgid(arg3
)));
11489 #ifdef TARGET_NR_getresgid
11490 case TARGET_NR_getresgid
:
11492 gid_t rgid
, egid
, sgid
;
11493 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11494 if (!is_error(ret
)) {
11495 if (put_user_id(high2lowgid(rgid
), arg1
)
11496 || put_user_id(high2lowgid(egid
), arg2
)
11497 || put_user_id(high2lowgid(sgid
), arg3
))
11498 return -TARGET_EFAULT
;
11503 #ifdef TARGET_NR_chown
11504 case TARGET_NR_chown
:
11505 if (!(p
= lock_user_string(arg1
)))
11506 return -TARGET_EFAULT
;
11507 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11508 unlock_user(p
, arg1
, 0);
11511 case TARGET_NR_setuid
:
11512 return get_errno(sys_setuid(low2highuid(arg1
)));
11513 case TARGET_NR_setgid
:
11514 return get_errno(sys_setgid(low2highgid(arg1
)));
11515 case TARGET_NR_setfsuid
:
11516 return get_errno(setfsuid(arg1
));
11517 case TARGET_NR_setfsgid
:
11518 return get_errno(setfsgid(arg1
));
11520 #ifdef TARGET_NR_lchown32
11521 case TARGET_NR_lchown32
:
11522 if (!(p
= lock_user_string(arg1
)))
11523 return -TARGET_EFAULT
;
11524 ret
= get_errno(lchown(p
, arg2
, arg3
));
11525 unlock_user(p
, arg1
, 0);
11528 #ifdef TARGET_NR_getuid32
11529 case TARGET_NR_getuid32
:
11530 return get_errno(getuid());
11533 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11534 /* Alpha specific */
11535 case TARGET_NR_getxuid
:
11539 cpu_env
->ir
[IR_A4
]=euid
;
11541 return get_errno(getuid());
11543 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11544 /* Alpha specific */
11545 case TARGET_NR_getxgid
:
11549 cpu_env
->ir
[IR_A4
]=egid
;
11551 return get_errno(getgid());
11553 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11554 /* Alpha specific */
11555 case TARGET_NR_osf_getsysinfo
:
11556 ret
= -TARGET_EOPNOTSUPP
;
11558 case TARGET_GSI_IEEE_FP_CONTROL
:
11560 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11561 uint64_t swcr
= cpu_env
->swcr
;
11563 swcr
&= ~SWCR_STATUS_MASK
;
11564 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11566 if (put_user_u64 (swcr
, arg2
))
11567 return -TARGET_EFAULT
;
11572 /* case GSI_IEEE_STATE_AT_SIGNAL:
11573 -- Not implemented in linux kernel.
11575 -- Retrieves current unaligned access state; not much used.
11576 case GSI_PROC_TYPE:
11577 -- Retrieves implver information; surely not used.
11578 case GSI_GET_HWRPB:
11579 -- Grabs a copy of the HWRPB; surely not used.
11584 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11585 /* Alpha specific */
11586 case TARGET_NR_osf_setsysinfo
:
11587 ret
= -TARGET_EOPNOTSUPP
;
11589 case TARGET_SSI_IEEE_FP_CONTROL
:
11591 uint64_t swcr
, fpcr
;
11593 if (get_user_u64 (swcr
, arg2
)) {
11594 return -TARGET_EFAULT
;
11598 * The kernel calls swcr_update_status to update the
11599 * status bits from the fpcr at every point that it
11600 * could be queried. Therefore, we store the status
11601 * bits only in FPCR.
11603 cpu_env
->swcr
= swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11605 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11606 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11607 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11608 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11613 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11615 uint64_t exc
, fpcr
, fex
;
11617 if (get_user_u64(exc
, arg2
)) {
11618 return -TARGET_EFAULT
;
11620 exc
&= SWCR_STATUS_MASK
;
11621 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11623 /* Old exceptions are not signaled. */
11624 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11626 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11627 fex
&= (cpu_env
)->swcr
;
11629 /* Update the hardware fpcr. */
11630 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11631 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11634 int si_code
= TARGET_FPE_FLTUNK
;
11635 target_siginfo_t info
;
11637 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11638 si_code
= TARGET_FPE_FLTUND
;
11640 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11641 si_code
= TARGET_FPE_FLTRES
;
11643 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11644 si_code
= TARGET_FPE_FLTUND
;
11646 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11647 si_code
= TARGET_FPE_FLTOVF
;
11649 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11650 si_code
= TARGET_FPE_FLTDIV
;
11652 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11653 si_code
= TARGET_FPE_FLTINV
;
11656 info
.si_signo
= SIGFPE
;
11658 info
.si_code
= si_code
;
11659 info
._sifields
._sigfault
._addr
= (cpu_env
)->pc
;
11660 queue_signal(cpu_env
, info
.si_signo
,
11661 QEMU_SI_FAULT
, &info
);
11667 /* case SSI_NVPAIRS:
11668 -- Used with SSIN_UACPROC to enable unaligned accesses.
11669 case SSI_IEEE_STATE_AT_SIGNAL:
11670 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11671 -- Not implemented in linux kernel
11676 #ifdef TARGET_NR_osf_sigprocmask
11677 /* Alpha specific. */
11678 case TARGET_NR_osf_sigprocmask
:
11682 sigset_t set
, oldset
;
11685 case TARGET_SIG_BLOCK
:
11688 case TARGET_SIG_UNBLOCK
:
11691 case TARGET_SIG_SETMASK
:
11695 return -TARGET_EINVAL
;
11698 target_to_host_old_sigset(&set
, &mask
);
11699 ret
= do_sigprocmask(how
, &set
, &oldset
);
11701 host_to_target_old_sigset(&mask
, &oldset
);
11708 #ifdef TARGET_NR_getgid32
11709 case TARGET_NR_getgid32
:
11710 return get_errno(getgid());
11712 #ifdef TARGET_NR_geteuid32
11713 case TARGET_NR_geteuid32
:
11714 return get_errno(geteuid());
11716 #ifdef TARGET_NR_getegid32
11717 case TARGET_NR_getegid32
:
11718 return get_errno(getegid());
11720 #ifdef TARGET_NR_setreuid32
11721 case TARGET_NR_setreuid32
:
11722 return get_errno(setreuid(arg1
, arg2
));
11724 #ifdef TARGET_NR_setregid32
11725 case TARGET_NR_setregid32
:
11726 return get_errno(setregid(arg1
, arg2
));
11728 #ifdef TARGET_NR_getgroups32
11729 case TARGET_NR_getgroups32
:
11731 int gidsetsize
= arg1
;
11732 uint32_t *target_grouplist
;
11736 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11737 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11738 if (gidsetsize
== 0)
11740 if (!is_error(ret
)) {
11741 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11742 if (!target_grouplist
) {
11743 return -TARGET_EFAULT
;
11745 for(i
= 0;i
< ret
; i
++)
11746 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11747 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11752 #ifdef TARGET_NR_setgroups32
11753 case TARGET_NR_setgroups32
:
11755 int gidsetsize
= arg1
;
11756 uint32_t *target_grouplist
;
11760 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11761 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11762 if (!target_grouplist
) {
11763 return -TARGET_EFAULT
;
11765 for(i
= 0;i
< gidsetsize
; i
++)
11766 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11767 unlock_user(target_grouplist
, arg2
, 0);
11768 return get_errno(setgroups(gidsetsize
, grouplist
));
11771 #ifdef TARGET_NR_fchown32
11772 case TARGET_NR_fchown32
:
11773 return get_errno(fchown(arg1
, arg2
, arg3
));
11775 #ifdef TARGET_NR_setresuid32
11776 case TARGET_NR_setresuid32
:
11777 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11779 #ifdef TARGET_NR_getresuid32
11780 case TARGET_NR_getresuid32
:
11782 uid_t ruid
, euid
, suid
;
11783 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11784 if (!is_error(ret
)) {
11785 if (put_user_u32(ruid
, arg1
)
11786 || put_user_u32(euid
, arg2
)
11787 || put_user_u32(suid
, arg3
))
11788 return -TARGET_EFAULT
;
11793 #ifdef TARGET_NR_setresgid32
11794 case TARGET_NR_setresgid32
:
11795 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11797 #ifdef TARGET_NR_getresgid32
11798 case TARGET_NR_getresgid32
:
11800 gid_t rgid
, egid
, sgid
;
11801 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11802 if (!is_error(ret
)) {
11803 if (put_user_u32(rgid
, arg1
)
11804 || put_user_u32(egid
, arg2
)
11805 || put_user_u32(sgid
, arg3
))
11806 return -TARGET_EFAULT
;
11811 #ifdef TARGET_NR_chown32
11812 case TARGET_NR_chown32
:
11813 if (!(p
= lock_user_string(arg1
)))
11814 return -TARGET_EFAULT
;
11815 ret
= get_errno(chown(p
, arg2
, arg3
));
11816 unlock_user(p
, arg1
, 0);
11819 #ifdef TARGET_NR_setuid32
11820 case TARGET_NR_setuid32
:
11821 return get_errno(sys_setuid(arg1
));
11823 #ifdef TARGET_NR_setgid32
11824 case TARGET_NR_setgid32
:
11825 return get_errno(sys_setgid(arg1
));
11827 #ifdef TARGET_NR_setfsuid32
11828 case TARGET_NR_setfsuid32
:
11829 return get_errno(setfsuid(arg1
));
11831 #ifdef TARGET_NR_setfsgid32
11832 case TARGET_NR_setfsgid32
:
11833 return get_errno(setfsgid(arg1
));
11835 #ifdef TARGET_NR_mincore
11836 case TARGET_NR_mincore
:
11838 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11840 return -TARGET_ENOMEM
;
11842 p
= lock_user_string(arg3
);
11844 ret
= -TARGET_EFAULT
;
11846 ret
= get_errno(mincore(a
, arg2
, p
));
11847 unlock_user(p
, arg3
, ret
);
11849 unlock_user(a
, arg1
, 0);
11853 #ifdef TARGET_NR_arm_fadvise64_64
11854 case TARGET_NR_arm_fadvise64_64
:
11855 /* arm_fadvise64_64 looks like fadvise64_64 but
11856 * with different argument order: fd, advice, offset, len
11857 * rather than the usual fd, offset, len, advice.
11858 * Note that offset and len are both 64-bit so appear as
11859 * pairs of 32-bit registers.
11861 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11862 target_offset64(arg5
, arg6
), arg2
);
11863 return -host_to_target_errno(ret
);
11866 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11868 #ifdef TARGET_NR_fadvise64_64
11869 case TARGET_NR_fadvise64_64
:
11870 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11871 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11879 /* 6 args: fd, offset (high, low), len (high, low), advice */
11880 if (regpairs_aligned(cpu_env
, num
)) {
11881 /* offset is in (3,4), len in (5,6) and advice in 7 */
11889 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11890 target_offset64(arg4
, arg5
), arg6
);
11891 return -host_to_target_errno(ret
);
11894 #ifdef TARGET_NR_fadvise64
11895 case TARGET_NR_fadvise64
:
11896 /* 5 args: fd, offset (high, low), len, advice */
11897 if (regpairs_aligned(cpu_env
, num
)) {
11898 /* offset is in (3,4), len in 5 and advice in 6 */
11904 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11905 return -host_to_target_errno(ret
);
11908 #else /* not a 32-bit ABI */
11909 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11910 #ifdef TARGET_NR_fadvise64_64
11911 case TARGET_NR_fadvise64_64
:
11913 #ifdef TARGET_NR_fadvise64
11914 case TARGET_NR_fadvise64
:
11916 #ifdef TARGET_S390X
11918 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11919 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11920 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11921 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11925 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11927 #endif /* end of 64-bit ABI fadvise handling */
11929 #ifdef TARGET_NR_madvise
11930 case TARGET_NR_madvise
:
11931 return target_madvise(arg1
, arg2
, arg3
);
11933 #ifdef TARGET_NR_fcntl64
11934 case TARGET_NR_fcntl64
:
11938 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11939 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11942 if (!cpu_env
->eabi
) {
11943 copyfrom
= copy_from_user_oabi_flock64
;
11944 copyto
= copy_to_user_oabi_flock64
;
11948 cmd
= target_to_host_fcntl_cmd(arg2
);
11949 if (cmd
== -TARGET_EINVAL
) {
11954 case TARGET_F_GETLK64
:
11955 ret
= copyfrom(&fl
, arg3
);
11959 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11961 ret
= copyto(arg3
, &fl
);
11965 case TARGET_F_SETLK64
:
11966 case TARGET_F_SETLKW64
:
11967 ret
= copyfrom(&fl
, arg3
);
11971 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11974 ret
= do_fcntl(arg1
, arg2
, arg3
);
11980 #ifdef TARGET_NR_cacheflush
11981 case TARGET_NR_cacheflush
:
11982 /* self-modifying code is handled automatically, so nothing needed */
11985 #ifdef TARGET_NR_getpagesize
11986 case TARGET_NR_getpagesize
:
11987 return TARGET_PAGE_SIZE
;
11989 case TARGET_NR_gettid
:
11990 return get_errno(sys_gettid());
11991 #ifdef TARGET_NR_readahead
11992 case TARGET_NR_readahead
:
11993 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11994 if (regpairs_aligned(cpu_env
, num
)) {
11999 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
12001 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
12006 #ifdef TARGET_NR_setxattr
12007 case TARGET_NR_listxattr
:
12008 case TARGET_NR_llistxattr
:
12012 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12014 return -TARGET_EFAULT
;
12017 p
= lock_user_string(arg1
);
12019 if (num
== TARGET_NR_listxattr
) {
12020 ret
= get_errno(listxattr(p
, b
, arg3
));
12022 ret
= get_errno(llistxattr(p
, b
, arg3
));
12025 ret
= -TARGET_EFAULT
;
12027 unlock_user(p
, arg1
, 0);
12028 unlock_user(b
, arg2
, arg3
);
12031 case TARGET_NR_flistxattr
:
12035 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12037 return -TARGET_EFAULT
;
12040 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
12041 unlock_user(b
, arg2
, arg3
);
12044 case TARGET_NR_setxattr
:
12045 case TARGET_NR_lsetxattr
:
12047 void *p
, *n
, *v
= 0;
12049 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12051 return -TARGET_EFAULT
;
12054 p
= lock_user_string(arg1
);
12055 n
= lock_user_string(arg2
);
12057 if (num
== TARGET_NR_setxattr
) {
12058 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
12060 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
12063 ret
= -TARGET_EFAULT
;
12065 unlock_user(p
, arg1
, 0);
12066 unlock_user(n
, arg2
, 0);
12067 unlock_user(v
, arg3
, 0);
12070 case TARGET_NR_fsetxattr
:
12074 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12076 return -TARGET_EFAULT
;
12079 n
= lock_user_string(arg2
);
12081 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12083 ret
= -TARGET_EFAULT
;
12085 unlock_user(n
, arg2
, 0);
12086 unlock_user(v
, arg3
, 0);
12089 case TARGET_NR_getxattr
:
12090 case TARGET_NR_lgetxattr
:
12092 void *p
, *n
, *v
= 0;
12094 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12096 return -TARGET_EFAULT
;
12099 p
= lock_user_string(arg1
);
12100 n
= lock_user_string(arg2
);
12102 if (num
== TARGET_NR_getxattr
) {
12103 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12105 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12108 ret
= -TARGET_EFAULT
;
12110 unlock_user(p
, arg1
, 0);
12111 unlock_user(n
, arg2
, 0);
12112 unlock_user(v
, arg3
, arg4
);
12115 case TARGET_NR_fgetxattr
:
12119 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12121 return -TARGET_EFAULT
;
12124 n
= lock_user_string(arg2
);
12126 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12128 ret
= -TARGET_EFAULT
;
12130 unlock_user(n
, arg2
, 0);
12131 unlock_user(v
, arg3
, arg4
);
12134 case TARGET_NR_removexattr
:
12135 case TARGET_NR_lremovexattr
:
12138 p
= lock_user_string(arg1
);
12139 n
= lock_user_string(arg2
);
12141 if (num
== TARGET_NR_removexattr
) {
12142 ret
= get_errno(removexattr(p
, n
));
12144 ret
= get_errno(lremovexattr(p
, n
));
12147 ret
= -TARGET_EFAULT
;
12149 unlock_user(p
, arg1
, 0);
12150 unlock_user(n
, arg2
, 0);
12153 case TARGET_NR_fremovexattr
:
12156 n
= lock_user_string(arg2
);
12158 ret
= get_errno(fremovexattr(arg1
, n
));
12160 ret
= -TARGET_EFAULT
;
12162 unlock_user(n
, arg2
, 0);
12166 #endif /* CONFIG_ATTR */
12167 #ifdef TARGET_NR_set_thread_area
12168 case TARGET_NR_set_thread_area
:
12169 #if defined(TARGET_MIPS)
12170 cpu_env
->active_tc
.CP0_UserLocal
= arg1
;
12172 #elif defined(TARGET_CRIS)
12174 ret
= -TARGET_EINVAL
;
12176 cpu_env
->pregs
[PR_PID
] = arg1
;
12180 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12181 return do_set_thread_area(cpu_env
, arg1
);
12182 #elif defined(TARGET_M68K)
12184 TaskState
*ts
= cpu
->opaque
;
12185 ts
->tp_value
= arg1
;
12189 return -TARGET_ENOSYS
;
12192 #ifdef TARGET_NR_get_thread_area
12193 case TARGET_NR_get_thread_area
:
12194 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12195 return do_get_thread_area(cpu_env
, arg1
);
12196 #elif defined(TARGET_M68K)
12198 TaskState
*ts
= cpu
->opaque
;
12199 return ts
->tp_value
;
12202 return -TARGET_ENOSYS
;
12205 #ifdef TARGET_NR_getdomainname
12206 case TARGET_NR_getdomainname
:
12207 return -TARGET_ENOSYS
;
12210 #ifdef TARGET_NR_clock_settime
12211 case TARGET_NR_clock_settime
:
12213 struct timespec ts
;
12215 ret
= target_to_host_timespec(&ts
, arg2
);
12216 if (!is_error(ret
)) {
12217 ret
= get_errno(clock_settime(arg1
, &ts
));
12222 #ifdef TARGET_NR_clock_settime64
12223 case TARGET_NR_clock_settime64
:
12225 struct timespec ts
;
12227 ret
= target_to_host_timespec64(&ts
, arg2
);
12228 if (!is_error(ret
)) {
12229 ret
= get_errno(clock_settime(arg1
, &ts
));
12234 #ifdef TARGET_NR_clock_gettime
12235 case TARGET_NR_clock_gettime
:
12237 struct timespec ts
;
12238 ret
= get_errno(clock_gettime(arg1
, &ts
));
12239 if (!is_error(ret
)) {
12240 ret
= host_to_target_timespec(arg2
, &ts
);
12245 #ifdef TARGET_NR_clock_gettime64
12246 case TARGET_NR_clock_gettime64
:
12248 struct timespec ts
;
12249 ret
= get_errno(clock_gettime(arg1
, &ts
));
12250 if (!is_error(ret
)) {
12251 ret
= host_to_target_timespec64(arg2
, &ts
);
12256 #ifdef TARGET_NR_clock_getres
12257 case TARGET_NR_clock_getres
:
12259 struct timespec ts
;
12260 ret
= get_errno(clock_getres(arg1
, &ts
));
12261 if (!is_error(ret
)) {
12262 host_to_target_timespec(arg2
, &ts
);
12267 #ifdef TARGET_NR_clock_getres_time64
12268 case TARGET_NR_clock_getres_time64
:
12270 struct timespec ts
;
12271 ret
= get_errno(clock_getres(arg1
, &ts
));
12272 if (!is_error(ret
)) {
12273 host_to_target_timespec64(arg2
, &ts
);
12278 #ifdef TARGET_NR_clock_nanosleep
12279 case TARGET_NR_clock_nanosleep
:
12281 struct timespec ts
;
12282 if (target_to_host_timespec(&ts
, arg3
)) {
12283 return -TARGET_EFAULT
;
12285 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12286 &ts
, arg4
? &ts
: NULL
));
12288 * if the call is interrupted by a signal handler, it fails
12289 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12290 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12292 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12293 host_to_target_timespec(arg4
, &ts
)) {
12294 return -TARGET_EFAULT
;
12300 #ifdef TARGET_NR_clock_nanosleep_time64
12301 case TARGET_NR_clock_nanosleep_time64
:
12303 struct timespec ts
;
12305 if (target_to_host_timespec64(&ts
, arg3
)) {
12306 return -TARGET_EFAULT
;
12309 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12310 &ts
, arg4
? &ts
: NULL
));
12312 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12313 host_to_target_timespec64(arg4
, &ts
)) {
12314 return -TARGET_EFAULT
;
12320 #if defined(TARGET_NR_set_tid_address)
12321 case TARGET_NR_set_tid_address
:
12323 TaskState
*ts
= cpu
->opaque
;
12324 ts
->child_tidptr
= arg1
;
12325 /* do not call host set_tid_address() syscall, instead return tid() */
12326 return get_errno(sys_gettid());
12330 case TARGET_NR_tkill
:
12331 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12333 case TARGET_NR_tgkill
:
12334 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12335 target_to_host_signal(arg3
)));
12337 #ifdef TARGET_NR_set_robust_list
12338 case TARGET_NR_set_robust_list
:
12339 case TARGET_NR_get_robust_list
:
12340 /* The ABI for supporting robust futexes has userspace pass
12341 * the kernel a pointer to a linked list which is updated by
12342 * userspace after the syscall; the list is walked by the kernel
12343 * when the thread exits. Since the linked list in QEMU guest
12344 * memory isn't a valid linked list for the host and we have
12345 * no way to reliably intercept the thread-death event, we can't
12346 * support these. Silently return ENOSYS so that guest userspace
12347 * falls back to a non-robust futex implementation (which should
12348 * be OK except in the corner case of the guest crashing while
12349 * holding a mutex that is shared with another process via
12352 return -TARGET_ENOSYS
;
12355 #if defined(TARGET_NR_utimensat)
12356 case TARGET_NR_utimensat
:
12358 struct timespec
*tsp
, ts
[2];
12362 if (target_to_host_timespec(ts
, arg3
)) {
12363 return -TARGET_EFAULT
;
12365 if (target_to_host_timespec(ts
+ 1, arg3
+
12366 sizeof(struct target_timespec
))) {
12367 return -TARGET_EFAULT
;
12372 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12374 if (!(p
= lock_user_string(arg2
))) {
12375 return -TARGET_EFAULT
;
12377 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12378 unlock_user(p
, arg2
, 0);
12383 #ifdef TARGET_NR_utimensat_time64
12384 case TARGET_NR_utimensat_time64
:
12386 struct timespec
*tsp
, ts
[2];
12390 if (target_to_host_timespec64(ts
, arg3
)) {
12391 return -TARGET_EFAULT
;
12393 if (target_to_host_timespec64(ts
+ 1, arg3
+
12394 sizeof(struct target__kernel_timespec
))) {
12395 return -TARGET_EFAULT
;
12400 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12402 p
= lock_user_string(arg2
);
12404 return -TARGET_EFAULT
;
12406 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12407 unlock_user(p
, arg2
, 0);
12412 #ifdef TARGET_NR_futex
12413 case TARGET_NR_futex
:
12414 return do_futex(cpu
, false, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12416 #ifdef TARGET_NR_futex_time64
12417 case TARGET_NR_futex_time64
:
12418 return do_futex(cpu
, true, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12420 #ifdef CONFIG_INOTIFY
12421 #if defined(TARGET_NR_inotify_init)
12422 case TARGET_NR_inotify_init
:
12423 ret
= get_errno(inotify_init());
12425 fd_trans_register(ret
, &target_inotify_trans
);
12429 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12430 case TARGET_NR_inotify_init1
:
12431 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12432 fcntl_flags_tbl
)));
12434 fd_trans_register(ret
, &target_inotify_trans
);
12438 #if defined(TARGET_NR_inotify_add_watch)
12439 case TARGET_NR_inotify_add_watch
:
12440 p
= lock_user_string(arg2
);
12441 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12442 unlock_user(p
, arg2
, 0);
12445 #if defined(TARGET_NR_inotify_rm_watch)
12446 case TARGET_NR_inotify_rm_watch
:
12447 return get_errno(inotify_rm_watch(arg1
, arg2
));
12451 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12452 case TARGET_NR_mq_open
:
12454 struct mq_attr posix_mq_attr
;
12455 struct mq_attr
*pposix_mq_attr
;
12458 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12459 pposix_mq_attr
= NULL
;
12461 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12462 return -TARGET_EFAULT
;
12464 pposix_mq_attr
= &posix_mq_attr
;
12466 p
= lock_user_string(arg1
- 1);
12468 return -TARGET_EFAULT
;
12470 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12471 unlock_user (p
, arg1
, 0);
12475 case TARGET_NR_mq_unlink
:
12476 p
= lock_user_string(arg1
- 1);
12478 return -TARGET_EFAULT
;
12480 ret
= get_errno(mq_unlink(p
));
12481 unlock_user (p
, arg1
, 0);
12484 #ifdef TARGET_NR_mq_timedsend
12485 case TARGET_NR_mq_timedsend
:
12487 struct timespec ts
;
12489 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12491 if (target_to_host_timespec(&ts
, arg5
)) {
12492 return -TARGET_EFAULT
;
12494 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12495 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12496 return -TARGET_EFAULT
;
12499 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12501 unlock_user (p
, arg2
, arg3
);
12505 #ifdef TARGET_NR_mq_timedsend_time64
12506 case TARGET_NR_mq_timedsend_time64
:
12508 struct timespec ts
;
12510 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12512 if (target_to_host_timespec64(&ts
, arg5
)) {
12513 return -TARGET_EFAULT
;
12515 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12516 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12517 return -TARGET_EFAULT
;
12520 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12522 unlock_user(p
, arg2
, arg3
);
12527 #ifdef TARGET_NR_mq_timedreceive
12528 case TARGET_NR_mq_timedreceive
:
12530 struct timespec ts
;
12533 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12535 if (target_to_host_timespec(&ts
, arg5
)) {
12536 return -TARGET_EFAULT
;
12538 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12540 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12541 return -TARGET_EFAULT
;
12544 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12547 unlock_user (p
, arg2
, arg3
);
12549 put_user_u32(prio
, arg4
);
12553 #ifdef TARGET_NR_mq_timedreceive_time64
12554 case TARGET_NR_mq_timedreceive_time64
:
12556 struct timespec ts
;
12559 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12561 if (target_to_host_timespec64(&ts
, arg5
)) {
12562 return -TARGET_EFAULT
;
12564 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12566 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12567 return -TARGET_EFAULT
;
12570 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12573 unlock_user(p
, arg2
, arg3
);
12575 put_user_u32(prio
, arg4
);
12581 /* Not implemented for now... */
12582 /* case TARGET_NR_mq_notify: */
12585 case TARGET_NR_mq_getsetattr
:
12587 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12590 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12591 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12592 &posix_mq_attr_out
));
12593 } else if (arg3
!= 0) {
12594 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12596 if (ret
== 0 && arg3
!= 0) {
12597 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12603 #ifdef CONFIG_SPLICE
12604 #ifdef TARGET_NR_tee
12605 case TARGET_NR_tee
:
12607 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12611 #ifdef TARGET_NR_splice
12612 case TARGET_NR_splice
:
12614 loff_t loff_in
, loff_out
;
12615 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12617 if (get_user_u64(loff_in
, arg2
)) {
12618 return -TARGET_EFAULT
;
12620 ploff_in
= &loff_in
;
12623 if (get_user_u64(loff_out
, arg4
)) {
12624 return -TARGET_EFAULT
;
12626 ploff_out
= &loff_out
;
12628 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12630 if (put_user_u64(loff_in
, arg2
)) {
12631 return -TARGET_EFAULT
;
12635 if (put_user_u64(loff_out
, arg4
)) {
12636 return -TARGET_EFAULT
;
12642 #ifdef TARGET_NR_vmsplice
12643 case TARGET_NR_vmsplice
:
12645 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12647 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12648 unlock_iovec(vec
, arg2
, arg3
, 0);
12650 ret
= -host_to_target_errno(errno
);
12655 #endif /* CONFIG_SPLICE */
12656 #ifdef CONFIG_EVENTFD
12657 #if defined(TARGET_NR_eventfd)
12658 case TARGET_NR_eventfd
:
12659 ret
= get_errno(eventfd(arg1
, 0));
12661 fd_trans_register(ret
, &target_eventfd_trans
);
12665 #if defined(TARGET_NR_eventfd2)
12666 case TARGET_NR_eventfd2
:
12668 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12669 if (arg2
& TARGET_O_NONBLOCK
) {
12670 host_flags
|= O_NONBLOCK
;
12672 if (arg2
& TARGET_O_CLOEXEC
) {
12673 host_flags
|= O_CLOEXEC
;
12675 ret
= get_errno(eventfd(arg1
, host_flags
));
12677 fd_trans_register(ret
, &target_eventfd_trans
);
12682 #endif /* CONFIG_EVENTFD */
12683 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12684 case TARGET_NR_fallocate
:
12685 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12686 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12687 target_offset64(arg5
, arg6
)));
12689 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12693 #if defined(CONFIG_SYNC_FILE_RANGE)
12694 #if defined(TARGET_NR_sync_file_range)
12695 case TARGET_NR_sync_file_range
:
12696 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12697 #if defined(TARGET_MIPS)
12698 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12699 target_offset64(arg5
, arg6
), arg7
));
12701 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12702 target_offset64(arg4
, arg5
), arg6
));
12703 #endif /* !TARGET_MIPS */
12705 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12709 #if defined(TARGET_NR_sync_file_range2) || \
12710 defined(TARGET_NR_arm_sync_file_range)
12711 #if defined(TARGET_NR_sync_file_range2)
12712 case TARGET_NR_sync_file_range2
:
12714 #if defined(TARGET_NR_arm_sync_file_range)
12715 case TARGET_NR_arm_sync_file_range
:
12717 /* This is like sync_file_range but the arguments are reordered */
12718 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12719 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12720 target_offset64(arg5
, arg6
), arg2
));
12722 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12727 #if defined(TARGET_NR_signalfd4)
12728 case TARGET_NR_signalfd4
:
12729 return do_signalfd4(arg1
, arg2
, arg4
);
12731 #if defined(TARGET_NR_signalfd)
12732 case TARGET_NR_signalfd
:
12733 return do_signalfd4(arg1
, arg2
, 0);
12735 #if defined(CONFIG_EPOLL)
12736 #if defined(TARGET_NR_epoll_create)
12737 case TARGET_NR_epoll_create
:
12738 return get_errno(epoll_create(arg1
));
12740 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12741 case TARGET_NR_epoll_create1
:
12742 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12744 #if defined(TARGET_NR_epoll_ctl)
12745 case TARGET_NR_epoll_ctl
:
12747 struct epoll_event ep
;
12748 struct epoll_event
*epp
= 0;
12750 if (arg2
!= EPOLL_CTL_DEL
) {
12751 struct target_epoll_event
*target_ep
;
12752 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12753 return -TARGET_EFAULT
;
12755 ep
.events
= tswap32(target_ep
->events
);
12757 * The epoll_data_t union is just opaque data to the kernel,
12758 * so we transfer all 64 bits across and need not worry what
12759 * actual data type it is.
12761 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12762 unlock_user_struct(target_ep
, arg4
, 0);
12765 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12766 * non-null pointer, even though this argument is ignored.
12771 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12775 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12776 #if defined(TARGET_NR_epoll_wait)
12777 case TARGET_NR_epoll_wait
:
12779 #if defined(TARGET_NR_epoll_pwait)
12780 case TARGET_NR_epoll_pwait
:
12783 struct target_epoll_event
*target_ep
;
12784 struct epoll_event
*ep
;
12786 int maxevents
= arg3
;
12787 int timeout
= arg4
;
12789 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12790 return -TARGET_EINVAL
;
12793 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12794 maxevents
* sizeof(struct target_epoll_event
), 1);
12796 return -TARGET_EFAULT
;
12799 ep
= g_try_new(struct epoll_event
, maxevents
);
12801 unlock_user(target_ep
, arg2
, 0);
12802 return -TARGET_ENOMEM
;
12806 #if defined(TARGET_NR_epoll_pwait)
12807 case TARGET_NR_epoll_pwait
:
12809 sigset_t
*set
= NULL
;
12812 ret
= process_sigsuspend_mask(&set
, arg5
, arg6
);
12818 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12819 set
, SIGSET_T_SIZE
));
12822 finish_sigsuspend_mask(ret
);
12827 #if defined(TARGET_NR_epoll_wait)
12828 case TARGET_NR_epoll_wait
:
12829 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12834 ret
= -TARGET_ENOSYS
;
12836 if (!is_error(ret
)) {
12838 for (i
= 0; i
< ret
; i
++) {
12839 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12840 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12842 unlock_user(target_ep
, arg2
,
12843 ret
* sizeof(struct target_epoll_event
));
12845 unlock_user(target_ep
, arg2
, 0);
12852 #ifdef TARGET_NR_prlimit64
12853 case TARGET_NR_prlimit64
:
12855 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12856 struct target_rlimit64
*target_rnew
, *target_rold
;
12857 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12858 int resource
= target_to_host_resource(arg2
);
12860 if (arg3
&& (resource
!= RLIMIT_AS
&&
12861 resource
!= RLIMIT_DATA
&&
12862 resource
!= RLIMIT_STACK
)) {
12863 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12864 return -TARGET_EFAULT
;
12866 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12867 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12868 unlock_user_struct(target_rnew
, arg3
, 0);
12872 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12873 if (!is_error(ret
) && arg4
) {
12874 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12875 return -TARGET_EFAULT
;
12877 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12878 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12879 unlock_user_struct(target_rold
, arg4
, 1);
12884 #ifdef TARGET_NR_gethostname
12885 case TARGET_NR_gethostname
:
12887 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12889 ret
= get_errno(gethostname(name
, arg2
));
12890 unlock_user(name
, arg1
, arg2
);
12892 ret
= -TARGET_EFAULT
;
12897 #ifdef TARGET_NR_atomic_cmpxchg_32
12898 case TARGET_NR_atomic_cmpxchg_32
:
12900 /* should use start_exclusive from main.c */
12901 abi_ulong mem_value
;
12902 if (get_user_u32(mem_value
, arg6
)) {
12903 target_siginfo_t info
;
12904 info
.si_signo
= SIGSEGV
;
12906 info
.si_code
= TARGET_SEGV_MAPERR
;
12907 info
._sifields
._sigfault
._addr
= arg6
;
12908 queue_signal(cpu_env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
12912 if (mem_value
== arg2
)
12913 put_user_u32(arg1
, arg6
);
12917 #ifdef TARGET_NR_atomic_barrier
12918 case TARGET_NR_atomic_barrier
:
12919 /* Like the kernel implementation and the
12920 qemu arm barrier, no-op this? */
12924 #ifdef TARGET_NR_timer_create
12925 case TARGET_NR_timer_create
:
12927 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12929 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12932 int timer_index
= next_free_host_timer();
12934 if (timer_index
< 0) {
12935 ret
= -TARGET_EAGAIN
;
12937 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12940 phost_sevp
= &host_sevp
;
12941 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12943 free_host_timer_slot(timer_index
);
12948 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12950 free_host_timer_slot(timer_index
);
12952 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12953 timer_delete(*phtimer
);
12954 free_host_timer_slot(timer_index
);
12955 return -TARGET_EFAULT
;
12963 #ifdef TARGET_NR_timer_settime
12964 case TARGET_NR_timer_settime
:
12966 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12967 * struct itimerspec * old_value */
12968 target_timer_t timerid
= get_timer_id(arg1
);
12972 } else if (arg3
== 0) {
12973 ret
= -TARGET_EINVAL
;
12975 timer_t htimer
= g_posix_timers
[timerid
];
12976 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12978 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12979 return -TARGET_EFAULT
;
12982 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12983 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12984 return -TARGET_EFAULT
;
12991 #ifdef TARGET_NR_timer_settime64
12992 case TARGET_NR_timer_settime64
:
12994 target_timer_t timerid
= get_timer_id(arg1
);
12998 } else if (arg3
== 0) {
12999 ret
= -TARGET_EINVAL
;
13001 timer_t htimer
= g_posix_timers
[timerid
];
13002 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
13004 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
13005 return -TARGET_EFAULT
;
13008 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13009 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
13010 return -TARGET_EFAULT
;
13017 #ifdef TARGET_NR_timer_gettime
13018 case TARGET_NR_timer_gettime
:
13020 /* args: timer_t timerid, struct itimerspec *curr_value */
13021 target_timer_t timerid
= get_timer_id(arg1
);
13025 } else if (!arg2
) {
13026 ret
= -TARGET_EFAULT
;
13028 timer_t htimer
= g_posix_timers
[timerid
];
13029 struct itimerspec hspec
;
13030 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13032 if (host_to_target_itimerspec(arg2
, &hspec
)) {
13033 ret
= -TARGET_EFAULT
;
13040 #ifdef TARGET_NR_timer_gettime64
13041 case TARGET_NR_timer_gettime64
:
13043 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13044 target_timer_t timerid
= get_timer_id(arg1
);
13048 } else if (!arg2
) {
13049 ret
= -TARGET_EFAULT
;
13051 timer_t htimer
= g_posix_timers
[timerid
];
13052 struct itimerspec hspec
;
13053 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13055 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
13056 ret
= -TARGET_EFAULT
;
13063 #ifdef TARGET_NR_timer_getoverrun
13064 case TARGET_NR_timer_getoverrun
:
13066 /* args: timer_t timerid */
13067 target_timer_t timerid
= get_timer_id(arg1
);
13072 timer_t htimer
= g_posix_timers
[timerid
];
13073 ret
= get_errno(timer_getoverrun(htimer
));
13079 #ifdef TARGET_NR_timer_delete
13080 case TARGET_NR_timer_delete
:
13082 /* args: timer_t timerid */
13083 target_timer_t timerid
= get_timer_id(arg1
);
13088 timer_t htimer
= g_posix_timers
[timerid
];
13089 ret
= get_errno(timer_delete(htimer
));
13090 free_host_timer_slot(timerid
);
13096 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13097 case TARGET_NR_timerfd_create
:
13098 return get_errno(timerfd_create(arg1
,
13099 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13102 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13103 case TARGET_NR_timerfd_gettime
:
13105 struct itimerspec its_curr
;
13107 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13109 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13110 return -TARGET_EFAULT
;
13116 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13117 case TARGET_NR_timerfd_gettime64
:
13119 struct itimerspec its_curr
;
13121 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13123 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13124 return -TARGET_EFAULT
;
13130 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13131 case TARGET_NR_timerfd_settime
:
13133 struct itimerspec its_new
, its_old
, *p_new
;
13136 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13137 return -TARGET_EFAULT
;
13144 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13146 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13147 return -TARGET_EFAULT
;
13153 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13154 case TARGET_NR_timerfd_settime64
:
13156 struct itimerspec its_new
, its_old
, *p_new
;
13159 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13160 return -TARGET_EFAULT
;
13167 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13169 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13170 return -TARGET_EFAULT
;
13176 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13177 case TARGET_NR_ioprio_get
:
13178 return get_errno(ioprio_get(arg1
, arg2
));
13181 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13182 case TARGET_NR_ioprio_set
:
13183 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13186 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13187 case TARGET_NR_setns
:
13188 return get_errno(setns(arg1
, arg2
));
13190 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13191 case TARGET_NR_unshare
:
13192 return get_errno(unshare(arg1
));
13194 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13195 case TARGET_NR_kcmp
:
13196 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13198 #ifdef TARGET_NR_swapcontext
13199 case TARGET_NR_swapcontext
:
13200 /* PowerPC specific. */
13201 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13203 #ifdef TARGET_NR_memfd_create
13204 case TARGET_NR_memfd_create
:
13205 p
= lock_user_string(arg1
);
13207 return -TARGET_EFAULT
;
13209 ret
= get_errno(memfd_create(p
, arg2
));
13210 fd_trans_unregister(ret
);
13211 unlock_user(p
, arg1
, 0);
13214 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13215 case TARGET_NR_membarrier
:
13216 return get_errno(membarrier(arg1
, arg2
));
13219 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13220 case TARGET_NR_copy_file_range
:
13222 loff_t inoff
, outoff
;
13223 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13226 if (get_user_u64(inoff
, arg2
)) {
13227 return -TARGET_EFAULT
;
13232 if (get_user_u64(outoff
, arg4
)) {
13233 return -TARGET_EFAULT
;
13237 /* Do not sign-extend the count parameter. */
13238 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13239 (abi_ulong
)arg5
, arg6
));
13240 if (!is_error(ret
) && ret
> 0) {
13242 if (put_user_u64(inoff
, arg2
)) {
13243 return -TARGET_EFAULT
;
13247 if (put_user_u64(outoff
, arg4
)) {
13248 return -TARGET_EFAULT
;
13256 #if defined(TARGET_NR_pivot_root)
13257 case TARGET_NR_pivot_root
:
13260 p
= lock_user_string(arg1
); /* new_root */
13261 p2
= lock_user_string(arg2
); /* put_old */
13263 ret
= -TARGET_EFAULT
;
13265 ret
= get_errno(pivot_root(p
, p2
));
13267 unlock_user(p2
, arg2
, 0);
13268 unlock_user(p
, arg1
, 0);
13274 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13275 return -TARGET_ENOSYS
;
13280 abi_long
do_syscall(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
13281 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13282 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13285 CPUState
*cpu
= env_cpu(cpu_env
);
13288 #ifdef DEBUG_ERESTARTSYS
13289 /* Debug-only code for exercising the syscall-restart code paths
13290 * in the per-architecture cpu main loops: restart every syscall
13291 * the guest makes once before letting it through.
13297 return -QEMU_ERESTARTSYS
;
13302 record_syscall_start(cpu
, num
, arg1
,
13303 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13305 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13306 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13309 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13310 arg5
, arg6
, arg7
, arg8
);
13312 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13313 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13314 arg3
, arg4
, arg5
, arg6
);
13317 record_syscall_return(cpu
, num
, ret
);