/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
33 #include <sys/mount.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
40 #include <linux/capability.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
48 #include <sys/times.h>
51 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
68 #include <sys/timerfd.h>
71 #include <sys/eventfd.h>
74 #include <sys/epoll.h>
77 #include "qemu/xattr.h"
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
82 #ifdef HAVE_SYS_KCOV_H
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
122 #include <linux/btrfs.h>
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
128 #include "linux_loop.h"
132 #include "user-internals.h"
134 #include "signal-common.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
/* Clone-flag classification used by do_fork() to decide how to emulate clone(). */
#define CLONE_IO 0x80000000 /* Clone io context */

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS \
    (CLONE_VM | CLONE_FS | CLONE_FILES | \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS \
    (CLONE_DETACHED | CLONE_IO)

/* NOTE(review): the surrounding "#ifndef CLONE_PIDFD" guard line is missing
 * from this extraction — confirm against the original file. */
# define CLONE_PIDFD 0x00001000

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any bit outside the acceptable sets makes the corresponding clone() fail. */
#define CLONE_INVALID_FORK_FLAGS \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Local copies of the VFAT readdir ioctl numbers (linux/msdos_fs.h is not
 * included directly — see the commented-out include below). */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
/* Generators for thin static wrappers around raw host syscalls with 0..6
 * arguments: _syscallN(ret, name, type1, arg1, ...) expands to
 * "static ret name(...) { return syscall(__NR_name, ...); }".
 * NOTE(review): the "{"/"}" body lines (and two parameter-continuation lines
 * of _syscall5/_syscall6) are missing from this extraction. */
#define _syscall0(type,name) \
static type name (void) \
return syscall(__NR_##name); \
#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
return syscall(__NR_##name, arg1); \
#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
return syscall(__NR_##name, arg1, arg2); \
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
return syscall(__NR_##name, arg1, arg2, arg3); \
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Aliases so the _syscall* generators can emit sys_* wrappers that do not
 * collide with libc's own symbols of the same name.
 * NOTE(review): several closing #endif lines are missing from this
 * extraction. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK

/* Raw gettid(2) wrapper — glibc only gained a gettid() wrapper recently. */
#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
#if (defined(TARGET_NR_getdents) && \
     !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
          loff_t *, res, unsigned int, wh);
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
/* NOTE(review): the trailing "siginfo_t *, uinfo)" continuation of the next
 * declaration is missing from this extraction. */
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
/* NOTE(review): the "struct sched_attr {" opener, the "size" and
 * "sched_nice" members, and the closing "};" are missing from this
 * extraction — the field list below is what remains visible. */
    uint32_t sched_policy;
    uint64_t sched_flags;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
/* Raw wrappers for the scheduler syscalls; glibc's versions mangle the
 * policy/priority values, so QEMU calls the kernel directly. */
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
/* NOTE(review): reboot()'s final "void *, arg)" continuation line is missing
 * from this extraction. */
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
/* Translation table between target and host open(2)/fcntl(2) flag bits;
 * each row is { target_mask, target_bits, host_mask, host_bits }.
 * NOTE(review): the "#if defined(O_PATH)" guard, several #endif lines, and
 * the zero terminator row + closing "};" are missing from this extraction. */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

/* utimensat: use the host syscall when available; otherwise fall back.
 * NOTE(review): the "#else" lines and the fallback function bodies are
 * missing from this extraction — only the fallback signatures remain. */
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
        /* Fallback: plain renameat only supports flags == 0. */
        return renameat(oldfd, old, newfd, new);
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
/* NOTE(review): the rlim_cur/rlim_max member lines and closing "};" are
 * missing from this extraction. */
struct host_rlimit64 {
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

/* Claim the first free guest-timer slot (atomically flips its flag 0 -> 1).
 * NOTE(review): the function braces and the "return" lines are missing from
 * this extraction. */
static inline int next_free_host_timer(void)
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
539 static inline void free_host_timer_slot(int id
)
541 qatomic_store_release(g_posix_timer_allocated
+ id
, 0);
/* Map a host errno value to the target's numbering (and vice versa below);
 * the E() x-macro expands one case per errno from errnos.c.inc.
 * NOTE(review): the function braces, "#undef E" and the default/return
 * lines are missing from this extraction. */
static inline int host_to_target_errno(int host_errno)
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"

static inline int target_to_host_errno(int target_errno)
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
/* Convert a raw host syscall result into a guest result: on failure the
 * host errno is translated to a negative target errno.
 * NOTE(review): the "if (ret == -1)" guard and the success-path return are
 * missing from this extraction. */
abi_long get_errno(abi_long ret)
        return -host_to_target_errno(errno);
575 const char *target_strerror(int err
)
577 if (err
== QEMU_ERESTARTSYS
) {
578 return "To be restarted";
580 if (err
== QEMU_ESIGRETURN
) {
581 return "Successful exit from sigreturn";
584 return strerror(target_to_host_errno(err
));
/* Verify that the tail of a guest struct beyond the kernel-known size
 * (bytes [ksize, usize) at guest address @addr) is all zero — the standard
 * extensible-struct convention.  Returns -TARGET_EFAULT on access failure.
 * NOTE(review): the braces and several return/branch lines are missing from
 * this extraction. */
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
    if (usize <= ksize) {
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
/* Generators for safe_NAME() wrappers: like _syscallN, but routed through
 * safe_syscall() so that a guest signal arriving mid-call makes the syscall
 * restartable instead of being lost.
 * NOTE(review): the "{"/"}" body lines and two parameter-continuation lines
 * are missing from this extraction. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
    return safe_syscall(__NR_##name); \
#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
    return safe_syscall(__NR_##name, arg1); \
#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
    return safe_syscall(__NR_##name, arg1, arg2); \
#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Instantiations of the safe_* wrappers for every blocking syscall QEMU
 * forwards on behalf of the guest.
 * NOTE(review): several declarations below end with a trailing comma — their
 * final argument line (and many surrounding #if/#endif lines) are missing
 * from this extraction. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
/* NOTE(review): the "#ifdef __NR_fcntl64 ... #else ... #endif" lines around
 * the two safe_fcntl definitions are missing from this extraction. */
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/* Translate a host socket type (base type in the low nibble plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK modifier bits) into the target's encoding.
 * NOTE(review): the case labels and braces of the switch are missing from
 * this extraction — only the assignments remain. */
static inline int host_to_target_sock_type(int host_type)
    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
        target_type = TARGET_SOCK_DGRAM;
        target_type = TARGET_SOCK_STREAM;
        /* Default: pass the base type through unchanged. */
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
/* Current guest program break and the value it started at (the floor that
 * do_brk() never shrinks below). */
static abi_ulong target_brk, initial_target_brk;

/* Record the initial program break, page-aligned, at guest startup. */
void target_set_brk(abi_ulong new_brk)
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    initial_target_brk = target_brk;
/* do_brk() must return target values and target errnos. */
/* NOTE(review): several lines of this function (braces, the grow-path
 * return, the mmap flags continuation) are missing from this extraction;
 * the visible statements are kept verbatim. */
abi_long do_brk(abi_ulong brk_val)
    abi_long mapped_addr;
    /* brk pointers are always untagged */

    /* do not allow to shrink below initial brk value */
    if (brk_val < initial_target_brk) {

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);
        target_brk = brk_val;

    /* Grow the heap: map fresh anonymous pages above the old break. */
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
    if (mapped_addr == old_brk) {
        target_brk = brk_val;

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
    /* For everything else, return the previous break. */
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/* Build a host fd_set from the guest fd_set at @target_fds_addr, reading
 * the bits one abi_ulong word at a time and byte-swapping as needed.
 * NOTE(review): several interior lines (FD_ZERO, the FD_SET of each bit,
 * the final return) are missing from this extraction. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 sizeof(abi_ulong) * nw,
        return -TARGET_EFAULT;

    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
    unlock_user(target_fds, target_fds_addr, 0);
/* Like copy_from_user_fdset(), but a NULL guest pointer maps to a NULL
 * host fd_set pointer (for select's optional read/write/except sets).
 * NOTE(review): the else branch setting *fds_ptr and the returns are
 * missing from this extraction. */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
/* Write a host fd_set back into guest memory at @target_fds_addr, packing
 * the bits into abi_ulong words with target byte order.
 * NOTE(review): the v = 0 reset, the k increment and the final return are
 * missing from this extraction. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 sizeof(abi_ulong) * nw,
        return -TARGET_EFAULT;

    for (i = 0; i < nw; i++) {
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
        __put_user(v, &target_fds[i]);
    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
#if defined(__alpha__)
/* Rescale a clock_t tick count from the host's HZ to the target's HZ;
 * when the rates match the value passes through unchanged.
 * NOTE(review): the #else/pass-through branch and #endif are missing from
 * this extraction. */
static inline abi_long host_to_target_clock_t(long ticks)
#if HOST_HZ == TARGET_HZ
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
/* Copy a host struct rusage into the guest's target_rusage at
 * @target_addr, byte-swapping every field.  Returns -TARGET_EFAULT if the
 * guest memory cannot be locked for writing.
 * NOTE(review): the function braces and the final "return 0;" are missing
 * from this extraction. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);
#ifdef TARGET_NR_setrlimit
/* Convert a guest rlimit value to the host rlim_t: the guest infinity
 * marker, or any value that does not survive the round-trip through the
 * (possibly narrower) host type, becomes RLIM_INFINITY.
 * NOTE(review): the braces and final "return result;" are missing from
 * this extraction. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
    abi_ulong target_rlim_swap;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/* Inverse of target_to_host_rlim(): host RLIM_INFINITY, or any value too
 * wide for the guest's abi_long, maps to the guest infinity marker.
 * NOTE(review): the braces, "else" line and final "return result;" are
 * missing from this extraction. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
    abi_ulong target_rlim_swap;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);
/* Map a guest RLIMIT_* resource code to the host constant; one case per
 * resource.
 * NOTE(review): the switch header/braces, several "return RLIMIT_*;" lines
 * and the default branch are missing from this extraction. */
static inline int target_to_host_resource(int code)
    case TARGET_RLIMIT_AS:
    case TARGET_RLIMIT_CORE:
    case TARGET_RLIMIT_CPU:
    case TARGET_RLIMIT_DATA:
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
/* Read a guest struct target_timeval at @target_tv_addr into a host
 * struct timeval (fields byte-swapped by __get_user).
 * NOTE(review): the braces and final "return 0;" are missing from this
 * extraction. */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);
/* Write a host struct timeval into the guest struct target_timeval at
 * @target_tv_addr (fields byte-swapped by __put_user).
 * NOTE(review): the braces and final "return 0;" are missing from this
 * extraction. */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);
1096 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1097 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1098 abi_ulong target_tv_addr
)
1100 struct target__kernel_sock_timeval
*target_tv
;
1102 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1103 return -TARGET_EFAULT
;
1106 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1107 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1109 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1115 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1116 const struct timeval
*tv
)
1118 struct target__kernel_sock_timeval
*target_tv
;
1120 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1121 return -TARGET_EFAULT
;
1124 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1125 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1127 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1132 #if defined(TARGET_NR_futex) || \
1133 defined(TARGET_NR_rt_sigtimedwait) || \
1134 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1135 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1136 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1137 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1138 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1139 defined(TARGET_NR_timer_settime) || \
1140 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1141 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1142 abi_ulong target_addr
)
1144 struct target_timespec
*target_ts
;
1146 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1147 return -TARGET_EFAULT
;
1149 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1150 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1151 unlock_user_struct(target_ts
, target_addr
, 0);
1156 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1157 defined(TARGET_NR_timer_settime64) || \
1158 defined(TARGET_NR_mq_timedsend_time64) || \
1159 defined(TARGET_NR_mq_timedreceive_time64) || \
1160 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1161 defined(TARGET_NR_clock_nanosleep_time64) || \
1162 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1163 defined(TARGET_NR_utimensat) || \
1164 defined(TARGET_NR_utimensat_time64) || \
1165 defined(TARGET_NR_semtimedop_time64) || \
1166 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1167 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1168 abi_ulong target_addr
)
1170 struct target__kernel_timespec
*target_ts
;
1172 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1173 return -TARGET_EFAULT
;
1175 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1176 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1177 /* in 32bit mode, this drops the padding */
1178 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1179 unlock_user_struct(target_ts
, target_addr
, 0);
1184 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1185 struct timespec
*host_ts
)
1187 struct target_timespec
*target_ts
;
1189 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1190 return -TARGET_EFAULT
;
1192 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1193 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1194 unlock_user_struct(target_ts
, target_addr
, 1);
1198 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1199 struct timespec
*host_ts
)
1201 struct target__kernel_timespec
*target_ts
;
1203 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1204 return -TARGET_EFAULT
;
1206 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1207 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1208 unlock_user_struct(target_ts
, target_addr
, 1);
1212 #if defined(TARGET_NR_gettimeofday)
1213 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1214 struct timezone
*tz
)
1216 struct target_timezone
*target_tz
;
1218 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1219 return -TARGET_EFAULT
;
1222 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1223 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1225 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1231 #if defined(TARGET_NR_settimeofday)
1232 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1233 abi_ulong target_tz_addr
)
1235 struct target_timezone
*target_tz
;
1237 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1238 return -TARGET_EFAULT
;
1241 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1242 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1244 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1250 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1253 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1254 abi_ulong target_mq_attr_addr
)
1256 struct target_mq_attr
*target_mq_attr
;
1258 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1259 target_mq_attr_addr
, 1))
1260 return -TARGET_EFAULT
;
1262 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1263 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1264 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1265 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1267 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1272 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1273 const struct mq_attr
*attr
)
1275 struct target_mq_attr
*target_mq_attr
;
1277 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1278 target_mq_attr_addr
, 0))
1279 return -TARGET_EFAULT
;
1281 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1282 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1283 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1284 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1286 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1292 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1293 /* do_select() must return target values and target errnos. */
1294 static abi_long
do_select(int n
,
1295 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1296 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1298 fd_set rfds
, wfds
, efds
;
1299 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1301 struct timespec ts
, *ts_ptr
;
1304 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1308 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1312 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1317 if (target_tv_addr
) {
1318 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1319 return -TARGET_EFAULT
;
1320 ts
.tv_sec
= tv
.tv_sec
;
1321 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1327 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1330 if (!is_error(ret
)) {
1331 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1332 return -TARGET_EFAULT
;
1333 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1334 return -TARGET_EFAULT
;
1335 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1336 return -TARGET_EFAULT
;
1338 if (target_tv_addr
) {
1339 tv
.tv_sec
= ts
.tv_sec
;
1340 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1341 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1342 return -TARGET_EFAULT
;
1350 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1351 static abi_long
do_old_select(abi_ulong arg1
)
1353 struct target_sel_arg_struct
*sel
;
1354 abi_ulong inp
, outp
, exp
, tvp
;
1357 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1358 return -TARGET_EFAULT
;
1361 nsel
= tswapal(sel
->n
);
1362 inp
= tswapal(sel
->inp
);
1363 outp
= tswapal(sel
->outp
);
1364 exp
= tswapal(sel
->exp
);
1365 tvp
= tswapal(sel
->tvp
);
1367 unlock_user_struct(sel
, arg1
, 0);
1369 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1374 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1375 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1376 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1379 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1380 fd_set rfds
, wfds
, efds
;
1381 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1382 struct timespec ts
, *ts_ptr
;
1386 * The 6th arg is actually two args smashed together,
1387 * so we cannot use the C library.
1394 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1402 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1406 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1410 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1416 * This takes a timespec, and not a timeval, so we cannot
1417 * use the do_select() helper ...
1421 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1422 return -TARGET_EFAULT
;
1425 if (target_to_host_timespec(&ts
, ts_addr
)) {
1426 return -TARGET_EFAULT
;
1434 /* Extract the two packed args for the sigset */
1437 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1439 return -TARGET_EFAULT
;
1441 arg_sigset
= tswapal(arg7
[0]);
1442 arg_sigsize
= tswapal(arg7
[1]);
1443 unlock_user(arg7
, arg6
, 0);
1446 ret
= process_sigsuspend_mask(&sig
.set
, arg_sigset
, arg_sigsize
);
1451 sig
.size
= SIGSET_T_SIZE
;
1455 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1459 finish_sigsuspend_mask(ret
);
1462 if (!is_error(ret
)) {
1463 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1464 return -TARGET_EFAULT
;
1466 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1467 return -TARGET_EFAULT
;
1469 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1470 return -TARGET_EFAULT
;
1473 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1474 return -TARGET_EFAULT
;
1477 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1478 return -TARGET_EFAULT
;
1486 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1487 defined(TARGET_NR_ppoll_time64)
1488 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1489 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1491 struct target_pollfd
*target_pfd
;
1492 unsigned int nfds
= arg2
;
1500 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1501 return -TARGET_EINVAL
;
1503 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1504 sizeof(struct target_pollfd
) * nfds
, 1);
1506 return -TARGET_EFAULT
;
1509 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1510 for (i
= 0; i
< nfds
; i
++) {
1511 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1512 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1516 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1517 sigset_t
*set
= NULL
;
1521 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1522 unlock_user(target_pfd
, arg1
, 0);
1523 return -TARGET_EFAULT
;
1526 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1527 unlock_user(target_pfd
, arg1
, 0);
1528 return -TARGET_EFAULT
;
1536 ret
= process_sigsuspend_mask(&set
, arg4
, arg5
);
1538 unlock_user(target_pfd
, arg1
, 0);
1543 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1544 set
, SIGSET_T_SIZE
));
1547 finish_sigsuspend_mask(ret
);
1549 if (!is_error(ret
) && arg3
) {
1551 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1552 return -TARGET_EFAULT
;
1555 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1556 return -TARGET_EFAULT
;
1561 struct timespec ts
, *pts
;
1564 /* Convert ms to secs, ns */
1565 ts
.tv_sec
= arg3
/ 1000;
1566 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1569 /* -ve poll() timeout means "infinite" */
1572 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1575 if (!is_error(ret
)) {
1576 for (i
= 0; i
< nfds
; i
++) {
1577 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1580 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
1585 static abi_long
do_pipe(CPUArchState
*cpu_env
, abi_ulong pipedes
,
1586 int flags
, int is_pipe2
)
1590 ret
= pipe2(host_pipe
, flags
);
1593 return get_errno(ret
);
1595 /* Several targets have special calling conventions for the original
1596 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1598 #if defined(TARGET_ALPHA)
1599 cpu_env
->ir
[IR_A4
] = host_pipe
[1];
1600 return host_pipe
[0];
1601 #elif defined(TARGET_MIPS)
1602 cpu_env
->active_tc
.gpr
[3] = host_pipe
[1];
1603 return host_pipe
[0];
1604 #elif defined(TARGET_SH4)
1605 cpu_env
->gregs
[1] = host_pipe
[1];
1606 return host_pipe
[0];
1607 #elif defined(TARGET_SPARC)
1608 cpu_env
->regwptr
[1] = host_pipe
[1];
1609 return host_pipe
[0];
1613 if (put_user_s32(host_pipe
[0], pipedes
)
1614 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(abi_int
)))
1615 return -TARGET_EFAULT
;
1616 return get_errno(ret
);
1619 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1620 abi_ulong target_addr
,
1623 struct target_ip_mreqn
*target_smreqn
;
1625 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1627 return -TARGET_EFAULT
;
1628 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1629 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1630 if (len
== sizeof(struct target_ip_mreqn
))
1631 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1632 unlock_user(target_smreqn
, target_addr
, 0);
1637 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1638 abi_ulong target_addr
,
1641 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1642 sa_family_t sa_family
;
1643 struct target_sockaddr
*target_saddr
;
1645 if (fd_trans_target_to_host_addr(fd
)) {
1646 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1649 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1651 return -TARGET_EFAULT
;
1653 sa_family
= tswap16(target_saddr
->sa_family
);
1655 /* Oops. The caller might send a incomplete sun_path; sun_path
1656 * must be terminated by \0 (see the manual page), but
1657 * unfortunately it is quite common to specify sockaddr_un
1658 * length as "strlen(x->sun_path)" while it should be
1659 * "strlen(...) + 1". We'll fix that here if needed.
1660 * Linux kernel has a similar feature.
1663 if (sa_family
== AF_UNIX
) {
1664 if (len
< unix_maxlen
&& len
> 0) {
1665 char *cp
= (char*)target_saddr
;
1667 if ( cp
[len
-1] && !cp
[len
] )
1670 if (len
> unix_maxlen
)
1674 memcpy(addr
, target_saddr
, len
);
1675 addr
->sa_family
= sa_family
;
1676 if (sa_family
== AF_NETLINK
) {
1677 struct sockaddr_nl
*nladdr
;
1679 nladdr
= (struct sockaddr_nl
*)addr
;
1680 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1681 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1682 } else if (sa_family
== AF_PACKET
) {
1683 struct target_sockaddr_ll
*lladdr
;
1685 lladdr
= (struct target_sockaddr_ll
*)addr
;
1686 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1687 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1688 } else if (sa_family
== AF_INET6
) {
1689 struct sockaddr_in6
*in6addr
;
1691 in6addr
= (struct sockaddr_in6
*)addr
;
1692 in6addr
->sin6_scope_id
= tswap32(in6addr
->sin6_scope_id
);
1694 unlock_user(target_saddr
, target_addr
, 0);
1699 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1700 struct sockaddr
*addr
,
1703 struct target_sockaddr
*target_saddr
;
1710 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1712 return -TARGET_EFAULT
;
1713 memcpy(target_saddr
, addr
, len
);
1714 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1715 sizeof(target_saddr
->sa_family
)) {
1716 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1718 if (addr
->sa_family
== AF_NETLINK
&&
1719 len
>= sizeof(struct target_sockaddr_nl
)) {
1720 struct target_sockaddr_nl
*target_nl
=
1721 (struct target_sockaddr_nl
*)target_saddr
;
1722 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1723 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1724 } else if (addr
->sa_family
== AF_PACKET
) {
1725 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1726 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1727 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1728 } else if (addr
->sa_family
== AF_INET6
&&
1729 len
>= sizeof(struct target_sockaddr_in6
)) {
1730 struct target_sockaddr_in6
*target_in6
=
1731 (struct target_sockaddr_in6
*)target_saddr
;
1732 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1734 unlock_user(target_saddr
, target_addr
, len
);
1739 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1740 struct target_msghdr
*target_msgh
)
1742 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1743 abi_long msg_controllen
;
1744 abi_ulong target_cmsg_addr
;
1745 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1746 socklen_t space
= 0;
1748 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1749 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1751 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1752 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1753 target_cmsg_start
= target_cmsg
;
1755 return -TARGET_EFAULT
;
1757 while (cmsg
&& target_cmsg
) {
1758 void *data
= CMSG_DATA(cmsg
);
1759 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1761 int len
= tswapal(target_cmsg
->cmsg_len
)
1762 - sizeof(struct target_cmsghdr
);
1764 space
+= CMSG_SPACE(len
);
1765 if (space
> msgh
->msg_controllen
) {
1766 space
-= CMSG_SPACE(len
);
1767 /* This is a QEMU bug, since we allocated the payload
1768 * area ourselves (unlike overflow in host-to-target
1769 * conversion, which is just the guest giving us a buffer
1770 * that's too small). It can't happen for the payload types
1771 * we currently support; if it becomes an issue in future
1772 * we would need to improve our allocation strategy to
1773 * something more intelligent than "twice the size of the
1774 * target buffer we're reading from".
1776 qemu_log_mask(LOG_UNIMP
,
1777 ("Unsupported ancillary data %d/%d: "
1778 "unhandled msg size\n"),
1779 tswap32(target_cmsg
->cmsg_level
),
1780 tswap32(target_cmsg
->cmsg_type
));
1784 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1785 cmsg
->cmsg_level
= SOL_SOCKET
;
1787 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1789 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1790 cmsg
->cmsg_len
= CMSG_LEN(len
);
1792 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1793 int *fd
= (int *)data
;
1794 int *target_fd
= (int *)target_data
;
1795 int i
, numfds
= len
/ sizeof(int);
1797 for (i
= 0; i
< numfds
; i
++) {
1798 __get_user(fd
[i
], target_fd
+ i
);
1800 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1801 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1802 struct ucred
*cred
= (struct ucred
*)data
;
1803 struct target_ucred
*target_cred
=
1804 (struct target_ucred
*)target_data
;
1806 __get_user(cred
->pid
, &target_cred
->pid
);
1807 __get_user(cred
->uid
, &target_cred
->uid
);
1808 __get_user(cred
->gid
, &target_cred
->gid
);
1809 } else if (cmsg
->cmsg_level
== SOL_ALG
) {
1810 uint32_t *dst
= (uint32_t *)data
;
1812 memcpy(dst
, target_data
, len
);
1813 /* fix endianess of first 32-bit word */
1814 if (len
>= sizeof(uint32_t)) {
1815 *dst
= tswap32(*dst
);
1818 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1819 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1820 memcpy(data
, target_data
, len
);
1823 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1824 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1827 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1829 msgh
->msg_controllen
= space
;
1833 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1834 struct msghdr
*msgh
)
1836 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1837 abi_long msg_controllen
;
1838 abi_ulong target_cmsg_addr
;
1839 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1840 socklen_t space
= 0;
1842 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1843 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1845 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1846 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1847 target_cmsg_start
= target_cmsg
;
1849 return -TARGET_EFAULT
;
1851 while (cmsg
&& target_cmsg
) {
1852 void *data
= CMSG_DATA(cmsg
);
1853 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1855 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1856 int tgt_len
, tgt_space
;
1858 /* We never copy a half-header but may copy half-data;
1859 * this is Linux's behaviour in put_cmsg(). Note that
1860 * truncation here is a guest problem (which we report
1861 * to the guest via the CTRUNC bit), unlike truncation
1862 * in target_to_host_cmsg, which is a QEMU bug.
1864 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1865 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1869 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1870 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1872 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1874 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1876 /* Payload types which need a different size of payload on
1877 * the target must adjust tgt_len here.
1880 switch (cmsg
->cmsg_level
) {
1882 switch (cmsg
->cmsg_type
) {
1884 tgt_len
= sizeof(struct target_timeval
);
1894 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1895 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1896 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1899 /* We must now copy-and-convert len bytes of payload
1900 * into tgt_len bytes of destination space. Bear in mind
1901 * that in both source and destination we may be dealing
1902 * with a truncated value!
1904 switch (cmsg
->cmsg_level
) {
1906 switch (cmsg
->cmsg_type
) {
1909 int *fd
= (int *)data
;
1910 int *target_fd
= (int *)target_data
;
1911 int i
, numfds
= tgt_len
/ sizeof(int);
1913 for (i
= 0; i
< numfds
; i
++) {
1914 __put_user(fd
[i
], target_fd
+ i
);
1920 struct timeval
*tv
= (struct timeval
*)data
;
1921 struct target_timeval
*target_tv
=
1922 (struct target_timeval
*)target_data
;
1924 if (len
!= sizeof(struct timeval
) ||
1925 tgt_len
!= sizeof(struct target_timeval
)) {
1929 /* copy struct timeval to target */
1930 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1931 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1934 case SCM_CREDENTIALS
:
1936 struct ucred
*cred
= (struct ucred
*)data
;
1937 struct target_ucred
*target_cred
=
1938 (struct target_ucred
*)target_data
;
1940 __put_user(cred
->pid
, &target_cred
->pid
);
1941 __put_user(cred
->uid
, &target_cred
->uid
);
1942 __put_user(cred
->gid
, &target_cred
->gid
);
1951 switch (cmsg
->cmsg_type
) {
1954 uint32_t *v
= (uint32_t *)data
;
1955 uint32_t *t_int
= (uint32_t *)target_data
;
1957 if (len
!= sizeof(uint32_t) ||
1958 tgt_len
!= sizeof(uint32_t)) {
1961 __put_user(*v
, t_int
);
1967 struct sock_extended_err ee
;
1968 struct sockaddr_in offender
;
1970 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1971 struct errhdr_t
*target_errh
=
1972 (struct errhdr_t
*)target_data
;
1974 if (len
!= sizeof(struct errhdr_t
) ||
1975 tgt_len
!= sizeof(struct errhdr_t
)) {
1978 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1979 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1980 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1981 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1982 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1983 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1984 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1985 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1986 (void *) &errh
->offender
, sizeof(errh
->offender
));
1995 switch (cmsg
->cmsg_type
) {
1998 uint32_t *v
= (uint32_t *)data
;
1999 uint32_t *t_int
= (uint32_t *)target_data
;
2001 if (len
!= sizeof(uint32_t) ||
2002 tgt_len
!= sizeof(uint32_t)) {
2005 __put_user(*v
, t_int
);
2011 struct sock_extended_err ee
;
2012 struct sockaddr_in6 offender
;
2014 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2015 struct errhdr6_t
*target_errh
=
2016 (struct errhdr6_t
*)target_data
;
2018 if (len
!= sizeof(struct errhdr6_t
) ||
2019 tgt_len
!= sizeof(struct errhdr6_t
)) {
2022 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2023 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2024 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2025 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2026 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2027 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2028 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2029 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2030 (void *) &errh
->offender
, sizeof(errh
->offender
));
2040 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2041 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2042 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2043 if (tgt_len
> len
) {
2044 memset(target_data
+ len
, 0, tgt_len
- len
);
2048 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2049 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2050 if (msg_controllen
< tgt_space
) {
2051 tgt_space
= msg_controllen
;
2053 msg_controllen
-= tgt_space
;
2055 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2056 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2059 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2061 target_msgh
->msg_controllen
= tswapal(space
);
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2067 abi_ulong optval_addr
, socklen_t optlen
)
2071 struct ip_mreqn
*ip_mreq
;
2072 struct ip_mreq_source
*ip_mreq_source
;
2077 /* TCP and UDP options all take an 'int' value. */
2078 if (optlen
< sizeof(uint32_t))
2079 return -TARGET_EINVAL
;
2081 if (get_user_u32(val
, optval_addr
))
2082 return -TARGET_EFAULT
;
2083 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2090 case IP_ROUTER_ALERT
:
2094 case IP_MTU_DISCOVER
:
2101 case IP_MULTICAST_TTL
:
2102 case IP_MULTICAST_LOOP
:
2104 if (optlen
>= sizeof(uint32_t)) {
2105 if (get_user_u32(val
, optval_addr
))
2106 return -TARGET_EFAULT
;
2107 } else if (optlen
>= 1) {
2108 if (get_user_u8(val
, optval_addr
))
2109 return -TARGET_EFAULT
;
2111 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2113 case IP_ADD_MEMBERSHIP
:
2114 case IP_DROP_MEMBERSHIP
:
2115 if (optlen
< sizeof (struct target_ip_mreq
) ||
2116 optlen
> sizeof (struct target_ip_mreqn
))
2117 return -TARGET_EINVAL
;
2119 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2120 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2121 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2124 case IP_BLOCK_SOURCE
:
2125 case IP_UNBLOCK_SOURCE
:
2126 case IP_ADD_SOURCE_MEMBERSHIP
:
2127 case IP_DROP_SOURCE_MEMBERSHIP
:
2128 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2129 return -TARGET_EINVAL
;
2131 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2132 if (!ip_mreq_source
) {
2133 return -TARGET_EFAULT
;
2135 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2136 unlock_user (ip_mreq_source
, optval_addr
, 0);
2145 case IPV6_MTU_DISCOVER
:
2148 case IPV6_RECVPKTINFO
:
2149 case IPV6_UNICAST_HOPS
:
2150 case IPV6_MULTICAST_HOPS
:
2151 case IPV6_MULTICAST_LOOP
:
2153 case IPV6_RECVHOPLIMIT
:
2154 case IPV6_2292HOPLIMIT
:
2157 case IPV6_2292PKTINFO
:
2158 case IPV6_RECVTCLASS
:
2159 case IPV6_RECVRTHDR
:
2160 case IPV6_2292RTHDR
:
2161 case IPV6_RECVHOPOPTS
:
2162 case IPV6_2292HOPOPTS
:
2163 case IPV6_RECVDSTOPTS
:
2164 case IPV6_2292DSTOPTS
:
2166 case IPV6_ADDR_PREFERENCES
:
2167 #ifdef IPV6_RECVPATHMTU
2168 case IPV6_RECVPATHMTU
:
2170 #ifdef IPV6_TRANSPARENT
2171 case IPV6_TRANSPARENT
:
2173 #ifdef IPV6_FREEBIND
2176 #ifdef IPV6_RECVORIGDSTADDR
2177 case IPV6_RECVORIGDSTADDR
:
2180 if (optlen
< sizeof(uint32_t)) {
2181 return -TARGET_EINVAL
;
2183 if (get_user_u32(val
, optval_addr
)) {
2184 return -TARGET_EFAULT
;
2186 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2187 &val
, sizeof(val
)));
2191 struct in6_pktinfo pki
;
2193 if (optlen
< sizeof(pki
)) {
2194 return -TARGET_EINVAL
;
2197 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2198 return -TARGET_EFAULT
;
2201 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2203 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2204 &pki
, sizeof(pki
)));
2207 case IPV6_ADD_MEMBERSHIP
:
2208 case IPV6_DROP_MEMBERSHIP
:
2210 struct ipv6_mreq ipv6mreq
;
2212 if (optlen
< sizeof(ipv6mreq
)) {
2213 return -TARGET_EINVAL
;
2216 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2217 return -TARGET_EFAULT
;
2220 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2222 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2223 &ipv6mreq
, sizeof(ipv6mreq
)));
2234 struct icmp6_filter icmp6f
;
2236 if (optlen
> sizeof(icmp6f
)) {
2237 optlen
= sizeof(icmp6f
);
2240 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2241 return -TARGET_EFAULT
;
2244 for (val
= 0; val
< 8; val
++) {
2245 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2248 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2260 /* those take an u32 value */
2261 if (optlen
< sizeof(uint32_t)) {
2262 return -TARGET_EINVAL
;
2265 if (get_user_u32(val
, optval_addr
)) {
2266 return -TARGET_EFAULT
;
2268 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2269 &val
, sizeof(val
)));
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2281 char *alg_key
= g_malloc(optlen
);
2284 return -TARGET_ENOMEM
;
2286 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2288 return -TARGET_EFAULT
;
2290 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2295 case ALG_SET_AEAD_AUTHSIZE
:
2297 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2306 case TARGET_SOL_SOCKET
:
2308 case TARGET_SO_RCVTIMEO
:
2312 optname
= SO_RCVTIMEO
;
2315 if (optlen
!= sizeof(struct target_timeval
)) {
2316 return -TARGET_EINVAL
;
2319 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2320 return -TARGET_EFAULT
;
2323 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2327 case TARGET_SO_SNDTIMEO
:
2328 optname
= SO_SNDTIMEO
;
2330 case TARGET_SO_ATTACH_FILTER
:
2332 struct target_sock_fprog
*tfprog
;
2333 struct target_sock_filter
*tfilter
;
2334 struct sock_fprog fprog
;
2335 struct sock_filter
*filter
;
2338 if (optlen
!= sizeof(*tfprog
)) {
2339 return -TARGET_EINVAL
;
2341 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2342 return -TARGET_EFAULT
;
2344 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2345 tswapal(tfprog
->filter
), 0)) {
2346 unlock_user_struct(tfprog
, optval_addr
, 1);
2347 return -TARGET_EFAULT
;
2350 fprog
.len
= tswap16(tfprog
->len
);
2351 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2352 if (filter
== NULL
) {
2353 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2354 unlock_user_struct(tfprog
, optval_addr
, 1);
2355 return -TARGET_ENOMEM
;
2357 for (i
= 0; i
< fprog
.len
; i
++) {
2358 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2359 filter
[i
].jt
= tfilter
[i
].jt
;
2360 filter
[i
].jf
= tfilter
[i
].jf
;
2361 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2363 fprog
.filter
= filter
;
2365 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2366 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2369 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2370 unlock_user_struct(tfprog
, optval_addr
, 1);
2373 case TARGET_SO_BINDTODEVICE
:
2375 char *dev_ifname
, *addr_ifname
;
2377 if (optlen
> IFNAMSIZ
- 1) {
2378 optlen
= IFNAMSIZ
- 1;
2380 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2382 return -TARGET_EFAULT
;
2384 optname
= SO_BINDTODEVICE
;
2385 addr_ifname
= alloca(IFNAMSIZ
);
2386 memcpy(addr_ifname
, dev_ifname
, optlen
);
2387 addr_ifname
[optlen
] = 0;
2388 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2389 addr_ifname
, optlen
));
2390 unlock_user (dev_ifname
, optval_addr
, 0);
2393 case TARGET_SO_LINGER
:
2396 struct target_linger
*tlg
;
2398 if (optlen
!= sizeof(struct target_linger
)) {
2399 return -TARGET_EINVAL
;
2401 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2402 return -TARGET_EFAULT
;
2404 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2405 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2406 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2408 unlock_user_struct(tlg
, optval_addr
, 0);
2411 /* Options with 'int' argument. */
2412 case TARGET_SO_DEBUG
:
2415 case TARGET_SO_REUSEADDR
:
2416 optname
= SO_REUSEADDR
;
2419 case TARGET_SO_REUSEPORT
:
2420 optname
= SO_REUSEPORT
;
2423 case TARGET_SO_TYPE
:
2426 case TARGET_SO_ERROR
:
2429 case TARGET_SO_DONTROUTE
:
2430 optname
= SO_DONTROUTE
;
2432 case TARGET_SO_BROADCAST
:
2433 optname
= SO_BROADCAST
;
2435 case TARGET_SO_SNDBUF
:
2436 optname
= SO_SNDBUF
;
2438 case TARGET_SO_SNDBUFFORCE
:
2439 optname
= SO_SNDBUFFORCE
;
2441 case TARGET_SO_RCVBUF
:
2442 optname
= SO_RCVBUF
;
2444 case TARGET_SO_RCVBUFFORCE
:
2445 optname
= SO_RCVBUFFORCE
;
2447 case TARGET_SO_KEEPALIVE
:
2448 optname
= SO_KEEPALIVE
;
2450 case TARGET_SO_OOBINLINE
:
2451 optname
= SO_OOBINLINE
;
2453 case TARGET_SO_NO_CHECK
:
2454 optname
= SO_NO_CHECK
;
2456 case TARGET_SO_PRIORITY
:
2457 optname
= SO_PRIORITY
;
2460 case TARGET_SO_BSDCOMPAT
:
2461 optname
= SO_BSDCOMPAT
;
2464 case TARGET_SO_PASSCRED
:
2465 optname
= SO_PASSCRED
;
2467 case TARGET_SO_PASSSEC
:
2468 optname
= SO_PASSSEC
;
2470 case TARGET_SO_TIMESTAMP
:
2471 optname
= SO_TIMESTAMP
;
2473 case TARGET_SO_RCVLOWAT
:
2474 optname
= SO_RCVLOWAT
;
2479 if (optlen
< sizeof(uint32_t))
2480 return -TARGET_EINVAL
;
2482 if (get_user_u32(val
, optval_addr
))
2483 return -TARGET_EFAULT
;
2484 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2489 case NETLINK_PKTINFO
:
2490 case NETLINK_ADD_MEMBERSHIP
:
2491 case NETLINK_DROP_MEMBERSHIP
:
2492 case NETLINK_BROADCAST_ERROR
:
2493 case NETLINK_NO_ENOBUFS
:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495 case NETLINK_LISTEN_ALL_NSID
:
2496 case NETLINK_CAP_ACK
:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499 case NETLINK_EXT_ACK
:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502 case NETLINK_GET_STRICT_CHK
:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2509 if (optlen
< sizeof(uint32_t)) {
2510 return -TARGET_EINVAL
;
2512 if (get_user_u32(val
, optval_addr
)) {
2513 return -TARGET_EFAULT
;
2515 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2518 #endif /* SOL_NETLINK */
2521 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2523 ret
= -TARGET_ENOPROTOOPT
;
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2530 abi_ulong optval_addr
, abi_ulong optlen
)
2537 case TARGET_SOL_SOCKET
:
2540 /* These don't just return a single integer */
2541 case TARGET_SO_PEERNAME
:
2543 case TARGET_SO_RCVTIMEO
: {
2547 optname
= SO_RCVTIMEO
;
2550 if (get_user_u32(len
, optlen
)) {
2551 return -TARGET_EFAULT
;
2554 return -TARGET_EINVAL
;
2558 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2563 if (len
> sizeof(struct target_timeval
)) {
2564 len
= sizeof(struct target_timeval
);
2566 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2567 return -TARGET_EFAULT
;
2569 if (put_user_u32(len
, optlen
)) {
2570 return -TARGET_EFAULT
;
2574 case TARGET_SO_SNDTIMEO
:
2575 optname
= SO_SNDTIMEO
;
2577 case TARGET_SO_PEERCRED
: {
2580 struct target_ucred
*tcr
;
2582 if (get_user_u32(len
, optlen
)) {
2583 return -TARGET_EFAULT
;
2586 return -TARGET_EINVAL
;
2590 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2598 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2599 return -TARGET_EFAULT
;
2601 __put_user(cr
.pid
, &tcr
->pid
);
2602 __put_user(cr
.uid
, &tcr
->uid
);
2603 __put_user(cr
.gid
, &tcr
->gid
);
2604 unlock_user_struct(tcr
, optval_addr
, 1);
2605 if (put_user_u32(len
, optlen
)) {
2606 return -TARGET_EFAULT
;
2610 case TARGET_SO_PEERSEC
: {
2613 if (get_user_u32(len
, optlen
)) {
2614 return -TARGET_EFAULT
;
2617 return -TARGET_EINVAL
;
2619 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2621 return -TARGET_EFAULT
;
2624 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2626 if (put_user_u32(lv
, optlen
)) {
2627 ret
= -TARGET_EFAULT
;
2629 unlock_user(name
, optval_addr
, lv
);
2632 case TARGET_SO_LINGER
:
2636 struct target_linger
*tlg
;
2638 if (get_user_u32(len
, optlen
)) {
2639 return -TARGET_EFAULT
;
2642 return -TARGET_EINVAL
;
2646 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2654 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2655 return -TARGET_EFAULT
;
2657 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2658 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2659 unlock_user_struct(tlg
, optval_addr
, 1);
2660 if (put_user_u32(len
, optlen
)) {
2661 return -TARGET_EFAULT
;
2665 /* Options with 'int' argument. */
2666 case TARGET_SO_DEBUG
:
2669 case TARGET_SO_REUSEADDR
:
2670 optname
= SO_REUSEADDR
;
2673 case TARGET_SO_REUSEPORT
:
2674 optname
= SO_REUSEPORT
;
2677 case TARGET_SO_TYPE
:
2680 case TARGET_SO_ERROR
:
2683 case TARGET_SO_DONTROUTE
:
2684 optname
= SO_DONTROUTE
;
2686 case TARGET_SO_BROADCAST
:
2687 optname
= SO_BROADCAST
;
2689 case TARGET_SO_SNDBUF
:
2690 optname
= SO_SNDBUF
;
2692 case TARGET_SO_RCVBUF
:
2693 optname
= SO_RCVBUF
;
2695 case TARGET_SO_KEEPALIVE
:
2696 optname
= SO_KEEPALIVE
;
2698 case TARGET_SO_OOBINLINE
:
2699 optname
= SO_OOBINLINE
;
2701 case TARGET_SO_NO_CHECK
:
2702 optname
= SO_NO_CHECK
;
2704 case TARGET_SO_PRIORITY
:
2705 optname
= SO_PRIORITY
;
2708 case TARGET_SO_BSDCOMPAT
:
2709 optname
= SO_BSDCOMPAT
;
2712 case TARGET_SO_PASSCRED
:
2713 optname
= SO_PASSCRED
;
2715 case TARGET_SO_TIMESTAMP
:
2716 optname
= SO_TIMESTAMP
;
2718 case TARGET_SO_RCVLOWAT
:
2719 optname
= SO_RCVLOWAT
;
2721 case TARGET_SO_ACCEPTCONN
:
2722 optname
= SO_ACCEPTCONN
;
2724 case TARGET_SO_PROTOCOL
:
2725 optname
= SO_PROTOCOL
;
2727 case TARGET_SO_DOMAIN
:
2728 optname
= SO_DOMAIN
;
2736 /* TCP and UDP options all take an 'int' value. */
2738 if (get_user_u32(len
, optlen
))
2739 return -TARGET_EFAULT
;
2741 return -TARGET_EINVAL
;
2743 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2748 val
= host_to_target_sock_type(val
);
2751 val
= host_to_target_errno(val
);
2757 if (put_user_u32(val
, optval_addr
))
2758 return -TARGET_EFAULT
;
2760 if (put_user_u8(val
, optval_addr
))
2761 return -TARGET_EFAULT
;
2763 if (put_user_u32(len
, optlen
))
2764 return -TARGET_EFAULT
;
2771 case IP_ROUTER_ALERT
:
2775 case IP_MTU_DISCOVER
:
2781 case IP_MULTICAST_TTL
:
2782 case IP_MULTICAST_LOOP
:
2783 if (get_user_u32(len
, optlen
))
2784 return -TARGET_EFAULT
;
2786 return -TARGET_EINVAL
;
2788 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2791 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2793 if (put_user_u32(len
, optlen
)
2794 || put_user_u8(val
, optval_addr
))
2795 return -TARGET_EFAULT
;
2797 if (len
> sizeof(int))
2799 if (put_user_u32(len
, optlen
)
2800 || put_user_u32(val
, optval_addr
))
2801 return -TARGET_EFAULT
;
2805 ret
= -TARGET_ENOPROTOOPT
;
2811 case IPV6_MTU_DISCOVER
:
2814 case IPV6_RECVPKTINFO
:
2815 case IPV6_UNICAST_HOPS
:
2816 case IPV6_MULTICAST_HOPS
:
2817 case IPV6_MULTICAST_LOOP
:
2819 case IPV6_RECVHOPLIMIT
:
2820 case IPV6_2292HOPLIMIT
:
2823 case IPV6_2292PKTINFO
:
2824 case IPV6_RECVTCLASS
:
2825 case IPV6_RECVRTHDR
:
2826 case IPV6_2292RTHDR
:
2827 case IPV6_RECVHOPOPTS
:
2828 case IPV6_2292HOPOPTS
:
2829 case IPV6_RECVDSTOPTS
:
2830 case IPV6_2292DSTOPTS
:
2832 case IPV6_ADDR_PREFERENCES
:
2833 #ifdef IPV6_RECVPATHMTU
2834 case IPV6_RECVPATHMTU
:
2836 #ifdef IPV6_TRANSPARENT
2837 case IPV6_TRANSPARENT
:
2839 #ifdef IPV6_FREEBIND
2842 #ifdef IPV6_RECVORIGDSTADDR
2843 case IPV6_RECVORIGDSTADDR
:
2845 if (get_user_u32(len
, optlen
))
2846 return -TARGET_EFAULT
;
2848 return -TARGET_EINVAL
;
2850 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2853 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2855 if (put_user_u32(len
, optlen
)
2856 || put_user_u8(val
, optval_addr
))
2857 return -TARGET_EFAULT
;
2859 if (len
> sizeof(int))
2861 if (put_user_u32(len
, optlen
)
2862 || put_user_u32(val
, optval_addr
))
2863 return -TARGET_EFAULT
;
2867 ret
= -TARGET_ENOPROTOOPT
;
2874 case NETLINK_PKTINFO
:
2875 case NETLINK_BROADCAST_ERROR
:
2876 case NETLINK_NO_ENOBUFS
:
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2878 case NETLINK_LISTEN_ALL_NSID
:
2879 case NETLINK_CAP_ACK
:
2880 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2882 case NETLINK_EXT_ACK
:
2883 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2884 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2885 case NETLINK_GET_STRICT_CHK
:
2886 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2887 if (get_user_u32(len
, optlen
)) {
2888 return -TARGET_EFAULT
;
2890 if (len
!= sizeof(val
)) {
2891 return -TARGET_EINVAL
;
2894 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2898 if (put_user_u32(lv
, optlen
)
2899 || put_user_u32(val
, optval_addr
)) {
2900 return -TARGET_EFAULT
;
2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2904 case NETLINK_LIST_MEMBERSHIPS
:
2908 if (get_user_u32(len
, optlen
)) {
2909 return -TARGET_EFAULT
;
2912 return -TARGET_EINVAL
;
2914 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2915 if (!results
&& len
> 0) {
2916 return -TARGET_EFAULT
;
2919 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2921 unlock_user(results
, optval_addr
, 0);
2924 /* swap host endianess to target endianess. */
2925 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2926 results
[i
] = tswap32(results
[i
]);
2928 if (put_user_u32(lv
, optlen
)) {
2929 return -TARGET_EFAULT
;
2931 unlock_user(results
, optval_addr
, 0);
2934 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2939 #endif /* SOL_NETLINK */
2942 qemu_log_mask(LOG_UNIMP
,
2943 "getsockopt level=%d optname=%d not yet supported\n",
2945 ret
= -TARGET_EOPNOTSUPP
;
2951 /* Convert target low/high pair representing file offset into the host
2952 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2953 * as the kernel doesn't handle them either.
2955 static void target_to_host_low_high(abi_ulong tlow
,
2957 unsigned long *hlow
,
2958 unsigned long *hhigh
)
2960 uint64_t off
= tlow
|
2961 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2962 TARGET_LONG_BITS
/ 2;
2965 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2968 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2969 abi_ulong count
, int copy
)
2971 struct target_iovec
*target_vec
;
2973 abi_ulong total_len
, max_len
;
2976 bool bad_address
= false;
2982 if (count
> IOV_MAX
) {
2987 vec
= g_try_new0(struct iovec
, count
);
2993 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2994 count
* sizeof(struct target_iovec
), 1);
2995 if (target_vec
== NULL
) {
3000 /* ??? If host page size > target page size, this will result in a
3001 value larger than what we can actually support. */
3002 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3005 for (i
= 0; i
< count
; i
++) {
3006 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3007 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3012 } else if (len
== 0) {
3013 /* Zero length pointer is ignored. */
3014 vec
[i
].iov_base
= 0;
3016 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3017 /* If the first buffer pointer is bad, this is a fault. But
3018 * subsequent bad buffers will result in a partial write; this
3019 * is realized by filling the vector with null pointers and
3021 if (!vec
[i
].iov_base
) {
3032 if (len
> max_len
- total_len
) {
3033 len
= max_len
- total_len
;
3036 vec
[i
].iov_len
= len
;
3040 unlock_user(target_vec
, target_addr
, 0);
3045 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3046 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3049 unlock_user(target_vec
, target_addr
, 0);
3056 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3057 abi_ulong count
, int copy
)
3059 struct target_iovec
*target_vec
;
3062 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3063 count
* sizeof(struct target_iovec
), 1);
3065 for (i
= 0; i
< count
; i
++) {
3066 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3067 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3071 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3073 unlock_user(target_vec
, target_addr
, 0);
3079 static inline int target_to_host_sock_type(int *type
)
3082 int target_type
= *type
;
3084 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3085 case TARGET_SOCK_DGRAM
:
3086 host_type
= SOCK_DGRAM
;
3088 case TARGET_SOCK_STREAM
:
3089 host_type
= SOCK_STREAM
;
3092 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3095 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3096 #if defined(SOCK_CLOEXEC)
3097 host_type
|= SOCK_CLOEXEC
;
3099 return -TARGET_EINVAL
;
3102 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3103 #if defined(SOCK_NONBLOCK)
3104 host_type
|= SOCK_NONBLOCK
;
3105 #elif !defined(O_NONBLOCK)
3106 return -TARGET_EINVAL
;
3113 /* Try to emulate socket type flags after socket creation. */
3114 static int sock_flags_fixup(int fd
, int target_type
)
3116 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3117 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3118 int flags
= fcntl(fd
, F_GETFL
);
3119 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3121 return -TARGET_EINVAL
;
3128 /* do_socket() Must return target values and target errnos. */
3129 static abi_long
do_socket(int domain
, int type
, int protocol
)
3131 int target_type
= type
;
3134 ret
= target_to_host_sock_type(&type
);
3139 if (domain
== PF_NETLINK
&& !(
3140 #ifdef CONFIG_RTNETLINK
3141 protocol
== NETLINK_ROUTE
||
3143 protocol
== NETLINK_KOBJECT_UEVENT
||
3144 protocol
== NETLINK_AUDIT
)) {
3145 return -TARGET_EPROTONOSUPPORT
;
3148 if (domain
== AF_PACKET
||
3149 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3150 protocol
= tswap16(protocol
);
3153 ret
= get_errno(socket(domain
, type
, protocol
));
3155 ret
= sock_flags_fixup(ret
, target_type
);
3156 if (type
== SOCK_PACKET
) {
3157 /* Manage an obsolete case :
3158 * if socket type is SOCK_PACKET, bind by name
3160 fd_trans_register(ret
, &target_packet_trans
);
3161 } else if (domain
== PF_NETLINK
) {
3163 #ifdef CONFIG_RTNETLINK
3165 fd_trans_register(ret
, &target_netlink_route_trans
);
3168 case NETLINK_KOBJECT_UEVENT
:
3169 /* nothing to do: messages are strings */
3172 fd_trans_register(ret
, &target_netlink_audit_trans
);
3175 g_assert_not_reached();
3182 /* do_bind() Must return target values and target errnos. */
3183 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3189 if ((int)addrlen
< 0) {
3190 return -TARGET_EINVAL
;
3193 addr
= alloca(addrlen
+1);
3195 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3199 return get_errno(bind(sockfd
, addr
, addrlen
));
3202 /* do_connect() Must return target values and target errnos. */
3203 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3209 if ((int)addrlen
< 0) {
3210 return -TARGET_EINVAL
;
3213 addr
= alloca(addrlen
+1);
3215 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3219 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3222 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3223 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3224 int flags
, int send
)
3230 abi_ulong target_vec
;
3232 if (msgp
->msg_name
) {
3233 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3234 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3235 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3236 tswapal(msgp
->msg_name
),
3238 if (ret
== -TARGET_EFAULT
) {
3239 /* For connected sockets msg_name and msg_namelen must
3240 * be ignored, so returning EFAULT immediately is wrong.
3241 * Instead, pass a bad msg_name to the host kernel, and
3242 * let it decide whether to return EFAULT or not.
3244 msg
.msg_name
= (void *)-1;
3249 msg
.msg_name
= NULL
;
3250 msg
.msg_namelen
= 0;
3252 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3253 msg
.msg_control
= alloca(msg
.msg_controllen
);
3254 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3256 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3258 count
= tswapal(msgp
->msg_iovlen
);
3259 target_vec
= tswapal(msgp
->msg_iov
);
3261 if (count
> IOV_MAX
) {
3262 /* sendrcvmsg returns a different errno for this condition than
3263 * readv/writev, so we must catch it here before lock_iovec() does.
3265 ret
= -TARGET_EMSGSIZE
;
3269 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3270 target_vec
, count
, send
);
3272 ret
= -host_to_target_errno(errno
);
3273 /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3278 msg
.msg_iovlen
= count
;
3282 if (fd_trans_target_to_host_data(fd
)) {
3285 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3286 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3287 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3288 msg
.msg_iov
->iov_len
);
3290 msg
.msg_iov
->iov_base
= host_msg
;
3291 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3295 ret
= target_to_host_cmsg(&msg
, msgp
);
3297 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3301 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3302 if (!is_error(ret
)) {
3304 if (fd_trans_host_to_target_data(fd
)) {
3305 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3306 MIN(msg
.msg_iov
->iov_len
, len
));
3308 if (!is_error(ret
)) {
3309 ret
= host_to_target_cmsg(msgp
, &msg
);
3311 if (!is_error(ret
)) {
3312 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3313 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3314 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3315 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3316 msg
.msg_name
, msg
.msg_namelen
);
3329 unlock_iovec(vec
, target_vec
, count
, !send
);
3335 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3336 int flags
, int send
)
3339 struct target_msghdr
*msgp
;
3341 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3345 return -TARGET_EFAULT
;
3347 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3348 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3352 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3353 * so it might not have this *mmsg-specific flag either.
3355 #ifndef MSG_WAITFORONE
3356 #define MSG_WAITFORONE 0x10000
3359 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3360 unsigned int vlen
, unsigned int flags
,
3363 struct target_mmsghdr
*mmsgp
;
3367 if (vlen
> UIO_MAXIOV
) {
3371 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3373 return -TARGET_EFAULT
;
3376 for (i
= 0; i
< vlen
; i
++) {
3377 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3378 if (is_error(ret
)) {
3381 mmsgp
[i
].msg_len
= tswap32(ret
);
3382 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3383 if (flags
& MSG_WAITFORONE
) {
3384 flags
|= MSG_DONTWAIT
;
3388 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3390 /* Return number of datagrams sent if we sent any at all;
3391 * otherwise return the error.
3399 /* do_accept4() Must return target values and target errnos. */
3400 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3401 abi_ulong target_addrlen_addr
, int flags
)
3403 socklen_t addrlen
, ret_addrlen
;
3408 if (flags
& ~(TARGET_SOCK_CLOEXEC
| TARGET_SOCK_NONBLOCK
)) {
3409 return -TARGET_EINVAL
;
3413 if (flags
& TARGET_SOCK_NONBLOCK
) {
3414 host_flags
|= SOCK_NONBLOCK
;
3416 if (flags
& TARGET_SOCK_CLOEXEC
) {
3417 host_flags
|= SOCK_CLOEXEC
;
3420 if (target_addr
== 0) {
3421 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3424 /* linux returns EFAULT if addrlen pointer is invalid */
3425 if (get_user_u32(addrlen
, target_addrlen_addr
))
3426 return -TARGET_EFAULT
;
3428 if ((int)addrlen
< 0) {
3429 return -TARGET_EINVAL
;
3432 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3433 return -TARGET_EFAULT
;
3436 addr
= alloca(addrlen
);
3438 ret_addrlen
= addrlen
;
3439 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3440 if (!is_error(ret
)) {
3441 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3442 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3443 ret
= -TARGET_EFAULT
;
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3451 abi_ulong target_addrlen_addr
)
3453 socklen_t addrlen
, ret_addrlen
;
3457 if (get_user_u32(addrlen
, target_addrlen_addr
))
3458 return -TARGET_EFAULT
;
3460 if ((int)addrlen
< 0) {
3461 return -TARGET_EINVAL
;
3464 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3465 return -TARGET_EFAULT
;
3468 addr
= alloca(addrlen
);
3470 ret_addrlen
= addrlen
;
3471 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3472 if (!is_error(ret
)) {
3473 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3474 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3475 ret
= -TARGET_EFAULT
;
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3483 abi_ulong target_addrlen_addr
)
3485 socklen_t addrlen
, ret_addrlen
;
3489 if (get_user_u32(addrlen
, target_addrlen_addr
))
3490 return -TARGET_EFAULT
;
3492 if ((int)addrlen
< 0) {
3493 return -TARGET_EINVAL
;
3496 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3497 return -TARGET_EFAULT
;
3500 addr
= alloca(addrlen
);
3502 ret_addrlen
= addrlen
;
3503 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3504 if (!is_error(ret
)) {
3505 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3506 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3507 ret
= -TARGET_EFAULT
;
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3515 abi_ulong target_tab_addr
)
3520 target_to_host_sock_type(&type
);
3522 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3523 if (!is_error(ret
)) {
3524 if (put_user_s32(tab
[0], target_tab_addr
)
3525 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3526 ret
= -TARGET_EFAULT
;
3531 /* do_sendto() Must return target values and target errnos. */
3532 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3533 abi_ulong target_addr
, socklen_t addrlen
)
3537 void *copy_msg
= NULL
;
3540 if ((int)addrlen
< 0) {
3541 return -TARGET_EINVAL
;
3544 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3546 return -TARGET_EFAULT
;
3547 if (fd_trans_target_to_host_data(fd
)) {
3548 copy_msg
= host_msg
;
3549 host_msg
= g_malloc(len
);
3550 memcpy(host_msg
, copy_msg
, len
);
3551 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3557 addr
= alloca(addrlen
+1);
3558 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3562 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3564 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3569 host_msg
= copy_msg
;
3571 unlock_user(host_msg
, msg
, 0);
3575 /* do_recvfrom() Must return target values and target errnos. */
3576 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3577 abi_ulong target_addr
,
3578 abi_ulong target_addrlen
)
3580 socklen_t addrlen
, ret_addrlen
;
3588 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3590 return -TARGET_EFAULT
;
3594 if (get_user_u32(addrlen
, target_addrlen
)) {
3595 ret
= -TARGET_EFAULT
;
3598 if ((int)addrlen
< 0) {
3599 ret
= -TARGET_EINVAL
;
3602 addr
= alloca(addrlen
);
3603 ret_addrlen
= addrlen
;
3604 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3605 addr
, &ret_addrlen
));
3607 addr
= NULL
; /* To keep compiler quiet. */
3608 addrlen
= 0; /* To keep compiler quiet. */
3609 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3611 if (!is_error(ret
)) {
3612 if (fd_trans_host_to_target_data(fd
)) {
3614 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3615 if (is_error(trans
)) {
3621 host_to_target_sockaddr(target_addr
, addr
,
3622 MIN(addrlen
, ret_addrlen
));
3623 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3624 ret
= -TARGET_EFAULT
;
3628 unlock_user(host_msg
, msg
, len
);
3631 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3729 #define N_SHM_REGIONS 32
3731 static struct shm_region
{
3735 } shm_regions
[N_SHM_REGIONS
];
3737 #ifndef TARGET_SEMID64_DS
3738 /* asm-generic version of this struct */
3739 struct target_semid64_ds
3741 struct target_ipc_perm sem_perm
;
3742 abi_ulong sem_otime
;
3743 #if TARGET_ABI_BITS == 32
3744 abi_ulong __unused1
;
3746 abi_ulong sem_ctime
;
3747 #if TARGET_ABI_BITS == 32
3748 abi_ulong __unused2
;
3750 abi_ulong sem_nsems
;
3751 abi_ulong __unused3
;
3752 abi_ulong __unused4
;
3756 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3757 abi_ulong target_addr
)
3759 struct target_ipc_perm
*target_ip
;
3760 struct target_semid64_ds
*target_sd
;
3762 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3763 return -TARGET_EFAULT
;
3764 target_ip
= &(target_sd
->sem_perm
);
3765 host_ip
->__key
= tswap32(target_ip
->__key
);
3766 host_ip
->uid
= tswap32(target_ip
->uid
);
3767 host_ip
->gid
= tswap32(target_ip
->gid
);
3768 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3769 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771 host_ip
->mode
= tswap32(target_ip
->mode
);
3773 host_ip
->mode
= tswap16(target_ip
->mode
);
3775 #if defined(TARGET_PPC)
3776 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3778 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3780 unlock_user_struct(target_sd
, target_addr
, 0);
3784 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3785 struct ipc_perm
*host_ip
)
3787 struct target_ipc_perm
*target_ip
;
3788 struct target_semid64_ds
*target_sd
;
3790 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3791 return -TARGET_EFAULT
;
3792 target_ip
= &(target_sd
->sem_perm
);
3793 target_ip
->__key
= tswap32(host_ip
->__key
);
3794 target_ip
->uid
= tswap32(host_ip
->uid
);
3795 target_ip
->gid
= tswap32(host_ip
->gid
);
3796 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3797 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799 target_ip
->mode
= tswap32(host_ip
->mode
);
3801 target_ip
->mode
= tswap16(host_ip
->mode
);
3803 #if defined(TARGET_PPC)
3804 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3806 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3808 unlock_user_struct(target_sd
, target_addr
, 1);
3812 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3813 abi_ulong target_addr
)
3815 struct target_semid64_ds
*target_sd
;
3817 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3818 return -TARGET_EFAULT
;
3819 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3820 return -TARGET_EFAULT
;
3821 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3822 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3823 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3824 unlock_user_struct(target_sd
, target_addr
, 0);
3828 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3829 struct semid_ds
*host_sd
)
3831 struct target_semid64_ds
*target_sd
;
3833 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3834 return -TARGET_EFAULT
;
3835 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3836 return -TARGET_EFAULT
;
3837 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3838 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3839 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3840 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (semctl IPC_INFO / SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3857 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3858 struct seminfo
*host_seminfo
)
3860 struct target_seminfo
*target_seminfo
;
3861 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3862 return -TARGET_EFAULT
;
3863 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3864 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3865 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3866 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3867 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3868 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3869 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3870 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3871 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3872 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3873 unlock_user_struct(target_seminfo
, target_addr
, 1);
3879 struct semid_ds
*buf
;
3880 unsigned short *array
;
3881 struct seminfo
*__buf
;
3884 union target_semun
{
3891 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3892 abi_ulong target_addr
)
3895 unsigned short *array
;
3897 struct semid_ds semid_ds
;
3900 semun
.buf
= &semid_ds
;
3902 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3904 return get_errno(ret
);
3906 nsems
= semid_ds
.sem_nsems
;
3908 *host_array
= g_try_new(unsigned short, nsems
);
3910 return -TARGET_ENOMEM
;
3912 array
= lock_user(VERIFY_READ
, target_addr
,
3913 nsems
*sizeof(unsigned short), 1);
3915 g_free(*host_array
);
3916 return -TARGET_EFAULT
;
3919 for(i
=0; i
<nsems
; i
++) {
3920 __get_user((*host_array
)[i
], &array
[i
]);
3922 unlock_user(array
, target_addr
, 0);
3927 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3928 unsigned short **host_array
)
3931 unsigned short *array
;
3933 struct semid_ds semid_ds
;
3936 semun
.buf
= &semid_ds
;
3938 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3940 return get_errno(ret
);
3942 nsems
= semid_ds
.sem_nsems
;
3944 array
= lock_user(VERIFY_WRITE
, target_addr
,
3945 nsems
*sizeof(unsigned short), 0);
3947 return -TARGET_EFAULT
;
3949 for(i
=0; i
<nsems
; i
++) {
3950 __put_user((*host_array
)[i
], &array
[i
]);
3952 g_free(*host_array
);
3953 unlock_user(array
, target_addr
, 1);
3958 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3959 abi_ulong target_arg
)
3961 union target_semun target_su
= { .buf
= target_arg
};
3963 struct semid_ds dsarg
;
3964 unsigned short *array
= NULL
;
3965 struct seminfo seminfo
;
3966 abi_long ret
= -TARGET_EINVAL
;
3973 /* In 64 bit cross-endian situations, we will erroneously pick up
3974 * the wrong half of the union for the "val" element. To rectify
3975 * this, the entire 8-byte structure is byteswapped, followed by
3976 * a swap of the 4 byte val field. In other cases, the data is
3977 * already in proper host byte order. */
3978 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3979 target_su
.buf
= tswapal(target_su
.buf
);
3980 arg
.val
= tswap32(target_su
.val
);
3982 arg
.val
= target_su
.val
;
3984 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3988 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3992 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3993 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4000 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4004 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4005 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4011 arg
.__buf
= &seminfo
;
4012 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4013 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4021 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4034 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4035 abi_ulong target_addr
,
4038 struct target_sembuf
*target_sembuf
;
4041 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4042 nsops
*sizeof(struct target_sembuf
), 1);
4044 return -TARGET_EFAULT
;
4046 for(i
=0; i
<nsops
; i
++) {
4047 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4048 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4049 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4052 unlock_user(target_sembuf
, target_addr
, 0);
4057 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4058 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4061 * This macro is required to handle the s390 variants, which passes the
4062 * arguments in a different order than default.
4065 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4066 (__nsops), (__timeout), (__sops)
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069 (__nsops), 0, (__sops), (__timeout)
4072 static inline abi_long
do_semtimedop(int semid
,
4075 abi_long timeout
, bool time64
)
4077 struct sembuf
*sops
;
4078 struct timespec ts
, *pts
= NULL
;
4084 if (target_to_host_timespec64(pts
, timeout
)) {
4085 return -TARGET_EFAULT
;
4088 if (target_to_host_timespec(pts
, timeout
)) {
4089 return -TARGET_EFAULT
;
4094 if (nsops
> TARGET_SEMOPM
) {
4095 return -TARGET_E2BIG
;
4098 sops
= g_new(struct sembuf
, nsops
);
4100 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4102 return -TARGET_EFAULT
;
4105 ret
= -TARGET_ENOSYS
;
4106 #ifdef __NR_semtimedop
4107 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4110 if (ret
== -TARGET_ENOSYS
) {
4111 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4112 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4120 struct target_msqid_ds
4122 struct target_ipc_perm msg_perm
;
4123 abi_ulong msg_stime
;
4124 #if TARGET_ABI_BITS == 32
4125 abi_ulong __unused1
;
4127 abi_ulong msg_rtime
;
4128 #if TARGET_ABI_BITS == 32
4129 abi_ulong __unused2
;
4131 abi_ulong msg_ctime
;
4132 #if TARGET_ABI_BITS == 32
4133 abi_ulong __unused3
;
4135 abi_ulong __msg_cbytes
;
4137 abi_ulong msg_qbytes
;
4138 abi_ulong msg_lspid
;
4139 abi_ulong msg_lrpid
;
4140 abi_ulong __unused4
;
4141 abi_ulong __unused5
;
4144 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4145 abi_ulong target_addr
)
4147 struct target_msqid_ds
*target_md
;
4149 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4150 return -TARGET_EFAULT
;
4151 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4152 return -TARGET_EFAULT
;
4153 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4154 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4155 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4156 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4157 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4158 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4159 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4160 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4161 unlock_user_struct(target_md
, target_addr
, 0);
4165 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4166 struct msqid_ds
*host_md
)
4168 struct target_msqid_ds
*target_md
;
4170 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4171 return -TARGET_EFAULT
;
4172 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4173 return -TARGET_EFAULT
;
4174 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4175 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4176 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4177 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4178 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4179 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4180 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4181 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4182 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (msgctl IPC_INFO / MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4197 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4198 struct msginfo
*host_msginfo
)
4200 struct target_msginfo
*target_msginfo
;
4201 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4202 return -TARGET_EFAULT
;
4203 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4204 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4205 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4206 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4207 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4208 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4209 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4210 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4211 unlock_user_struct(target_msginfo
, target_addr
, 1);
4215 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4217 struct msqid_ds dsarg
;
4218 struct msginfo msginfo
;
4219 abi_long ret
= -TARGET_EINVAL
;
4227 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4228 return -TARGET_EFAULT
;
4229 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4230 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4231 return -TARGET_EFAULT
;
4234 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4238 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4239 if (host_to_target_msginfo(ptr
, &msginfo
))
4240 return -TARGET_EFAULT
;
4247 struct target_msgbuf
{
4252 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4253 ssize_t msgsz
, int msgflg
)
4255 struct target_msgbuf
*target_mb
;
4256 struct msgbuf
*host_mb
;
4260 return -TARGET_EINVAL
;
4263 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4264 return -TARGET_EFAULT
;
4265 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4267 unlock_user_struct(target_mb
, msgp
, 0);
4268 return -TARGET_ENOMEM
;
4270 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4271 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4272 ret
= -TARGET_ENOSYS
;
4274 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4277 if (ret
== -TARGET_ENOSYS
) {
4279 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4282 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4288 unlock_user_struct(target_mb
, msgp
, 0);
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4307 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4308 ssize_t msgsz
, abi_long msgtyp
,
4311 struct target_msgbuf
*target_mb
;
4313 struct msgbuf
*host_mb
;
4317 return -TARGET_EINVAL
;
4320 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4321 return -TARGET_EFAULT
;
4323 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4325 ret
= -TARGET_ENOMEM
;
4328 ret
= -TARGET_ENOSYS
;
4330 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4333 if (ret
== -TARGET_ENOSYS
) {
4334 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4335 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4340 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4341 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4342 if (!target_mtext
) {
4343 ret
= -TARGET_EFAULT
;
4346 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4347 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4350 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4354 unlock_user_struct(target_mb
, msgp
, 1);
4359 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4360 abi_ulong target_addr
)
4362 struct target_shmid_ds
*target_sd
;
4364 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4365 return -TARGET_EFAULT
;
4366 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4367 return -TARGET_EFAULT
;
4368 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4369 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4370 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4371 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4372 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4373 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4374 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4375 unlock_user_struct(target_sd
, target_addr
, 0);
4379 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4380 struct shmid_ds
*host_sd
)
4382 struct target_shmid_ds
*target_sd
;
4384 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4385 return -TARGET_EFAULT
;
4386 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4387 return -TARGET_EFAULT
;
4388 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4389 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4390 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4391 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4392 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4393 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4394 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4395 unlock_user_struct(target_sd
, target_addr
, 1);
4399 struct target_shminfo
{
4407 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4408 struct shminfo
*host_shminfo
)
4410 struct target_shminfo
*target_shminfo
;
4411 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4412 return -TARGET_EFAULT
;
4413 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4414 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4415 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4416 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4417 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4418 unlock_user_struct(target_shminfo
, target_addr
, 1);
4422 struct target_shm_info
{
4427 abi_ulong swap_attempts
;
4428 abi_ulong swap_successes
;
4431 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4432 struct shm_info
*host_shm_info
)
4434 struct target_shm_info
*target_shm_info
;
4435 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4436 return -TARGET_EFAULT
;
4437 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4438 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4439 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4440 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4441 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4442 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4443 unlock_user_struct(target_shm_info
, target_addr
, 1);
4447 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4449 struct shmid_ds dsarg
;
4450 struct shminfo shminfo
;
4451 struct shm_info shm_info
;
4452 abi_long ret
= -TARGET_EINVAL
;
4460 if (target_to_host_shmid_ds(&dsarg
, buf
))
4461 return -TARGET_EFAULT
;
4462 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4463 if (host_to_target_shmid_ds(buf
, &dsarg
))
4464 return -TARGET_EFAULT
;
4467 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4468 if (host_to_target_shminfo(buf
, &shminfo
))
4469 return -TARGET_EFAULT
;
4472 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4473 if (host_to_target_shm_info(buf
, &shm_info
))
4474 return -TARGET_EFAULT
;
4479 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4486 #ifndef TARGET_FORCE_SHMLBA
4487 /* For most architectures, SHMLBA is the same as the page size;
4488 * some architectures have larger values, in which case they should
4489 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4490 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4491 * and defining its own value for SHMLBA.
4493 * The kernel also permits SHMLBA to be set by the architecture to a
4494 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4495 * this means that addresses are rounded to the large size if
4496 * SHM_RND is set but addresses not aligned to that size are not rejected
4497 * as long as they are at least page-aligned. Since the only architecture
4498 * which uses this is ia64 this code doesn't provide for that oddity.
4500 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4502 return TARGET_PAGE_SIZE
;
4506 static abi_ulong
do_shmat(CPUArchState
*cpu_env
, int shmid
,
4507 abi_ulong shmaddr
, int shmflg
)
4509 CPUState
*cpu
= env_cpu(cpu_env
);
4512 struct shmid_ds shm_info
;
4516 /* shmat pointers are always untagged */
4518 /* find out the length of the shared memory segment */
4519 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4520 if (is_error(ret
)) {
4521 /* can't get length, bail out */
4525 shmlba
= target_shmlba(cpu_env
);
4527 if (shmaddr
& (shmlba
- 1)) {
4528 if (shmflg
& SHM_RND
) {
4529 shmaddr
&= ~(shmlba
- 1);
4531 return -TARGET_EINVAL
;
4534 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4535 return -TARGET_EINVAL
;
4541 * We're mapping shared memory, so ensure we generate code for parallel
4542 * execution and flush old translations. This will work up to the level
4543 * supported by the host -- anything that requires EXCP_ATOMIC will not
4544 * be atomic with respect to an external process.
4546 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4547 cpu
->tcg_cflags
|= CF_PARALLEL
;
4552 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4554 abi_ulong mmap_start
;
4556 /* In order to use the host shmat, we need to honor host SHMLBA. */
4557 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4559 if (mmap_start
== -1) {
4561 host_raddr
= (void *)-1;
4563 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4564 shmflg
| SHM_REMAP
);
4567 if (host_raddr
== (void *)-1) {
4569 return get_errno((intptr_t)host_raddr
);
4571 raddr
= h2g((uintptr_t)host_raddr
);
4573 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
- 1,
4574 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4575 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4577 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4578 if (!shm_regions
[i
].in_use
) {
4579 shm_regions
[i
].in_use
= true;
4580 shm_regions
[i
].start
= raddr
;
4581 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4590 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4595 /* shmdt pointers are always untagged */
4599 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4600 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4601 shm_regions
[i
].in_use
= false;
4602 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
- 1, 0);
4606 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old ABI: msgp and msgtyp are packed in a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* the attach address is written back through *third */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4734 /* kernel structure types definitions */
4736 #define STRUCT(name, ...) STRUCT_ ## name,
4737 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4739 #include "syscall_types.h"
4743 #undef STRUCT_SPECIAL
4745 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4746 #define STRUCT_SPECIAL(name)
4747 #include "syscall_types.h"
4749 #undef STRUCT_SPECIAL
4751 #define MAX_STRUCT_SIZE 4096
4753 #ifdef CONFIG_FIEMAP
4754 /* So fiemap access checks don't overflow on 32 bit systems.
4755 * This is very slightly smaller than the limit imposed by
4756 * the underlying kernel.
4758 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4759 / sizeof(struct fiemap_extent))
4761 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4762 int fd
, int cmd
, abi_long arg
)
4764 /* The parameter for this ioctl is a struct fiemap followed
4765 * by an array of struct fiemap_extent whose size is set
4766 * in fiemap->fm_extent_count. The array is filled in by the
4769 int target_size_in
, target_size_out
;
4771 const argtype
*arg_type
= ie
->arg_type
;
4772 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4775 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4779 assert(arg_type
[0] == TYPE_PTR
);
4780 assert(ie
->access
== IOC_RW
);
4782 target_size_in
= thunk_type_size(arg_type
, 0);
4783 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4785 return -TARGET_EFAULT
;
4787 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4788 unlock_user(argptr
, arg
, 0);
4789 fm
= (struct fiemap
*)buf_temp
;
4790 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4791 return -TARGET_EINVAL
;
4794 outbufsz
= sizeof (*fm
) +
4795 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4797 if (outbufsz
> MAX_STRUCT_SIZE
) {
4798 /* We can't fit all the extents into the fixed size buffer.
4799 * Allocate one that is large enough and use it instead.
4801 fm
= g_try_malloc(outbufsz
);
4803 return -TARGET_ENOMEM
;
4805 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4808 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4809 if (!is_error(ret
)) {
4810 target_size_out
= target_size_in
;
4811 /* An extent_count of 0 means we were only counting the extents
4812 * so there are no structs to copy
4814 if (fm
->fm_extent_count
!= 0) {
4815 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4817 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4819 ret
= -TARGET_EFAULT
;
4821 /* Convert the struct fiemap */
4822 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4823 if (fm
->fm_extent_count
!= 0) {
4824 p
= argptr
+ target_size_in
;
4825 /* ...and then all the struct fiemap_extents */
4826 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4827 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4832 unlock_user(argptr
, arg
, target_size_out
);
4842 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4843 int fd
, int cmd
, abi_long arg
)
4845 const argtype
*arg_type
= ie
->arg_type
;
4849 struct ifconf
*host_ifconf
;
4851 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4852 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4853 int target_ifreq_size
;
4858 abi_long target_ifc_buf
;
4862 assert(arg_type
[0] == TYPE_PTR
);
4863 assert(ie
->access
== IOC_RW
);
4866 target_size
= thunk_type_size(arg_type
, 0);
4868 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4870 return -TARGET_EFAULT
;
4871 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4872 unlock_user(argptr
, arg
, 0);
4874 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4875 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4876 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4878 if (target_ifc_buf
!= 0) {
4879 target_ifc_len
= host_ifconf
->ifc_len
;
4880 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4881 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4883 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4884 if (outbufsz
> MAX_STRUCT_SIZE
) {
4886 * We can't fit all the extents into the fixed size buffer.
4887 * Allocate one that is large enough and use it instead.
4889 host_ifconf
= g_try_malloc(outbufsz
);
4891 return -TARGET_ENOMEM
;
4893 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4896 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4898 host_ifconf
->ifc_len
= host_ifc_len
;
4900 host_ifc_buf
= NULL
;
4902 host_ifconf
->ifc_buf
= host_ifc_buf
;
4904 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4905 if (!is_error(ret
)) {
4906 /* convert host ifc_len to target ifc_len */
4908 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4909 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4910 host_ifconf
->ifc_len
= target_ifc_len
;
4912 /* restore target ifc_buf */
4914 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4916 /* copy struct ifconf to target user */
4918 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4920 return -TARGET_EFAULT
;
4921 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4922 unlock_user(argptr
, arg
, target_size
);
4924 if (target_ifc_buf
!= 0) {
4925 /* copy ifreq[] to target user */
4926 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4927 for (i
= 0; i
< nb_ifreq
; i
++) {
4928 thunk_convert(argptr
+ i
* target_ifreq_size
,
4929 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4930 ifreq_arg_type
, THUNK_TARGET
);
4932 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4937 g_free(host_ifconf
);
4943 #if defined(CONFIG_USBFS)
4944 #if HOST_LONG_BITS > 64
4945 #error USBDEVFS thunks do not support >64 bit hosts yet.
4948 uint64_t target_urb_adr
;
4949 uint64_t target_buf_adr
;
4950 char *target_buf_ptr
;
4951 struct usbdevfs_urb host_urb
;
4954 static GHashTable
*usbdevfs_urb_hashtable(void)
4956 static GHashTable
*urb_hashtable
;
4958 if (!urb_hashtable
) {
4959 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4961 return urb_hashtable
;
4964 static void urb_hashtable_insert(struct live_urb
*urb
)
4966 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4967 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4970 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4972 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4973 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4976 static void urb_hashtable_remove(struct live_urb
*urb
)
4978 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4979 g_hash_table_remove(urb_hashtable
, urb
);
4983 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4984 int fd
, int cmd
, abi_long arg
)
4986 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4987 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4988 struct live_urb
*lurb
;
4992 uintptr_t target_urb_adr
;
4995 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4997 memset(buf_temp
, 0, sizeof(uint64_t));
4998 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4999 if (is_error(ret
)) {
5003 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5004 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5005 if (!lurb
->target_urb_adr
) {
5006 return -TARGET_EFAULT
;
5008 urb_hashtable_remove(lurb
);
5009 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5010 lurb
->host_urb
.buffer_length
);
5011 lurb
->target_buf_ptr
= NULL
;
5013 /* restore the guest buffer pointer */
5014 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5016 /* update the guest urb struct */
5017 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5020 return -TARGET_EFAULT
;
5022 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5023 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5025 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5026 /* write back the urb handle */
5027 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5030 return -TARGET_EFAULT
;
5033 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5034 target_urb_adr
= lurb
->target_urb_adr
;
5035 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5036 unlock_user(argptr
, arg
, target_size
);
5043 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5044 uint8_t *buf_temp
__attribute__((unused
)),
5045 int fd
, int cmd
, abi_long arg
)
5047 struct live_urb
*lurb
;
5049 /* map target address back to host URB with metadata. */
5050 lurb
= urb_hashtable_lookup(arg
);
5052 return -TARGET_EFAULT
;
5054 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5058 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5059 int fd
, int cmd
, abi_long arg
)
5061 const argtype
*arg_type
= ie
->arg_type
;
5066 struct live_urb
*lurb
;
5069 * each submitted URB needs to map to a unique ID for the
5070 * kernel, and that unique ID needs to be a pointer to
5071 * host memory. hence, we need to malloc for each URB.
5072 * isochronous transfers have a variable length struct.
5075 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5077 /* construct host copy of urb and metadata */
5078 lurb
= g_try_new0(struct live_urb
, 1);
5080 return -TARGET_ENOMEM
;
5083 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5086 return -TARGET_EFAULT
;
5088 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5089 unlock_user(argptr
, arg
, 0);
5091 lurb
->target_urb_adr
= arg
;
5092 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5094 /* buffer space used depends on endpoint type so lock the entire buffer */
5095 /* control type urbs should check the buffer contents for true direction */
5096 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5097 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5098 lurb
->host_urb
.buffer_length
, 1);
5099 if (lurb
->target_buf_ptr
== NULL
) {
5101 return -TARGET_EFAULT
;
5104 /* update buffer pointer in host copy */
5105 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5107 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5108 if (is_error(ret
)) {
5109 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5112 urb_hashtable_insert(lurb
);
5117 #endif /* CONFIG_USBFS */
5119 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5120 int cmd
, abi_long arg
)
5123 struct dm_ioctl
*host_dm
;
5124 abi_long guest_data
;
5125 uint32_t guest_data_size
;
5127 const argtype
*arg_type
= ie
->arg_type
;
5129 void *big_buf
= NULL
;
5133 target_size
= thunk_type_size(arg_type
, 0);
5134 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5136 ret
= -TARGET_EFAULT
;
5139 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5140 unlock_user(argptr
, arg
, 0);
5142 /* buf_temp is too small, so fetch things into a bigger buffer */
5143 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5144 memcpy(big_buf
, buf_temp
, target_size
);
5148 guest_data
= arg
+ host_dm
->data_start
;
5149 if ((guest_data
- arg
) < 0) {
5150 ret
= -TARGET_EINVAL
;
5153 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5154 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5156 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5158 ret
= -TARGET_EFAULT
;
5162 switch (ie
->host_cmd
) {
5164 case DM_LIST_DEVICES
:
5167 case DM_DEV_SUSPEND
:
5170 case DM_TABLE_STATUS
:
5171 case DM_TABLE_CLEAR
:
5173 case DM_LIST_VERSIONS
:
5177 case DM_DEV_SET_GEOMETRY
:
5178 /* data contains only strings */
5179 memcpy(host_data
, argptr
, guest_data_size
);
5182 memcpy(host_data
, argptr
, guest_data_size
);
5183 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5187 void *gspec
= argptr
;
5188 void *cur_data
= host_data
;
5189 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5190 int spec_size
= thunk_type_size(arg_type
, 0);
5193 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5194 struct dm_target_spec
*spec
= cur_data
;
5198 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5199 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5201 spec
->next
= sizeof(*spec
) + slen
;
5202 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5204 cur_data
+= spec
->next
;
5209 ret
= -TARGET_EINVAL
;
5210 unlock_user(argptr
, guest_data
, 0);
5213 unlock_user(argptr
, guest_data
, 0);
5215 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5216 if (!is_error(ret
)) {
5217 guest_data
= arg
+ host_dm
->data_start
;
5218 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5219 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5220 switch (ie
->host_cmd
) {
5225 case DM_DEV_SUSPEND
:
5228 case DM_TABLE_CLEAR
:
5230 case DM_DEV_SET_GEOMETRY
:
5231 /* no return data */
5233 case DM_LIST_DEVICES
:
5235 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5236 uint32_t remaining_data
= guest_data_size
;
5237 void *cur_data
= argptr
;
5238 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5239 int nl_size
= 12; /* can't use thunk_size due to alignment */
5242 uint32_t next
= nl
->next
;
5244 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5246 if (remaining_data
< nl
->next
) {
5247 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5250 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5251 strcpy(cur_data
+ nl_size
, nl
->name
);
5252 cur_data
+= nl
->next
;
5253 remaining_data
-= nl
->next
;
5257 nl
= (void*)nl
+ next
;
5262 case DM_TABLE_STATUS
:
5264 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5265 void *cur_data
= argptr
;
5266 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5267 int spec_size
= thunk_type_size(arg_type
, 0);
5270 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5271 uint32_t next
= spec
->next
;
5272 int slen
= strlen((char*)&spec
[1]) + 1;
5273 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5274 if (guest_data_size
< spec
->next
) {
5275 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5278 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5279 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5280 cur_data
= argptr
+ spec
->next
;
5281 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5287 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5288 int count
= *(uint32_t*)hdata
;
5289 uint64_t *hdev
= hdata
+ 8;
5290 uint64_t *gdev
= argptr
+ 8;
5293 *(uint32_t*)argptr
= tswap32(count
);
5294 for (i
= 0; i
< count
; i
++) {
5295 *gdev
= tswap64(*hdev
);
5301 case DM_LIST_VERSIONS
:
5303 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5304 uint32_t remaining_data
= guest_data_size
;
5305 void *cur_data
= argptr
;
5306 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5307 int vers_size
= thunk_type_size(arg_type
, 0);
5310 uint32_t next
= vers
->next
;
5312 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5314 if (remaining_data
< vers
->next
) {
5315 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5318 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5319 strcpy(cur_data
+ vers_size
, vers
->name
);
5320 cur_data
+= vers
->next
;
5321 remaining_data
-= vers
->next
;
5325 vers
= (void*)vers
+ next
;
5330 unlock_user(argptr
, guest_data
, 0);
5331 ret
= -TARGET_EINVAL
;
5334 unlock_user(argptr
, guest_data
, guest_data_size
);
5336 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5338 ret
= -TARGET_EFAULT
;
5341 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5342 unlock_user(argptr
, arg
, target_size
);
5349 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5350 int cmd
, abi_long arg
)
5354 const argtype
*arg_type
= ie
->arg_type
;
5355 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5358 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5359 struct blkpg_partition host_part
;
5361 /* Read and convert blkpg */
5363 target_size
= thunk_type_size(arg_type
, 0);
5364 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5366 ret
= -TARGET_EFAULT
;
5369 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5370 unlock_user(argptr
, arg
, 0);
5372 switch (host_blkpg
->op
) {
5373 case BLKPG_ADD_PARTITION
:
5374 case BLKPG_DEL_PARTITION
:
5375 /* payload is struct blkpg_partition */
5378 /* Unknown opcode */
5379 ret
= -TARGET_EINVAL
;
5383 /* Read and convert blkpg->data */
5384 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5385 target_size
= thunk_type_size(part_arg_type
, 0);
5386 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5388 ret
= -TARGET_EFAULT
;
5391 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5392 unlock_user(argptr
, arg
, 0);
5394 /* Swizzle the data pointer to our local copy and call! */
5395 host_blkpg
->data
= &host_part
;
5396 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5402 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5403 int fd
, int cmd
, abi_long arg
)
5405 const argtype
*arg_type
= ie
->arg_type
;
5406 const StructEntry
*se
;
5407 const argtype
*field_types
;
5408 const int *dst_offsets
, *src_offsets
;
5411 abi_ulong
*target_rt_dev_ptr
= NULL
;
5412 unsigned long *host_rt_dev_ptr
= NULL
;
5416 assert(ie
->access
== IOC_W
);
5417 assert(*arg_type
== TYPE_PTR
);
5419 assert(*arg_type
== TYPE_STRUCT
);
5420 target_size
= thunk_type_size(arg_type
, 0);
5421 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5423 return -TARGET_EFAULT
;
5426 assert(*arg_type
== (int)STRUCT_rtentry
);
5427 se
= struct_entries
+ *arg_type
++;
5428 assert(se
->convert
[0] == NULL
);
5429 /* convert struct here to be able to catch rt_dev string */
5430 field_types
= se
->field_types
;
5431 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5432 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5433 for (i
= 0; i
< se
->nb_fields
; i
++) {
5434 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5435 assert(*field_types
== TYPE_PTRVOID
);
5436 target_rt_dev_ptr
= argptr
+ src_offsets
[i
];
5437 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5438 if (*target_rt_dev_ptr
!= 0) {
5439 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5440 tswapal(*target_rt_dev_ptr
));
5441 if (!*host_rt_dev_ptr
) {
5442 unlock_user(argptr
, arg
, 0);
5443 return -TARGET_EFAULT
;
5446 *host_rt_dev_ptr
= 0;
5451 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5452 argptr
+ src_offsets
[i
],
5453 field_types
, THUNK_HOST
);
5455 unlock_user(argptr
, arg
, 0);
5457 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5459 assert(host_rt_dev_ptr
!= NULL
);
5460 assert(target_rt_dev_ptr
!= NULL
);
5461 if (*host_rt_dev_ptr
!= 0) {
5462 unlock_user((void *)*host_rt_dev_ptr
,
5463 *target_rt_dev_ptr
, 0);
5468 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5469 int fd
, int cmd
, abi_long arg
)
5471 int sig
= target_to_host_signal(arg
);
5472 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5475 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5476 int fd
, int cmd
, abi_long arg
)
5481 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5482 if (is_error(ret
)) {
5486 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5487 if (copy_to_user_timeval(arg
, &tv
)) {
5488 return -TARGET_EFAULT
;
5491 if (copy_to_user_timeval64(arg
, &tv
)) {
5492 return -TARGET_EFAULT
;
5499 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5500 int fd
, int cmd
, abi_long arg
)
5505 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5506 if (is_error(ret
)) {
5510 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5511 if (host_to_target_timespec(arg
, &ts
)) {
5512 return -TARGET_EFAULT
;
5515 if (host_to_target_timespec64(arg
, &ts
)) {
5516 return -TARGET_EFAULT
;
5524 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5525 int fd
, int cmd
, abi_long arg
)
5527 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5528 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5534 static void unlock_drm_version(struct drm_version
*host_ver
,
5535 struct target_drm_version
*target_ver
,
5538 unlock_user(host_ver
->name
, target_ver
->name
,
5539 copy
? host_ver
->name_len
: 0);
5540 unlock_user(host_ver
->date
, target_ver
->date
,
5541 copy
? host_ver
->date_len
: 0);
5542 unlock_user(host_ver
->desc
, target_ver
->desc
,
5543 copy
? host_ver
->desc_len
: 0);
5546 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5547 struct target_drm_version
*target_ver
)
5549 memset(host_ver
, 0, sizeof(*host_ver
));
5551 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5552 if (host_ver
->name_len
) {
5553 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5554 target_ver
->name_len
, 0);
5555 if (!host_ver
->name
) {
5560 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5561 if (host_ver
->date_len
) {
5562 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5563 target_ver
->date_len
, 0);
5564 if (!host_ver
->date
) {
5569 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5570 if (host_ver
->desc_len
) {
5571 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5572 target_ver
->desc_len
, 0);
5573 if (!host_ver
->desc
) {
5580 unlock_drm_version(host_ver
, target_ver
, false);
5584 static inline void host_to_target_drmversion(
5585 struct target_drm_version
*target_ver
,
5586 struct drm_version
*host_ver
)
5588 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5589 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5590 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5591 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5592 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5593 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5594 unlock_drm_version(host_ver
, target_ver
, true);
5597 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5598 int fd
, int cmd
, abi_long arg
)
5600 struct drm_version
*ver
;
5601 struct target_drm_version
*target_ver
;
5604 switch (ie
->host_cmd
) {
5605 case DRM_IOCTL_VERSION
:
5606 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5607 return -TARGET_EFAULT
;
5609 ver
= (struct drm_version
*)buf_temp
;
5610 ret
= target_to_host_drmversion(ver
, target_ver
);
5611 if (!is_error(ret
)) {
5612 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5613 if (is_error(ret
)) {
5614 unlock_drm_version(ver
, target_ver
, false);
5616 host_to_target_drmversion(target_ver
, ver
);
5619 unlock_user_struct(target_ver
, arg
, 0);
5622 return -TARGET_ENOSYS
;
5625 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5626 struct drm_i915_getparam
*gparam
,
5627 int fd
, abi_long arg
)
5631 struct target_drm_i915_getparam
*target_gparam
;
5633 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5634 return -TARGET_EFAULT
;
5637 __get_user(gparam
->param
, &target_gparam
->param
);
5638 gparam
->value
= &value
;
5639 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5640 put_user_s32(value
, target_gparam
->value
);
5642 unlock_user_struct(target_gparam
, arg
, 0);
5646 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5647 int fd
, int cmd
, abi_long arg
)
5649 switch (ie
->host_cmd
) {
5650 case DRM_IOCTL_I915_GETPARAM
:
5651 return do_ioctl_drm_i915_getparam(ie
,
5652 (struct drm_i915_getparam
*)buf_temp
,
5655 return -TARGET_ENOSYS
;
5661 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5662 int fd
, int cmd
, abi_long arg
)
5664 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5665 struct tun_filter
*target_filter
;
5668 assert(ie
->access
== IOC_W
);
5670 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5671 if (!target_filter
) {
5672 return -TARGET_EFAULT
;
5674 filter
->flags
= tswap16(target_filter
->flags
);
5675 filter
->count
= tswap16(target_filter
->count
);
5676 unlock_user(target_filter
, arg
, 0);
5678 if (filter
->count
) {
5679 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5681 return -TARGET_EFAULT
;
5684 target_addr
= lock_user(VERIFY_READ
,
5685 arg
+ offsetof(struct tun_filter
, addr
),
5686 filter
->count
* ETH_ALEN
, 1);
5688 return -TARGET_EFAULT
;
5690 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5691 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5694 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5697 IOCTLEntry ioctl_entries
[] = {
5698 #define IOCTL(cmd, access, ...) \
5699 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5700 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5701 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5702 #define IOCTL_IGNORE(cmd) \
5703 { TARGET_ ## cmd, 0, #cmd },
5708 /* ??? Implement proper locking for ioctls. */
5709 /* do_ioctl() Must return target values and target errnos. */
5710 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5712 const IOCTLEntry
*ie
;
5713 const argtype
*arg_type
;
5715 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5721 if (ie
->target_cmd
== 0) {
5723 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5724 return -TARGET_ENOTTY
;
5726 if (ie
->target_cmd
== cmd
)
5730 arg_type
= ie
->arg_type
;
5732 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5733 } else if (!ie
->host_cmd
) {
5734 /* Some architectures define BSD ioctls in their headers
5735 that are not implemented in Linux. */
5736 return -TARGET_ENOTTY
;
5739 switch(arg_type
[0]) {
5742 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5748 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5752 target_size
= thunk_type_size(arg_type
, 0);
5753 switch(ie
->access
) {
5755 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5756 if (!is_error(ret
)) {
5757 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5759 return -TARGET_EFAULT
;
5760 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5761 unlock_user(argptr
, arg
, target_size
);
5765 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5767 return -TARGET_EFAULT
;
5768 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5769 unlock_user(argptr
, arg
, 0);
5770 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5774 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5776 return -TARGET_EFAULT
;
5777 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5778 unlock_user(argptr
, arg
, 0);
5779 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5780 if (!is_error(ret
)) {
5781 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5783 return -TARGET_EFAULT
;
5784 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5785 unlock_user(argptr
, arg
, target_size
);
5791 qemu_log_mask(LOG_UNIMP
,
5792 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5793 (long)cmd
, arg_type
[0]);
5794 ret
= -TARGET_ENOTTY
;
5800 static const bitmask_transtbl iflag_tbl
[] = {
5801 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5802 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5803 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5804 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5805 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5806 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5807 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5808 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5809 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5810 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5811 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5812 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5813 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5814 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5815 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5819 static const bitmask_transtbl oflag_tbl
[] = {
5820 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5821 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5822 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5823 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5824 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5825 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5826 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5827 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5828 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5829 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5830 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5831 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5832 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5833 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5834 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5835 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5836 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5837 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5838 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5839 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5840 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5841 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5842 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5843 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5847 static const bitmask_transtbl cflag_tbl
[] = {
5848 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5849 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5850 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5851 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5852 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5853 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5854 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5855 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5856 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5857 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5858 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5859 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5860 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5861 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5862 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5863 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5864 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5865 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5866 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5867 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5868 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5869 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5870 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5871 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5872 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5873 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5874 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5875 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5876 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5877 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5878 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5882 static const bitmask_transtbl lflag_tbl
[] = {
5883 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5884 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5885 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5886 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5887 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5888 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5889 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5890 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5891 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5892 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5893 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5894 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5895 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5896 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5897 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5898 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5902 static void target_to_host_termios (void *dst
, const void *src
)
5904 struct host_termios
*host
= dst
;
5905 const struct target_termios
*target
= src
;
5908 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5910 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5912 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5914 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5915 host
->c_line
= target
->c_line
;
5917 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5918 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5919 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5920 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5921 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5922 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5923 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5924 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5925 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5926 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5927 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5928 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5929 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5930 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5931 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5932 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5933 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5934 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5937 static void host_to_target_termios (void *dst
, const void *src
)
5939 struct target_termios
*target
= dst
;
5940 const struct host_termios
*host
= src
;
5943 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5945 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5947 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5949 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5950 target
->c_line
= host
->c_line
;
5952 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5953 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5954 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5955 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5956 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5957 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5958 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5959 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5960 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5961 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5962 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5963 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5964 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5965 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5966 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5967 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5968 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5969 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5972 static const StructEntry struct_termios_def
= {
5973 .convert
= { host_to_target_termios
, target_to_host_termios
},
5974 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5975 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5976 .print
= print_termios
,
5979 /* If the host does not provide these bits, they may be safely discarded. */
5983 #ifndef MAP_UNINITIALIZED
5984 #define MAP_UNINITIALIZED 0
5987 static const bitmask_transtbl mmap_flags_tbl
[] = {
5988 { TARGET_MAP_TYPE
, TARGET_MAP_SHARED
, MAP_TYPE
, MAP_SHARED
},
5989 { TARGET_MAP_TYPE
, TARGET_MAP_PRIVATE
, MAP_TYPE
, MAP_PRIVATE
},
5990 { TARGET_MAP_TYPE
, TARGET_MAP_SHARED_VALIDATE
,
5991 MAP_TYPE
, MAP_SHARED_VALIDATE
},
5992 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5993 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5994 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5995 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5996 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5997 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5998 MAP_DENYWRITE
, MAP_DENYWRITE
},
5999 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6000 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6001 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6002 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6003 MAP_NORESERVE
, MAP_NORESERVE
},
6004 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6005 /* MAP_STACK had been ignored by the kernel for quite some time.
6006 Recognize it for the target insofar as we do not want to pass
6007 it through to the host. */
6008 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6009 { TARGET_MAP_SYNC
, TARGET_MAP_SYNC
, MAP_SYNC
, MAP_SYNC
},
6010 { TARGET_MAP_NONBLOCK
, TARGET_MAP_NONBLOCK
, MAP_NONBLOCK
, MAP_NONBLOCK
},
6011 { TARGET_MAP_POPULATE
, TARGET_MAP_POPULATE
, MAP_POPULATE
, MAP_POPULATE
},
6012 { TARGET_MAP_FIXED_NOREPLACE
, TARGET_MAP_FIXED_NOREPLACE
,
6013 MAP_FIXED_NOREPLACE
, MAP_FIXED_NOREPLACE
},
6014 { TARGET_MAP_UNINITIALIZED
, TARGET_MAP_UNINITIALIZED
,
6015 MAP_UNINITIALIZED
, MAP_UNINITIALIZED
},
6020 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6021 * TARGET_I386 is defined if TARGET_X86_64 is defined
6023 #if defined(TARGET_I386)
6025 /* NOTE: there is really one LDT for all the threads */
6026 static uint8_t *ldt_table
;
6028 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6035 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6036 if (size
> bytecount
)
6038 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6040 return -TARGET_EFAULT
;
6041 /* ??? Should this by byteswapped? */
6042 memcpy(p
, ldt_table
, size
);
6043 unlock_user(p
, ptr
, size
);
6047 /* XXX: add locking support */
6048 static abi_long
write_ldt(CPUX86State
*env
,
6049 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6051 struct target_modify_ldt_ldt_s ldt_info
;
6052 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6053 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6054 int seg_not_present
, useable
, lm
;
6055 uint32_t *lp
, entry_1
, entry_2
;
6057 if (bytecount
!= sizeof(ldt_info
))
6058 return -TARGET_EINVAL
;
6059 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6060 return -TARGET_EFAULT
;
6061 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6062 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6063 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6064 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6065 unlock_user_struct(target_ldt_info
, ptr
, 0);
6067 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6068 return -TARGET_EINVAL
;
6069 seg_32bit
= ldt_info
.flags
& 1;
6070 contents
= (ldt_info
.flags
>> 1) & 3;
6071 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6072 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6073 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6074 useable
= (ldt_info
.flags
>> 6) & 1;
6078 lm
= (ldt_info
.flags
>> 7) & 1;
6080 if (contents
== 3) {
6082 return -TARGET_EINVAL
;
6083 if (seg_not_present
== 0)
6084 return -TARGET_EINVAL
;
6086 /* allocate the LDT */
6088 env
->ldt
.base
= target_mmap(0,
6089 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6090 PROT_READ
|PROT_WRITE
,
6091 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6092 if (env
->ldt
.base
== -1)
6093 return -TARGET_ENOMEM
;
6094 memset(g2h_untagged(env
->ldt
.base
), 0,
6095 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6096 env
->ldt
.limit
= 0xffff;
6097 ldt_table
= g2h_untagged(env
->ldt
.base
);
6100 /* NOTE: same code as Linux kernel */
6101 /* Allow LDTs to be cleared by the user. */
6102 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6105 read_exec_only
== 1 &&
6107 limit_in_pages
== 0 &&
6108 seg_not_present
== 1 &&
6116 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6117 (ldt_info
.limit
& 0x0ffff);
6118 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6119 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6120 (ldt_info
.limit
& 0xf0000) |
6121 ((read_exec_only
^ 1) << 9) |
6123 ((seg_not_present
^ 1) << 15) |
6125 (limit_in_pages
<< 23) |
6129 entry_2
|= (useable
<< 20);
6131 /* Install the new entry ... */
6133 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6134 lp
[0] = tswap32(entry_1
);
6135 lp
[1] = tswap32(entry_2
);
6139 /* specific and weird i386 syscalls */
6140 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6141 unsigned long bytecount
)
6147 ret
= read_ldt(ptr
, bytecount
);
6150 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6153 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6156 ret
= -TARGET_ENOSYS
;
6162 #if defined(TARGET_ABI32)
6163 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6165 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6166 struct target_modify_ldt_ldt_s ldt_info
;
6167 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6168 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6169 int seg_not_present
, useable
, lm
;
6170 uint32_t *lp
, entry_1
, entry_2
;
6173 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6174 if (!target_ldt_info
)
6175 return -TARGET_EFAULT
;
6176 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6177 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6178 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6179 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6180 if (ldt_info
.entry_number
== -1) {
6181 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6182 if (gdt_table
[i
] == 0) {
6183 ldt_info
.entry_number
= i
;
6184 target_ldt_info
->entry_number
= tswap32(i
);
6189 unlock_user_struct(target_ldt_info
, ptr
, 1);
6191 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6192 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6193 return -TARGET_EINVAL
;
6194 seg_32bit
= ldt_info
.flags
& 1;
6195 contents
= (ldt_info
.flags
>> 1) & 3;
6196 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6197 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6198 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6199 useable
= (ldt_info
.flags
>> 6) & 1;
6203 lm
= (ldt_info
.flags
>> 7) & 1;
6206 if (contents
== 3) {
6207 if (seg_not_present
== 0)
6208 return -TARGET_EINVAL
;
6211 /* NOTE: same code as Linux kernel */
6212 /* Allow LDTs to be cleared by the user. */
6213 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6214 if ((contents
== 0 &&
6215 read_exec_only
== 1 &&
6217 limit_in_pages
== 0 &&
6218 seg_not_present
== 1 &&
6226 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6227 (ldt_info
.limit
& 0x0ffff);
6228 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6229 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6230 (ldt_info
.limit
& 0xf0000) |
6231 ((read_exec_only
^ 1) << 9) |
6233 ((seg_not_present
^ 1) << 15) |
6235 (limit_in_pages
<< 23) |
6240 /* Install the new entry ... */
6242 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6243 lp
[0] = tswap32(entry_1
);
6244 lp
[1] = tswap32(entry_2
);
6248 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6250 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6251 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6252 uint32_t base_addr
, limit
, flags
;
6253 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6254 int seg_not_present
, useable
, lm
;
6255 uint32_t *lp
, entry_1
, entry_2
;
6257 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6258 if (!target_ldt_info
)
6259 return -TARGET_EFAULT
;
6260 idx
= tswap32(target_ldt_info
->entry_number
);
6261 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6262 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6263 unlock_user_struct(target_ldt_info
, ptr
, 1);
6264 return -TARGET_EINVAL
;
6266 lp
= (uint32_t *)(gdt_table
+ idx
);
6267 entry_1
= tswap32(lp
[0]);
6268 entry_2
= tswap32(lp
[1]);
6270 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6271 contents
= (entry_2
>> 10) & 3;
6272 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6273 seg_32bit
= (entry_2
>> 22) & 1;
6274 limit_in_pages
= (entry_2
>> 23) & 1;
6275 useable
= (entry_2
>> 20) & 1;
6279 lm
= (entry_2
>> 21) & 1;
6281 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6282 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6283 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6284 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6285 base_addr
= (entry_1
>> 16) |
6286 (entry_2
& 0xff000000) |
6287 ((entry_2
& 0xff) << 16);
6288 target_ldt_info
->base_addr
= tswapal(base_addr
);
6289 target_ldt_info
->limit
= tswap32(limit
);
6290 target_ldt_info
->flags
= tswap32(flags
);
6291 unlock_user_struct(target_ldt_info
, ptr
, 1);
6295 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6297 return -TARGET_ENOSYS
;
6300 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6307 case TARGET_ARCH_SET_GS
:
6308 case TARGET_ARCH_SET_FS
:
6309 if (code
== TARGET_ARCH_SET_GS
)
6313 cpu_x86_load_seg(env
, idx
, 0);
6314 env
->segs
[idx
].base
= addr
;
6316 case TARGET_ARCH_GET_GS
:
6317 case TARGET_ARCH_GET_FS
:
6318 if (code
== TARGET_ARCH_GET_GS
)
6322 val
= env
->segs
[idx
].base
;
6323 if (put_user(val
, addr
, abi_ulong
))
6324 ret
= -TARGET_EFAULT
;
6327 ret
= -TARGET_EINVAL
;
6332 #endif /* defined(TARGET_ABI32 */
6333 #endif /* defined(TARGET_I386) */
6336 * These constants are generic. Supply any that are missing from the host.
6339 # define PR_SET_NAME 15
6340 # define PR_GET_NAME 16
6342 #ifndef PR_SET_FP_MODE
6343 # define PR_SET_FP_MODE 45
6344 # define PR_GET_FP_MODE 46
6345 # define PR_FP_MODE_FR (1 << 0)
6346 # define PR_FP_MODE_FRE (1 << 1)
6348 #ifndef PR_SVE_SET_VL
6349 # define PR_SVE_SET_VL 50
6350 # define PR_SVE_GET_VL 51
6351 # define PR_SVE_VL_LEN_MASK 0xffff
6352 # define PR_SVE_VL_INHERIT (1 << 17)
6354 #ifndef PR_PAC_RESET_KEYS
6355 # define PR_PAC_RESET_KEYS 54
6356 # define PR_PAC_APIAKEY (1 << 0)
6357 # define PR_PAC_APIBKEY (1 << 1)
6358 # define PR_PAC_APDAKEY (1 << 2)
6359 # define PR_PAC_APDBKEY (1 << 3)
6360 # define PR_PAC_APGAKEY (1 << 4)
6362 #ifndef PR_SET_TAGGED_ADDR_CTRL
6363 # define PR_SET_TAGGED_ADDR_CTRL 55
6364 # define PR_GET_TAGGED_ADDR_CTRL 56
6365 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6367 #ifndef PR_MTE_TCF_SHIFT
6368 # define PR_MTE_TCF_SHIFT 1
6369 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6370 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6371 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6372 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6373 # define PR_MTE_TAG_SHIFT 3
6374 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6376 #ifndef PR_SET_IO_FLUSHER
6377 # define PR_SET_IO_FLUSHER 57
6378 # define PR_GET_IO_FLUSHER 58
6380 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6381 # define PR_SET_SYSCALL_USER_DISPATCH 59
6383 #ifndef PR_SME_SET_VL
6384 # define PR_SME_SET_VL 63
6385 # define PR_SME_GET_VL 64
6386 # define PR_SME_VL_LEN_MASK 0xffff
6387 # define PR_SME_VL_INHERIT (1 << 17)
6390 #include "target_prctl.h"
6392 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6394 return -TARGET_EINVAL
;
6397 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6399 return -TARGET_EINVAL
;
6402 #ifndef do_prctl_get_fp_mode
6403 #define do_prctl_get_fp_mode do_prctl_inval0
6405 #ifndef do_prctl_set_fp_mode
6406 #define do_prctl_set_fp_mode do_prctl_inval1
6408 #ifndef do_prctl_sve_get_vl
6409 #define do_prctl_sve_get_vl do_prctl_inval0
6411 #ifndef do_prctl_sve_set_vl
6412 #define do_prctl_sve_set_vl do_prctl_inval1
6414 #ifndef do_prctl_reset_keys
6415 #define do_prctl_reset_keys do_prctl_inval1
6417 #ifndef do_prctl_set_tagged_addr_ctrl
6418 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6420 #ifndef do_prctl_get_tagged_addr_ctrl
6421 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6423 #ifndef do_prctl_get_unalign
6424 #define do_prctl_get_unalign do_prctl_inval1
6426 #ifndef do_prctl_set_unalign
6427 #define do_prctl_set_unalign do_prctl_inval1
6429 #ifndef do_prctl_sme_get_vl
6430 #define do_prctl_sme_get_vl do_prctl_inval0
6432 #ifndef do_prctl_sme_set_vl
6433 #define do_prctl_sme_set_vl do_prctl_inval1
6436 static abi_long
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6437 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6442 case PR_GET_PDEATHSIG
:
6445 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6447 if (!is_error(ret
) &&
6448 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6449 return -TARGET_EFAULT
;
6453 case PR_SET_PDEATHSIG
:
6454 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6458 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6460 return -TARGET_EFAULT
;
6462 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6464 unlock_user(name
, arg2
, 16);
6469 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6471 return -TARGET_EFAULT
;
6473 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6475 unlock_user(name
, arg2
, 0);
6478 case PR_GET_FP_MODE
:
6479 return do_prctl_get_fp_mode(env
);
6480 case PR_SET_FP_MODE
:
6481 return do_prctl_set_fp_mode(env
, arg2
);
6483 return do_prctl_sve_get_vl(env
);
6485 return do_prctl_sve_set_vl(env
, arg2
);
6487 return do_prctl_sme_get_vl(env
);
6489 return do_prctl_sme_set_vl(env
, arg2
);
6490 case PR_PAC_RESET_KEYS
:
6491 if (arg3
|| arg4
|| arg5
) {
6492 return -TARGET_EINVAL
;
6494 return do_prctl_reset_keys(env
, arg2
);
6495 case PR_SET_TAGGED_ADDR_CTRL
:
6496 if (arg3
|| arg4
|| arg5
) {
6497 return -TARGET_EINVAL
;
6499 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6500 case PR_GET_TAGGED_ADDR_CTRL
:
6501 if (arg2
|| arg3
|| arg4
|| arg5
) {
6502 return -TARGET_EINVAL
;
6504 return do_prctl_get_tagged_addr_ctrl(env
);
6506 case PR_GET_UNALIGN
:
6507 return do_prctl_get_unalign(env
, arg2
);
6508 case PR_SET_UNALIGN
:
6509 return do_prctl_set_unalign(env
, arg2
);
6511 case PR_CAP_AMBIENT
:
6512 case PR_CAPBSET_READ
:
6513 case PR_CAPBSET_DROP
:
6514 case PR_GET_DUMPABLE
:
6515 case PR_SET_DUMPABLE
:
6516 case PR_GET_KEEPCAPS
:
6517 case PR_SET_KEEPCAPS
:
6518 case PR_GET_SECUREBITS
:
6519 case PR_SET_SECUREBITS
:
6522 case PR_GET_TIMERSLACK
:
6523 case PR_SET_TIMERSLACK
:
6525 case PR_MCE_KILL_GET
:
6526 case PR_GET_NO_NEW_PRIVS
:
6527 case PR_SET_NO_NEW_PRIVS
:
6528 case PR_GET_IO_FLUSHER
:
6529 case PR_SET_IO_FLUSHER
:
6530 /* Some prctl options have no pointer arguments and we can pass on. */
6531 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6533 case PR_GET_CHILD_SUBREAPER
:
6534 case PR_SET_CHILD_SUBREAPER
:
6535 case PR_GET_SPECULATION_CTRL
:
6536 case PR_SET_SPECULATION_CTRL
:
6537 case PR_GET_TID_ADDRESS
:
6539 return -TARGET_EINVAL
;
6543 /* Was used for SPE on PowerPC. */
6544 return -TARGET_EINVAL
;
6551 case PR_GET_SECCOMP
:
6552 case PR_SET_SECCOMP
:
6553 case PR_SET_SYSCALL_USER_DISPATCH
:
6554 case PR_GET_THP_DISABLE
:
6555 case PR_SET_THP_DISABLE
:
6558 /* Disable to prevent the target disabling stuff we need. */
6559 return -TARGET_EINVAL
;
6562 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6564 return -TARGET_EINVAL
;
6568 #define NEW_STACK_SIZE 0x40000
6571 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6574 pthread_mutex_t mutex
;
6575 pthread_cond_t cond
;
6578 abi_ulong child_tidptr
;
6579 abi_ulong parent_tidptr
;
6583 static void *clone_func(void *arg
)
6585 new_thread_info
*info
= arg
;
6590 rcu_register_thread();
6591 tcg_register_thread();
6595 ts
= (TaskState
*)cpu
->opaque
;
6596 info
->tid
= sys_gettid();
6598 if (info
->child_tidptr
)
6599 put_user_u32(info
->tid
, info
->child_tidptr
);
6600 if (info
->parent_tidptr
)
6601 put_user_u32(info
->tid
, info
->parent_tidptr
);
6602 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6603 /* Enable signals. */
6604 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6605 /* Signal to the parent that we're ready. */
6606 pthread_mutex_lock(&info
->mutex
);
6607 pthread_cond_broadcast(&info
->cond
);
6608 pthread_mutex_unlock(&info
->mutex
);
6609 /* Wait until the parent has finished initializing the tls state. */
6610 pthread_mutex_lock(&clone_lock
);
6611 pthread_mutex_unlock(&clone_lock
);
6617 /* do_fork() Must return host values and target errnos (unlike most
6618 do_*() functions). */
6619 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6620 abi_ulong parent_tidptr
, target_ulong newtls
,
6621 abi_ulong child_tidptr
)
6623 CPUState
*cpu
= env_cpu(env
);
6627 CPUArchState
*new_env
;
6630 flags
&= ~CLONE_IGNORED_FLAGS
;
6632 /* Emulate vfork() with fork() */
6633 if (flags
& CLONE_VFORK
)
6634 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6636 if (flags
& CLONE_VM
) {
6637 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6638 new_thread_info info
;
6639 pthread_attr_t attr
;
6641 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6642 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6643 return -TARGET_EINVAL
;
6646 ts
= g_new0(TaskState
, 1);
6647 init_task_state(ts
);
6649 /* Grab a mutex so that thread setup appears atomic. */
6650 pthread_mutex_lock(&clone_lock
);
6653 * If this is our first additional thread, we need to ensure we
6654 * generate code for parallel execution and flush old translations.
6655 * Do this now so that the copy gets CF_PARALLEL too.
6657 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6658 cpu
->tcg_cflags
|= CF_PARALLEL
;
6662 /* we create a new CPU instance. */
6663 new_env
= cpu_copy(env
);
6664 /* Init regs that differ from the parent. */
6665 cpu_clone_regs_child(new_env
, newsp
, flags
);
6666 cpu_clone_regs_parent(env
, flags
);
6667 new_cpu
= env_cpu(new_env
);
6668 new_cpu
->opaque
= ts
;
6669 ts
->bprm
= parent_ts
->bprm
;
6670 ts
->info
= parent_ts
->info
;
6671 ts
->signal_mask
= parent_ts
->signal_mask
;
6673 if (flags
& CLONE_CHILD_CLEARTID
) {
6674 ts
->child_tidptr
= child_tidptr
;
6677 if (flags
& CLONE_SETTLS
) {
6678 cpu_set_tls (new_env
, newtls
);
6681 memset(&info
, 0, sizeof(info
));
6682 pthread_mutex_init(&info
.mutex
, NULL
);
6683 pthread_mutex_lock(&info
.mutex
);
6684 pthread_cond_init(&info
.cond
, NULL
);
6686 if (flags
& CLONE_CHILD_SETTID
) {
6687 info
.child_tidptr
= child_tidptr
;
6689 if (flags
& CLONE_PARENT_SETTID
) {
6690 info
.parent_tidptr
= parent_tidptr
;
6693 ret
= pthread_attr_init(&attr
);
6694 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6695 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6696 /* It is not safe to deliver signals until the child has finished
6697 initializing, so temporarily block all signals. */
6698 sigfillset(&sigmask
);
6699 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6700 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6702 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6703 /* TODO: Free new CPU state if thread creation failed. */
6705 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6706 pthread_attr_destroy(&attr
);
6708 /* Wait for the child to initialize. */
6709 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6714 pthread_mutex_unlock(&info
.mutex
);
6715 pthread_cond_destroy(&info
.cond
);
6716 pthread_mutex_destroy(&info
.mutex
);
6717 pthread_mutex_unlock(&clone_lock
);
6719 /* if no CLONE_VM, we consider it is a fork */
6720 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6721 return -TARGET_EINVAL
;
6724 /* We can't support custom termination signals */
6725 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6726 return -TARGET_EINVAL
;
6729 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6730 if (flags
& CLONE_PIDFD
) {
6731 return -TARGET_EINVAL
;
6735 /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6736 if ((flags
& CLONE_PIDFD
) && (flags
& CLONE_PARENT_SETTID
)) {
6737 return -TARGET_EINVAL
;
6740 if (block_signals()) {
6741 return -QEMU_ERESTARTSYS
;
6747 /* Child Process. */
6748 cpu_clone_regs_child(env
, newsp
, flags
);
6750 /* There is a race condition here. The parent process could
6751 theoretically read the TID in the child process before the child
6752 tid is set. This would require using either ptrace
6753 (not implemented) or having *_tidptr to point at a shared memory
6754 mapping. We can't repeat the spinlock hack used above because
6755 the child process gets its own copy of the lock. */
6756 if (flags
& CLONE_CHILD_SETTID
)
6757 put_user_u32(sys_gettid(), child_tidptr
);
6758 if (flags
& CLONE_PARENT_SETTID
)
6759 put_user_u32(sys_gettid(), parent_tidptr
);
6760 ts
= (TaskState
*)cpu
->opaque
;
6761 if (flags
& CLONE_SETTLS
)
6762 cpu_set_tls (env
, newtls
);
6763 if (flags
& CLONE_CHILD_CLEARTID
)
6764 ts
->child_tidptr
= child_tidptr
;
6766 cpu_clone_regs_parent(env
, flags
);
6767 if (flags
& CLONE_PIDFD
) {
6769 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6770 int pid_child
= ret
;
6771 pid_fd
= pidfd_open(pid_child
, 0);
6773 fcntl(pid_fd
, F_SETFD
, fcntl(pid_fd
, F_GETFL
)
6779 put_user_u32(pid_fd
, parent_tidptr
);
6783 g_assert(!cpu_in_exclusive_context(cpu
));
6788 /* warning : doesn't handle linux specific flags... */
6789 static int target_to_host_fcntl_cmd(int cmd
)
6794 case TARGET_F_DUPFD
:
6795 case TARGET_F_GETFD
:
6796 case TARGET_F_SETFD
:
6797 case TARGET_F_GETFL
:
6798 case TARGET_F_SETFL
:
6799 case TARGET_F_OFD_GETLK
:
6800 case TARGET_F_OFD_SETLK
:
6801 case TARGET_F_OFD_SETLKW
:
6804 case TARGET_F_GETLK
:
6807 case TARGET_F_SETLK
:
6810 case TARGET_F_SETLKW
:
6813 case TARGET_F_GETOWN
:
6816 case TARGET_F_SETOWN
:
6819 case TARGET_F_GETSIG
:
6822 case TARGET_F_SETSIG
:
6825 #if TARGET_ABI_BITS == 32
6826 case TARGET_F_GETLK64
:
6829 case TARGET_F_SETLK64
:
6832 case TARGET_F_SETLKW64
:
6836 case TARGET_F_SETLEASE
:
6839 case TARGET_F_GETLEASE
:
6842 #ifdef F_DUPFD_CLOEXEC
6843 case TARGET_F_DUPFD_CLOEXEC
:
6844 ret
= F_DUPFD_CLOEXEC
;
6847 case TARGET_F_NOTIFY
:
6851 case TARGET_F_GETOWN_EX
:
6856 case TARGET_F_SETOWN_EX
:
6861 case TARGET_F_SETPIPE_SZ
:
6864 case TARGET_F_GETPIPE_SZ
:
6869 case TARGET_F_ADD_SEALS
:
6872 case TARGET_F_GET_SEALS
:
6877 ret
= -TARGET_EINVAL
;
6881 #if defined(__powerpc64__)
6882 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6883 * is not supported by kernel. The glibc fcntl call actually adjusts
6884 * them to 5, 6 and 7 before making the syscall(). Since we make the
6885 * syscall directly, adjust to what is supported by the kernel.
6887 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6888 ret
-= F_GETLK64
- 5;
6895 #define FLOCK_TRANSTBL \
6897 TRANSTBL_CONVERT(F_RDLCK); \
6898 TRANSTBL_CONVERT(F_WRLCK); \
6899 TRANSTBL_CONVERT(F_UNLCK); \
6902 static int target_to_host_flock(int type
)
6904 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6906 #undef TRANSTBL_CONVERT
6907 return -TARGET_EINVAL
;
6910 static int host_to_target_flock(int type
)
6912 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6914 #undef TRANSTBL_CONVERT
6915 /* if we don't know how to convert the value coming
6916 * from the host we copy to the target field as-is
6921 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6922 abi_ulong target_flock_addr
)
6924 struct target_flock
*target_fl
;
6927 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6928 return -TARGET_EFAULT
;
6931 __get_user(l_type
, &target_fl
->l_type
);
6932 l_type
= target_to_host_flock(l_type
);
6936 fl
->l_type
= l_type
;
6937 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6938 __get_user(fl
->l_start
, &target_fl
->l_start
);
6939 __get_user(fl
->l_len
, &target_fl
->l_len
);
6940 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6941 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6945 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6946 const struct flock64
*fl
)
6948 struct target_flock
*target_fl
;
6951 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6952 return -TARGET_EFAULT
;
6955 l_type
= host_to_target_flock(fl
->l_type
);
6956 __put_user(l_type
, &target_fl
->l_type
);
6957 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6958 __put_user(fl
->l_start
, &target_fl
->l_start
);
6959 __put_user(fl
->l_len
, &target_fl
->l_len
);
6960 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6961 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6965 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6966 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6968 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6969 struct target_oabi_flock64
{
6977 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6978 abi_ulong target_flock_addr
)
6980 struct target_oabi_flock64
*target_fl
;
6983 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6984 return -TARGET_EFAULT
;
6987 __get_user(l_type
, &target_fl
->l_type
);
6988 l_type
= target_to_host_flock(l_type
);
6992 fl
->l_type
= l_type
;
6993 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6994 __get_user(fl
->l_start
, &target_fl
->l_start
);
6995 __get_user(fl
->l_len
, &target_fl
->l_len
);
6996 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6997 unlock_user_struct(target_fl
, target_flock_addr
, 0);
7001 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
7002 const struct flock64
*fl
)
7004 struct target_oabi_flock64
*target_fl
;
7007 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
7008 return -TARGET_EFAULT
;
7011 l_type
= host_to_target_flock(fl
->l_type
);
7012 __put_user(l_type
, &target_fl
->l_type
);
7013 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7014 __put_user(fl
->l_start
, &target_fl
->l_start
);
7015 __put_user(fl
->l_len
, &target_fl
->l_len
);
7016 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7017 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7022 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
7023 abi_ulong target_flock_addr
)
7025 struct target_flock64
*target_fl
;
7028 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
7029 return -TARGET_EFAULT
;
7032 __get_user(l_type
, &target_fl
->l_type
);
7033 l_type
= target_to_host_flock(l_type
);
7037 fl
->l_type
= l_type
;
7038 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
7039 __get_user(fl
->l_start
, &target_fl
->l_start
);
7040 __get_user(fl
->l_len
, &target_fl
->l_len
);
7041 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
7042 unlock_user_struct(target_fl
, target_flock_addr
, 0);
7046 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
7047 const struct flock64
*fl
)
7049 struct target_flock64
*target_fl
;
7052 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
7053 return -TARGET_EFAULT
;
7056 l_type
= host_to_target_flock(fl
->l_type
);
7057 __put_user(l_type
, &target_fl
->l_type
);
7058 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7059 __put_user(fl
->l_start
, &target_fl
->l_start
);
7060 __put_user(fl
->l_len
, &target_fl
->l_len
);
7061 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7062 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7066 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
7068 struct flock64 fl64
;
7070 struct f_owner_ex fox
;
7071 struct target_f_owner_ex
*target_fox
;
7074 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
7076 if (host_cmd
== -TARGET_EINVAL
)
7080 case TARGET_F_GETLK
:
7081 ret
= copy_from_user_flock(&fl64
, arg
);
7085 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7087 ret
= copy_to_user_flock(arg
, &fl64
);
7091 case TARGET_F_SETLK
:
7092 case TARGET_F_SETLKW
:
7093 ret
= copy_from_user_flock(&fl64
, arg
);
7097 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7100 case TARGET_F_GETLK64
:
7101 case TARGET_F_OFD_GETLK
:
7102 ret
= copy_from_user_flock64(&fl64
, arg
);
7106 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7108 ret
= copy_to_user_flock64(arg
, &fl64
);
7111 case TARGET_F_SETLK64
:
7112 case TARGET_F_SETLKW64
:
7113 case TARGET_F_OFD_SETLK
:
7114 case TARGET_F_OFD_SETLKW
:
7115 ret
= copy_from_user_flock64(&fl64
, arg
);
7119 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7122 case TARGET_F_GETFL
:
7123 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7125 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
7126 /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7127 if (O_LARGEFILE
== 0 && HOST_LONG_BITS
== 64) {
7128 ret
|= TARGET_O_LARGEFILE
;
7133 case TARGET_F_SETFL
:
7134 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
7135 target_to_host_bitmask(arg
,
7140 case TARGET_F_GETOWN_EX
:
7141 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7143 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
7144 return -TARGET_EFAULT
;
7145 target_fox
->type
= tswap32(fox
.type
);
7146 target_fox
->pid
= tswap32(fox
.pid
);
7147 unlock_user_struct(target_fox
, arg
, 1);
7153 case TARGET_F_SETOWN_EX
:
7154 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
7155 return -TARGET_EFAULT
;
7156 fox
.type
= tswap32(target_fox
->type
);
7157 fox
.pid
= tswap32(target_fox
->pid
);
7158 unlock_user_struct(target_fox
, arg
, 0);
7159 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7163 case TARGET_F_SETSIG
:
7164 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
7167 case TARGET_F_GETSIG
:
7168 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
7171 case TARGET_F_SETOWN
:
7172 case TARGET_F_GETOWN
:
7173 case TARGET_F_SETLEASE
:
7174 case TARGET_F_GETLEASE
:
7175 case TARGET_F_SETPIPE_SZ
:
7176 case TARGET_F_GETPIPE_SZ
:
7177 case TARGET_F_ADD_SEALS
:
7178 case TARGET_F_GET_SEALS
:
7179 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7183 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit range (65534 == "nobody" overflow id). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Clamp a 32-bit gid to the 16-bit range. */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the -1 "no change" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byte-swap an id of the width used by this ABI. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit ids: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
7255 /* We must do direct syscalls for setting UID/GID, because we want to
7256 * implement the Linux system call semantics of "change only for this thread",
7257 * not the libc/POSIX semantics of "change for all threads in process".
7258 * (See http://ewontfix.com/17/ for more details.)
7259 * We use the 32-bit version of the syscalls if present; if it is not
7260 * then either the host architecture supports 32-bit UIDs natively with
7261 * the standard syscall, or the 16-bit UID is the best we can do.
7263 #ifdef __NR_setuid32
7264 #define __NR_sys_setuid __NR_setuid32
7266 #define __NR_sys_setuid __NR_setuid
7268 #ifdef __NR_setgid32
7269 #define __NR_sys_setgid __NR_setgid32
7271 #define __NR_sys_setgid __NR_setgid
7273 #ifdef __NR_setresuid32
7274 #define __NR_sys_setresuid __NR_setresuid32
7276 #define __NR_sys_setresuid __NR_setresuid
7278 #ifdef __NR_setresgid32
7279 #define __NR_sys_setresgid __NR_setresgid32
7281 #define __NR_sys_setresgid __NR_setresgid
7284 _syscall1(int, sys_setuid
, uid_t
, uid
)
7285 _syscall1(int, sys_setgid
, gid_t
, gid
)
7286 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7287 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7289 void syscall_init(void)
7292 const argtype
*arg_type
;
7295 thunk_init(STRUCT_MAX
);
7297 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7298 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7299 #include "syscall_types.h"
7301 #undef STRUCT_SPECIAL
7303 /* we patch the ioctl size if necessary. We rely on the fact that
7304 no ioctl has all the bits at '1' in the size field */
7306 while (ie
->target_cmd
!= 0) {
7307 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7308 TARGET_IOC_SIZEMASK
) {
7309 arg_type
= ie
->arg_type
;
7310 if (arg_type
[0] != TYPE_PTR
) {
7311 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7316 size
= thunk_type_size(arg_type
, 0);
7317 ie
->target_cmd
= (ie
->target_cmd
&
7318 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7319 (size
<< TARGET_IOC_SIZESHIFT
);
7322 /* automatic consistency check if same arch */
7323 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7324 (defined(__x86_64__) && defined(TARGET_X86_64))
7325 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7326 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7327 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7334 #ifdef TARGET_NR_truncate64
7335 static inline abi_long
target_truncate64(CPUArchState
*cpu_env
, const char *arg1
,
7340 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7344 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7348 #ifdef TARGET_NR_ftruncate64
7349 static inline abi_long
target_ftruncate64(CPUArchState
*cpu_env
, abi_long arg1
,
7354 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7358 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7362 #if defined(TARGET_NR_timer_settime) || \
7363 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7364 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
7365 abi_ulong target_addr
)
7367 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
7368 offsetof(struct target_itimerspec
,
7370 target_to_host_timespec(&host_its
->it_value
, target_addr
+
7371 offsetof(struct target_itimerspec
,
7373 return -TARGET_EFAULT
;
7380 #if defined(TARGET_NR_timer_settime64) || \
7381 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7382 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
7383 abi_ulong target_addr
)
7385 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
7386 offsetof(struct target__kernel_itimerspec
,
7388 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
7389 offsetof(struct target__kernel_itimerspec
,
7391 return -TARGET_EFAULT
;
7398 #if ((defined(TARGET_NR_timerfd_gettime) || \
7399 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7400 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7401 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7402 struct itimerspec
*host_its
)
7404 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7406 &host_its
->it_interval
) ||
7407 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7409 &host_its
->it_value
)) {
7410 return -TARGET_EFAULT
;
7416 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7417 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7418 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7419 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
7420 struct itimerspec
*host_its
)
7422 if (host_to_target_timespec64(target_addr
+
7423 offsetof(struct target__kernel_itimerspec
,
7425 &host_its
->it_interval
) ||
7426 host_to_target_timespec64(target_addr
+
7427 offsetof(struct target__kernel_itimerspec
,
7429 &host_its
->it_value
)) {
7430 return -TARGET_EFAULT
;
7436 #if defined(TARGET_NR_adjtimex) || \
7437 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7438 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7439 abi_long target_addr
)
7441 struct target_timex
*target_tx
;
7443 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7444 return -TARGET_EFAULT
;
7447 __get_user(host_tx
->modes
, &target_tx
->modes
);
7448 __get_user(host_tx
->offset
, &target_tx
->offset
);
7449 __get_user(host_tx
->freq
, &target_tx
->freq
);
7450 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7451 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7452 __get_user(host_tx
->status
, &target_tx
->status
);
7453 __get_user(host_tx
->constant
, &target_tx
->constant
);
7454 __get_user(host_tx
->precision
, &target_tx
->precision
);
7455 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7456 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7457 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7458 __get_user(host_tx
->tick
, &target_tx
->tick
);
7459 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7460 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7461 __get_user(host_tx
->shift
, &target_tx
->shift
);
7462 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7463 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7464 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7465 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7466 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7467 __get_user(host_tx
->tai
, &target_tx
->tai
);
7469 unlock_user_struct(target_tx
, target_addr
, 0);
7473 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7474 struct timex
*host_tx
)
7476 struct target_timex
*target_tx
;
7478 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7479 return -TARGET_EFAULT
;
7482 __put_user(host_tx
->modes
, &target_tx
->modes
);
7483 __put_user(host_tx
->offset
, &target_tx
->offset
);
7484 __put_user(host_tx
->freq
, &target_tx
->freq
);
7485 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7486 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7487 __put_user(host_tx
->status
, &target_tx
->status
);
7488 __put_user(host_tx
->constant
, &target_tx
->constant
);
7489 __put_user(host_tx
->precision
, &target_tx
->precision
);
7490 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7491 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7492 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7493 __put_user(host_tx
->tick
, &target_tx
->tick
);
7494 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7495 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7496 __put_user(host_tx
->shift
, &target_tx
->shift
);
7497 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7498 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7499 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7500 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7501 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7502 __put_user(host_tx
->tai
, &target_tx
->tai
);
7504 unlock_user_struct(target_tx
, target_addr
, 1);
7510 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7511 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7512 abi_long target_addr
)
7514 struct target__kernel_timex
*target_tx
;
7516 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7517 offsetof(struct target__kernel_timex
,
7519 return -TARGET_EFAULT
;
7522 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7523 return -TARGET_EFAULT
;
7526 __get_user(host_tx
->modes
, &target_tx
->modes
);
7527 __get_user(host_tx
->offset
, &target_tx
->offset
);
7528 __get_user(host_tx
->freq
, &target_tx
->freq
);
7529 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7530 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7531 __get_user(host_tx
->status
, &target_tx
->status
);
7532 __get_user(host_tx
->constant
, &target_tx
->constant
);
7533 __get_user(host_tx
->precision
, &target_tx
->precision
);
7534 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7535 __get_user(host_tx
->tick
, &target_tx
->tick
);
7536 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7537 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7538 __get_user(host_tx
->shift
, &target_tx
->shift
);
7539 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7540 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7541 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7542 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7543 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7544 __get_user(host_tx
->tai
, &target_tx
->tai
);
7546 unlock_user_struct(target_tx
, target_addr
, 0);
7550 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7551 struct timex
*host_tx
)
7553 struct target__kernel_timex
*target_tx
;
7555 if (copy_to_user_timeval64(target_addr
+
7556 offsetof(struct target__kernel_timex
, time
),
7558 return -TARGET_EFAULT
;
7561 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7562 return -TARGET_EFAULT
;
7565 __put_user(host_tx
->modes
, &target_tx
->modes
);
7566 __put_user(host_tx
->offset
, &target_tx
->offset
);
7567 __put_user(host_tx
->freq
, &target_tx
->freq
);
7568 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7569 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7570 __put_user(host_tx
->status
, &target_tx
->status
);
7571 __put_user(host_tx
->constant
, &target_tx
->constant
);
7572 __put_user(host_tx
->precision
, &target_tx
->precision
);
7573 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7574 __put_user(host_tx
->tick
, &target_tx
->tick
);
7575 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7576 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7577 __put_user(host_tx
->shift
, &target_tx
->shift
);
7578 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7579 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7580 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7581 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7582 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7583 __put_user(host_tx
->tai
, &target_tx
->tai
);
7585 unlock_user_struct(target_tx
, target_addr
, 1);
7590 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7591 #define sigev_notify_thread_id _sigev_un._tid
7594 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7595 abi_ulong target_addr
)
7597 struct target_sigevent
*target_sevp
;
7599 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7600 return -TARGET_EFAULT
;
7603 /* This union is awkward on 64 bit systems because it has a 32 bit
7604 * integer and a pointer in it; we follow the conversion approach
7605 * used for handling sigval types in signal.c so the guest should get
7606 * the correct value back even if we did a 64 bit byteswap and it's
7607 * using the 32 bit integer.
7609 host_sevp
->sigev_value
.sival_ptr
=
7610 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7611 host_sevp
->sigev_signo
=
7612 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7613 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7614 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7616 unlock_user_struct(target_sevp
, target_addr
, 1);
7620 #if defined(TARGET_NR_mlockall)
7621 static inline int target_to_host_mlockall_arg(int arg
)
7625 if (arg
& TARGET_MCL_CURRENT
) {
7626 result
|= MCL_CURRENT
;
7628 if (arg
& TARGET_MCL_FUTURE
) {
7629 result
|= MCL_FUTURE
;
7632 if (arg
& TARGET_MCL_ONFAULT
) {
7633 result
|= MCL_ONFAULT
;
7641 static inline int target_to_host_msync_arg(abi_long arg
)
7643 return ((arg
& TARGET_MS_ASYNC
) ? MS_ASYNC
: 0) |
7644 ((arg
& TARGET_MS_INVALIDATE
) ? MS_INVALIDATE
: 0) |
7645 ((arg
& TARGET_MS_SYNC
) ? MS_SYNC
: 0) |
7646 (arg
& ~(TARGET_MS_ASYNC
| TARGET_MS_INVALIDATE
| TARGET_MS_SYNC
));
7649 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7650 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7651 defined(TARGET_NR_newfstatat))
7652 static inline abi_long
host_to_target_stat64(CPUArchState
*cpu_env
,
7653 abi_ulong target_addr
,
7654 struct stat
*host_st
)
7656 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7657 if (cpu_env
->eabi
) {
7658 struct target_eabi_stat64
*target_st
;
7660 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7661 return -TARGET_EFAULT
;
7662 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7663 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7664 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7665 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7666 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7668 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7669 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7670 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7671 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7672 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7673 __put_user(host_st
->st_size
, &target_st
->st_size
);
7674 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7675 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7676 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7677 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7678 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7679 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7680 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7681 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7682 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7684 unlock_user_struct(target_st
, target_addr
, 1);
7688 #if defined(TARGET_HAS_STRUCT_STAT64)
7689 struct target_stat64
*target_st
;
7691 struct target_stat
*target_st
;
7694 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7695 return -TARGET_EFAULT
;
7696 memset(target_st
, 0, sizeof(*target_st
));
7697 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7698 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7699 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7700 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7702 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7703 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7704 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7705 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7706 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7707 /* XXX: better use of kernel struct */
7708 __put_user(host_st
->st_size
, &target_st
->st_size
);
7709 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7710 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7711 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7712 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7713 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7714 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7715 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7716 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7717 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7719 unlock_user_struct(target_st
, target_addr
, 1);
7726 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7727 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7728 abi_ulong target_addr
)
7730 struct target_statx
*target_stx
;
7732 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7733 return -TARGET_EFAULT
;
7735 memset(target_stx
, 0, sizeof(*target_stx
));
7737 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7738 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7739 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7740 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7741 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7742 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7743 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7744 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7745 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7746 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7747 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7748 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7749 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7750 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7751 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7752 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7753 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7754 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7755 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7756 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7757 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7758 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7759 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7761 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Issue the raw host futex syscall, picking between __NR_futex and
 * __NR_futex_time64 so that the struct timespec layout handed to the
 * kernel always has the time_t width the kernel expects.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7792 static int do_safe_futex(int *uaddr
, int op
, int val
,
7793 const struct timespec
*timeout
, int *uaddr2
,
7796 #if HOST_LONG_BITS == 64
7797 #if defined(__NR_futex)
7798 /* always a 64-bit time_t, it doesn't define _time64 version */
7799 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7801 #else /* HOST_LONG_BITS == 64 */
7802 #if defined(__NR_futex_time64)
7803 if (sizeof(timeout
->tv_sec
) == 8) {
7804 /* _time64 function on 32bit arch */
7805 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7809 #if defined(__NR_futex)
7810 /* old function on 32bit arch */
7811 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7813 #endif /* HOST_LONG_BITS == 64 */
7814 return -TARGET_ENOSYS
;
7817 /* ??? Using host futex calls even when target atomic operations
7818 are not really atomic probably breaks things. However implementing
7819 futexes locally would make futexes shared between multiple processes
7820 tricky. However they're probably useless because guest atomic
7821 operations won't work either. */
7822 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7823 static int do_futex(CPUState
*cpu
, bool time64
, target_ulong uaddr
,
7824 int op
, int val
, target_ulong timeout
,
7825 target_ulong uaddr2
, int val3
)
7827 struct timespec ts
, *pts
= NULL
;
7828 void *haddr2
= NULL
;
7831 /* We assume FUTEX_* constants are the same on both host and target. */
7832 #ifdef FUTEX_CMD_MASK
7833 base_op
= op
& FUTEX_CMD_MASK
;
7839 case FUTEX_WAIT_BITSET
:
7842 case FUTEX_WAIT_REQUEUE_PI
:
7844 haddr2
= g2h(cpu
, uaddr2
);
7847 case FUTEX_LOCK_PI2
:
7850 case FUTEX_WAKE_BITSET
:
7851 case FUTEX_TRYLOCK_PI
:
7852 case FUTEX_UNLOCK_PI
:
7856 val
= target_to_host_signal(val
);
7859 case FUTEX_CMP_REQUEUE
:
7860 case FUTEX_CMP_REQUEUE_PI
:
7861 val3
= tswap32(val3
);
7866 * For these, the 4th argument is not TIMEOUT, but VAL2.
7867 * But the prototype of do_safe_futex takes a pointer, so
7868 * insert casts to satisfy the compiler. We do not need
7869 * to tswap VAL2 since it's not compared to guest memory.
7871 pts
= (struct timespec
*)(uintptr_t)timeout
;
7873 haddr2
= g2h(cpu
, uaddr2
);
7876 return -TARGET_ENOSYS
;
7881 ? target_to_host_timespec64(pts
, timeout
)
7882 : target_to_host_timespec(pts
, timeout
)) {
7883 return -TARGET_EFAULT
;
7886 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, haddr2
, val3
);
7890 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7891 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7892 abi_long handle
, abi_long mount_id
,
7895 struct file_handle
*target_fh
;
7896 struct file_handle
*fh
;
7900 unsigned int size
, total_size
;
7902 if (get_user_s32(size
, handle
)) {
7903 return -TARGET_EFAULT
;
7906 name
= lock_user_string(pathname
);
7908 return -TARGET_EFAULT
;
7911 total_size
= sizeof(struct file_handle
) + size
;
7912 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7914 unlock_user(name
, pathname
, 0);
7915 return -TARGET_EFAULT
;
7918 fh
= g_malloc0(total_size
);
7919 fh
->handle_bytes
= size
;
7921 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7922 unlock_user(name
, pathname
, 0);
7924 /* man name_to_handle_at(2):
7925 * Other than the use of the handle_bytes field, the caller should treat
7926 * the file_handle structure as an opaque data type
7929 memcpy(target_fh
, fh
, total_size
);
7930 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7931 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7933 unlock_user(target_fh
, handle
, total_size
);
7935 if (put_user_s32(mid
, mount_id
)) {
7936 return -TARGET_EFAULT
;
7944 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7945 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7948 struct file_handle
*target_fh
;
7949 struct file_handle
*fh
;
7950 unsigned int size
, total_size
;
7953 if (get_user_s32(size
, handle
)) {
7954 return -TARGET_EFAULT
;
7957 total_size
= sizeof(struct file_handle
) + size
;
7958 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7960 return -TARGET_EFAULT
;
7963 fh
= g_memdup(target_fh
, total_size
);
7964 fh
->handle_bytes
= size
;
7965 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7967 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7968 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7972 unlock_user(target_fh
, handle
, total_size
);
7978 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7980 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7983 target_sigset_t
*target_mask
;
7987 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
7988 return -TARGET_EINVAL
;
7990 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7991 return -TARGET_EFAULT
;
7994 target_to_host_sigset(&host_mask
, target_mask
);
7996 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7998 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
8000 fd_trans_register(ret
, &target_signalfd_trans
);
8003 unlock_user_struct(target_mask
, mask
, 0);
8009 /* Map host to target signal numbers for the wait family of syscalls.
8010 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 of the status word */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* exit status and continue reports need no translation */
    return status;
}
8023 static int open_self_cmdline(CPUArchState
*cpu_env
, int fd
)
8025 CPUState
*cpu
= env_cpu(cpu_env
);
8026 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
8029 for (i
= 0; i
< bprm
->argc
; i
++) {
8030 size_t len
= strlen(bprm
->argv
[i
]) + 1;
8032 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
8040 static void show_smaps(int fd
, unsigned long size
)
8042 unsigned long page_size_kb
= TARGET_PAGE_SIZE
>> 10;
8043 unsigned long size_kb
= size
>> 10;
8045 dprintf(fd
, "Size: %lu kB\n"
8046 "KernelPageSize: %lu kB\n"
8047 "MMUPageSize: %lu kB\n"
8051 "Shared_Clean: 0 kB\n"
8052 "Shared_Dirty: 0 kB\n"
8053 "Private_Clean: 0 kB\n"
8054 "Private_Dirty: 0 kB\n"
8055 "Referenced: 0 kB\n"
8058 "AnonHugePages: 0 kB\n"
8059 "ShmemPmdMapped: 0 kB\n"
8060 "FilePmdMapped: 0 kB\n"
8061 "Shared_Hugetlb: 0 kB\n"
8062 "Private_Hugetlb: 0 kB\n"
8066 "THPeligible: 0\n", size_kb
, page_size_kb
, page_size_kb
);
8069 static int open_self_maps_1(CPUArchState
*cpu_env
, int fd
, bool smaps
)
8071 CPUState
*cpu
= env_cpu(cpu_env
);
8072 TaskState
*ts
= cpu
->opaque
;
8073 IntervalTreeRoot
*map_info
= read_self_maps();
8074 IntervalTreeNode
*s
;
8077 for (s
= interval_tree_iter_first(map_info
, 0, -1); s
;
8078 s
= interval_tree_iter_next(s
, 0, -1)) {
8079 MapInfo
*e
= container_of(s
, MapInfo
, itree
);
8081 if (h2g_valid(e
->itree
.start
)) {
8082 unsigned long min
= e
->itree
.start
;
8083 unsigned long max
= e
->itree
.last
+ 1;
8084 int flags
= page_get_flags(h2g(min
));
8087 max
= h2g_valid(max
- 1) ?
8088 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
8090 if (!page_check_range(h2g(min
), max
- min
, flags
)) {
8095 if (h2g(max
) == ts
->info
->stack_limit
) {
8097 if (h2g(min
) == ts
->info
->stack_limit
) {
8104 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8105 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
8106 h2g(min
), h2g(max
- 1) + 1,
8107 (flags
& PAGE_READ
) ? 'r' : '-',
8108 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8109 (flags
& PAGE_EXEC
) ? 'x' : '-',
8110 e
->is_priv
? 'p' : 's',
8111 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
8113 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8118 show_smaps(fd
, max
- min
);
8119 dprintf(fd
, "VmFlags:%s%s%s%s%s%s%s%s\n",
8120 (flags
& PAGE_READ
) ? " rd" : "",
8121 (flags
& PAGE_WRITE_ORG
) ? " wr" : "",
8122 (flags
& PAGE_EXEC
) ? " ex" : "",
8123 e
->is_priv
? "" : " sh",
8124 (flags
& PAGE_READ
) ? " mr" : "",
8125 (flags
& PAGE_WRITE_ORG
) ? " mw" : "",
8126 (flags
& PAGE_EXEC
) ? " me" : "",
8127 e
->is_priv
? "" : " ms");
8132 free_self_maps(map_info
);
8134 #ifdef TARGET_VSYSCALL_PAGE
8136 * We only support execution from the vsyscall page.
8137 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8139 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
8140 " --xp 00000000 00:00 0",
8141 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
8142 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
8144 show_smaps(fd
, TARGET_PAGE_SIZE
);
8145 dprintf(fd
, "VmFlags: ex\n");
8152 static int open_self_maps(CPUArchState
*cpu_env
, int fd
)
8154 return open_self_maps_1(cpu_env
, fd
, false);
8157 static int open_self_smaps(CPUArchState
*cpu_env
, int fd
)
8159 return open_self_maps_1(cpu_env
, fd
, true);
8162 static int open_self_stat(CPUArchState
*cpu_env
, int fd
)
8164 CPUState
*cpu
= env_cpu(cpu_env
);
8165 TaskState
*ts
= cpu
->opaque
;
8166 g_autoptr(GString
) buf
= g_string_new(NULL
);
8169 for (i
= 0; i
< 44; i
++) {
8172 g_string_printf(buf
, FMT_pid
" ", getpid());
8173 } else if (i
== 1) {
8175 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8176 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8177 g_string_printf(buf
, "(%.15s) ", bin
);
8178 } else if (i
== 2) {
8180 g_string_assign(buf
, "R "); /* we are running right now */
8181 } else if (i
== 3) {
8183 g_string_printf(buf
, FMT_pid
" ", getppid());
8184 } else if (i
== 21) {
8186 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8187 } else if (i
== 27) {
8189 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8191 /* for the rest, there is MasterCard */
8192 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8195 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
8203 static int open_self_auxv(CPUArchState
*cpu_env
, int fd
)
8205 CPUState
*cpu
= env_cpu(cpu_env
);
8206 TaskState
*ts
= cpu
->opaque
;
8207 abi_ulong auxv
= ts
->info
->saved_auxv
;
8208 abi_ulong len
= ts
->info
->auxv_len
;
8212 * Auxiliary vector is stored in target process stack.
8213 * read in whole auxv vector and copy it to file
8215 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
8219 r
= write(fd
, ptr
, len
);
8226 lseek(fd
, 0, SEEK_SET
);
8227 unlock_user(ptr
, auxv
, len
);
/*
 * Return non-zero when filename names the given entry of this process's
 * own /proc directory, i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * where <pid> is our own pid.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
8257 static void excp_dump_file(FILE *logfile
, CPUArchState
*env
,
8258 const char *fmt
, int code
)
8261 CPUState
*cs
= env_cpu(env
);
8263 fprintf(logfile
, fmt
, code
);
8264 fprintf(logfile
, "Failing executable: %s\n", exec_path
);
8265 cpu_dump_state(cs
, logfile
, 0);
8266 open_self_maps(env
, fileno(logfile
));
8270 void target_exception_dump(CPUArchState
*env
, const char *fmt
, int code
)
8272 /* dump to console */
8273 excp_dump_file(stderr
, env
, fmt
, code
);
8275 /* dump to log file */
8276 if (qemu_log_separate()) {
8277 FILE *logfile
= qemu_log_trylock();
8279 excp_dump_file(logfile
, env
, fmt
, code
);
8280 qemu_log_unlock(logfile
);
8284 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8285 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8286 defined(TARGET_RISCV) || defined(TARGET_S390X)
/* Exact-match comparator used by the fake /proc entry table. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
8293 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8294 static int open_net_route(CPUArchState
*cpu_env
, int fd
)
8301 fp
= fopen("/proc/net/route", "r");
8308 read
= getline(&line
, &len
, fp
);
8309 dprintf(fd
, "%s", line
);
8313 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8315 uint32_t dest
, gw
, mask
;
8316 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8319 fields
= sscanf(line
,
8320 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8321 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8322 &mask
, &mtu
, &window
, &irtt
);
8326 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8327 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8328 metric
, tswap32(mask
), mtu
, window
, irtt
);
8338 #if defined(TARGET_SPARC)
8339 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8341 dprintf(fd
, "type\t\t: sun4u\n");
8346 #if defined(TARGET_HPPA)
8347 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8351 num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8352 for (i
= 0; i
< num_cpus
; i
++) {
8353 dprintf(fd
, "processor\t: %d\n", i
);
8354 dprintf(fd
, "cpu family\t: PA-RISC 1.1e\n");
8355 dprintf(fd
, "cpu\t\t: PA7300LC (PCX-L2)\n");
8356 dprintf(fd
, "capabilities\t: os32\n");
8357 dprintf(fd
, "model\t\t: 9000/778/B160L - "
8358 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8364 #if defined(TARGET_RISCV)
8365 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8368 int num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8369 RISCVCPU
*cpu
= env_archcpu(cpu_env
);
8370 const RISCVCPUConfig
*cfg
= riscv_cpu_cfg((CPURISCVState
*) cpu_env
);
8371 char *isa_string
= riscv_isa_string(cpu
);
8375 mmu
= (cpu_env
->xl
== MXL_RV32
) ? "sv32" : "sv48";
8380 for (i
= 0; i
< num_cpus
; i
++) {
8381 dprintf(fd
, "processor\t: %d\n", i
);
8382 dprintf(fd
, "hart\t\t: %d\n", i
);
8383 dprintf(fd
, "isa\t\t: %s\n", isa_string
);
8384 dprintf(fd
, "mmu\t\t: %s\n", mmu
);
8385 dprintf(fd
, "uarch\t\t: qemu\n\n");
8393 #if defined(TARGET_S390X)
8395 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8396 * show in /proc/cpuinfo.
8398 * Skip the following in order to match the missing support in op_ecag():
8399 * - show_cacheinfo().
8400 * - show_cpu_topology().
8403 * Use fixed values for certain fields:
8404 * - bogomips per cpu - from a qemu-system-s390x run.
8405 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8407 * Keep the code structure close to arch/s390/kernel/processor.c.
8410 static void show_facilities(int fd
)
8412 size_t sizeof_stfl_bytes
= 2048;
8413 g_autofree
uint8_t *stfl_bytes
= g_new0(uint8_t, sizeof_stfl_bytes
);
8416 dprintf(fd
, "facilities :");
8417 s390_get_feat_block(S390_FEAT_TYPE_STFL
, stfl_bytes
);
8418 for (bit
= 0; bit
< sizeof_stfl_bytes
* 8; bit
++) {
8419 if (test_be_bit(bit
, stfl_bytes
)) {
8420 dprintf(fd
, " %d", bit
);
8426 static int cpu_ident(unsigned long n
)
8428 return deposit32(0, CPU_ID_BITS
- CPU_PHYS_ADDR_BITS
, CPU_PHYS_ADDR_BITS
,
8432 static void show_cpu_summary(CPUArchState
*cpu_env
, int fd
)
8434 S390CPUModel
*model
= env_archcpu(cpu_env
)->model
;
8435 int num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8436 uint32_t elf_hwcap
= get_elf_hwcap();
8437 const char *hwcap_str
;
8440 dprintf(fd
, "vendor_id : IBM/S390\n"
8441 "# processors : %i\n"
8442 "bogomips per cpu: 13370.00\n",
8444 dprintf(fd
, "max thread id : 0\n");
8445 dprintf(fd
, "features\t: ");
8446 for (i
= 0; i
< sizeof(elf_hwcap
) * 8; i
++) {
8447 if (!(elf_hwcap
& (1 << i
))) {
8450 hwcap_str
= elf_hwcap_str(i
);
8452 dprintf(fd
, "%s ", hwcap_str
);
8456 show_facilities(fd
);
8457 for (i
= 0; i
< num_cpus
; i
++) {
8458 dprintf(fd
, "processor %d: "
8460 "identification = %06X, "
8462 i
, model
->cpu_ver
, cpu_ident(i
), model
->def
->type
);
8466 static void show_cpu_ids(CPUArchState
*cpu_env
, int fd
, unsigned long n
)
8468 S390CPUModel
*model
= env_archcpu(cpu_env
)->model
;
8470 dprintf(fd
, "version : %02X\n", model
->cpu_ver
);
8471 dprintf(fd
, "identification : %06X\n", cpu_ident(n
));
8472 dprintf(fd
, "machine : %04X\n", model
->def
->type
);
8475 static void show_cpuinfo(CPUArchState
*cpu_env
, int fd
, unsigned long n
)
8477 dprintf(fd
, "\ncpu number : %ld\n", n
);
8478 show_cpu_ids(cpu_env
, fd
, n
);
8481 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8483 int num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8486 show_cpu_summary(cpu_env
, fd
);
8487 for (i
= 0; i
< num_cpus
; i
++) {
8488 show_cpuinfo(cpu_env
, fd
, i
);
8494 #if defined(TARGET_M68K)
8495 static int open_hardware(CPUArchState
*cpu_env
, int fd
)
8497 dprintf(fd
, "Model:\t\tqemu-m68k\n");
8502 int do_guest_openat(CPUArchState
*cpu_env
, int dirfd
, const char *pathname
,
8503 int flags
, mode_t mode
, bool safe
)
8506 const char *filename
;
8507 int (*fill
)(CPUArchState
*cpu_env
, int fd
);
8508 int (*cmp
)(const char *s1
, const char *s2
);
8510 const struct fake_open
*fake_open
;
8511 static const struct fake_open fakes
[] = {
8512 { "maps", open_self_maps
, is_proc_myself
},
8513 { "smaps", open_self_smaps
, is_proc_myself
},
8514 { "stat", open_self_stat
, is_proc_myself
},
8515 { "auxv", open_self_auxv
, is_proc_myself
},
8516 { "cmdline", open_self_cmdline
, is_proc_myself
},
8517 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8518 { "/proc/net/route", open_net_route
, is_proc
},
8520 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8521 defined(TARGET_RISCV) || defined(TARGET_S390X)
8522 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8524 #if defined(TARGET_M68K)
8525 { "/proc/hardware", open_hardware
, is_proc
},
8527 { NULL
, NULL
, NULL
}
8530 if (is_proc_myself(pathname
, "exe")) {
8532 return safe_openat(dirfd
, exec_path
, flags
, mode
);
8534 return openat(dirfd
, exec_path
, flags
, mode
);
8538 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8539 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8544 if (fake_open
->filename
) {
8546 char filename
[PATH_MAX
];
8549 fd
= memfd_create("qemu-open", 0);
8551 if (errno
!= ENOSYS
) {
8554 /* create temporary file to map stat to */
8555 tmpdir
= getenv("TMPDIR");
8558 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8559 fd
= mkstemp(filename
);
8566 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8572 lseek(fd
, 0, SEEK_SET
);
8578 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8580 return openat(dirfd
, path(pathname
), flags
, mode
);
/*
 * do_guest_readlink: emulate readlink(2) for the guest.
 * NOTE(review): this extraction is incomplete — several original lines
 * (error returns, closing braces) are missing; comments describe only
 * what is visible.
 */
8584 ssize_t
do_guest_readlink(const char *pathname
, char *buf
, size_t bufsiz
)
/* Guard against NULL pathname or output buffer. */
8588 if (!pathname
|| !buf
) {
8594 /* Short circuit this for the magic exe check. */
8599 if (is_proc_myself((const char *)pathname
, "exe")) {
8601 * Don't worry about sign mismatch as earlier mapping
8602 * logic would have thrown a bad address error.
/*
 * /proc/self/exe: report the emulated binary's path (exec_path),
 * truncated to bufsiz per readlink(2) semantics.
 */
8604 ret
= MIN(strlen(exec_path
), bufsiz
);
8605 /* We cannot NUL terminate the string. */
8606 memcpy(buf
, exec_path
, ret
);
/* All other paths: forward to host readlink() on the translated path. */
8608 ret
= readlink(path(pathname
), buf
, bufsiz
);
/*
 * do_execv: common implementation for the execve and execveat guest
 * syscalls.  Copies the guest argv/envp pointer arrays into host
 * memory, locks each string, and invokes the safe execve/execveat
 * wrapper.  is_execveat selects which host call is made.
 * NOTE(review): extraction is incomplete — counting of argc/envc and
 * some error-path lines are missing from this view.
 */
8614 static int do_execv(CPUArchState
*cpu_env
, int dirfd
,
8615 abi_long pathname
, abi_long guest_argp
,
8616 abi_long guest_envp
, int flags
, bool is_execveat
)
8619 char **argp
, **envp
;
/* First pass: walk the guest argv array (NULL-terminated list of
 * abi_ulong pointers) to count entries; EFAULT on unreadable slots. */
8628 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8629 if (get_user_ual(addr
, gp
)) {
8630 return -TARGET_EFAULT
;
/* Same counting pass for the guest envp array. */
8638 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8639 if (get_user_ual(addr
, gp
)) {
8640 return -TARGET_EFAULT
;
/* Allocate host pointer arrays, one extra slot for the NULL terminator. */
8648 argp
= g_new0(char *, argc
+ 1);
8649 envp
= g_new0(char *, envc
+ 1);
/* Second pass: lock each guest argv string into host memory. */
8651 for (gp
= guest_argp
, q
= argp
; gp
; gp
+= sizeof(abi_ulong
), q
++) {
8652 if (get_user_ual(addr
, gp
)) {
8658 *q
= lock_user_string(addr
);
/* And each guest envp string. */
8665 for (gp
= guest_envp
, q
= envp
; gp
; gp
+= sizeof(abi_ulong
), q
++) {
8666 if (get_user_ual(addr
, gp
)) {
8672 *q
= lock_user_string(addr
);
8680 * Although execve() is not an interruptible syscall it is
8681 * a special case where we must use the safe_syscall wrapper:
8682 * if we allow a signal to happen before we make the host
8683 * syscall then we will 'lose' it, because at the point of
8684 * execve the process leaves QEMU's control. So we use the
8685 * safe syscall wrapper to ensure that we either take the
8686 * signal as a guest signal, or else it does not happen
8687 * before the execve completes and makes it the other
8688 * program's problem.
8690 p
= lock_user_string(pathname
);
/* /proc/self/exe as the target means "re-exec the emulated binary". */
8695 const char *exe
= p
;
8696 if (is_proc_myself(p
, "exe")) {
/* Dispatch to execveat or execve depending on the entry syscall. */
8700 ? safe_execveat(dirfd
, exe
, argp
, envp
, flags
)
8701 : safe_execve(exe
, argp
, envp
);
8702 ret
= get_errno(ret
);
8704 unlock_user(p
, pathname
, 0);
8709 ret
= -TARGET_EFAULT
;
/* Cleanup: unlock every argv string that was locked above. */
8712 for (gp
= guest_argp
, q
= argp
; *q
; gp
+= sizeof(abi_ulong
), q
++) {
8713 if (get_user_ual(addr
, gp
) || !addr
) {
8716 unlock_user(*q
, addr
, 0);
/* ... and every envp string. */
8718 for (gp
= guest_envp
, q
= envp
; *q
; gp
+= sizeof(abi_ulong
), q
++) {
8719 if (get_user_ual(addr
, gp
) || !addr
) {
8722 unlock_user(*q
, addr
, 0);
/*
 * POSIX timer IDs handed to the guest are tagged with a magic value in
 * the upper 16 bits so stale/forged IDs can be rejected.
 */
8730 #define TIMER_MAGIC 0x0caf0000
8731 #define TIMER_MAGIC_MASK 0xffff0000
8733 /* Convert QEMU provided timer ID back to internal 16bit index format */
8734 static target_timer_t
get_timer_id(abi_long arg
)
8736 target_timer_t timerid
= arg
;
/* Reject any ID whose upper bits do not carry the magic tag. */
8738 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8739 return -TARGET_EINVAL
;
/* Index must fall inside the static timer table. */
8744 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8745 return -TARGET_EINVAL
;
/*
 * target_to_host_cpu_mask: convert a guest CPU affinity bitmask (array
 * of abi_ulong in guest memory) into a host "unsigned long" bitmask,
 * translating bit positions across the possibly different word sizes.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unreadable.
 */
8751 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8753 abi_ulong target_addr
,
8756 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8757 unsigned host_bits
= sizeof(*host_mask
) * 8;
8758 abi_ulong
*target_mask
;
/* Caller must supply a host buffer at least as large as the guest one. */
8761 assert(host_size
>= target_size
);
8763 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8765 return -TARGET_EFAULT
;
/* Start from an all-clear host mask, then set bits one by one. */
8767 memset(host_mask
, 0, host_size
);
/* Walk each guest word... */
8769 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8770 unsigned bit
= i
* target_bits
;
8773 __get_user(val
, &target_mask
[i
]);
/* ...and copy each set bit to its position in the host mask. */
8774 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8775 if (val
& (1UL << j
)) {
8776 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8781 unlock_user(target_mask
, target_addr
, 0);
/*
 * host_to_target_cpu_mask: inverse of target_to_host_cpu_mask — write a
 * host "unsigned long" CPU bitmask back into a guest abi_ulong array,
 * translating bit positions across word sizes.  Returns 0 on success or
 * -TARGET_EFAULT if guest memory cannot be locked for writing.
 */
8785 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8787 abi_ulong target_addr
,
8790 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8791 unsigned host_bits
= sizeof(*host_mask
) * 8;
8792 abi_ulong
*target_mask
;
8795 assert(host_size
>= target_size
);
8797 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8799 return -TARGET_EFAULT
;
/* For each guest word, gather the corresponding host bits into val. */
8802 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8803 unsigned bit
= i
* target_bits
;
8806 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8807 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
/* Store the assembled word into guest memory with byte swapping. */
8811 __put_user(val
, &target_mask
[i
]);
8814 unlock_user(target_mask
, target_addr
, target_size
);
8818 #ifdef TARGET_NR_getdents
/*
 * do_getdents: emulate the (legacy) getdents syscall.  Reads host
 * directory entries into a temporary host buffer, then converts each
 * record to the guest's struct target_dirent layout in the guest
 * buffer at arg2 (max "count" bytes).
 * NOTE(review): extraction is incomplete — some declarations and error
 * returns are missing from this view.
 */
8819 static int do_getdents(abi_long dirfd
, abi_long arg2
, abi_long count
)
8821 g_autofree
void *hdirp
= NULL
;
8823 int hlen
, hoff
, toff
;
8824 int hreclen
, treclen
;
8825 off64_t prev_diroff
= 0;
/* Host-side staging buffer, same size as the guest buffer. */
8827 hdirp
= g_try_malloc(count
);
8829 return -TARGET_ENOMEM
;
/* Fetch raw host dirents; which syscall depends on what the host has. */
8832 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8833 hlen
= sys_getdents(dirfd
, hdirp
, count
);
8835 hlen
= sys_getdents64(dirfd
, hdirp
, count
);
8838 hlen
= get_errno(hlen
);
8839 if (is_error(hlen
)) {
8843 tdirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8845 return -TARGET_EFAULT
;
/* Walk host records (hoff) and emit guest records (toff) in lockstep. */
8848 for (hoff
= toff
= 0; hoff
< hlen
; hoff
+= hreclen
, toff
+= treclen
) {
8849 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8850 struct linux_dirent
*hde
= hdirp
+ hoff
;
8852 struct linux_dirent64
*hde
= hdirp
+ hoff
;
8854 struct target_dirent
*tde
= tdirp
+ toff
;
/* Guest record length: header + name + NUL + type byte, aligned. */
8858 namelen
= strlen(hde
->d_name
);
8859 hreclen
= hde
->d_reclen
;
8860 treclen
= offsetof(struct target_dirent
, d_name
) + namelen
+ 2;
8861 treclen
= QEMU_ALIGN_UP(treclen
, __alignof(struct target_dirent
));
/* Guest buffer exhausted before the host one. */
8863 if (toff
+ treclen
> count
) {
8865 * If the host struct is smaller than the target struct, or
8866 * requires less alignment and thus packs into less space,
8867 * then the host can return more entries than we can pass
8871 toff
= -TARGET_EINVAL
; /* result buffer is too small */
8875 * Return what we have, resetting the file pointer to the
8876 * location of the first record not returned.
8878 lseek64(dirfd
, prev_diroff
, SEEK_SET
);
/* Convert one record, byte-swapping scalar fields for the guest. */
8882 prev_diroff
= hde
->d_off
;
8883 tde
->d_ino
= tswapal(hde
->d_ino
);
8884 tde
->d_off
= tswapal(hde
->d_off
);
8885 tde
->d_reclen
= tswap16(treclen
);
8886 memcpy(tde
->d_name
, hde
->d_name
, namelen
+ 1);
8889 * The getdents type is in what was formerly a padding byte at the
8890 * end of the structure.
8892 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8893 type
= *((uint8_t *)hde
+ hreclen
- 1);
8897 *((uint8_t *)tde
+ treclen
- 1) = type
;
8900 unlock_user(tdirp
, arg2
, toff
);
8903 #endif /* TARGET_NR_getdents */
8905 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * do_getdents64: emulate getdents64.  Same staging-buffer scheme as
 * do_getdents, but converting host linux_dirent64 records to the
 * guest's struct target_dirent64 layout.
 * NOTE(review): extraction is incomplete — some declarations and error
 * returns are missing from this view.
 */
8906 static int do_getdents64(abi_long dirfd
, abi_long arg2
, abi_long count
)
8908 g_autofree
void *hdirp
= NULL
;
8910 int hlen
, hoff
, toff
;
8911 int hreclen
, treclen
;
8912 off64_t prev_diroff
= 0;
8914 hdirp
= g_try_malloc(count
);
8916 return -TARGET_ENOMEM
;
8919 hlen
= get_errno(sys_getdents64(dirfd
, hdirp
, count
));
8920 if (is_error(hlen
)) {
8924 tdirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8926 return -TARGET_EFAULT
;
/* Host offset (hoff) and guest offset (toff) advance in lockstep. */
8929 for (hoff
= toff
= 0; hoff
< hlen
; hoff
+= hreclen
, toff
+= treclen
) {
8930 struct linux_dirent64
*hde
= hdirp
+ hoff
;
8931 struct target_dirent64
*tde
= tdirp
+ toff
;
/* namelen here includes the trailing NUL (note the "+ 1"). */
8934 namelen
= strlen(hde
->d_name
) + 1;
8935 hreclen
= hde
->d_reclen
;
8936 treclen
= offsetof(struct target_dirent64
, d_name
) + namelen
;
8937 treclen
= QEMU_ALIGN_UP(treclen
, __alignof(struct target_dirent64
));
8939 if (toff
+ treclen
> count
) {
8941 * If the host struct is smaller than the target struct, or
8942 * requires less alignment and thus packs into less space,
8943 * then the host can return more entries than we can pass
8947 toff
= -TARGET_EINVAL
; /* result buffer is too small */
8951 * Return what we have, resetting the file pointer to the
8952 * location of the first record not returned.
8954 lseek64(dirfd
, prev_diroff
, SEEK_SET
);
/* Convert one record; 64-bit ino/off fields get tswap64. */
8958 prev_diroff
= hde
->d_off
;
8959 tde
->d_ino
= tswap64(hde
->d_ino
);
8960 tde
->d_off
= tswap64(hde
->d_off
);
8961 tde
->d_reclen
= tswap16(treclen
);
8962 tde
->d_type
= hde
->d_type
;
8963 memcpy(tde
->d_name
, hde
->d_name
, namelen
);
8966 unlock_user(tdirp
, arg2
, toff
);
8969 #endif /* TARGET_NR_getdents64 */
8971 #if defined(TARGET_NR_riscv_hwprobe)
/*
 * Key/value constants for the RISC-V hwprobe syscall, mirroring the
 * kernel ABI (key identifiers and the bit meanings of their values).
 */
8973 #define RISCV_HWPROBE_KEY_MVENDORID 0
8974 #define RISCV_HWPROBE_KEY_MARCHID 1
8975 #define RISCV_HWPROBE_KEY_MIMPID 2
8977 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8978 #define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8980 #define RISCV_HWPROBE_KEY_IMA_EXT_0 4
8981 #define RISCV_HWPROBE_IMA_FD (1 << 0)
8982 #define RISCV_HWPROBE_IMA_C (1 << 1)
8984 #define RISCV_HWPROBE_KEY_CPUPERF_0 5
8985 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
8986 #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
8987 #define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
8988 #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
8989 #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8990 #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
/* Key/value pair exchanged with the guest (fields not visible here). */
8992 struct riscv_hwprobe
{
/*
 * risc_hwprobe_fill_pairs: answer each guest-supplied hwprobe key by
 * writing the corresponding value into the (guest-memory) pair array.
 * Unknown keys get their key field set to -1, per the kernel ABI.
 */
8997 static void risc_hwprobe_fill_pairs(CPURISCVState
*env
,
8998 struct riscv_hwprobe
*pair
,
9001 const RISCVCPUConfig
*cfg
= riscv_cpu_cfg(env
);
9003 for (; pair_count
> 0; pair_count
--, pair
++) {
/* Default the answer to 0, then read which key is being probed. */
9006 __put_user(0, &pair
->value
);
9007 __get_user(key
, &pair
->key
);
9009 case RISCV_HWPROBE_KEY_MVENDORID
:
9010 __put_user(cfg
->mvendorid
, &pair
->value
);
9012 case RISCV_HWPROBE_KEY_MARCHID
:
9013 __put_user(cfg
->marchid
, &pair
->value
);
9015 case RISCV_HWPROBE_KEY_MIMPID
:
9016 __put_user(cfg
->mimpid
, &pair
->value
);
9018 case RISCV_HWPROBE_KEY_BASE_BEHAVIOR
:
/* IMA base behavior requires the I, M and A extensions together. */
9019 value
= riscv_has_ext(env
, RVI
) &&
9020 riscv_has_ext(env
, RVM
) &&
9021 riscv_has_ext(env
, RVA
) ?
9022 RISCV_HWPROBE_BASE_BEHAVIOR_IMA
: 0;
9023 __put_user(value
, &pair
->value
);
9025 case RISCV_HWPROBE_KEY_IMA_EXT_0
:
9026 value
= riscv_has_ext(env
, RVF
) &&
9027 riscv_has_ext(env
, RVD
) ?
9028 RISCV_HWPROBE_IMA_FD
: 0;
/*
 * NOTE(review): the ": pair->value" arm below looks suspect — it
 * reads pair->value raw from guest memory (target byte order)
 * instead of using 0 like the other ternaries; verify against
 * current upstream, which uses ": 0" here.
 */
9029 value
|= riscv_has_ext(env
, RVC
) ?
9030 RISCV_HWPROBE_IMA_C
: pair
->value
;
9031 __put_user(value
, &pair
->value
);
9033 case RISCV_HWPROBE_KEY_CPUPERF_0
:
/* QEMU (TCG) handles misaligned accesses without penalty. */
9034 __put_user(RISCV_HWPROBE_MISALIGNED_FAST
, &pair
->value
);
/* Unknown key: flag it back to the guest with key = -1. */
9037 __put_user(-1, &pair
->key
);
/*
 * cpu_set_valid: validate the cpu-set arguments of riscv_hwprobe.
 * arg3 is the CPU count, arg4 the guest address of the cpu mask.
 * Converts the guest mask to a host mask and accepts it if at least
 * one CPU bit is set; otherwise returns -TARGET_EINVAL.
 * NOTE(review): extraction is incomplete — the success return path is
 * not visible here.
 */
9043 static int cpu_set_valid(abi_long arg3
, abi_long arg4
)
9046 size_t host_mask_size
, target_mask_size
;
9047 unsigned long *host_mask
;
9050 * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9051 * arg3 contains the cpu count.
/* Round the CPU count up to whole abi_ulong words... */
9053 tmp
= (8 * sizeof(abi_ulong
));
9054 target_mask_size
= ((arg3
+ tmp
- 1) / tmp
) * sizeof(abi_ulong
);
/* ...and round the byte size up to a whole host word. */
9055 host_mask_size
= (target_mask_size
+ (sizeof(*host_mask
) - 1)) &
9056 ~(sizeof(*host_mask
) - 1);
9058 host_mask
= alloca(host_mask_size
);
9060 ret
= target_to_host_cpu_mask(host_mask
, host_mask_size
,
9061 arg4
, target_mask_size
);
/* Mask is valid as soon as any word has a bit set. */
9066 for (i
= 0 ; i
< host_mask_size
/ sizeof(*host_mask
); i
++) {
9067 if (host_mask
[i
] != 0) {
/* Empty mask: no CPUs selected. */
9071 return -TARGET_EINVAL
;
/*
 * do_riscv_hwprobe: entry point for the riscv_hwprobe guest syscall.
 * arg1 = pair array, arg2 = pair count, arg3 = cpu count,
 * arg4 = cpu mask, arg5 = flags (must be 0).
 * NOTE(review): extraction is incomplete — some condition lines are
 * missing from this view.
 */
9074 static abi_long
do_riscv_hwprobe(CPUArchState
*cpu_env
, abi_long arg1
,
9075 abi_long arg2
, abi_long arg3
,
9076 abi_long arg4
, abi_long arg5
)
9079 struct riscv_hwprobe
*host_pairs
;
9081 /* flags must be 0 */
9083 return -TARGET_EINVAL
;
/* If a cpu set is given, it must select at least one CPU... */
9088 ret
= cpu_set_valid(arg3
, arg4
);
/* ...otherwise a non-zero mask address with zero count is invalid. */
9092 } else if (arg4
!= 0) {
9093 return -TARGET_EINVAL
;
/* Lock the whole pair array for writing, fill it, and release it. */
9101 host_pairs
= lock_user(VERIFY_WRITE
, arg1
,
9102 sizeof(*host_pairs
) * (size_t)arg2
, 0);
9103 if (host_pairs
== NULL
) {
9104 return -TARGET_EFAULT
;
9106 risc_hwprobe_fill_pairs(cpu_env
, host_pairs
, arg2
);
9107 unlock_user(host_pairs
, arg1
, sizeof(*host_pairs
) * (size_t)arg2
);
9110 #endif /* TARGET_NR_riscv_hwprobe */
/*
 * Direct host syscall wrappers for mount-related syscalls that glibc
 * may not expose; each is only built when both the target and the host
 * kernel define the syscall number.
 */
9112 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9113 _syscall2(int, pivot_root
, const char *, new_root
, const char *, put_old
)
9116 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9117 #define __NR_sys_open_tree __NR_open_tree
9118 _syscall3(int, sys_open_tree
, int, __dfd
, const char *, __filename
,
9119 unsigned int, __flags
)
9122 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9123 #define __NR_sys_move_mount __NR_move_mount
9124 _syscall5(int, sys_move_mount
, int, __from_dfd
, const char *, __from_pathname
,
9125 int, __to_dfd
, const char *, __to_pathname
, unsigned int, flag
)
9128 /* This is an internal helper for do_syscall so that it is easier
9129 * to have a single return point, so that actions, such as logging
9130 * of syscall results, can be performed.
9131 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9133 static abi_long
do_syscall1(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
9134 abi_long arg2
, abi_long arg3
, abi_long arg4
,
9135 abi_long arg5
, abi_long arg6
, abi_long arg7
,
9138 CPUState
*cpu
= env_cpu(cpu_env
);
9140 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9141 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9142 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9143 || defined(TARGET_NR_statx)
9146 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9147 || defined(TARGET_NR_fstatfs)
9153 case TARGET_NR_exit
:
9154 /* In old applications this may be used to implement _exit(2).
9155 However in threaded applications it is used for thread termination,
9156 and _exit_group is used for application termination.
9157 Do thread termination if we have more then one thread. */
9159 if (block_signals()) {
9160 return -QEMU_ERESTARTSYS
;
9163 pthread_mutex_lock(&clone_lock
);
9165 if (CPU_NEXT(first_cpu
)) {
9166 TaskState
*ts
= cpu
->opaque
;
9168 if (ts
->child_tidptr
) {
9169 put_user_u32(0, ts
->child_tidptr
);
9170 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
9171 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
9174 object_unparent(OBJECT(cpu
));
9175 object_unref(OBJECT(cpu
));
9177 * At this point the CPU should be unrealized and removed
9178 * from cpu lists. We can clean-up the rest of the thread
9179 * data without the lock held.
9182 pthread_mutex_unlock(&clone_lock
);
9186 rcu_unregister_thread();
9190 pthread_mutex_unlock(&clone_lock
);
9191 preexit_cleanup(cpu_env
, arg1
);
9193 return 0; /* avoid warning */
9194 case TARGET_NR_read
:
9195 if (arg2
== 0 && arg3
== 0) {
9196 return get_errno(safe_read(arg1
, 0, 0));
9198 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9199 return -TARGET_EFAULT
;
9200 ret
= get_errno(safe_read(arg1
, p
, arg3
));
9202 fd_trans_host_to_target_data(arg1
)) {
9203 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
9205 unlock_user(p
, arg2
, ret
);
9208 case TARGET_NR_write
:
9209 if (arg2
== 0 && arg3
== 0) {
9210 return get_errno(safe_write(arg1
, 0, 0));
9212 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9213 return -TARGET_EFAULT
;
9214 if (fd_trans_target_to_host_data(arg1
)) {
9215 void *copy
= g_malloc(arg3
);
9216 memcpy(copy
, p
, arg3
);
9217 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
9219 ret
= get_errno(safe_write(arg1
, copy
, ret
));
9223 ret
= get_errno(safe_write(arg1
, p
, arg3
));
9225 unlock_user(p
, arg2
, 0);
9228 #ifdef TARGET_NR_open
9229 case TARGET_NR_open
:
9230 if (!(p
= lock_user_string(arg1
)))
9231 return -TARGET_EFAULT
;
9232 ret
= get_errno(do_guest_openat(cpu_env
, AT_FDCWD
, p
,
9233 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
9235 fd_trans_unregister(ret
);
9236 unlock_user(p
, arg1
, 0);
9239 case TARGET_NR_openat
:
9240 if (!(p
= lock_user_string(arg2
)))
9241 return -TARGET_EFAULT
;
9242 ret
= get_errno(do_guest_openat(cpu_env
, arg1
, p
,
9243 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
9245 fd_trans_unregister(ret
);
9246 unlock_user(p
, arg2
, 0);
9248 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9249 case TARGET_NR_name_to_handle_at
:
9250 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
9253 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9254 case TARGET_NR_open_by_handle_at
:
9255 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
9256 fd_trans_unregister(ret
);
9259 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9260 case TARGET_NR_pidfd_open
:
9261 return get_errno(pidfd_open(arg1
, arg2
));
9263 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9264 case TARGET_NR_pidfd_send_signal
:
9266 siginfo_t uinfo
, *puinfo
;
9269 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9271 return -TARGET_EFAULT
;
9273 target_to_host_siginfo(&uinfo
, p
);
9274 unlock_user(p
, arg3
, 0);
9279 ret
= get_errno(pidfd_send_signal(arg1
, target_to_host_signal(arg2
),
9284 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9285 case TARGET_NR_pidfd_getfd
:
9286 return get_errno(pidfd_getfd(arg1
, arg2
, arg3
));
9288 case TARGET_NR_close
:
9289 fd_trans_unregister(arg1
);
9290 return get_errno(close(arg1
));
9291 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9292 case TARGET_NR_close_range
:
9293 ret
= get_errno(sys_close_range(arg1
, arg2
, arg3
));
9294 if (ret
== 0 && !(arg3
& CLOSE_RANGE_CLOEXEC
)) {
9296 maxfd
= MIN(arg2
, target_fd_max
);
9297 for (fd
= arg1
; fd
< maxfd
; fd
++) {
9298 fd_trans_unregister(fd
);
9305 return do_brk(arg1
);
9306 #ifdef TARGET_NR_fork
9307 case TARGET_NR_fork
:
9308 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
9310 #ifdef TARGET_NR_waitpid
9311 case TARGET_NR_waitpid
:
9314 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
9315 if (!is_error(ret
) && arg2
&& ret
9316 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
9317 return -TARGET_EFAULT
;
9321 #ifdef TARGET_NR_waitid
9322 case TARGET_NR_waitid
:
9326 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
9327 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
9328 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
9329 return -TARGET_EFAULT
;
9330 host_to_target_siginfo(p
, &info
);
9331 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
9336 #ifdef TARGET_NR_creat /* not on alpha */
9337 case TARGET_NR_creat
:
9338 if (!(p
= lock_user_string(arg1
)))
9339 return -TARGET_EFAULT
;
9340 ret
= get_errno(creat(p
, arg2
));
9341 fd_trans_unregister(ret
);
9342 unlock_user(p
, arg1
, 0);
9345 #ifdef TARGET_NR_link
9346 case TARGET_NR_link
:
9349 p
= lock_user_string(arg1
);
9350 p2
= lock_user_string(arg2
);
9352 ret
= -TARGET_EFAULT
;
9354 ret
= get_errno(link(p
, p2
));
9355 unlock_user(p2
, arg2
, 0);
9356 unlock_user(p
, arg1
, 0);
9360 #if defined(TARGET_NR_linkat)
9361 case TARGET_NR_linkat
:
9365 return -TARGET_EFAULT
;
9366 p
= lock_user_string(arg2
);
9367 p2
= lock_user_string(arg4
);
9369 ret
= -TARGET_EFAULT
;
9371 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
9372 unlock_user(p
, arg2
, 0);
9373 unlock_user(p2
, arg4
, 0);
9377 #ifdef TARGET_NR_unlink
9378 case TARGET_NR_unlink
:
9379 if (!(p
= lock_user_string(arg1
)))
9380 return -TARGET_EFAULT
;
9381 ret
= get_errno(unlink(p
));
9382 unlock_user(p
, arg1
, 0);
9385 #if defined(TARGET_NR_unlinkat)
9386 case TARGET_NR_unlinkat
:
9387 if (!(p
= lock_user_string(arg2
)))
9388 return -TARGET_EFAULT
;
9389 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
9390 unlock_user(p
, arg2
, 0);
9393 case TARGET_NR_execveat
:
9394 return do_execv(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, true);
9395 case TARGET_NR_execve
:
9396 return do_execv(cpu_env
, AT_FDCWD
, arg1
, arg2
, arg3
, 0, false);
9397 case TARGET_NR_chdir
:
9398 if (!(p
= lock_user_string(arg1
)))
9399 return -TARGET_EFAULT
;
9400 ret
= get_errno(chdir(p
));
9401 unlock_user(p
, arg1
, 0);
9403 #ifdef TARGET_NR_time
9404 case TARGET_NR_time
:
9407 ret
= get_errno(time(&host_time
));
9410 && put_user_sal(host_time
, arg1
))
9411 return -TARGET_EFAULT
;
9415 #ifdef TARGET_NR_mknod
9416 case TARGET_NR_mknod
:
9417 if (!(p
= lock_user_string(arg1
)))
9418 return -TARGET_EFAULT
;
9419 ret
= get_errno(mknod(p
, arg2
, arg3
));
9420 unlock_user(p
, arg1
, 0);
9423 #if defined(TARGET_NR_mknodat)
9424 case TARGET_NR_mknodat
:
9425 if (!(p
= lock_user_string(arg2
)))
9426 return -TARGET_EFAULT
;
9427 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
9428 unlock_user(p
, arg2
, 0);
9431 #ifdef TARGET_NR_chmod
9432 case TARGET_NR_chmod
:
9433 if (!(p
= lock_user_string(arg1
)))
9434 return -TARGET_EFAULT
;
9435 ret
= get_errno(chmod(p
, arg2
));
9436 unlock_user(p
, arg1
, 0);
9439 #ifdef TARGET_NR_lseek
9440 case TARGET_NR_lseek
:
9441 return get_errno(lseek(arg1
, arg2
, arg3
));
9443 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9444 /* Alpha specific */
9445 case TARGET_NR_getxpid
:
9446 cpu_env
->ir
[IR_A4
] = getppid();
9447 return get_errno(getpid());
9449 #ifdef TARGET_NR_getpid
9450 case TARGET_NR_getpid
:
9451 return get_errno(getpid());
9453 case TARGET_NR_mount
:
9455 /* need to look at the data field */
9459 p
= lock_user_string(arg1
);
9461 return -TARGET_EFAULT
;
9467 p2
= lock_user_string(arg2
);
9470 unlock_user(p
, arg1
, 0);
9472 return -TARGET_EFAULT
;
9476 p3
= lock_user_string(arg3
);
9479 unlock_user(p
, arg1
, 0);
9481 unlock_user(p2
, arg2
, 0);
9482 return -TARGET_EFAULT
;
9488 /* FIXME - arg5 should be locked, but it isn't clear how to
9489 * do that since it's not guaranteed to be a NULL-terminated
9493 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
9495 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
9497 ret
= get_errno(ret
);
9500 unlock_user(p
, arg1
, 0);
9502 unlock_user(p2
, arg2
, 0);
9504 unlock_user(p3
, arg3
, 0);
9508 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9509 #if defined(TARGET_NR_umount)
9510 case TARGET_NR_umount
:
9512 #if defined(TARGET_NR_oldumount)
9513 case TARGET_NR_oldumount
:
9515 if (!(p
= lock_user_string(arg1
)))
9516 return -TARGET_EFAULT
;
9517 ret
= get_errno(umount(p
));
9518 unlock_user(p
, arg1
, 0);
9521 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9522 case TARGET_NR_move_mount
:
9526 if (!arg2
|| !arg4
) {
9527 return -TARGET_EFAULT
;
9530 p2
= lock_user_string(arg2
);
9532 return -TARGET_EFAULT
;
9535 p4
= lock_user_string(arg4
);
9537 unlock_user(p2
, arg2
, 0);
9538 return -TARGET_EFAULT
;
9540 ret
= get_errno(sys_move_mount(arg1
, p2
, arg3
, p4
, arg5
));
9542 unlock_user(p2
, arg2
, 0);
9543 unlock_user(p4
, arg4
, 0);
9548 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9549 case TARGET_NR_open_tree
:
9555 return -TARGET_EFAULT
;
9558 p2
= lock_user_string(arg2
);
9560 return -TARGET_EFAULT
;
9563 host_flags
= arg3
& ~TARGET_O_CLOEXEC
;
9564 if (arg3
& TARGET_O_CLOEXEC
) {
9565 host_flags
|= O_CLOEXEC
;
9568 ret
= get_errno(sys_open_tree(arg1
, p2
, host_flags
));
9570 unlock_user(p2
, arg2
, 0);
9575 #ifdef TARGET_NR_stime /* not on alpha */
9576 case TARGET_NR_stime
:
9580 if (get_user_sal(ts
.tv_sec
, arg1
)) {
9581 return -TARGET_EFAULT
;
9583 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
9586 #ifdef TARGET_NR_alarm /* not on alpha */
9587 case TARGET_NR_alarm
:
9590 #ifdef TARGET_NR_pause /* not on alpha */
9591 case TARGET_NR_pause
:
9592 if (!block_signals()) {
9593 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
9595 return -TARGET_EINTR
;
9597 #ifdef TARGET_NR_utime
9598 case TARGET_NR_utime
:
9600 struct utimbuf tbuf
, *host_tbuf
;
9601 struct target_utimbuf
*target_tbuf
;
9603 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9604 return -TARGET_EFAULT
;
9605 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9606 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9607 unlock_user_struct(target_tbuf
, arg2
, 0);
9612 if (!(p
= lock_user_string(arg1
)))
9613 return -TARGET_EFAULT
;
9614 ret
= get_errno(utime(p
, host_tbuf
));
9615 unlock_user(p
, arg1
, 0);
9619 #ifdef TARGET_NR_utimes
9620 case TARGET_NR_utimes
:
9622 struct timeval
*tvp
, tv
[2];
9624 if (copy_from_user_timeval(&tv
[0], arg2
)
9625 || copy_from_user_timeval(&tv
[1],
9626 arg2
+ sizeof(struct target_timeval
)))
9627 return -TARGET_EFAULT
;
9632 if (!(p
= lock_user_string(arg1
)))
9633 return -TARGET_EFAULT
;
9634 ret
= get_errno(utimes(p
, tvp
));
9635 unlock_user(p
, arg1
, 0);
9639 #if defined(TARGET_NR_futimesat)
9640 case TARGET_NR_futimesat
:
9642 struct timeval
*tvp
, tv
[2];
9644 if (copy_from_user_timeval(&tv
[0], arg3
)
9645 || copy_from_user_timeval(&tv
[1],
9646 arg3
+ sizeof(struct target_timeval
)))
9647 return -TARGET_EFAULT
;
9652 if (!(p
= lock_user_string(arg2
))) {
9653 return -TARGET_EFAULT
;
9655 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9656 unlock_user(p
, arg2
, 0);
9660 #ifdef TARGET_NR_access
9661 case TARGET_NR_access
:
9662 if (!(p
= lock_user_string(arg1
))) {
9663 return -TARGET_EFAULT
;
9665 ret
= get_errno(access(path(p
), arg2
));
9666 unlock_user(p
, arg1
, 0);
9669 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9670 case TARGET_NR_faccessat
:
9671 if (!(p
= lock_user_string(arg2
))) {
9672 return -TARGET_EFAULT
;
9674 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9675 unlock_user(p
, arg2
, 0);
9678 #if defined(TARGET_NR_faccessat2)
9679 case TARGET_NR_faccessat2
:
9680 if (!(p
= lock_user_string(arg2
))) {
9681 return -TARGET_EFAULT
;
9683 ret
= get_errno(faccessat(arg1
, p
, arg3
, arg4
));
9684 unlock_user(p
, arg2
, 0);
9687 #ifdef TARGET_NR_nice /* not on alpha */
9688 case TARGET_NR_nice
:
9689 return get_errno(nice(arg1
));
9691 case TARGET_NR_sync
:
9694 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9695 case TARGET_NR_syncfs
:
9696 return get_errno(syncfs(arg1
));
9698 case TARGET_NR_kill
:
9699 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9700 #ifdef TARGET_NR_rename
9701 case TARGET_NR_rename
:
9704 p
= lock_user_string(arg1
);
9705 p2
= lock_user_string(arg2
);
9707 ret
= -TARGET_EFAULT
;
9709 ret
= get_errno(rename(p
, p2
));
9710 unlock_user(p2
, arg2
, 0);
9711 unlock_user(p
, arg1
, 0);
9715 #if defined(TARGET_NR_renameat)
9716 case TARGET_NR_renameat
:
9719 p
= lock_user_string(arg2
);
9720 p2
= lock_user_string(arg4
);
9722 ret
= -TARGET_EFAULT
;
9724 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9725 unlock_user(p2
, arg4
, 0);
9726 unlock_user(p
, arg2
, 0);
9730 #if defined(TARGET_NR_renameat2)
9731 case TARGET_NR_renameat2
:
9734 p
= lock_user_string(arg2
);
9735 p2
= lock_user_string(arg4
);
9737 ret
= -TARGET_EFAULT
;
9739 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9741 unlock_user(p2
, arg4
, 0);
9742 unlock_user(p
, arg2
, 0);
9746 #ifdef TARGET_NR_mkdir
9747 case TARGET_NR_mkdir
:
9748 if (!(p
= lock_user_string(arg1
)))
9749 return -TARGET_EFAULT
;
9750 ret
= get_errno(mkdir(p
, arg2
));
9751 unlock_user(p
, arg1
, 0);
9754 #if defined(TARGET_NR_mkdirat)
9755 case TARGET_NR_mkdirat
:
9756 if (!(p
= lock_user_string(arg2
)))
9757 return -TARGET_EFAULT
;
9758 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9759 unlock_user(p
, arg2
, 0);
9762 #ifdef TARGET_NR_rmdir
9763 case TARGET_NR_rmdir
:
9764 if (!(p
= lock_user_string(arg1
)))
9765 return -TARGET_EFAULT
;
9766 ret
= get_errno(rmdir(p
));
9767 unlock_user(p
, arg1
, 0);
9771 ret
= get_errno(dup(arg1
));
9773 fd_trans_dup(arg1
, ret
);
9776 #ifdef TARGET_NR_pipe
9777 case TARGET_NR_pipe
:
9778 return do_pipe(cpu_env
, arg1
, 0, 0);
9780 #ifdef TARGET_NR_pipe2
9781 case TARGET_NR_pipe2
:
9782 return do_pipe(cpu_env
, arg1
,
9783 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9785 case TARGET_NR_times
:
9787 struct target_tms
*tmsp
;
9789 ret
= get_errno(times(&tms
));
9791 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9793 return -TARGET_EFAULT
;
9794 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9795 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9796 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9797 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9800 ret
= host_to_target_clock_t(ret
);
9803 case TARGET_NR_acct
:
9805 ret
= get_errno(acct(NULL
));
9807 if (!(p
= lock_user_string(arg1
))) {
9808 return -TARGET_EFAULT
;
9810 ret
= get_errno(acct(path(p
)));
9811 unlock_user(p
, arg1
, 0);
9814 #ifdef TARGET_NR_umount2
9815 case TARGET_NR_umount2
:
9816 if (!(p
= lock_user_string(arg1
)))
9817 return -TARGET_EFAULT
;
9818 ret
= get_errno(umount2(p
, arg2
));
9819 unlock_user(p
, arg1
, 0);
9822 case TARGET_NR_ioctl
:
9823 return do_ioctl(arg1
, arg2
, arg3
);
9824 #ifdef TARGET_NR_fcntl
9825 case TARGET_NR_fcntl
:
9826 return do_fcntl(arg1
, arg2
, arg3
);
9828 case TARGET_NR_setpgid
:
9829 return get_errno(setpgid(arg1
, arg2
));
9830 case TARGET_NR_umask
:
9831 return get_errno(umask(arg1
));
9832 case TARGET_NR_chroot
:
9833 if (!(p
= lock_user_string(arg1
)))
9834 return -TARGET_EFAULT
;
9835 ret
= get_errno(chroot(p
));
9836 unlock_user(p
, arg1
, 0);
9838 #ifdef TARGET_NR_dup2
9839 case TARGET_NR_dup2
:
9840 ret
= get_errno(dup2(arg1
, arg2
));
9842 fd_trans_dup(arg1
, arg2
);
9846 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9847 case TARGET_NR_dup3
:
9851 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9854 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9855 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9857 fd_trans_dup(arg1
, arg2
);
9862 #ifdef TARGET_NR_getppid /* not on alpha */
9863 case TARGET_NR_getppid
:
9864 return get_errno(getppid());
9866 #ifdef TARGET_NR_getpgrp
9867 case TARGET_NR_getpgrp
:
9868 return get_errno(getpgrp());
9870 case TARGET_NR_setsid
:
9871 return get_errno(setsid());
9872 #ifdef TARGET_NR_sigaction
9873 case TARGET_NR_sigaction
:
9875 #if defined(TARGET_MIPS)
9876 struct target_sigaction act
, oact
, *pact
, *old_act
;
9879 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9880 return -TARGET_EFAULT
;
9881 act
._sa_handler
= old_act
->_sa_handler
;
9882 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9883 act
.sa_flags
= old_act
->sa_flags
;
9884 unlock_user_struct(old_act
, arg2
, 0);
9890 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9892 if (!is_error(ret
) && arg3
) {
9893 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9894 return -TARGET_EFAULT
;
9895 old_act
->_sa_handler
= oact
._sa_handler
;
9896 old_act
->sa_flags
= oact
.sa_flags
;
9897 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9898 old_act
->sa_mask
.sig
[1] = 0;
9899 old_act
->sa_mask
.sig
[2] = 0;
9900 old_act
->sa_mask
.sig
[3] = 0;
9901 unlock_user_struct(old_act
, arg3
, 1);
9904 struct target_old_sigaction
*old_act
;
9905 struct target_sigaction act
, oact
, *pact
;
9907 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9908 return -TARGET_EFAULT
;
9909 act
._sa_handler
= old_act
->_sa_handler
;
9910 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9911 act
.sa_flags
= old_act
->sa_flags
;
9912 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9913 act
.sa_restorer
= old_act
->sa_restorer
;
9915 unlock_user_struct(old_act
, arg2
, 0);
9920 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9921 if (!is_error(ret
) && arg3
) {
9922 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9923 return -TARGET_EFAULT
;
9924 old_act
->_sa_handler
= oact
._sa_handler
;
9925 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9926 old_act
->sa_flags
= oact
.sa_flags
;
9927 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9928 old_act
->sa_restorer
= oact
.sa_restorer
;
9930 unlock_user_struct(old_act
, arg3
, 1);
9936 case TARGET_NR_rt_sigaction
:
9939 * For Alpha and SPARC this is a 5 argument syscall, with
9940 * a 'restorer' parameter which must be copied into the
9941 * sa_restorer field of the sigaction struct.
9942 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9943 * and arg5 is the sigsetsize.
9945 #if defined(TARGET_ALPHA)
9946 target_ulong sigsetsize
= arg4
;
9947 target_ulong restorer
= arg5
;
9948 #elif defined(TARGET_SPARC)
9949 target_ulong restorer
= arg4
;
9950 target_ulong sigsetsize
= arg5
;
9952 target_ulong sigsetsize
= arg4
;
9953 target_ulong restorer
= 0;
9955 struct target_sigaction
*act
= NULL
;
9956 struct target_sigaction
*oact
= NULL
;
9958 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9959 return -TARGET_EINVAL
;
9961 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9962 return -TARGET_EFAULT
;
9964 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9965 ret
= -TARGET_EFAULT
;
9967 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9969 unlock_user_struct(oact
, arg3
, 1);
9973 unlock_user_struct(act
, arg2
, 0);
9977 #ifdef TARGET_NR_sgetmask /* not on alpha */
9978 case TARGET_NR_sgetmask
:
9981 abi_ulong target_set
;
9982 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9984 host_to_target_old_sigset(&target_set
, &cur_set
);
9990 #ifdef TARGET_NR_ssetmask /* not on alpha */
9991 case TARGET_NR_ssetmask
:
9994 abi_ulong target_set
= arg1
;
9995 target_to_host_old_sigset(&set
, &target_set
);
9996 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9998 host_to_target_old_sigset(&target_set
, &oset
);
10004 #ifdef TARGET_NR_sigprocmask
10005 case TARGET_NR_sigprocmask
:
10007 #if defined(TARGET_ALPHA)
10008 sigset_t set
, oldset
;
10013 case TARGET_SIG_BLOCK
:
10016 case TARGET_SIG_UNBLOCK
:
10019 case TARGET_SIG_SETMASK
:
10023 return -TARGET_EINVAL
;
10026 target_to_host_old_sigset(&set
, &mask
);
10028 ret
= do_sigprocmask(how
, &set
, &oldset
);
10029 if (!is_error(ret
)) {
10030 host_to_target_old_sigset(&mask
, &oldset
);
10032 cpu_env
->ir
[IR_V0
] = 0; /* force no error */
10035 sigset_t set
, oldset
, *set_ptr
;
10039 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
10041 return -TARGET_EFAULT
;
10043 target_to_host_old_sigset(&set
, p
);
10044 unlock_user(p
, arg2
, 0);
10047 case TARGET_SIG_BLOCK
:
10050 case TARGET_SIG_UNBLOCK
:
10053 case TARGET_SIG_SETMASK
:
10057 return -TARGET_EINVAL
;
10063 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
10064 if (!is_error(ret
) && arg3
) {
10065 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
10066 return -TARGET_EFAULT
;
10067 host_to_target_old_sigset(p
, &oldset
);
10068 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
10074 case TARGET_NR_rt_sigprocmask
:
10077 sigset_t set
, oldset
, *set_ptr
;
10079 if (arg4
!= sizeof(target_sigset_t
)) {
10080 return -TARGET_EINVAL
;
10084 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
10086 return -TARGET_EFAULT
;
10088 target_to_host_sigset(&set
, p
);
10089 unlock_user(p
, arg2
, 0);
10092 case TARGET_SIG_BLOCK
:
10095 case TARGET_SIG_UNBLOCK
:
10098 case TARGET_SIG_SETMASK
:
10102 return -TARGET_EINVAL
;
10108 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
10109 if (!is_error(ret
) && arg3
) {
10110 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
10111 return -TARGET_EFAULT
;
10112 host_to_target_sigset(p
, &oldset
);
10113 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
10117 #ifdef TARGET_NR_sigpending
10118 case TARGET_NR_sigpending
:
10121 ret
= get_errno(sigpending(&set
));
10122 if (!is_error(ret
)) {
10123 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
10124 return -TARGET_EFAULT
;
10125 host_to_target_old_sigset(p
, &set
);
10126 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
10131 case TARGET_NR_rt_sigpending
:
10135 /* Yes, this check is >, not != like most. We follow the kernel's
10136 * logic and it does it like this because it implements
10137 * NR_sigpending through the same code path, and in that case
10138 * the old_sigset_t is smaller in size.
10140 if (arg2
> sizeof(target_sigset_t
)) {
10141 return -TARGET_EINVAL
;
10144 ret
= get_errno(sigpending(&set
));
10145 if (!is_error(ret
)) {
10146 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
10147 return -TARGET_EFAULT
;
10148 host_to_target_sigset(p
, &set
);
10149 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
10153 #ifdef TARGET_NR_sigsuspend
10154 case TARGET_NR_sigsuspend
:
10158 #if defined(TARGET_ALPHA)
10159 TaskState
*ts
= cpu
->opaque
;
10160 /* target_to_host_old_sigset will bswap back */
10161 abi_ulong mask
= tswapal(arg1
);
10162 set
= &ts
->sigsuspend_mask
;
10163 target_to_host_old_sigset(set
, &mask
);
10165 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
10170 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
10171 finish_sigsuspend_mask(ret
);
10175 case TARGET_NR_rt_sigsuspend
:
10179 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
10183 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
10184 finish_sigsuspend_mask(ret
);
10187 #ifdef TARGET_NR_rt_sigtimedwait
10188 case TARGET_NR_rt_sigtimedwait
:
10191 struct timespec uts
, *puts
;
10194 if (arg4
!= sizeof(target_sigset_t
)) {
10195 return -TARGET_EINVAL
;
10198 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
10199 return -TARGET_EFAULT
;
10200 target_to_host_sigset(&set
, p
);
10201 unlock_user(p
, arg1
, 0);
10204 if (target_to_host_timespec(puts
, arg3
)) {
10205 return -TARGET_EFAULT
;
10210 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
10212 if (!is_error(ret
)) {
10214 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
10217 return -TARGET_EFAULT
;
10219 host_to_target_siginfo(p
, &uinfo
);
10220 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
10222 ret
= host_to_target_signal(ret
);
10227 #ifdef TARGET_NR_rt_sigtimedwait_time64
10228 case TARGET_NR_rt_sigtimedwait_time64
:
10231 struct timespec uts
, *puts
;
10234 if (arg4
!= sizeof(target_sigset_t
)) {
10235 return -TARGET_EINVAL
;
10238 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
10240 return -TARGET_EFAULT
;
10242 target_to_host_sigset(&set
, p
);
10243 unlock_user(p
, arg1
, 0);
10246 if (target_to_host_timespec64(puts
, arg3
)) {
10247 return -TARGET_EFAULT
;
10252 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
10254 if (!is_error(ret
)) {
10256 p
= lock_user(VERIFY_WRITE
, arg2
,
10257 sizeof(target_siginfo_t
), 0);
10259 return -TARGET_EFAULT
;
10261 host_to_target_siginfo(p
, &uinfo
);
10262 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
10264 ret
= host_to_target_signal(ret
);
10269 case TARGET_NR_rt_sigqueueinfo
:
10273 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
10275 return -TARGET_EFAULT
;
10277 target_to_host_siginfo(&uinfo
, p
);
10278 unlock_user(p
, arg3
, 0);
10279 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, target_to_host_signal(arg2
), &uinfo
));
10282 case TARGET_NR_rt_tgsigqueueinfo
:
10286 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
10288 return -TARGET_EFAULT
;
10290 target_to_host_siginfo(&uinfo
, p
);
10291 unlock_user(p
, arg4
, 0);
10292 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, target_to_host_signal(arg3
), &uinfo
));
10295 #ifdef TARGET_NR_sigreturn
10296 case TARGET_NR_sigreturn
:
10297 if (block_signals()) {
10298 return -QEMU_ERESTARTSYS
;
10300 return do_sigreturn(cpu_env
);
10302 case TARGET_NR_rt_sigreturn
:
10303 if (block_signals()) {
10304 return -QEMU_ERESTARTSYS
;
10306 return do_rt_sigreturn(cpu_env
);
10307 case TARGET_NR_sethostname
:
10308 if (!(p
= lock_user_string(arg1
)))
10309 return -TARGET_EFAULT
;
10310 ret
= get_errno(sethostname(p
, arg2
));
10311 unlock_user(p
, arg1
, 0);
10313 #ifdef TARGET_NR_setrlimit
10314 case TARGET_NR_setrlimit
:
10316 int resource
= target_to_host_resource(arg1
);
10317 struct target_rlimit
*target_rlim
;
10318 struct rlimit rlim
;
10319 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
10320 return -TARGET_EFAULT
;
10321 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
10322 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
10323 unlock_user_struct(target_rlim
, arg2
, 0);
10325 * If we just passed through resource limit settings for memory then
10326 * they would also apply to QEMU's own allocations, and QEMU will
10327 * crash or hang or die if its allocations fail. Ideally we would
10328 * track the guest allocations in QEMU and apply the limits ourselves.
10329 * For now, just tell the guest the call succeeded but don't actually
10332 if (resource
!= RLIMIT_AS
&&
10333 resource
!= RLIMIT_DATA
&&
10334 resource
!= RLIMIT_STACK
) {
10335 return get_errno(setrlimit(resource
, &rlim
));
10341 #ifdef TARGET_NR_getrlimit
10342 case TARGET_NR_getrlimit
:
10344 int resource
= target_to_host_resource(arg1
);
10345 struct target_rlimit
*target_rlim
;
10346 struct rlimit rlim
;
10348 ret
= get_errno(getrlimit(resource
, &rlim
));
10349 if (!is_error(ret
)) {
10350 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10351 return -TARGET_EFAULT
;
10352 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10353 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10354 unlock_user_struct(target_rlim
, arg2
, 1);
10359 case TARGET_NR_getrusage
:
10361 struct rusage rusage
;
10362 ret
= get_errno(getrusage(arg1
, &rusage
));
10363 if (!is_error(ret
)) {
10364 ret
= host_to_target_rusage(arg2
, &rusage
);
10368 #if defined(TARGET_NR_gettimeofday)
10369 case TARGET_NR_gettimeofday
:
10372 struct timezone tz
;
10374 ret
= get_errno(gettimeofday(&tv
, &tz
));
10375 if (!is_error(ret
)) {
10376 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
10377 return -TARGET_EFAULT
;
10379 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
10380 return -TARGET_EFAULT
;
10386 #if defined(TARGET_NR_settimeofday)
10387 case TARGET_NR_settimeofday
:
10389 struct timeval tv
, *ptv
= NULL
;
10390 struct timezone tz
, *ptz
= NULL
;
10393 if (copy_from_user_timeval(&tv
, arg1
)) {
10394 return -TARGET_EFAULT
;
10400 if (copy_from_user_timezone(&tz
, arg2
)) {
10401 return -TARGET_EFAULT
;
10406 return get_errno(settimeofday(ptv
, ptz
));
10409 #if defined(TARGET_NR_select)
10410 case TARGET_NR_select
:
10411 #if defined(TARGET_WANT_NI_OLD_SELECT)
10412 /* some architectures used to have old_select here
10413 * but now ENOSYS it.
10415 ret
= -TARGET_ENOSYS
;
10416 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10417 ret
= do_old_select(arg1
);
10419 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10423 #ifdef TARGET_NR_pselect6
10424 case TARGET_NR_pselect6
:
10425 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
10427 #ifdef TARGET_NR_pselect6_time64
10428 case TARGET_NR_pselect6_time64
:
10429 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
10431 #ifdef TARGET_NR_symlink
10432 case TARGET_NR_symlink
:
10435 p
= lock_user_string(arg1
);
10436 p2
= lock_user_string(arg2
);
10438 ret
= -TARGET_EFAULT
;
10440 ret
= get_errno(symlink(p
, p2
));
10441 unlock_user(p2
, arg2
, 0);
10442 unlock_user(p
, arg1
, 0);
10446 #if defined(TARGET_NR_symlinkat)
10447 case TARGET_NR_symlinkat
:
10450 p
= lock_user_string(arg1
);
10451 p2
= lock_user_string(arg3
);
10453 ret
= -TARGET_EFAULT
;
10455 ret
= get_errno(symlinkat(p
, arg2
, p2
));
10456 unlock_user(p2
, arg3
, 0);
10457 unlock_user(p
, arg1
, 0);
10461 #ifdef TARGET_NR_readlink
10462 case TARGET_NR_readlink
:
10465 p
= lock_user_string(arg1
);
10466 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10467 ret
= get_errno(do_guest_readlink(p
, p2
, arg3
));
10468 unlock_user(p2
, arg2
, ret
);
10469 unlock_user(p
, arg1
, 0);
10473 #if defined(TARGET_NR_readlinkat)
10474 case TARGET_NR_readlinkat
:
10477 p
= lock_user_string(arg2
);
10478 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10480 ret
= -TARGET_EFAULT
;
10481 } else if (!arg4
) {
10482 /* Short circuit this for the magic exe check. */
10483 ret
= -TARGET_EINVAL
;
10484 } else if (is_proc_myself((const char *)p
, "exe")) {
10486 * Don't worry about sign mismatch as earlier mapping
10487 * logic would have thrown a bad address error.
10489 ret
= MIN(strlen(exec_path
), arg4
);
10490 /* We cannot NUL terminate the string. */
10491 memcpy(p2
, exec_path
, ret
);
10493 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
10495 unlock_user(p2
, arg3
, ret
);
10496 unlock_user(p
, arg2
, 0);
10500 #ifdef TARGET_NR_swapon
10501 case TARGET_NR_swapon
:
10502 if (!(p
= lock_user_string(arg1
)))
10503 return -TARGET_EFAULT
;
10504 ret
= get_errno(swapon(p
, arg2
));
10505 unlock_user(p
, arg1
, 0);
10508 case TARGET_NR_reboot
:
10509 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
10510 /* arg4 must be ignored in all other cases */
10511 p
= lock_user_string(arg4
);
10513 return -TARGET_EFAULT
;
10515 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
10516 unlock_user(p
, arg4
, 0);
10518 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
10521 #ifdef TARGET_NR_mmap
10522 case TARGET_NR_mmap
:
10523 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10524 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10525 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10526 || defined(TARGET_S390X)
10529 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
10530 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
10531 return -TARGET_EFAULT
;
10532 v1
= tswapal(v
[0]);
10533 v2
= tswapal(v
[1]);
10534 v3
= tswapal(v
[2]);
10535 v4
= tswapal(v
[3]);
10536 v5
= tswapal(v
[4]);
10537 v6
= tswapal(v
[5]);
10538 unlock_user(v
, arg1
, 0);
10539 ret
= get_errno(target_mmap(v1
, v2
, v3
,
10540 target_to_host_bitmask(v4
, mmap_flags_tbl
),
10544 /* mmap pointers are always untagged */
10545 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
10546 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10552 #ifdef TARGET_NR_mmap2
10553 case TARGET_NR_mmap2
:
10555 #define MMAP_SHIFT 12
10557 ret
= target_mmap(arg1
, arg2
, arg3
,
10558 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10559 arg5
, (off_t
)(abi_ulong
)arg6
<< MMAP_SHIFT
);
10560 return get_errno(ret
);
10562 case TARGET_NR_munmap
:
10563 arg1
= cpu_untagged_addr(cpu
, arg1
);
10564 return get_errno(target_munmap(arg1
, arg2
));
10565 case TARGET_NR_mprotect
:
10566 arg1
= cpu_untagged_addr(cpu
, arg1
);
10568 TaskState
*ts
= cpu
->opaque
;
10569 /* Special hack to detect libc making the stack executable. */
10570 if ((arg3
& PROT_GROWSDOWN
)
10571 && arg1
>= ts
->info
->stack_limit
10572 && arg1
<= ts
->info
->start_stack
) {
10573 arg3
&= ~PROT_GROWSDOWN
;
10574 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
10575 arg1
= ts
->info
->stack_limit
;
10578 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
10579 #ifdef TARGET_NR_mremap
10580 case TARGET_NR_mremap
:
10581 arg1
= cpu_untagged_addr(cpu
, arg1
);
10582 /* mremap new_addr (arg5) is always untagged */
10583 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
10585 /* ??? msync/mlock/munlock are broken for softmmu. */
10586 #ifdef TARGET_NR_msync
10587 case TARGET_NR_msync
:
10588 return get_errno(msync(g2h(cpu
, arg1
), arg2
,
10589 target_to_host_msync_arg(arg3
)));
10591 #ifdef TARGET_NR_mlock
10592 case TARGET_NR_mlock
:
10593 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
10595 #ifdef TARGET_NR_munlock
10596 case TARGET_NR_munlock
:
10597 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10599 #ifdef TARGET_NR_mlockall
10600 case TARGET_NR_mlockall
:
10601 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10603 #ifdef TARGET_NR_munlockall
10604 case TARGET_NR_munlockall
:
10605 return get_errno(munlockall());
10607 #ifdef TARGET_NR_truncate
10608 case TARGET_NR_truncate
:
10609 if (!(p
= lock_user_string(arg1
)))
10610 return -TARGET_EFAULT
;
10611 ret
= get_errno(truncate(p
, arg2
));
10612 unlock_user(p
, arg1
, 0);
10615 #ifdef TARGET_NR_ftruncate
10616 case TARGET_NR_ftruncate
:
10617 return get_errno(ftruncate(arg1
, arg2
));
10619 case TARGET_NR_fchmod
:
10620 return get_errno(fchmod(arg1
, arg2
));
10621 #if defined(TARGET_NR_fchmodat)
10622 case TARGET_NR_fchmodat
:
10623 if (!(p
= lock_user_string(arg2
)))
10624 return -TARGET_EFAULT
;
10625 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10626 unlock_user(p
, arg2
, 0);
10629 case TARGET_NR_getpriority
:
10630 /* Note that negative values are valid for getpriority, so we must
10631 differentiate based on errno settings. */
10633 ret
= getpriority(arg1
, arg2
);
10634 if (ret
== -1 && errno
!= 0) {
10635 return -host_to_target_errno(errno
);
10637 #ifdef TARGET_ALPHA
10638 /* Return value is the unbiased priority. Signal no error. */
10639 cpu_env
->ir
[IR_V0
] = 0;
10641 /* Return value is a biased priority to avoid negative numbers. */
10645 case TARGET_NR_setpriority
:
10646 return get_errno(setpriority(arg1
, arg2
, arg3
));
10647 #ifdef TARGET_NR_statfs
10648 case TARGET_NR_statfs
:
10649 if (!(p
= lock_user_string(arg1
))) {
10650 return -TARGET_EFAULT
;
10652 ret
= get_errno(statfs(path(p
), &stfs
));
10653 unlock_user(p
, arg1
, 0);
10655 if (!is_error(ret
)) {
10656 struct target_statfs
*target_stfs
;
10658 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10659 return -TARGET_EFAULT
;
10660 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10661 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10662 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10663 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10664 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10665 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10666 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10667 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10668 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10669 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10670 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10671 #ifdef _STATFS_F_FLAGS
10672 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10674 __put_user(0, &target_stfs
->f_flags
);
10676 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10677 unlock_user_struct(target_stfs
, arg2
, 1);
10681 #ifdef TARGET_NR_fstatfs
10682 case TARGET_NR_fstatfs
:
10683 ret
= get_errno(fstatfs(arg1
, &stfs
));
10684 goto convert_statfs
;
10686 #ifdef TARGET_NR_statfs64
10687 case TARGET_NR_statfs64
:
10688 if (!(p
= lock_user_string(arg1
))) {
10689 return -TARGET_EFAULT
;
10691 ret
= get_errno(statfs(path(p
), &stfs
));
10692 unlock_user(p
, arg1
, 0);
10694 if (!is_error(ret
)) {
10695 struct target_statfs64
*target_stfs
;
10697 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10698 return -TARGET_EFAULT
;
10699 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10700 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10701 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10702 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10703 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10704 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10705 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10706 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10707 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10708 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10709 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10710 #ifdef _STATFS_F_FLAGS
10711 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10713 __put_user(0, &target_stfs
->f_flags
);
10715 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10716 unlock_user_struct(target_stfs
, arg3
, 1);
10719 case TARGET_NR_fstatfs64
:
10720 ret
= get_errno(fstatfs(arg1
, &stfs
));
10721 goto convert_statfs64
;
10723 #ifdef TARGET_NR_socketcall
10724 case TARGET_NR_socketcall
:
10725 return do_socketcall(arg1
, arg2
);
10727 #ifdef TARGET_NR_accept
10728 case TARGET_NR_accept
:
10729 return do_accept4(arg1
, arg2
, arg3
, 0);
10731 #ifdef TARGET_NR_accept4
10732 case TARGET_NR_accept4
:
10733 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10735 #ifdef TARGET_NR_bind
10736 case TARGET_NR_bind
:
10737 return do_bind(arg1
, arg2
, arg3
);
10739 #ifdef TARGET_NR_connect
10740 case TARGET_NR_connect
:
10741 return do_connect(arg1
, arg2
, arg3
);
10743 #ifdef TARGET_NR_getpeername
10744 case TARGET_NR_getpeername
:
10745 return do_getpeername(arg1
, arg2
, arg3
);
10747 #ifdef TARGET_NR_getsockname
10748 case TARGET_NR_getsockname
:
10749 return do_getsockname(arg1
, arg2
, arg3
);
10751 #ifdef TARGET_NR_getsockopt
10752 case TARGET_NR_getsockopt
:
10753 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10755 #ifdef TARGET_NR_listen
10756 case TARGET_NR_listen
:
10757 return get_errno(listen(arg1
, arg2
));
10759 #ifdef TARGET_NR_recv
10760 case TARGET_NR_recv
:
10761 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10763 #ifdef TARGET_NR_recvfrom
10764 case TARGET_NR_recvfrom
:
10765 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10767 #ifdef TARGET_NR_recvmsg
10768 case TARGET_NR_recvmsg
:
10769 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10771 #ifdef TARGET_NR_send
10772 case TARGET_NR_send
:
10773 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10775 #ifdef TARGET_NR_sendmsg
10776 case TARGET_NR_sendmsg
:
10777 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10779 #ifdef TARGET_NR_sendmmsg
10780 case TARGET_NR_sendmmsg
:
10781 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10783 #ifdef TARGET_NR_recvmmsg
10784 case TARGET_NR_recvmmsg
:
10785 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10787 #ifdef TARGET_NR_sendto
10788 case TARGET_NR_sendto
:
10789 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10791 #ifdef TARGET_NR_shutdown
10792 case TARGET_NR_shutdown
:
10793 return get_errno(shutdown(arg1
, arg2
));
10795 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10796 case TARGET_NR_getrandom
:
10797 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10799 return -TARGET_EFAULT
;
10801 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10802 unlock_user(p
, arg1
, ret
);
10805 #ifdef TARGET_NR_socket
10806 case TARGET_NR_socket
:
10807 return do_socket(arg1
, arg2
, arg3
);
10809 #ifdef TARGET_NR_socketpair
10810 case TARGET_NR_socketpair
:
10811 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10813 #ifdef TARGET_NR_setsockopt
10814 case TARGET_NR_setsockopt
:
10815 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10817 #if defined(TARGET_NR_syslog)
10818 case TARGET_NR_syslog
:
10823 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10824 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10825 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10826 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10827 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10828 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10829 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10830 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10831 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10832 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10833 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10834 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10837 return -TARGET_EINVAL
;
10842 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10844 return -TARGET_EFAULT
;
10846 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10847 unlock_user(p
, arg2
, arg3
);
10851 return -TARGET_EINVAL
;
10856 case TARGET_NR_setitimer
:
10858 struct itimerval value
, ovalue
, *pvalue
;
10862 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10863 || copy_from_user_timeval(&pvalue
->it_value
,
10864 arg2
+ sizeof(struct target_timeval
)))
10865 return -TARGET_EFAULT
;
10869 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10870 if (!is_error(ret
) && arg3
) {
10871 if (copy_to_user_timeval(arg3
,
10872 &ovalue
.it_interval
)
10873 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10875 return -TARGET_EFAULT
;
10879 case TARGET_NR_getitimer
:
10881 struct itimerval value
;
10883 ret
= get_errno(getitimer(arg1
, &value
));
10884 if (!is_error(ret
) && arg2
) {
10885 if (copy_to_user_timeval(arg2
,
10886 &value
.it_interval
)
10887 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10889 return -TARGET_EFAULT
;
10893 #ifdef TARGET_NR_stat
10894 case TARGET_NR_stat
:
10895 if (!(p
= lock_user_string(arg1
))) {
10896 return -TARGET_EFAULT
;
10898 ret
= get_errno(stat(path(p
), &st
));
10899 unlock_user(p
, arg1
, 0);
10902 #ifdef TARGET_NR_lstat
10903 case TARGET_NR_lstat
:
10904 if (!(p
= lock_user_string(arg1
))) {
10905 return -TARGET_EFAULT
;
10907 ret
= get_errno(lstat(path(p
), &st
));
10908 unlock_user(p
, arg1
, 0);
10911 #ifdef TARGET_NR_fstat
10912 case TARGET_NR_fstat
:
10914 ret
= get_errno(fstat(arg1
, &st
));
10915 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10918 if (!is_error(ret
)) {
10919 struct target_stat
*target_st
;
10921 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10922 return -TARGET_EFAULT
;
10923 memset(target_st
, 0, sizeof(*target_st
));
10924 __put_user(st
.st_dev
, &target_st
->st_dev
);
10925 __put_user(st
.st_ino
, &target_st
->st_ino
);
10926 __put_user(st
.st_mode
, &target_st
->st_mode
);
10927 __put_user(st
.st_uid
, &target_st
->st_uid
);
10928 __put_user(st
.st_gid
, &target_st
->st_gid
);
10929 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10930 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10931 __put_user(st
.st_size
, &target_st
->st_size
);
10932 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10933 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10934 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10935 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10936 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10937 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10938 __put_user(st
.st_atim
.tv_nsec
,
10939 &target_st
->target_st_atime_nsec
);
10940 __put_user(st
.st_mtim
.tv_nsec
,
10941 &target_st
->target_st_mtime_nsec
);
10942 __put_user(st
.st_ctim
.tv_nsec
,
10943 &target_st
->target_st_ctime_nsec
);
10945 unlock_user_struct(target_st
, arg2
, 1);
10950 case TARGET_NR_vhangup
:
10951 return get_errno(vhangup());
10952 #ifdef TARGET_NR_syscall
10953 case TARGET_NR_syscall
:
10954 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10955 arg6
, arg7
, arg8
, 0);
10957 #if defined(TARGET_NR_wait4)
10958 case TARGET_NR_wait4
:
10961 abi_long status_ptr
= arg2
;
10962 struct rusage rusage
, *rusage_ptr
;
10963 abi_ulong target_rusage
= arg4
;
10964 abi_long rusage_err
;
10966 rusage_ptr
= &rusage
;
10969 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10970 if (!is_error(ret
)) {
10971 if (status_ptr
&& ret
) {
10972 status
= host_to_target_waitstatus(status
);
10973 if (put_user_s32(status
, status_ptr
))
10974 return -TARGET_EFAULT
;
10976 if (target_rusage
) {
10977 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10986 #ifdef TARGET_NR_swapoff
10987 case TARGET_NR_swapoff
:
10988 if (!(p
= lock_user_string(arg1
)))
10989 return -TARGET_EFAULT
;
10990 ret
= get_errno(swapoff(p
));
10991 unlock_user(p
, arg1
, 0);
10994 case TARGET_NR_sysinfo
:
10996 struct target_sysinfo
*target_value
;
10997 struct sysinfo value
;
10998 ret
= get_errno(sysinfo(&value
));
10999 if (!is_error(ret
) && arg1
)
11001 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
11002 return -TARGET_EFAULT
;
11003 __put_user(value
.uptime
, &target_value
->uptime
);
11004 __put_user(value
.loads
[0], &target_value
->loads
[0]);
11005 __put_user(value
.loads
[1], &target_value
->loads
[1]);
11006 __put_user(value
.loads
[2], &target_value
->loads
[2]);
11007 __put_user(value
.totalram
, &target_value
->totalram
);
11008 __put_user(value
.freeram
, &target_value
->freeram
);
11009 __put_user(value
.sharedram
, &target_value
->sharedram
);
11010 __put_user(value
.bufferram
, &target_value
->bufferram
);
11011 __put_user(value
.totalswap
, &target_value
->totalswap
);
11012 __put_user(value
.freeswap
, &target_value
->freeswap
);
11013 __put_user(value
.procs
, &target_value
->procs
);
11014 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
11015 __put_user(value
.freehigh
, &target_value
->freehigh
);
11016 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
11017 unlock_user_struct(target_value
, arg1
, 1);
11021 #ifdef TARGET_NR_ipc
11022 case TARGET_NR_ipc
:
11023 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11025 #ifdef TARGET_NR_semget
11026 case TARGET_NR_semget
:
11027 return get_errno(semget(arg1
, arg2
, arg3
));
11029 #ifdef TARGET_NR_semop
11030 case TARGET_NR_semop
:
11031 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
11033 #ifdef TARGET_NR_semtimedop
11034 case TARGET_NR_semtimedop
:
11035 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
11037 #ifdef TARGET_NR_semtimedop_time64
11038 case TARGET_NR_semtimedop_time64
:
11039 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
11041 #ifdef TARGET_NR_semctl
11042 case TARGET_NR_semctl
:
11043 return do_semctl(arg1
, arg2
, arg3
, arg4
);
11045 #ifdef TARGET_NR_msgctl
11046 case TARGET_NR_msgctl
:
11047 return do_msgctl(arg1
, arg2
, arg3
);
11049 #ifdef TARGET_NR_msgget
11050 case TARGET_NR_msgget
:
11051 return get_errno(msgget(arg1
, arg2
));
11053 #ifdef TARGET_NR_msgrcv
11054 case TARGET_NR_msgrcv
:
11055 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
11057 #ifdef TARGET_NR_msgsnd
11058 case TARGET_NR_msgsnd
:
11059 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
11061 #ifdef TARGET_NR_shmget
11062 case TARGET_NR_shmget
:
11063 return get_errno(shmget(arg1
, arg2
, arg3
));
11065 #ifdef TARGET_NR_shmctl
11066 case TARGET_NR_shmctl
:
11067 return do_shmctl(arg1
, arg2
, arg3
);
11069 #ifdef TARGET_NR_shmat
11070 case TARGET_NR_shmat
:
11071 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
11073 #ifdef TARGET_NR_shmdt
11074 case TARGET_NR_shmdt
:
11075 return do_shmdt(arg1
);
11077 case TARGET_NR_fsync
:
11078 return get_errno(fsync(arg1
));
11079 case TARGET_NR_clone
:
11080 /* Linux manages to have three different orderings for its
11081 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11082 * match the kernel's CONFIG_CLONE_* settings.
11083 * Microblaze is further special in that it uses a sixth
11084 * implicit argument to clone for the TLS pointer.
11086 #if defined(TARGET_MICROBLAZE)
11087 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
11088 #elif defined(TARGET_CLONE_BACKWARDS)
11089 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
11090 #elif defined(TARGET_CLONE_BACKWARDS2)
11091 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
11093 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
11096 #ifdef __NR_exit_group
11097 /* new thread calls */
11098 case TARGET_NR_exit_group
:
11099 preexit_cleanup(cpu_env
, arg1
);
11100 return get_errno(exit_group(arg1
));
11102 case TARGET_NR_setdomainname
:
11103 if (!(p
= lock_user_string(arg1
)))
11104 return -TARGET_EFAULT
;
11105 ret
= get_errno(setdomainname(p
, arg2
));
11106 unlock_user(p
, arg1
, 0);
11108 case TARGET_NR_uname
:
11109 /* no need to transcode because we use the linux syscall */
11111 struct new_utsname
* buf
;
11113 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
11114 return -TARGET_EFAULT
;
11115 ret
= get_errno(sys_uname(buf
));
11116 if (!is_error(ret
)) {
11117 /* Overwrite the native machine name with whatever is being
11119 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
11120 sizeof(buf
->machine
));
11121 /* Allow the user to override the reported release. */
11122 if (qemu_uname_release
&& *qemu_uname_release
) {
11123 g_strlcpy(buf
->release
, qemu_uname_release
,
11124 sizeof(buf
->release
));
11127 unlock_user_struct(buf
, arg1
, 1);
11131 case TARGET_NR_modify_ldt
:
11132 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
11133 #if !defined(TARGET_X86_64)
11134 case TARGET_NR_vm86
:
11135 return do_vm86(cpu_env
, arg1
, arg2
);
11138 #if defined(TARGET_NR_adjtimex)
11139 case TARGET_NR_adjtimex
:
11141 struct timex host_buf
;
11143 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
11144 return -TARGET_EFAULT
;
11146 ret
= get_errno(adjtimex(&host_buf
));
11147 if (!is_error(ret
)) {
11148 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
11149 return -TARGET_EFAULT
;
11155 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11156 case TARGET_NR_clock_adjtime
:
11160 if (target_to_host_timex(&htx
, arg2
) != 0) {
11161 return -TARGET_EFAULT
;
11163 ret
= get_errno(clock_adjtime(arg1
, &htx
));
11164 if (!is_error(ret
) && host_to_target_timex(arg2
, &htx
)) {
11165 return -TARGET_EFAULT
;
11170 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11171 case TARGET_NR_clock_adjtime64
:
11175 if (target_to_host_timex64(&htx
, arg2
) != 0) {
11176 return -TARGET_EFAULT
;
11178 ret
= get_errno(clock_adjtime(arg1
, &htx
));
11179 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
11180 return -TARGET_EFAULT
;
/* Simple pass-through syscalls: each forwards the guest argument
 * directly to the identically-named host call and converts a host
 * failure into a guest -TARGET_Exxx value via get_errno(). */
11185 case TARGET_NR_getpgid
:
/* getpgid(2): process-group ID of the process named by arg1 (0 = self). */
11186 return get_errno(getpgid(arg1
));
11187 case TARGET_NR_fchdir
:
/* fchdir(2): arg1 is used directly as a host file descriptor. */
11188 return get_errno(fchdir(arg1
));
11189 case TARGET_NR_personality
:
/* personality(2): NOTE(review): the flags word is passed through
 * unmodified — assumes guest personality bits are host-compatible;
 * confirm for cross-architecture configurations. */
11190 return get_errno(personality(arg1
));
11191 #ifdef TARGET_NR__llseek /* Not on alpha */
11192 case TARGET_NR__llseek
:
11195 #if !defined(__NR_llseek)
11196 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
11198 ret
= get_errno(res
);
11203 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
11205 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
11206 return -TARGET_EFAULT
;
11211 #ifdef TARGET_NR_getdents
11212 case TARGET_NR_getdents
:
11213 return do_getdents(arg1
, arg2
, arg3
);
11214 #endif /* TARGET_NR_getdents */
11215 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11216 case TARGET_NR_getdents64
:
11217 return do_getdents64(arg1
, arg2
, arg3
);
11218 #endif /* TARGET_NR_getdents64 */
11219 #if defined(TARGET_NR__newselect)
11220 case TARGET_NR__newselect
:
11221 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
11223 #ifdef TARGET_NR_poll
11224 case TARGET_NR_poll
:
11225 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
11227 #ifdef TARGET_NR_ppoll
11228 case TARGET_NR_ppoll
:
11229 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
11231 #ifdef TARGET_NR_ppoll_time64
11232 case TARGET_NR_ppoll_time64
:
11233 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
11235 case TARGET_NR_flock
:
11236 /* NOTE: the flock constant seems to be the same for every
11238 return get_errno(safe_flock(arg1
, arg2
));
11239 case TARGET_NR_readv
:
11241 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
11243 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
11244 unlock_iovec(vec
, arg2
, arg3
, 1);
11246 ret
= -host_to_target_errno(errno
);
11250 case TARGET_NR_writev
:
11252 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11254 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
11255 unlock_iovec(vec
, arg2
, arg3
, 0);
11257 ret
= -host_to_target_errno(errno
);
11261 #if defined(TARGET_NR_preadv)
11262 case TARGET_NR_preadv
:
11264 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
11266 unsigned long low
, high
;
11268 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
11269 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
11270 unlock_iovec(vec
, arg2
, arg3
, 1);
11272 ret
= -host_to_target_errno(errno
);
11277 #if defined(TARGET_NR_pwritev)
11278 case TARGET_NR_pwritev
:
11280 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11282 unsigned long low
, high
;
11284 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
11285 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
11286 unlock_iovec(vec
, arg2
, arg3
, 0);
11288 ret
= -host_to_target_errno(errno
);
11293 case TARGET_NR_getsid
:
11294 return get_errno(getsid(arg1
));
11295 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11296 case TARGET_NR_fdatasync
:
11297 return get_errno(fdatasync(arg1
));
11299 case TARGET_NR_sched_getaffinity
:
11301 unsigned int mask_size
;
11302 unsigned long *mask
;
11305 * sched_getaffinity needs multiples of ulong, so need to take
11306 * care of mismatches between target ulong and host ulong sizes.
11308 if (arg2
& (sizeof(abi_ulong
) - 1)) {
11309 return -TARGET_EINVAL
;
11311 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
11313 mask
= alloca(mask_size
);
11314 memset(mask
, 0, mask_size
);
11315 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
11317 if (!is_error(ret
)) {
11319 /* More data returned than the caller's buffer will fit.
11320 * This only happens if sizeof(abi_long) < sizeof(long)
11321 * and the caller passed us a buffer holding an odd number
11322 * of abi_longs. If the host kernel is actually using the
11323 * extra 4 bytes then fail EINVAL; otherwise we can just
11324 * ignore them and only copy the interesting part.
11326 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
11327 if (numcpus
> arg2
* 8) {
11328 return -TARGET_EINVAL
;
11333 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
11334 return -TARGET_EFAULT
;
11339 case TARGET_NR_sched_setaffinity
:
11341 unsigned int mask_size
;
11342 unsigned long *mask
;
11345 * sched_setaffinity needs multiples of ulong, so need to take
11346 * care of mismatches between target ulong and host ulong sizes.
11348 if (arg2
& (sizeof(abi_ulong
) - 1)) {
11349 return -TARGET_EINVAL
;
11351 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
11352 mask
= alloca(mask_size
);
11354 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
11359 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
11361 case TARGET_NR_getcpu
:
11363 unsigned cpu
, node
;
11364 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
11365 arg2
? &node
: NULL
,
11367 if (is_error(ret
)) {
11370 if (arg1
&& put_user_u32(cpu
, arg1
)) {
11371 return -TARGET_EFAULT
;
11373 if (arg2
&& put_user_u32(node
, arg2
)) {
11374 return -TARGET_EFAULT
;
11378 case TARGET_NR_sched_setparam
:
11380 struct target_sched_param
*target_schp
;
11381 struct sched_param schp
;
11384 return -TARGET_EINVAL
;
11386 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
11387 return -TARGET_EFAULT
;
11389 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
11390 unlock_user_struct(target_schp
, arg2
, 0);
11391 return get_errno(sys_sched_setparam(arg1
, &schp
));
11393 case TARGET_NR_sched_getparam
:
11395 struct target_sched_param
*target_schp
;
11396 struct sched_param schp
;
11399 return -TARGET_EINVAL
;
11401 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
11402 if (!is_error(ret
)) {
11403 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
11404 return -TARGET_EFAULT
;
11406 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
11407 unlock_user_struct(target_schp
, arg2
, 1);
11411 case TARGET_NR_sched_setscheduler
:
11413 struct target_sched_param
*target_schp
;
11414 struct sched_param schp
;
11416 return -TARGET_EINVAL
;
11418 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
11419 return -TARGET_EFAULT
;
11421 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
11422 unlock_user_struct(target_schp
, arg3
, 0);
11423 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
11425 case TARGET_NR_sched_getscheduler
:
11426 return get_errno(sys_sched_getscheduler(arg1
));
11427 case TARGET_NR_sched_getattr
:
11429 struct target_sched_attr
*target_scha
;
11430 struct sched_attr scha
;
11432 return -TARGET_EINVAL
;
11434 if (arg3
> sizeof(scha
)) {
11435 arg3
= sizeof(scha
);
11437 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
11438 if (!is_error(ret
)) {
11439 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11440 if (!target_scha
) {
11441 return -TARGET_EFAULT
;
11443 target_scha
->size
= tswap32(scha
.size
);
11444 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
11445 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
11446 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
11447 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
11448 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
11449 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
11450 target_scha
->sched_period
= tswap64(scha
.sched_period
);
11451 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
11452 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
11453 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
11455 unlock_user(target_scha
, arg2
, arg3
);
11459 case TARGET_NR_sched_setattr
:
11461 struct target_sched_attr
*target_scha
;
11462 struct sched_attr scha
;
11466 return -TARGET_EINVAL
;
11468 if (get_user_u32(size
, arg2
)) {
11469 return -TARGET_EFAULT
;
11472 size
= offsetof(struct target_sched_attr
, sched_util_min
);
11474 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
11475 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
11476 return -TARGET_EFAULT
;
11478 return -TARGET_E2BIG
;
11481 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
11484 } else if (zeroed
== 0) {
11485 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
11486 return -TARGET_EFAULT
;
11488 return -TARGET_E2BIG
;
11490 if (size
> sizeof(struct target_sched_attr
)) {
11491 size
= sizeof(struct target_sched_attr
);
11494 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
11495 if (!target_scha
) {
11496 return -TARGET_EFAULT
;
11499 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
11500 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
11501 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
11502 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
11503 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
11504 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
11505 scha
.sched_period
= tswap64(target_scha
->sched_period
);
11506 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
11507 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
11508 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
11510 unlock_user(target_scha
, arg2
, 0);
11511 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
/* Scheduler pass-throughs: no argument translation is required, so
 * each case forwards straight to the host libc wrapper. */
11513 case TARGET_NR_sched_yield
:
11514 return get_errno(sched_yield());
/* For the two priority queries, arg1 is the scheduling policy
 * (SCHED_FIFO, SCHED_RR, ...), forwarded unchanged to the host. */
11515 case TARGET_NR_sched_get_priority_max
:
11516 return get_errno(sched_get_priority_max(arg1
));
11517 case TARGET_NR_sched_get_priority_min
:
11518 return get_errno(sched_get_priority_min(arg1
));
11519 #ifdef TARGET_NR_sched_rr_get_interval
11520 case TARGET_NR_sched_rr_get_interval
:
11522 struct timespec ts
;
11523 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11524 if (!is_error(ret
)) {
11525 ret
= host_to_target_timespec(arg2
, &ts
);
11530 #ifdef TARGET_NR_sched_rr_get_interval_time64
11531 case TARGET_NR_sched_rr_get_interval_time64
:
11533 struct timespec ts
;
11534 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11535 if (!is_error(ret
)) {
11536 ret
= host_to_target_timespec64(arg2
, &ts
);
11541 #if defined(TARGET_NR_nanosleep)
11542 case TARGET_NR_nanosleep
:
11544 struct timespec req
, rem
;
11545 target_to_host_timespec(&req
, arg1
);
11546 ret
= get_errno(safe_nanosleep(&req
, &rem
));
11547 if (is_error(ret
) && arg2
) {
11548 host_to_target_timespec(arg2
, &rem
);
11553 case TARGET_NR_prctl
:
11554 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
11556 #ifdef TARGET_NR_arch_prctl
11557 case TARGET_NR_arch_prctl
:
11558 return do_arch_prctl(cpu_env
, arg1
, arg2
);
11560 #ifdef TARGET_NR_pread64
11561 case TARGET_NR_pread64
:
11562 if (regpairs_aligned(cpu_env
, num
)) {
11566 if (arg2
== 0 && arg3
== 0) {
11567 /* Special-case NULL buffer and zero length, which should succeed */
11570 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11572 return -TARGET_EFAULT
;
11575 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11576 unlock_user(p
, arg2
, ret
);
11578 case TARGET_NR_pwrite64
:
11579 if (regpairs_aligned(cpu_env
, num
)) {
11583 if (arg2
== 0 && arg3
== 0) {
11584 /* Special-case NULL buffer and zero length, which should succeed */
11587 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11589 return -TARGET_EFAULT
;
11592 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11593 unlock_user(p
, arg2
, 0);
11596 case TARGET_NR_getcwd
:
11597 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11598 return -TARGET_EFAULT
;
11599 ret
= get_errno(sys_getcwd1(p
, arg2
));
11600 unlock_user(p
, arg1
, ret
);
11602 case TARGET_NR_capget
:
11603 case TARGET_NR_capset
:
11605 struct target_user_cap_header
*target_header
;
11606 struct target_user_cap_data
*target_data
= NULL
;
11607 struct __user_cap_header_struct header
;
11608 struct __user_cap_data_struct data
[2];
11609 struct __user_cap_data_struct
*dataptr
= NULL
;
11610 int i
, target_datalen
;
11611 int data_items
= 1;
11613 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11614 return -TARGET_EFAULT
;
11616 header
.version
= tswap32(target_header
->version
);
11617 header
.pid
= tswap32(target_header
->pid
);
11619 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11620 /* Version 2 and up takes pointer to two user_data structs */
11624 target_datalen
= sizeof(*target_data
) * data_items
;
11627 if (num
== TARGET_NR_capget
) {
11628 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11630 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11632 if (!target_data
) {
11633 unlock_user_struct(target_header
, arg1
, 0);
11634 return -TARGET_EFAULT
;
11637 if (num
== TARGET_NR_capset
) {
11638 for (i
= 0; i
< data_items
; i
++) {
11639 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11640 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11641 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11648 if (num
== TARGET_NR_capget
) {
11649 ret
= get_errno(capget(&header
, dataptr
));
11651 ret
= get_errno(capset(&header
, dataptr
));
11654 /* The kernel always updates version for both capget and capset */
11655 target_header
->version
= tswap32(header
.version
);
11656 unlock_user_struct(target_header
, arg1
, 1);
11659 if (num
== TARGET_NR_capget
) {
11660 for (i
= 0; i
< data_items
; i
++) {
11661 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11662 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11663 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11665 unlock_user(target_data
, arg2
, target_datalen
);
11667 unlock_user(target_data
, arg2
, 0);
11672 case TARGET_NR_sigaltstack
:
11673 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11675 #ifdef CONFIG_SENDFILE
11676 #ifdef TARGET_NR_sendfile
11677 case TARGET_NR_sendfile
:
11679 off_t
*offp
= NULL
;
11682 ret
= get_user_sal(off
, arg3
);
11683 if (is_error(ret
)) {
11688 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11689 if (!is_error(ret
) && arg3
) {
11690 abi_long ret2
= put_user_sal(off
, arg3
);
11691 if (is_error(ret2
)) {
11698 #ifdef TARGET_NR_sendfile64
11699 case TARGET_NR_sendfile64
:
11701 off_t
*offp
= NULL
;
11704 ret
= get_user_s64(off
, arg3
);
11705 if (is_error(ret
)) {
11710 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11711 if (!is_error(ret
) && arg3
) {
11712 abi_long ret2
= put_user_s64(off
, arg3
);
11713 if (is_error(ret2
)) {
11721 #ifdef TARGET_NR_vfork
11722 case TARGET_NR_vfork
:
11723 return get_errno(do_fork(cpu_env
,
11724 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11727 #ifdef TARGET_NR_ugetrlimit
11728 case TARGET_NR_ugetrlimit
:
11730 struct rlimit rlim
;
11731 int resource
= target_to_host_resource(arg1
);
11732 ret
= get_errno(getrlimit(resource
, &rlim
));
11733 if (!is_error(ret
)) {
11734 struct target_rlimit
*target_rlim
;
11735 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11736 return -TARGET_EFAULT
;
11737 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11738 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11739 unlock_user_struct(target_rlim
, arg2
, 1);
11744 #ifdef TARGET_NR_truncate64
11745 case TARGET_NR_truncate64
:
11746 if (!(p
= lock_user_string(arg1
)))
11747 return -TARGET_EFAULT
;
11748 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11749 unlock_user(p
, arg1
, 0);
11752 #ifdef TARGET_NR_ftruncate64
11753 case TARGET_NR_ftruncate64
:
11754 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11756 #ifdef TARGET_NR_stat64
11757 case TARGET_NR_stat64
:
11758 if (!(p
= lock_user_string(arg1
))) {
11759 return -TARGET_EFAULT
;
11761 ret
= get_errno(stat(path(p
), &st
));
11762 unlock_user(p
, arg1
, 0);
11763 if (!is_error(ret
))
11764 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11767 #ifdef TARGET_NR_lstat64
11768 case TARGET_NR_lstat64
:
11769 if (!(p
= lock_user_string(arg1
))) {
11770 return -TARGET_EFAULT
;
11772 ret
= get_errno(lstat(path(p
), &st
));
11773 unlock_user(p
, arg1
, 0);
11774 if (!is_error(ret
))
11775 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11778 #ifdef TARGET_NR_fstat64
11779 case TARGET_NR_fstat64
:
11780 ret
= get_errno(fstat(arg1
, &st
));
11781 if (!is_error(ret
))
11782 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11785 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11786 #ifdef TARGET_NR_fstatat64
11787 case TARGET_NR_fstatat64
:
11789 #ifdef TARGET_NR_newfstatat
11790 case TARGET_NR_newfstatat
:
11792 if (!(p
= lock_user_string(arg2
))) {
11793 return -TARGET_EFAULT
;
11795 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11796 unlock_user(p
, arg2
, 0);
11797 if (!is_error(ret
))
11798 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11801 #if defined(TARGET_NR_statx)
11802 case TARGET_NR_statx
:
11804 struct target_statx
*target_stx
;
11808 p
= lock_user_string(arg2
);
11810 return -TARGET_EFAULT
;
11812 #if defined(__NR_statx)
11815 * It is assumed that struct statx is architecture independent.
11817 struct target_statx host_stx
;
11820 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11821 if (!is_error(ret
)) {
11822 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11823 unlock_user(p
, arg2
, 0);
11824 return -TARGET_EFAULT
;
11828 if (ret
!= -TARGET_ENOSYS
) {
11829 unlock_user(p
, arg2
, 0);
11834 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11835 unlock_user(p
, arg2
, 0);
11837 if (!is_error(ret
)) {
11838 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11839 return -TARGET_EFAULT
;
11841 memset(target_stx
, 0, sizeof(*target_stx
));
11842 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11843 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11844 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11845 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11846 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11847 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11848 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11849 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11850 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11851 __put_user(st
.st_size
, &target_stx
->stx_size
);
11852 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11853 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11854 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11855 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11856 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11857 unlock_user_struct(target_stx
, arg5
, 1);
11862 #ifdef TARGET_NR_lchown
11863 case TARGET_NR_lchown
:
11864 if (!(p
= lock_user_string(arg1
)))
11865 return -TARGET_EFAULT
;
11866 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11867 unlock_user(p
, arg1
, 0);
11870 #ifdef TARGET_NR_getuid
11871 case TARGET_NR_getuid
:
11872 return get_errno(high2lowuid(getuid()));
11874 #ifdef TARGET_NR_getgid
11875 case TARGET_NR_getgid
:
11876 return get_errno(high2lowgid(getgid()));
11878 #ifdef TARGET_NR_geteuid
11879 case TARGET_NR_geteuid
:
11880 return get_errno(high2lowuid(geteuid()));
11882 #ifdef TARGET_NR_getegid
11883 case TARGET_NR_getegid
:
11884 return get_errno(high2lowgid(getegid()));
11886 case TARGET_NR_setreuid
:
11887 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11888 case TARGET_NR_setregid
:
11889 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11890 case TARGET_NR_getgroups
:
11891 { /* the same code as for TARGET_NR_getgroups32 */
11892 int gidsetsize
= arg1
;
11893 target_id
*target_grouplist
;
11894 g_autofree gid_t
*grouplist
= NULL
;
11897 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
11898 return -TARGET_EINVAL
;
11900 if (gidsetsize
> 0) {
11901 grouplist
= g_try_new(gid_t
, gidsetsize
);
11903 return -TARGET_ENOMEM
;
11906 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11907 if (!is_error(ret
) && gidsetsize
> 0) {
11908 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
,
11909 gidsetsize
* sizeof(target_id
), 0);
11910 if (!target_grouplist
) {
11911 return -TARGET_EFAULT
;
11913 for (i
= 0; i
< ret
; i
++) {
11914 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11916 unlock_user(target_grouplist
, arg2
,
11917 gidsetsize
* sizeof(target_id
));
11921 case TARGET_NR_setgroups
:
11922 { /* the same code as for TARGET_NR_setgroups32 */
11923 int gidsetsize
= arg1
;
11924 target_id
*target_grouplist
;
11925 g_autofree gid_t
*grouplist
= NULL
;
11928 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
11929 return -TARGET_EINVAL
;
11931 if (gidsetsize
> 0) {
11932 grouplist
= g_try_new(gid_t
, gidsetsize
);
11934 return -TARGET_ENOMEM
;
11936 target_grouplist
= lock_user(VERIFY_READ
, arg2
,
11937 gidsetsize
* sizeof(target_id
), 1);
11938 if (!target_grouplist
) {
11939 return -TARGET_EFAULT
;
11941 for (i
= 0; i
< gidsetsize
; i
++) {
11942 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11944 unlock_user(target_grouplist
, arg2
,
11945 gidsetsize
* sizeof(target_id
));
11947 return get_errno(setgroups(gidsetsize
, grouplist
));
11949 case TARGET_NR_fchown
:
11950 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11951 #if defined(TARGET_NR_fchownat)
11952 case TARGET_NR_fchownat
:
11953 if (!(p
= lock_user_string(arg2
)))
11954 return -TARGET_EFAULT
;
11955 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11956 low2highgid(arg4
), arg5
));
11957 unlock_user(p
, arg2
, 0);
11960 #ifdef TARGET_NR_setresuid
11961 case TARGET_NR_setresuid
:
11962 return get_errno(sys_setresuid(low2highuid(arg1
),
11964 low2highuid(arg3
)));
11966 #ifdef TARGET_NR_getresuid
11967 case TARGET_NR_getresuid
:
11969 uid_t ruid
, euid
, suid
;
11970 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11971 if (!is_error(ret
)) {
11972 if (put_user_id(high2lowuid(ruid
), arg1
)
11973 || put_user_id(high2lowuid(euid
), arg2
)
11974 || put_user_id(high2lowuid(suid
), arg3
))
11975 return -TARGET_EFAULT
;
11980 #ifdef TARGET_NR_getresgid
11981 case TARGET_NR_setresgid
:
11982 return get_errno(sys_setresgid(low2highgid(arg1
),
11984 low2highgid(arg3
)));
11986 #ifdef TARGET_NR_getresgid
11987 case TARGET_NR_getresgid
:
11989 gid_t rgid
, egid
, sgid
;
11990 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11991 if (!is_error(ret
)) {
11992 if (put_user_id(high2lowgid(rgid
), arg1
)
11993 || put_user_id(high2lowgid(egid
), arg2
)
11994 || put_user_id(high2lowgid(sgid
), arg3
))
11995 return -TARGET_EFAULT
;
12000 #ifdef TARGET_NR_chown
12001 case TARGET_NR_chown
:
12002 if (!(p
= lock_user_string(arg1
)))
12003 return -TARGET_EFAULT
;
12004 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
12005 unlock_user(p
, arg1
, 0);
/* Identity-setting syscalls (legacy 16-bit UID/GID variants).  The
 * guest values are widened to the host uid_t/gid_t via the
 * low2high{uid,gid} helpers before reaching the sys_setuid/sys_setgid
 * wrappers (both defined elsewhere in this file). */
12008 case TARGET_NR_setuid
:
12009 return get_errno(sys_setuid(low2highuid(arg1
)));
12010 case TARGET_NR_setgid
:
12011 return get_errno(sys_setgid(low2highgid(arg1
)));
/* setfsuid/setfsgid take the raw guest value.  NOTE(review): the host
 * calls return the *previous* fsuid/fsgid rather than an error code,
 * so get_errno() here effectively never reports failure — verify this
 * matches the intended guest-visible semantics. */
12012 case TARGET_NR_setfsuid
:
12013 return get_errno(setfsuid(arg1
));
12014 case TARGET_NR_setfsgid
:
12015 return get_errno(setfsgid(arg1
));
12017 #ifdef TARGET_NR_lchown32
12018 case TARGET_NR_lchown32
:
12019 if (!(p
= lock_user_string(arg1
)))
12020 return -TARGET_EFAULT
;
12021 ret
= get_errno(lchown(p
, arg2
, arg3
));
12022 unlock_user(p
, arg1
, 0);
12025 #ifdef TARGET_NR_getuid32
12026 case TARGET_NR_getuid32
:
12027 return get_errno(getuid());
12030 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12031 /* Alpha specific */
12032 case TARGET_NR_getxuid
:
12036 cpu_env
->ir
[IR_A4
]=euid
;
12038 return get_errno(getuid());
12040 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12041 /* Alpha specific */
12042 case TARGET_NR_getxgid
:
12046 cpu_env
->ir
[IR_A4
]=egid
;
12048 return get_errno(getgid());
12050 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12051 /* Alpha specific */
12052 case TARGET_NR_osf_getsysinfo
:
12053 ret
= -TARGET_EOPNOTSUPP
;
12055 case TARGET_GSI_IEEE_FP_CONTROL
:
12057 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
12058 uint64_t swcr
= cpu_env
->swcr
;
12060 swcr
&= ~SWCR_STATUS_MASK
;
12061 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
12063 if (put_user_u64 (swcr
, arg2
))
12064 return -TARGET_EFAULT
;
12069 /* case GSI_IEEE_STATE_AT_SIGNAL:
12070 -- Not implemented in linux kernel.
12072 -- Retrieves current unaligned access state; not much used.
12073 case GSI_PROC_TYPE:
12074 -- Retrieves implver information; surely not used.
12075 case GSI_GET_HWRPB:
12076 -- Grabs a copy of the HWRPB; surely not used.
12081 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12082 /* Alpha specific */
12083 case TARGET_NR_osf_setsysinfo
:
12084 ret
= -TARGET_EOPNOTSUPP
;
12086 case TARGET_SSI_IEEE_FP_CONTROL
:
12088 uint64_t swcr
, fpcr
;
12090 if (get_user_u64 (swcr
, arg2
)) {
12091 return -TARGET_EFAULT
;
12095 * The kernel calls swcr_update_status to update the
12096 * status bits from the fpcr at every point that it
12097 * could be queried. Therefore, we store the status
12098 * bits only in FPCR.
12100 cpu_env
->swcr
= swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
12102 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
12103 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
12104 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
12105 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
12110 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
12112 uint64_t exc
, fpcr
, fex
;
12114 if (get_user_u64(exc
, arg2
)) {
12115 return -TARGET_EFAULT
;
12117 exc
&= SWCR_STATUS_MASK
;
12118 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
12120 /* Old exceptions are not signaled. */
12121 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
12123 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
12124 fex
&= (cpu_env
)->swcr
;
12126 /* Update the hardware fpcr. */
12127 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
12128 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
12131 int si_code
= TARGET_FPE_FLTUNK
;
12132 target_siginfo_t info
;
12134 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
12135 si_code
= TARGET_FPE_FLTUND
;
12137 if (fex
& SWCR_TRAP_ENABLE_INE
) {
12138 si_code
= TARGET_FPE_FLTRES
;
12140 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
12141 si_code
= TARGET_FPE_FLTUND
;
12143 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
12144 si_code
= TARGET_FPE_FLTOVF
;
12146 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
12147 si_code
= TARGET_FPE_FLTDIV
;
12149 if (fex
& SWCR_TRAP_ENABLE_INV
) {
12150 si_code
= TARGET_FPE_FLTINV
;
12153 info
.si_signo
= SIGFPE
;
12155 info
.si_code
= si_code
;
12156 info
._sifields
._sigfault
._addr
= (cpu_env
)->pc
;
12157 queue_signal(cpu_env
, info
.si_signo
,
12158 QEMU_SI_FAULT
, &info
);
12164 /* case SSI_NVPAIRS:
12165 -- Used with SSIN_UACPROC to enable unaligned accesses.
12166 case SSI_IEEE_STATE_AT_SIGNAL:
12167 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12168 -- Not implemented in linux kernel
12173 #ifdef TARGET_NR_osf_sigprocmask
12174 /* Alpha specific. */
12175 case TARGET_NR_osf_sigprocmask
:
12179 sigset_t set
, oldset
;
12182 case TARGET_SIG_BLOCK
:
12185 case TARGET_SIG_UNBLOCK
:
12188 case TARGET_SIG_SETMASK
:
12192 return -TARGET_EINVAL
;
12195 target_to_host_old_sigset(&set
, &mask
);
12196 ret
= do_sigprocmask(how
, &set
, &oldset
);
12198 host_to_target_old_sigset(&mask
, &oldset
);
12205 #ifdef TARGET_NR_getgid32
12206 case TARGET_NR_getgid32
:
12207 return get_errno(getgid());
12209 #ifdef TARGET_NR_geteuid32
12210 case TARGET_NR_geteuid32
:
12211 return get_errno(geteuid());
12213 #ifdef TARGET_NR_getegid32
12214 case TARGET_NR_getegid32
:
12215 return get_errno(getegid());
12217 #ifdef TARGET_NR_setreuid32
12218 case TARGET_NR_setreuid32
:
12219 return get_errno(setreuid(arg1
, arg2
));
12221 #ifdef TARGET_NR_setregid32
12222 case TARGET_NR_setregid32
:
12223 return get_errno(setregid(arg1
, arg2
));
12225 #ifdef TARGET_NR_getgroups32
12226 case TARGET_NR_getgroups32
:
12227 { /* the same code as for TARGET_NR_getgroups */
12228 int gidsetsize
= arg1
;
12229 uint32_t *target_grouplist
;
12230 g_autofree gid_t
*grouplist
= NULL
;
12233 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
12234 return -TARGET_EINVAL
;
12236 if (gidsetsize
> 0) {
12237 grouplist
= g_try_new(gid_t
, gidsetsize
);
12239 return -TARGET_ENOMEM
;
12242 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
12243 if (!is_error(ret
) && gidsetsize
> 0) {
12244 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
,
12245 gidsetsize
* 4, 0);
12246 if (!target_grouplist
) {
12247 return -TARGET_EFAULT
;
12249 for (i
= 0; i
< ret
; i
++) {
12250 target_grouplist
[i
] = tswap32(grouplist
[i
]);
12252 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
12257 #ifdef TARGET_NR_setgroups32
12258 case TARGET_NR_setgroups32
:
12259 { /* the same code as for TARGET_NR_setgroups */
12260 int gidsetsize
= arg1
;
12261 uint32_t *target_grouplist
;
12262 g_autofree gid_t
*grouplist
= NULL
;
12265 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
12266 return -TARGET_EINVAL
;
12268 if (gidsetsize
> 0) {
12269 grouplist
= g_try_new(gid_t
, gidsetsize
);
12271 return -TARGET_ENOMEM
;
12273 target_grouplist
= lock_user(VERIFY_READ
, arg2
,
12274 gidsetsize
* 4, 1);
12275 if (!target_grouplist
) {
12276 return -TARGET_EFAULT
;
12278 for (i
= 0; i
< gidsetsize
; i
++) {
12279 grouplist
[i
] = tswap32(target_grouplist
[i
]);
12281 unlock_user(target_grouplist
, arg2
, 0);
12283 return get_errno(setgroups(gidsetsize
, grouplist
));
12286 #ifdef TARGET_NR_fchown32
12287 case TARGET_NR_fchown32
:
12288 return get_errno(fchown(arg1
, arg2
, arg3
));
12290 #ifdef TARGET_NR_setresuid32
12291 case TARGET_NR_setresuid32
:
12292 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
12294 #ifdef TARGET_NR_getresuid32
12295 case TARGET_NR_getresuid32
:
12297 uid_t ruid
, euid
, suid
;
12298 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
12299 if (!is_error(ret
)) {
12300 if (put_user_u32(ruid
, arg1
)
12301 || put_user_u32(euid
, arg2
)
12302 || put_user_u32(suid
, arg3
))
12303 return -TARGET_EFAULT
;
12308 #ifdef TARGET_NR_setresgid32
12309 case TARGET_NR_setresgid32
:
12310 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
12312 #ifdef TARGET_NR_getresgid32
12313 case TARGET_NR_getresgid32
:
12315 gid_t rgid
, egid
, sgid
;
12316 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
12317 if (!is_error(ret
)) {
12318 if (put_user_u32(rgid
, arg1
)
12319 || put_user_u32(egid
, arg2
)
12320 || put_user_u32(sgid
, arg3
))
12321 return -TARGET_EFAULT
;
12326 #ifdef TARGET_NR_chown32
12327 case TARGET_NR_chown32
:
12328 if (!(p
= lock_user_string(arg1
)))
12329 return -TARGET_EFAULT
;
12330 ret
= get_errno(chown(p
, arg2
, arg3
));
12331 unlock_user(p
, arg1
, 0);
12334 #ifdef TARGET_NR_setuid32
12335 case TARGET_NR_setuid32
:
12336 return get_errno(sys_setuid(arg1
));
12338 #ifdef TARGET_NR_setgid32
12339 case TARGET_NR_setgid32
:
12340 return get_errno(sys_setgid(arg1
));
12342 #ifdef TARGET_NR_setfsuid32
12343 case TARGET_NR_setfsuid32
:
12344 return get_errno(setfsuid(arg1
));
12346 #ifdef TARGET_NR_setfsgid32
12347 case TARGET_NR_setfsgid32
:
12348 return get_errno(setfsgid(arg1
));
12350 #ifdef TARGET_NR_mincore
12351 case TARGET_NR_mincore
:
12353 void *a
= lock_user(VERIFY_NONE
, arg1
, arg2
, 0);
12355 return -TARGET_ENOMEM
;
12357 p
= lock_user_string(arg3
);
12359 ret
= -TARGET_EFAULT
;
12361 ret
= get_errno(mincore(a
, arg2
, p
));
12362 unlock_user(p
, arg3
, ret
);
12364 unlock_user(a
, arg1
, 0);
12368 #ifdef TARGET_NR_arm_fadvise64_64
12369 case TARGET_NR_arm_fadvise64_64
:
12370 /* arm_fadvise64_64 looks like fadvise64_64 but
12371 * with different argument order: fd, advice, offset, len
12372 * rather than the usual fd, offset, len, advice.
12373 * Note that offset and len are both 64-bit so appear as
12374 * pairs of 32-bit registers.
12376 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
12377 target_offset64(arg5
, arg6
), arg2
);
12378 return -host_to_target_errno(ret
);
12381 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12383 #ifdef TARGET_NR_fadvise64_64
12384 case TARGET_NR_fadvise64_64
:
12385 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12386 /* 6 args: fd, advice, offset (high, low), len (high, low) */
12394 /* 6 args: fd, offset (high, low), len (high, low), advice */
12395 if (regpairs_aligned(cpu_env
, num
)) {
12396 /* offset is in (3,4), len in (5,6) and advice in 7 */
12404 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
12405 target_offset64(arg4
, arg5
), arg6
);
12406 return -host_to_target_errno(ret
);
12409 #ifdef TARGET_NR_fadvise64
12410 case TARGET_NR_fadvise64
:
12411 /* 5 args: fd, offset (high, low), len, advice */
12412 if (regpairs_aligned(cpu_env
, num
)) {
12413 /* offset is in (3,4), len in 5 and advice in 6 */
12419 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
12420 return -host_to_target_errno(ret
);
12423 #else /* not a 32-bit ABI */
12424 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12425 #ifdef TARGET_NR_fadvise64_64
12426 case TARGET_NR_fadvise64_64
:
12428 #ifdef TARGET_NR_fadvise64
12429 case TARGET_NR_fadvise64
:
12431 #ifdef TARGET_S390X
12433 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
12434 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
12435 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
12436 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
12440 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
12442 #endif /* end of 64-bit ABI fadvise handling */
12444 #ifdef TARGET_NR_madvise
12445 case TARGET_NR_madvise
:
12446 return target_madvise(arg1
, arg2
, arg3
);
12448 #ifdef TARGET_NR_fcntl64
12449 case TARGET_NR_fcntl64
:
12453 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
12454 to_flock64_fn
*copyto
= copy_to_user_flock64
;
12457 if (!cpu_env
->eabi
) {
12458 copyfrom
= copy_from_user_oabi_flock64
;
12459 copyto
= copy_to_user_oabi_flock64
;
12463 cmd
= target_to_host_fcntl_cmd(arg2
);
12464 if (cmd
== -TARGET_EINVAL
) {
12469 case TARGET_F_GETLK64
:
12470 ret
= copyfrom(&fl
, arg3
);
12474 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
12476 ret
= copyto(arg3
, &fl
);
12480 case TARGET_F_SETLK64
:
12481 case TARGET_F_SETLKW64
:
12482 ret
= copyfrom(&fl
, arg3
);
12486 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
12489 ret
= do_fcntl(arg1
, arg2
, arg3
);
12495 #ifdef TARGET_NR_cacheflush
12496 case TARGET_NR_cacheflush
:
12497 /* self-modifying code is handled automatically, so nothing needed */
12500 #ifdef TARGET_NR_getpagesize
12501 case TARGET_NR_getpagesize
:
12502 return TARGET_PAGE_SIZE
;
12504 case TARGET_NR_gettid
:
12505 return get_errno(sys_gettid());
12506 #ifdef TARGET_NR_readahead
12507 case TARGET_NR_readahead
:
12508 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12509 if (regpairs_aligned(cpu_env
, num
)) {
12514 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
12516 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
12521 #ifdef TARGET_NR_setxattr
12522 case TARGET_NR_listxattr
:
12523 case TARGET_NR_llistxattr
:
12527 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12529 return -TARGET_EFAULT
;
12532 p
= lock_user_string(arg1
);
12534 if (num
== TARGET_NR_listxattr
) {
12535 ret
= get_errno(listxattr(p
, b
, arg3
));
12537 ret
= get_errno(llistxattr(p
, b
, arg3
));
12540 ret
= -TARGET_EFAULT
;
12542 unlock_user(p
, arg1
, 0);
12543 unlock_user(b
, arg2
, arg3
);
12546 case TARGET_NR_flistxattr
:
12550 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12552 return -TARGET_EFAULT
;
12555 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
12556 unlock_user(b
, arg2
, arg3
);
12559 case TARGET_NR_setxattr
:
12560 case TARGET_NR_lsetxattr
:
12562 void *p
, *n
, *v
= 0;
12564 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12566 return -TARGET_EFAULT
;
12569 p
= lock_user_string(arg1
);
12570 n
= lock_user_string(arg2
);
12572 if (num
== TARGET_NR_setxattr
) {
12573 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
12575 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
12578 ret
= -TARGET_EFAULT
;
12580 unlock_user(p
, arg1
, 0);
12581 unlock_user(n
, arg2
, 0);
12582 unlock_user(v
, arg3
, 0);
12585 case TARGET_NR_fsetxattr
:
12589 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12591 return -TARGET_EFAULT
;
12594 n
= lock_user_string(arg2
);
12596 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12598 ret
= -TARGET_EFAULT
;
12600 unlock_user(n
, arg2
, 0);
12601 unlock_user(v
, arg3
, 0);
12604 case TARGET_NR_getxattr
:
12605 case TARGET_NR_lgetxattr
:
12607 void *p
, *n
, *v
= 0;
12609 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12611 return -TARGET_EFAULT
;
12614 p
= lock_user_string(arg1
);
12615 n
= lock_user_string(arg2
);
12617 if (num
== TARGET_NR_getxattr
) {
12618 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12620 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12623 ret
= -TARGET_EFAULT
;
12625 unlock_user(p
, arg1
, 0);
12626 unlock_user(n
, arg2
, 0);
12627 unlock_user(v
, arg3
, arg4
);
12630 case TARGET_NR_fgetxattr
:
12634 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12636 return -TARGET_EFAULT
;
12639 n
= lock_user_string(arg2
);
12641 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12643 ret
= -TARGET_EFAULT
;
12645 unlock_user(n
, arg2
, 0);
12646 unlock_user(v
, arg3
, arg4
);
12649 case TARGET_NR_removexattr
:
12650 case TARGET_NR_lremovexattr
:
12653 p
= lock_user_string(arg1
);
12654 n
= lock_user_string(arg2
);
12656 if (num
== TARGET_NR_removexattr
) {
12657 ret
= get_errno(removexattr(p
, n
));
12659 ret
= get_errno(lremovexattr(p
, n
));
12662 ret
= -TARGET_EFAULT
;
12664 unlock_user(p
, arg1
, 0);
12665 unlock_user(n
, arg2
, 0);
12668 case TARGET_NR_fremovexattr
:
12671 n
= lock_user_string(arg2
);
12673 ret
= get_errno(fremovexattr(arg1
, n
));
12675 ret
= -TARGET_EFAULT
;
12677 unlock_user(n
, arg2
, 0);
12681 #endif /* CONFIG_ATTR */
12682 #ifdef TARGET_NR_set_thread_area
12683 case TARGET_NR_set_thread_area
:
12684 #if defined(TARGET_MIPS)
12685 cpu_env
->active_tc
.CP0_UserLocal
= arg1
;
12687 #elif defined(TARGET_CRIS)
12689 ret
= -TARGET_EINVAL
;
12691 cpu_env
->pregs
[PR_PID
] = arg1
;
12695 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12696 return do_set_thread_area(cpu_env
, arg1
);
12697 #elif defined(TARGET_M68K)
12699 TaskState
*ts
= cpu
->opaque
;
12700 ts
->tp_value
= arg1
;
12704 return -TARGET_ENOSYS
;
12707 #ifdef TARGET_NR_get_thread_area
12708 case TARGET_NR_get_thread_area
:
12709 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12710 return do_get_thread_area(cpu_env
, arg1
);
12711 #elif defined(TARGET_M68K)
12713 TaskState
*ts
= cpu
->opaque
;
12714 return ts
->tp_value
;
12717 return -TARGET_ENOSYS
;
12720 #ifdef TARGET_NR_getdomainname
12721 case TARGET_NR_getdomainname
:
12722 return -TARGET_ENOSYS
;
12725 #ifdef TARGET_NR_clock_settime
12726 case TARGET_NR_clock_settime
:
12728 struct timespec ts
;
12730 ret
= target_to_host_timespec(&ts
, arg2
);
12731 if (!is_error(ret
)) {
12732 ret
= get_errno(clock_settime(arg1
, &ts
));
12737 #ifdef TARGET_NR_clock_settime64
12738 case TARGET_NR_clock_settime64
:
12740 struct timespec ts
;
12742 ret
= target_to_host_timespec64(&ts
, arg2
);
12743 if (!is_error(ret
)) {
12744 ret
= get_errno(clock_settime(arg1
, &ts
));
12749 #ifdef TARGET_NR_clock_gettime
12750 case TARGET_NR_clock_gettime
:
12752 struct timespec ts
;
12753 ret
= get_errno(clock_gettime(arg1
, &ts
));
12754 if (!is_error(ret
)) {
12755 ret
= host_to_target_timespec(arg2
, &ts
);
12760 #ifdef TARGET_NR_clock_gettime64
12761 case TARGET_NR_clock_gettime64
:
12763 struct timespec ts
;
12764 ret
= get_errno(clock_gettime(arg1
, &ts
));
12765 if (!is_error(ret
)) {
12766 ret
= host_to_target_timespec64(arg2
, &ts
);
12771 #ifdef TARGET_NR_clock_getres
12772 case TARGET_NR_clock_getres
:
12774 struct timespec ts
;
12775 ret
= get_errno(clock_getres(arg1
, &ts
));
12776 if (!is_error(ret
)) {
12777 host_to_target_timespec(arg2
, &ts
);
12782 #ifdef TARGET_NR_clock_getres_time64
12783 case TARGET_NR_clock_getres_time64
:
12785 struct timespec ts
;
12786 ret
= get_errno(clock_getres(arg1
, &ts
));
12787 if (!is_error(ret
)) {
12788 host_to_target_timespec64(arg2
, &ts
);
12793 #ifdef TARGET_NR_clock_nanosleep
12794 case TARGET_NR_clock_nanosleep
:
12796 struct timespec ts
;
12797 if (target_to_host_timespec(&ts
, arg3
)) {
12798 return -TARGET_EFAULT
;
12800 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12801 &ts
, arg4
? &ts
: NULL
));
12803 * if the call is interrupted by a signal handler, it fails
12804 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12805 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12807 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12808 host_to_target_timespec(arg4
, &ts
)) {
12809 return -TARGET_EFAULT
;
12815 #ifdef TARGET_NR_clock_nanosleep_time64
12816 case TARGET_NR_clock_nanosleep_time64
:
12818 struct timespec ts
;
12820 if (target_to_host_timespec64(&ts
, arg3
)) {
12821 return -TARGET_EFAULT
;
12824 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12825 &ts
, arg4
? &ts
: NULL
));
12827 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12828 host_to_target_timespec64(arg4
, &ts
)) {
12829 return -TARGET_EFAULT
;
12835 #if defined(TARGET_NR_set_tid_address)
12836 case TARGET_NR_set_tid_address
:
12838 TaskState
*ts
= cpu
->opaque
;
12839 ts
->child_tidptr
= arg1
;
12840 /* do not call host set_tid_address() syscall, instead return tid() */
12841 return get_errno(sys_gettid());
12845 case TARGET_NR_tkill
:
12846 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12848 case TARGET_NR_tgkill
:
12849 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12850 target_to_host_signal(arg3
)));
12852 #ifdef TARGET_NR_set_robust_list
12853 case TARGET_NR_set_robust_list
:
12854 case TARGET_NR_get_robust_list
:
12855 /* The ABI for supporting robust futexes has userspace pass
12856 * the kernel a pointer to a linked list which is updated by
12857 * userspace after the syscall; the list is walked by the kernel
12858 * when the thread exits. Since the linked list in QEMU guest
12859 * memory isn't a valid linked list for the host and we have
12860 * no way to reliably intercept the thread-death event, we can't
12861 * support these. Silently return ENOSYS so that guest userspace
12862 * falls back to a non-robust futex implementation (which should
12863 * be OK except in the corner case of the guest crashing while
12864 * holding a mutex that is shared with another process via
12867 return -TARGET_ENOSYS
;
12870 #if defined(TARGET_NR_utimensat)
12871 case TARGET_NR_utimensat
:
12873 struct timespec
*tsp
, ts
[2];
12877 if (target_to_host_timespec(ts
, arg3
)) {
12878 return -TARGET_EFAULT
;
12880 if (target_to_host_timespec(ts
+ 1, arg3
+
12881 sizeof(struct target_timespec
))) {
12882 return -TARGET_EFAULT
;
12887 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12889 if (!(p
= lock_user_string(arg2
))) {
12890 return -TARGET_EFAULT
;
12892 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12893 unlock_user(p
, arg2
, 0);
12898 #ifdef TARGET_NR_utimensat_time64
12899 case TARGET_NR_utimensat_time64
:
12901 struct timespec
*tsp
, ts
[2];
12905 if (target_to_host_timespec64(ts
, arg3
)) {
12906 return -TARGET_EFAULT
;
12908 if (target_to_host_timespec64(ts
+ 1, arg3
+
12909 sizeof(struct target__kernel_timespec
))) {
12910 return -TARGET_EFAULT
;
12915 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12917 p
= lock_user_string(arg2
);
12919 return -TARGET_EFAULT
;
12921 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12922 unlock_user(p
, arg2
, 0);
12927 #ifdef TARGET_NR_futex
12928 case TARGET_NR_futex
:
12929 return do_futex(cpu
, false, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12931 #ifdef TARGET_NR_futex_time64
12932 case TARGET_NR_futex_time64
:
12933 return do_futex(cpu
, true, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12935 #ifdef CONFIG_INOTIFY
12936 #if defined(TARGET_NR_inotify_init)
12937 case TARGET_NR_inotify_init
:
12938 ret
= get_errno(inotify_init());
12940 fd_trans_register(ret
, &target_inotify_trans
);
12944 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12945 case TARGET_NR_inotify_init1
:
12946 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12947 fcntl_flags_tbl
)));
12949 fd_trans_register(ret
, &target_inotify_trans
);
12953 #if defined(TARGET_NR_inotify_add_watch)
12954 case TARGET_NR_inotify_add_watch
:
12955 p
= lock_user_string(arg2
);
12956 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12957 unlock_user(p
, arg2
, 0);
12960 #if defined(TARGET_NR_inotify_rm_watch)
12961 case TARGET_NR_inotify_rm_watch
:
12962 return get_errno(inotify_rm_watch(arg1
, arg2
));
12966 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12967 case TARGET_NR_mq_open
:
12969 struct mq_attr posix_mq_attr
;
12970 struct mq_attr
*pposix_mq_attr
;
12973 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12974 pposix_mq_attr
= NULL
;
12976 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12977 return -TARGET_EFAULT
;
12979 pposix_mq_attr
= &posix_mq_attr
;
12981 p
= lock_user_string(arg1
- 1);
12983 return -TARGET_EFAULT
;
12985 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12986 unlock_user (p
, arg1
, 0);
12990 case TARGET_NR_mq_unlink
:
12991 p
= lock_user_string(arg1
- 1);
12993 return -TARGET_EFAULT
;
12995 ret
= get_errno(mq_unlink(p
));
12996 unlock_user (p
, arg1
, 0);
12999 #ifdef TARGET_NR_mq_timedsend
13000 case TARGET_NR_mq_timedsend
:
13002 struct timespec ts
;
13004 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
13006 if (target_to_host_timespec(&ts
, arg5
)) {
13007 return -TARGET_EFAULT
;
13009 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
13010 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
13011 return -TARGET_EFAULT
;
13014 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
13016 unlock_user (p
, arg2
, arg3
);
13020 #ifdef TARGET_NR_mq_timedsend_time64
13021 case TARGET_NR_mq_timedsend_time64
:
13023 struct timespec ts
;
13025 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
13027 if (target_to_host_timespec64(&ts
, arg5
)) {
13028 return -TARGET_EFAULT
;
13030 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
13031 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
13032 return -TARGET_EFAULT
;
13035 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
13037 unlock_user(p
, arg2
, arg3
);
13042 #ifdef TARGET_NR_mq_timedreceive
13043 case TARGET_NR_mq_timedreceive
:
13045 struct timespec ts
;
13048 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
13050 if (target_to_host_timespec(&ts
, arg5
)) {
13051 return -TARGET_EFAULT
;
13053 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13055 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
13056 return -TARGET_EFAULT
;
13059 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13062 unlock_user (p
, arg2
, arg3
);
13064 put_user_u32(prio
, arg4
);
13068 #ifdef TARGET_NR_mq_timedreceive_time64
13069 case TARGET_NR_mq_timedreceive_time64
:
13071 struct timespec ts
;
13074 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
13076 if (target_to_host_timespec64(&ts
, arg5
)) {
13077 return -TARGET_EFAULT
;
13079 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13081 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
13082 return -TARGET_EFAULT
;
13085 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13088 unlock_user(p
, arg2
, arg3
);
13090 put_user_u32(prio
, arg4
);
13096 /* Not implemented for now... */
13097 /* case TARGET_NR_mq_notify: */
13100 case TARGET_NR_mq_getsetattr
:
13102 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
13105 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
13106 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
13107 &posix_mq_attr_out
));
13108 } else if (arg3
!= 0) {
13109 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
13111 if (ret
== 0 && arg3
!= 0) {
13112 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
13118 #ifdef CONFIG_SPLICE
13119 #ifdef TARGET_NR_tee
13120 case TARGET_NR_tee
:
13122 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
13126 #ifdef TARGET_NR_splice
13127 case TARGET_NR_splice
:
13129 loff_t loff_in
, loff_out
;
13130 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
13132 if (get_user_u64(loff_in
, arg2
)) {
13133 return -TARGET_EFAULT
;
13135 ploff_in
= &loff_in
;
13138 if (get_user_u64(loff_out
, arg4
)) {
13139 return -TARGET_EFAULT
;
13141 ploff_out
= &loff_out
;
13143 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
13145 if (put_user_u64(loff_in
, arg2
)) {
13146 return -TARGET_EFAULT
;
13150 if (put_user_u64(loff_out
, arg4
)) {
13151 return -TARGET_EFAULT
;
13157 #ifdef TARGET_NR_vmsplice
13158 case TARGET_NR_vmsplice
:
13160 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
13162 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
13163 unlock_iovec(vec
, arg2
, arg3
, 0);
13165 ret
= -host_to_target_errno(errno
);
13170 #endif /* CONFIG_SPLICE */
13171 #ifdef CONFIG_EVENTFD
13172 #if defined(TARGET_NR_eventfd)
13173 case TARGET_NR_eventfd
:
13174 ret
= get_errno(eventfd(arg1
, 0));
13176 fd_trans_register(ret
, &target_eventfd_trans
);
13180 #if defined(TARGET_NR_eventfd2)
13181 case TARGET_NR_eventfd2
:
13183 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
13184 if (arg2
& TARGET_O_NONBLOCK
) {
13185 host_flags
|= O_NONBLOCK
;
13187 if (arg2
& TARGET_O_CLOEXEC
) {
13188 host_flags
|= O_CLOEXEC
;
13190 ret
= get_errno(eventfd(arg1
, host_flags
));
13192 fd_trans_register(ret
, &target_eventfd_trans
);
13197 #endif /* CONFIG_EVENTFD */
13198 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13199 case TARGET_NR_fallocate
:
13200 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13201 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
13202 target_offset64(arg5
, arg6
)));
13204 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
13208 #if defined(CONFIG_SYNC_FILE_RANGE)
13209 #if defined(TARGET_NR_sync_file_range)
13210 case TARGET_NR_sync_file_range
:
13211 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13212 #if defined(TARGET_MIPS)
13213 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
13214 target_offset64(arg5
, arg6
), arg7
));
13216 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
13217 target_offset64(arg4
, arg5
), arg6
));
13218 #endif /* !TARGET_MIPS */
13220 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
13224 #if defined(TARGET_NR_sync_file_range2) || \
13225 defined(TARGET_NR_arm_sync_file_range)
13226 #if defined(TARGET_NR_sync_file_range2)
13227 case TARGET_NR_sync_file_range2
:
13229 #if defined(TARGET_NR_arm_sync_file_range)
13230 case TARGET_NR_arm_sync_file_range
:
13232 /* This is like sync_file_range but the arguments are reordered */
13233 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13234 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
13235 target_offset64(arg5
, arg6
), arg2
));
13237 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
13242 #if defined(TARGET_NR_signalfd4)
13243 case TARGET_NR_signalfd4
:
13244 return do_signalfd4(arg1
, arg2
, arg4
);
13246 #if defined(TARGET_NR_signalfd)
13247 case TARGET_NR_signalfd
:
13248 return do_signalfd4(arg1
, arg2
, 0);
13250 #if defined(CONFIG_EPOLL)
13251 #if defined(TARGET_NR_epoll_create)
13252 case TARGET_NR_epoll_create
:
13253 return get_errno(epoll_create(arg1
));
13255 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13256 case TARGET_NR_epoll_create1
:
13257 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
13259 #if defined(TARGET_NR_epoll_ctl)
13260 case TARGET_NR_epoll_ctl
:
13262 struct epoll_event ep
;
13263 struct epoll_event
*epp
= 0;
13265 if (arg2
!= EPOLL_CTL_DEL
) {
13266 struct target_epoll_event
*target_ep
;
13267 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
13268 return -TARGET_EFAULT
;
13270 ep
.events
= tswap32(target_ep
->events
);
13272 * The epoll_data_t union is just opaque data to the kernel,
13273 * so we transfer all 64 bits across and need not worry what
13274 * actual data type it is.
13276 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
13277 unlock_user_struct(target_ep
, arg4
, 0);
13280 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13281 * non-null pointer, even though this argument is ignored.
13286 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
13290 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13291 #if defined(TARGET_NR_epoll_wait)
13292 case TARGET_NR_epoll_wait
:
13294 #if defined(TARGET_NR_epoll_pwait)
13295 case TARGET_NR_epoll_pwait
:
13298 struct target_epoll_event
*target_ep
;
13299 struct epoll_event
*ep
;
13301 int maxevents
= arg3
;
13302 int timeout
= arg4
;
13304 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
13305 return -TARGET_EINVAL
;
13308 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
13309 maxevents
* sizeof(struct target_epoll_event
), 1);
13311 return -TARGET_EFAULT
;
13314 ep
= g_try_new(struct epoll_event
, maxevents
);
13316 unlock_user(target_ep
, arg2
, 0);
13317 return -TARGET_ENOMEM
;
13321 #if defined(TARGET_NR_epoll_pwait)
13322 case TARGET_NR_epoll_pwait
:
13324 sigset_t
*set
= NULL
;
13327 ret
= process_sigsuspend_mask(&set
, arg5
, arg6
);
13333 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
13334 set
, SIGSET_T_SIZE
));
13337 finish_sigsuspend_mask(ret
);
13342 #if defined(TARGET_NR_epoll_wait)
13343 case TARGET_NR_epoll_wait
:
13344 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
13349 ret
= -TARGET_ENOSYS
;
13351 if (!is_error(ret
)) {
13353 for (i
= 0; i
< ret
; i
++) {
13354 target_ep
[i
].events
= tswap32(ep
[i
].events
);
13355 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
13357 unlock_user(target_ep
, arg2
,
13358 ret
* sizeof(struct target_epoll_event
));
13360 unlock_user(target_ep
, arg2
, 0);
13367 #ifdef TARGET_NR_prlimit64
13368 case TARGET_NR_prlimit64
:
13370 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13371 struct target_rlimit64
*target_rnew
, *target_rold
;
13372 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
13373 int resource
= target_to_host_resource(arg2
);
13375 if (arg3
&& (resource
!= RLIMIT_AS
&&
13376 resource
!= RLIMIT_DATA
&&
13377 resource
!= RLIMIT_STACK
)) {
13378 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
13379 return -TARGET_EFAULT
;
13381 __get_user(rnew
.rlim_cur
, &target_rnew
->rlim_cur
);
13382 __get_user(rnew
.rlim_max
, &target_rnew
->rlim_max
);
13383 unlock_user_struct(target_rnew
, arg3
, 0);
13387 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
13388 if (!is_error(ret
) && arg4
) {
13389 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
13390 return -TARGET_EFAULT
;
13392 __put_user(rold
.rlim_cur
, &target_rold
->rlim_cur
);
13393 __put_user(rold
.rlim_max
, &target_rold
->rlim_max
);
13394 unlock_user_struct(target_rold
, arg4
, 1);
13399 #ifdef TARGET_NR_gethostname
13400 case TARGET_NR_gethostname
:
13402 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
13404 ret
= get_errno(gethostname(name
, arg2
));
13405 unlock_user(name
, arg1
, arg2
);
13407 ret
= -TARGET_EFAULT
;
13412 #ifdef TARGET_NR_atomic_cmpxchg_32
13413 case TARGET_NR_atomic_cmpxchg_32
:
13415 /* should use start_exclusive from main.c */
13416 abi_ulong mem_value
;
13417 if (get_user_u32(mem_value
, arg6
)) {
13418 target_siginfo_t info
;
13419 info
.si_signo
= SIGSEGV
;
13421 info
.si_code
= TARGET_SEGV_MAPERR
;
13422 info
._sifields
._sigfault
._addr
= arg6
;
13423 queue_signal(cpu_env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
13427 if (mem_value
== arg2
)
13428 put_user_u32(arg1
, arg6
);
13432 #ifdef TARGET_NR_atomic_barrier
13433 case TARGET_NR_atomic_barrier
:
13434 /* Like the kernel implementation and the
13435 qemu arm barrier, no-op this? */
13439 #ifdef TARGET_NR_timer_create
13440 case TARGET_NR_timer_create
:
13442 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13444 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
13447 int timer_index
= next_free_host_timer();
13449 if (timer_index
< 0) {
13450 ret
= -TARGET_EAGAIN
;
13452 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
13455 phost_sevp
= &host_sevp
;
13456 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
13458 free_host_timer_slot(timer_index
);
13463 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
13465 free_host_timer_slot(timer_index
);
13467 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
13468 timer_delete(*phtimer
);
13469 free_host_timer_slot(timer_index
);
13470 return -TARGET_EFAULT
;
13478 #ifdef TARGET_NR_timer_settime
13479 case TARGET_NR_timer_settime
:
13481 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13482 * struct itimerspec * old_value */
13483 target_timer_t timerid
= get_timer_id(arg1
);
13487 } else if (arg3
== 0) {
13488 ret
= -TARGET_EINVAL
;
13490 timer_t htimer
= g_posix_timers
[timerid
];
13491 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
13493 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
13494 return -TARGET_EFAULT
;
13497 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13498 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
13499 return -TARGET_EFAULT
;
13506 #ifdef TARGET_NR_timer_settime64
13507 case TARGET_NR_timer_settime64
:
13509 target_timer_t timerid
= get_timer_id(arg1
);
13513 } else if (arg3
== 0) {
13514 ret
= -TARGET_EINVAL
;
13516 timer_t htimer
= g_posix_timers
[timerid
];
13517 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
13519 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
13520 return -TARGET_EFAULT
;
13523 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13524 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
13525 return -TARGET_EFAULT
;
13532 #ifdef TARGET_NR_timer_gettime
13533 case TARGET_NR_timer_gettime
:
13535 /* args: timer_t timerid, struct itimerspec *curr_value */
13536 target_timer_t timerid
= get_timer_id(arg1
);
13540 } else if (!arg2
) {
13541 ret
= -TARGET_EFAULT
;
13543 timer_t htimer
= g_posix_timers
[timerid
];
13544 struct itimerspec hspec
;
13545 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13547 if (host_to_target_itimerspec(arg2
, &hspec
)) {
13548 ret
= -TARGET_EFAULT
;
13555 #ifdef TARGET_NR_timer_gettime64
13556 case TARGET_NR_timer_gettime64
:
13558 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13559 target_timer_t timerid
= get_timer_id(arg1
);
13563 } else if (!arg2
) {
13564 ret
= -TARGET_EFAULT
;
13566 timer_t htimer
= g_posix_timers
[timerid
];
13567 struct itimerspec hspec
;
13568 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13570 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
13571 ret
= -TARGET_EFAULT
;
13578 #ifdef TARGET_NR_timer_getoverrun
13579 case TARGET_NR_timer_getoverrun
:
13581 /* args: timer_t timerid */
13582 target_timer_t timerid
= get_timer_id(arg1
);
13587 timer_t htimer
= g_posix_timers
[timerid
];
13588 ret
= get_errno(timer_getoverrun(htimer
));
13594 #ifdef TARGET_NR_timer_delete
13595 case TARGET_NR_timer_delete
:
13597 /* args: timer_t timerid */
13598 target_timer_t timerid
= get_timer_id(arg1
);
13603 timer_t htimer
= g_posix_timers
[timerid
];
13604 ret
= get_errno(timer_delete(htimer
));
13605 free_host_timer_slot(timerid
);
13611 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13612 case TARGET_NR_timerfd_create
:
13613 ret
= get_errno(timerfd_create(arg1
,
13614 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13616 fd_trans_register(ret
, &target_timerfd_trans
);
13621 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13622 case TARGET_NR_timerfd_gettime
:
13624 struct itimerspec its_curr
;
13626 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13628 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13629 return -TARGET_EFAULT
;
13635 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13636 case TARGET_NR_timerfd_gettime64
:
13638 struct itimerspec its_curr
;
13640 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13642 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13643 return -TARGET_EFAULT
;
13649 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13650 case TARGET_NR_timerfd_settime
:
13652 struct itimerspec its_new
, its_old
, *p_new
;
13655 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13656 return -TARGET_EFAULT
;
13663 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13665 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13666 return -TARGET_EFAULT
;
13672 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13673 case TARGET_NR_timerfd_settime64
:
13675 struct itimerspec its_new
, its_old
, *p_new
;
13678 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13679 return -TARGET_EFAULT
;
13686 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13688 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13689 return -TARGET_EFAULT
;
13695 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13696 case TARGET_NR_ioprio_get
:
13697 return get_errno(ioprio_get(arg1
, arg2
));
13700 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13701 case TARGET_NR_ioprio_set
:
13702 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13705 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13706 case TARGET_NR_setns
:
13707 return get_errno(setns(arg1
, arg2
));
13709 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13710 case TARGET_NR_unshare
:
13711 return get_errno(unshare(arg1
));
13713 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13714 case TARGET_NR_kcmp
:
13715 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13717 #ifdef TARGET_NR_swapcontext
13718 case TARGET_NR_swapcontext
:
13719 /* PowerPC specific. */
13720 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13722 #ifdef TARGET_NR_memfd_create
13723 case TARGET_NR_memfd_create
:
13724 p
= lock_user_string(arg1
);
13726 return -TARGET_EFAULT
;
13728 ret
= get_errno(memfd_create(p
, arg2
));
13729 fd_trans_unregister(ret
);
13730 unlock_user(p
, arg1
, 0);
13733 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13734 case TARGET_NR_membarrier
:
13735 return get_errno(membarrier(arg1
, arg2
));
13738 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13739 case TARGET_NR_copy_file_range
:
13741 loff_t inoff
, outoff
;
13742 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13745 if (get_user_u64(inoff
, arg2
)) {
13746 return -TARGET_EFAULT
;
13751 if (get_user_u64(outoff
, arg4
)) {
13752 return -TARGET_EFAULT
;
13756 /* Do not sign-extend the count parameter. */
13757 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13758 (abi_ulong
)arg5
, arg6
));
13759 if (!is_error(ret
) && ret
> 0) {
13761 if (put_user_u64(inoff
, arg2
)) {
13762 return -TARGET_EFAULT
;
13766 if (put_user_u64(outoff
, arg4
)) {
13767 return -TARGET_EFAULT
;
13775 #if defined(TARGET_NR_pivot_root)
13776 case TARGET_NR_pivot_root
:
13779 p
= lock_user_string(arg1
); /* new_root */
13780 p2
= lock_user_string(arg2
); /* put_old */
13782 ret
= -TARGET_EFAULT
;
13784 ret
= get_errno(pivot_root(p
, p2
));
13786 unlock_user(p2
, arg2
, 0);
13787 unlock_user(p
, arg1
, 0);
13792 #if defined(TARGET_NR_riscv_hwprobe)
13793 case TARGET_NR_riscv_hwprobe
:
13794 return do_riscv_hwprobe(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
13798 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13799 return -TARGET_ENOSYS
;
13804 abi_long
do_syscall(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
13805 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13806 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13809 CPUState
*cpu
= env_cpu(cpu_env
);
13812 #ifdef DEBUG_ERESTARTSYS
13813 /* Debug-only code for exercising the syscall-restart code paths
13814 * in the per-architecture cpu main loops: restart every syscall
13815 * the guest makes once before letting it through.
13821 return -QEMU_ERESTARTSYS
;
13826 record_syscall_start(cpu
, num
, arg1
,
13827 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13829 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13830 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13833 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13834 arg5
, arg6
, arg7
, arg8
);
13836 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13837 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13838 arg3
, arg4
, arg5
, arg6
);
13841 record_syscall_return(cpu
, num
, ret
);