4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
33 #include <sys/mount.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
40 #include <linux/capability.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
48 #include <sys/times.h>
51 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
68 #include <sys/timerfd.h>
71 #include <sys/eventfd.h>
74 #include <sys/epoll.h>
77 #include "qemu/xattr.h"
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
82 #ifdef HAVE_SYS_KCOV_H
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
122 #include <linux/btrfs.h>
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
128 #include "linux_loop.h"
132 #include "user-internals.h"
134 #include "signal-common.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
148 #define CLONE_IO 0x80000000 /* Clone io context */
151 /* We can't directly call the host clone syscall, because this will
152 * badly confuse libc (breaking mutexes, for example). So we must
153 * divide clone flags into:
154 * * flag combinations that look like pthread_create()
155 * * flag combinations that look like fork()
156 * * flags we can implement within QEMU itself
157 * * flags we can't support and will return an error for
159 /* For thread creation, all these flags must be present; for
160 * fork, none must be present.
162 #define CLONE_THREAD_FLAGS \
163 (CLONE_VM | CLONE_FS | CLONE_FILES | \
164 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
166 /* These flags are ignored:
167 * CLONE_DETACHED is now ignored by the kernel;
168 * CLONE_IO is just an optimisation hint to the I/O scheduler
170 #define CLONE_IGNORED_FLAGS \
171 (CLONE_DETACHED | CLONE_IO)
174 # define CLONE_PIDFD 0x00001000
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS \
179 (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS \
184 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
185 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
187 #define CLONE_INVALID_FORK_FLAGS \
188 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
190 #define CLONE_INVALID_THREAD_FLAGS \
191 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
192 CLONE_IGNORED_FLAGS))
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195 * have almost all been allocated. We cannot support any of
196 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198 * The checks against the invalid thread masks above will catch these.
199 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203 * once. This exercises the codepaths for restart.
205 //#define DEBUG_ERESTARTSYS
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
221 #define _syscall0(type,name) \
222 static type name (void) \
224 return syscall(__NR_##name); \
227 #define _syscall1(type,name,type1,arg1) \
228 static type name (type1 arg1) \
230 return syscall(__NR_##name, arg1); \
233 #define _syscall2(type,name,type1,arg1,type2,arg2) \
234 static type name (type1 arg1,type2 arg2) \
236 return syscall(__NR_##name, arg1, arg2); \
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
240 static type name (type1 arg1,type2 arg2,type3 arg3) \
242 return syscall(__NR_##name, arg1, arg2, arg3); \
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
255 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
260 type5,arg5,type6,arg6) \
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
264 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
282 #define __NR_sys_statx __NR_statx
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid
)
301 /* For the 64-bit guest on 32-bit host case we must emulate
302 * getdents using getdents64, because otherwise the host
303 * might hand us back more dirent records than we can fit
304 * into the guest buffer after structure format conversion.
305 * Otherwise we emulate getdents with getdents if the host has it.
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents
, unsigned int, fd
, struct linux_dirent
*, dirp
, unsigned int, count
);
314 #if (defined(TARGET_NR_getdents) && \
315 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64
, unsigned int, fd
, struct linux_dirent64
*, dirp
, unsigned int, count
);
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek
, unsigned int, fd
, unsigned long, hi
, unsigned long, lo
,
321 loff_t
*, res
, unsigned int, wh
);
323 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
324 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
326 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group
,int,error_code
)
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range
,int,first
,int,last
,int,flags
)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC (1U << 2)
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
339 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
343 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open
, pid_t
, pid
, unsigned int, flags
);
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal
, int, pidfd
, int, sig
, siginfo_t
*, info
,
350 unsigned int, flags
);
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd
, int, pidfd
, int, targetfd
, unsigned int, flags
);
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
357 unsigned long *, user_mask_ptr
);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
360 unsigned long *, user_mask_ptr
);
361 /* sched_attr is not defined in glibc */
364 uint32_t sched_policy
;
365 uint64_t sched_flags
;
367 uint32_t sched_priority
;
368 uint64_t sched_runtime
;
369 uint64_t sched_deadline
;
370 uint64_t sched_period
;
371 uint32_t sched_util_min
;
372 uint32_t sched_util_max
;
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr
, pid_t
, pid
, struct sched_attr
*, attr
,
376 unsigned int, size
, unsigned int, flags
);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr
, pid_t
, pid
, struct sched_attr
*, attr
,
379 unsigned int, flags
);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler
, pid_t
, pid
);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler
, pid_t
, pid
, int, policy
,
384 const struct sched_param
*, param
);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam
, pid_t
, pid
,
387 struct sched_param
*, param
);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam
, pid_t
, pid
,
390 const struct sched_param
*, param
);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
393 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
395 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
396 struct __user_cap_data_struct
*, data
);
397 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
398 struct __user_cap_data_struct
*, data
);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get
, int, which
, int, who
)
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
411 unsigned long, idx1
, unsigned long, idx2
)
415 * It is assumed that struct statx is architecture independent.
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
419 unsigned int, mask
, struct target_statx
*, statxbuf
)
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier
, int, cmd
, int, flags
)
425 static const bitmask_transtbl fcntl_flags_tbl
[] = {
426 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
427 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
428 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
429 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
430 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
431 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
432 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
433 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
434 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
435 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
436 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
437 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
438 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
439 #if defined(O_DIRECT)
440 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
442 #if defined(O_NOATIME)
443 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
445 #if defined(O_CLOEXEC)
446 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
449 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
451 #if defined(O_TMPFILE)
452 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
454 /* Don't terminate the list prematurely on 64-bit host+guest. */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
461 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
467 const struct timespec
*,tsp
,int,flags
)
469 static int sys_utimensat(int dirfd
, const char *pathname
,
470 const struct timespec times
[2], int flags
)
476 #endif /* TARGET_NR_utimensat */
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
482 const char *, new, unsigned int, flags
)
484 static int sys_renameat2(int oldfd
, const char *old
,
485 int newfd
, const char *new, int flags
)
488 return renameat(oldfd
, old
, newfd
, new);
494 #endif /* TARGET_NR_renameat2 */
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY */
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64
{
516 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
517 const struct host_rlimit64
*, new_limit
,
518 struct host_rlimit64
*, old_limit
)
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers
[GUEST_TIMER_MAX
];
526 static int g_posix_timer_allocated
[GUEST_TIMER_MAX
];
528 static inline int next_free_host_timer(void)
531 for (k
= 0; k
< ARRAY_SIZE(g_posix_timer_allocated
); k
++) {
532 if (qatomic_xchg(g_posix_timer_allocated
+ k
, 1) == 0) {
539 static inline void free_host_timer_slot(int id
)
541 qatomic_store_release(g_posix_timer_allocated
+ id
, 0);
545 static inline int host_to_target_errno(int host_errno
)
547 switch (host_errno
) {
548 #define E(X) case X: return TARGET_##X;
549 #include "errnos.c.inc"
556 static inline int target_to_host_errno(int target_errno
)
558 switch (target_errno
) {
559 #define E(X) case TARGET_##X: return X;
560 #include "errnos.c.inc"
567 abi_long
get_errno(abi_long ret
)
570 return -host_to_target_errno(errno
);
575 const char *target_strerror(int err
)
577 if (err
== QEMU_ERESTARTSYS
) {
578 return "To be restarted";
580 if (err
== QEMU_ESIGRETURN
) {
581 return "Successful exit from sigreturn";
584 return strerror(target_to_host_errno(err
));
587 static int check_zeroed_user(abi_long addr
, size_t ksize
, size_t usize
)
591 if (usize
<= ksize
) {
594 for (i
= ksize
; i
< usize
; i
++) {
595 if (get_user_u8(b
, addr
+ i
)) {
596 return -TARGET_EFAULT
;
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
608 return safe_syscall(__NR_##name); \
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
614 return safe_syscall(__NR_##name, arg1); \
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
620 return safe_syscall(__NR_##name, arg1, arg2); \
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
626 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
633 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637 type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
641 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645 type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647 type5 arg5, type6 arg6) \
649 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
652 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
653 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
654 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
655 int, flags
, mode_t
, mode
)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
658 struct rusage
*, rusage
)
660 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
661 int, options
, struct rusage
*, rusage
)
662 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
663 safe_syscall5(int, execveat
, int, dirfd
, const char *, filename
,
664 char **, argv
, char **, envp
, int, flags
)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
668 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
672 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
675 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
676 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
680 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
684 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
686 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
687 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
688 safe_syscall2(int, tkill
, int, tid
, int, sig
)
689 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
690 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
691 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
692 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
693 unsigned long, pos_l
, unsigned long, pos_h
)
694 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
695 unsigned long, pos_l
, unsigned long, pos_h
)
696 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
698 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
699 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
700 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
701 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
702 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
703 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
704 safe_syscall2(int, flock
, int, fd
, int, operation
)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
707 const struct timespec
*, uts
, size_t, sigsetsize
)
709 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
713 struct timespec
*, rem
)
715 #if defined(TARGET_NR_clock_nanosleep) || \
716 defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
718 const struct timespec
*, req
, struct timespec
*, rem
)
722 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
725 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
726 void *, ptr
, long, fifth
)
730 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
734 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
735 long, msgtype
, int, flags
)
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
739 unsigned, nsops
, const struct timespec
*, timeout
)
741 #if defined(TARGET_NR_mq_timedsend) || \
742 defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
744 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
746 #if defined(TARGET_NR_mq_timedreceive) || \
747 defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
749 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
753 int, outfd
, loff_t
*, poutoff
, size_t, length
,
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758 * "third argument might be integer or pointer or not present" behaviour of
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764 * use the flock64 struct rather than unsuffixed flock
765 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
773 static inline int host_to_target_sock_type(int host_type
)
777 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
779 target_type
= TARGET_SOCK_DGRAM
;
782 target_type
= TARGET_SOCK_STREAM
;
785 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
789 #if defined(SOCK_CLOEXEC)
790 if (host_type
& SOCK_CLOEXEC
) {
791 target_type
|= TARGET_SOCK_CLOEXEC
;
795 #if defined(SOCK_NONBLOCK)
796 if (host_type
& SOCK_NONBLOCK
) {
797 target_type
|= TARGET_SOCK_NONBLOCK
;
804 static abi_ulong target_brk
;
805 static abi_ulong brk_page
;
807 void target_set_brk(abi_ulong new_brk
)
809 target_brk
= TARGET_PAGE_ALIGN(new_brk
);
810 brk_page
= HOST_PAGE_ALIGN(target_brk
);
813 /* do_brk() must return target values and target errnos. */
814 abi_long
do_brk(abi_ulong brk_val
)
816 abi_long mapped_addr
;
817 abi_ulong new_alloc_size
;
818 abi_ulong new_brk
, new_host_brk_page
;
820 /* brk pointers are always untagged */
822 /* return old brk value if brk_val unchanged or zero */
823 if (!brk_val
|| brk_val
== target_brk
) {
827 new_brk
= TARGET_PAGE_ALIGN(brk_val
);
828 new_host_brk_page
= HOST_PAGE_ALIGN(brk_val
);
830 /* brk_val and old target_brk might be on the same page */
831 if (new_brk
== TARGET_PAGE_ALIGN(target_brk
)) {
832 /* empty remaining bytes in (possibly larger) host page */
833 memset(g2h_untagged(new_brk
), 0, new_host_brk_page
- new_brk
);
834 target_brk
= brk_val
;
838 /* Release heap if necesary */
839 if (new_brk
< target_brk
) {
840 /* empty remaining bytes in (possibly larger) host page */
841 memset(g2h_untagged(new_brk
), 0, new_host_brk_page
- new_brk
);
843 /* free unused host pages and set new brk_page */
844 target_munmap(new_host_brk_page
, brk_page
- new_host_brk_page
);
845 brk_page
= new_host_brk_page
;
847 target_brk
= brk_val
;
851 /* We need to allocate more memory after the brk... Note that
852 * we don't use MAP_FIXED because that will map over the top of
853 * any existing mapping (like the one with the host libc or qemu
854 * itself); instead we treat "mapped but at wrong address" as
855 * a failure and unmap again.
857 new_alloc_size
= new_host_brk_page
- brk_page
;
858 if (new_alloc_size
) {
859 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
860 PROT_READ
|PROT_WRITE
,
861 MAP_ANON
|MAP_PRIVATE
, 0, 0));
863 mapped_addr
= brk_page
;
866 if (mapped_addr
== brk_page
) {
867 /* Heap contents are initialized to zero, as for anonymous
868 * mapped pages. Technically the new pages are already
869 * initialized to zero since they *are* anonymous mapped
870 * pages, however we have to take care with the contents that
871 * come from the remaining part of the previous page: it may
872 * contains garbage data due to a previous heap usage (grown
874 memset(g2h_untagged(brk_page
), 0, HOST_PAGE_ALIGN(brk_page
) - brk_page
);
876 target_brk
= brk_val
;
877 brk_page
= new_host_brk_page
;
879 } else if (mapped_addr
!= -1) {
880 /* Mapped but at wrong address, meaning there wasn't actually
881 * enough space for this brk.
883 target_munmap(mapped_addr
, new_alloc_size
);
887 #if defined(TARGET_ALPHA)
888 /* We (partially) emulate OSF/1 on Alpha, which requires we
889 return a proper errno, not an unchanged brk value. */
890 return -TARGET_ENOMEM
;
892 /* For everything else, return the previous break. */
896 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
897 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
898 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
899 abi_ulong target_fds_addr
,
903 abi_ulong b
, *target_fds
;
905 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
906 if (!(target_fds
= lock_user(VERIFY_READ
,
908 sizeof(abi_ulong
) * nw
,
910 return -TARGET_EFAULT
;
914 for (i
= 0; i
< nw
; i
++) {
915 /* grab the abi_ulong */
916 __get_user(b
, &target_fds
[i
]);
917 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
918 /* check the bit inside the abi_ulong */
925 unlock_user(target_fds
, target_fds_addr
, 0);
930 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
931 abi_ulong target_fds_addr
,
934 if (target_fds_addr
) {
935 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
936 return -TARGET_EFAULT
;
944 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
950 abi_ulong
*target_fds
;
952 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
953 if (!(target_fds
= lock_user(VERIFY_WRITE
,
955 sizeof(abi_ulong
) * nw
,
957 return -TARGET_EFAULT
;
960 for (i
= 0; i
< nw
; i
++) {
962 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
963 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
966 __put_user(v
, &target_fds
[i
]);
969 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
975 #if defined(__alpha__)
981 static inline abi_long
host_to_target_clock_t(long ticks
)
983 #if HOST_HZ == TARGET_HZ
986 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
990 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
991 const struct rusage
*rusage
)
993 struct target_rusage
*target_rusage
;
995 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
996 return -TARGET_EFAULT
;
997 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
998 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
999 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1000 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1001 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1002 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1003 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1004 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1005 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1006 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1007 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1008 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1009 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1010 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1011 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1012 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1013 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1014 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1015 unlock_user_struct(target_rusage
, target_addr
, 1);
1020 #ifdef TARGET_NR_setrlimit
1021 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1023 abi_ulong target_rlim_swap
;
1026 target_rlim_swap
= tswapal(target_rlim
);
1027 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1028 return RLIM_INFINITY
;
1030 result
= target_rlim_swap
;
1031 if (target_rlim_swap
!= (rlim_t
)result
)
1032 return RLIM_INFINITY
;
1038 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1039 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1041 abi_ulong target_rlim_swap
;
1044 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1045 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1047 target_rlim_swap
= rlim
;
1048 result
= tswapal(target_rlim_swap
);
1054 static inline int target_to_host_resource(int code
)
1057 case TARGET_RLIMIT_AS
:
1059 case TARGET_RLIMIT_CORE
:
1061 case TARGET_RLIMIT_CPU
:
1063 case TARGET_RLIMIT_DATA
:
1065 case TARGET_RLIMIT_FSIZE
:
1066 return RLIMIT_FSIZE
;
1067 case TARGET_RLIMIT_LOCKS
:
1068 return RLIMIT_LOCKS
;
1069 case TARGET_RLIMIT_MEMLOCK
:
1070 return RLIMIT_MEMLOCK
;
1071 case TARGET_RLIMIT_MSGQUEUE
:
1072 return RLIMIT_MSGQUEUE
;
1073 case TARGET_RLIMIT_NICE
:
1075 case TARGET_RLIMIT_NOFILE
:
1076 return RLIMIT_NOFILE
;
1077 case TARGET_RLIMIT_NPROC
:
1078 return RLIMIT_NPROC
;
1079 case TARGET_RLIMIT_RSS
:
1081 case TARGET_RLIMIT_RTPRIO
:
1082 return RLIMIT_RTPRIO
;
1083 #ifdef RLIMIT_RTTIME
1084 case TARGET_RLIMIT_RTTIME
:
1085 return RLIMIT_RTTIME
;
1087 case TARGET_RLIMIT_SIGPENDING
:
1088 return RLIMIT_SIGPENDING
;
1089 case TARGET_RLIMIT_STACK
:
1090 return RLIMIT_STACK
;
1096 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1097 abi_ulong target_tv_addr
)
1099 struct target_timeval
*target_tv
;
1101 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1102 return -TARGET_EFAULT
;
1105 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1106 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1108 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1113 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1114 const struct timeval
*tv
)
1116 struct target_timeval
*target_tv
;
1118 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1119 return -TARGET_EFAULT
;
1122 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1123 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1125 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1130 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1131 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1132 abi_ulong target_tv_addr
)
1134 struct target__kernel_sock_timeval
*target_tv
;
1136 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1137 return -TARGET_EFAULT
;
1140 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1141 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1143 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1149 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1150 const struct timeval
*tv
)
1152 struct target__kernel_sock_timeval
*target_tv
;
1154 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1155 return -TARGET_EFAULT
;
1158 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1159 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1161 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1166 #if defined(TARGET_NR_futex) || \
1167 defined(TARGET_NR_rt_sigtimedwait) || \
1168 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1169 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1170 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1171 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1172 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1173 defined(TARGET_NR_timer_settime) || \
1174 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1175 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1176 abi_ulong target_addr
)
1178 struct target_timespec
*target_ts
;
1180 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1181 return -TARGET_EFAULT
;
1183 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1184 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1185 unlock_user_struct(target_ts
, target_addr
, 0);
1190 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1191 defined(TARGET_NR_timer_settime64) || \
1192 defined(TARGET_NR_mq_timedsend_time64) || \
1193 defined(TARGET_NR_mq_timedreceive_time64) || \
1194 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1195 defined(TARGET_NR_clock_nanosleep_time64) || \
1196 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1197 defined(TARGET_NR_utimensat) || \
1198 defined(TARGET_NR_utimensat_time64) || \
1199 defined(TARGET_NR_semtimedop_time64) || \
1200 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1201 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1202 abi_ulong target_addr
)
1204 struct target__kernel_timespec
*target_ts
;
1206 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1207 return -TARGET_EFAULT
;
1209 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1210 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1211 /* in 32bit mode, this drops the padding */
1212 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1213 unlock_user_struct(target_ts
, target_addr
, 0);
1218 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1219 struct timespec
*host_ts
)
1221 struct target_timespec
*target_ts
;
1223 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1224 return -TARGET_EFAULT
;
1226 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1227 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1228 unlock_user_struct(target_ts
, target_addr
, 1);
1232 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1233 struct timespec
*host_ts
)
1235 struct target__kernel_timespec
*target_ts
;
1237 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1238 return -TARGET_EFAULT
;
1240 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1241 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1242 unlock_user_struct(target_ts
, target_addr
, 1);
1246 #if defined(TARGET_NR_gettimeofday)
1247 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1248 struct timezone
*tz
)
1250 struct target_timezone
*target_tz
;
1252 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1253 return -TARGET_EFAULT
;
1256 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1257 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1259 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1265 #if defined(TARGET_NR_settimeofday)
1266 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1267 abi_ulong target_tz_addr
)
1269 struct target_timezone
*target_tz
;
1271 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1272 return -TARGET_EFAULT
;
1275 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1276 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1278 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1284 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1287 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1288 abi_ulong target_mq_attr_addr
)
1290 struct target_mq_attr
*target_mq_attr
;
1292 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1293 target_mq_attr_addr
, 1))
1294 return -TARGET_EFAULT
;
1296 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1297 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1298 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1299 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1301 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1306 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1307 const struct mq_attr
*attr
)
1309 struct target_mq_attr
*target_mq_attr
;
1311 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1312 target_mq_attr_addr
, 0))
1313 return -TARGET_EFAULT
;
1315 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1316 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1317 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1318 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1320 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1326 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1327 /* do_select() must return target values and target errnos. */
1328 static abi_long
do_select(int n
,
1329 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1330 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1332 fd_set rfds
, wfds
, efds
;
1333 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1335 struct timespec ts
, *ts_ptr
;
1338 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1342 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1346 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1351 if (target_tv_addr
) {
1352 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1353 return -TARGET_EFAULT
;
1354 ts
.tv_sec
= tv
.tv_sec
;
1355 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1361 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1364 if (!is_error(ret
)) {
1365 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1366 return -TARGET_EFAULT
;
1367 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1368 return -TARGET_EFAULT
;
1369 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1370 return -TARGET_EFAULT
;
1372 if (target_tv_addr
) {
1373 tv
.tv_sec
= ts
.tv_sec
;
1374 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1375 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1376 return -TARGET_EFAULT
;
1384 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1385 static abi_long
do_old_select(abi_ulong arg1
)
1387 struct target_sel_arg_struct
*sel
;
1388 abi_ulong inp
, outp
, exp
, tvp
;
1391 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1392 return -TARGET_EFAULT
;
1395 nsel
= tswapal(sel
->n
);
1396 inp
= tswapal(sel
->inp
);
1397 outp
= tswapal(sel
->outp
);
1398 exp
= tswapal(sel
->exp
);
1399 tvp
= tswapal(sel
->tvp
);
1401 unlock_user_struct(sel
, arg1
, 0);
1403 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1408 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1409 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1410 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1413 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1414 fd_set rfds
, wfds
, efds
;
1415 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1416 struct timespec ts
, *ts_ptr
;
1420 * The 6th arg is actually two args smashed together,
1421 * so we cannot use the C library.
1428 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1436 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1440 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1444 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1450 * This takes a timespec, and not a timeval, so we cannot
1451 * use the do_select() helper ...
1455 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1456 return -TARGET_EFAULT
;
1459 if (target_to_host_timespec(&ts
, ts_addr
)) {
1460 return -TARGET_EFAULT
;
1468 /* Extract the two packed args for the sigset */
1471 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1473 return -TARGET_EFAULT
;
1475 arg_sigset
= tswapal(arg7
[0]);
1476 arg_sigsize
= tswapal(arg7
[1]);
1477 unlock_user(arg7
, arg6
, 0);
1480 ret
= process_sigsuspend_mask(&sig
.set
, arg_sigset
, arg_sigsize
);
1485 sig
.size
= SIGSET_T_SIZE
;
1489 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1493 finish_sigsuspend_mask(ret
);
1496 if (!is_error(ret
)) {
1497 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1498 return -TARGET_EFAULT
;
1500 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1501 return -TARGET_EFAULT
;
1503 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1504 return -TARGET_EFAULT
;
1507 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1508 return -TARGET_EFAULT
;
1511 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1512 return -TARGET_EFAULT
;
1520 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1521 defined(TARGET_NR_ppoll_time64)
1522 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1523 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1525 struct target_pollfd
*target_pfd
;
1526 unsigned int nfds
= arg2
;
1534 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1535 return -TARGET_EINVAL
;
1537 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1538 sizeof(struct target_pollfd
) * nfds
, 1);
1540 return -TARGET_EFAULT
;
1543 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1544 for (i
= 0; i
< nfds
; i
++) {
1545 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1546 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1550 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1551 sigset_t
*set
= NULL
;
1555 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1556 unlock_user(target_pfd
, arg1
, 0);
1557 return -TARGET_EFAULT
;
1560 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1561 unlock_user(target_pfd
, arg1
, 0);
1562 return -TARGET_EFAULT
;
1570 ret
= process_sigsuspend_mask(&set
, arg4
, arg5
);
1572 unlock_user(target_pfd
, arg1
, 0);
1577 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1578 set
, SIGSET_T_SIZE
));
1581 finish_sigsuspend_mask(ret
);
1583 if (!is_error(ret
) && arg3
) {
1585 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1586 return -TARGET_EFAULT
;
1589 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1590 return -TARGET_EFAULT
;
1595 struct timespec ts
, *pts
;
1598 /* Convert ms to secs, ns */
1599 ts
.tv_sec
= arg3
/ 1000;
1600 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1603 /* -ve poll() timeout means "infinite" */
1606 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1609 if (!is_error(ret
)) {
1610 for (i
= 0; i
< nfds
; i
++) {
1611 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1614 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
1619 static abi_long
do_pipe(CPUArchState
*cpu_env
, abi_ulong pipedes
,
1620 int flags
, int is_pipe2
)
1624 ret
= pipe2(host_pipe
, flags
);
1627 return get_errno(ret
);
1629 /* Several targets have special calling conventions for the original
1630 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1632 #if defined(TARGET_ALPHA)
1633 cpu_env
->ir
[IR_A4
] = host_pipe
[1];
1634 return host_pipe
[0];
1635 #elif defined(TARGET_MIPS)
1636 cpu_env
->active_tc
.gpr
[3] = host_pipe
[1];
1637 return host_pipe
[0];
1638 #elif defined(TARGET_SH4)
1639 cpu_env
->gregs
[1] = host_pipe
[1];
1640 return host_pipe
[0];
1641 #elif defined(TARGET_SPARC)
1642 cpu_env
->regwptr
[1] = host_pipe
[1];
1643 return host_pipe
[0];
1647 if (put_user_s32(host_pipe
[0], pipedes
)
1648 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(abi_int
)))
1649 return -TARGET_EFAULT
;
1650 return get_errno(ret
);
1653 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1654 abi_ulong target_addr
,
1657 struct target_ip_mreqn
*target_smreqn
;
1659 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1661 return -TARGET_EFAULT
;
1662 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1663 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1664 if (len
== sizeof(struct target_ip_mreqn
))
1665 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1666 unlock_user(target_smreqn
, target_addr
, 0);
1671 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1672 abi_ulong target_addr
,
1675 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1676 sa_family_t sa_family
;
1677 struct target_sockaddr
*target_saddr
;
1679 if (fd_trans_target_to_host_addr(fd
)) {
1680 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1683 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1685 return -TARGET_EFAULT
;
1687 sa_family
= tswap16(target_saddr
->sa_family
);
1689 /* Oops. The caller might send a incomplete sun_path; sun_path
1690 * must be terminated by \0 (see the manual page), but
1691 * unfortunately it is quite common to specify sockaddr_un
1692 * length as "strlen(x->sun_path)" while it should be
1693 * "strlen(...) + 1". We'll fix that here if needed.
1694 * Linux kernel has a similar feature.
1697 if (sa_family
== AF_UNIX
) {
1698 if (len
< unix_maxlen
&& len
> 0) {
1699 char *cp
= (char*)target_saddr
;
1701 if ( cp
[len
-1] && !cp
[len
] )
1704 if (len
> unix_maxlen
)
1708 memcpy(addr
, target_saddr
, len
);
1709 addr
->sa_family
= sa_family
;
1710 if (sa_family
== AF_NETLINK
) {
1711 struct sockaddr_nl
*nladdr
;
1713 nladdr
= (struct sockaddr_nl
*)addr
;
1714 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1715 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1716 } else if (sa_family
== AF_PACKET
) {
1717 struct target_sockaddr_ll
*lladdr
;
1719 lladdr
= (struct target_sockaddr_ll
*)addr
;
1720 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1721 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1722 } else if (sa_family
== AF_INET6
) {
1723 struct sockaddr_in6
*in6addr
;
1725 in6addr
= (struct sockaddr_in6
*)addr
;
1726 in6addr
->sin6_scope_id
= tswap32(in6addr
->sin6_scope_id
);
1728 unlock_user(target_saddr
, target_addr
, 0);
1733 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1734 struct sockaddr
*addr
,
1737 struct target_sockaddr
*target_saddr
;
1744 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1746 return -TARGET_EFAULT
;
1747 memcpy(target_saddr
, addr
, len
);
1748 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1749 sizeof(target_saddr
->sa_family
)) {
1750 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1752 if (addr
->sa_family
== AF_NETLINK
&&
1753 len
>= sizeof(struct target_sockaddr_nl
)) {
1754 struct target_sockaddr_nl
*target_nl
=
1755 (struct target_sockaddr_nl
*)target_saddr
;
1756 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1757 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1758 } else if (addr
->sa_family
== AF_PACKET
) {
1759 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1760 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1761 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1762 } else if (addr
->sa_family
== AF_INET6
&&
1763 len
>= sizeof(struct target_sockaddr_in6
)) {
1764 struct target_sockaddr_in6
*target_in6
=
1765 (struct target_sockaddr_in6
*)target_saddr
;
1766 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1768 unlock_user(target_saddr
, target_addr
, len
);
1773 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1774 struct target_msghdr
*target_msgh
)
1776 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1777 abi_long msg_controllen
;
1778 abi_ulong target_cmsg_addr
;
1779 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1780 socklen_t space
= 0;
1782 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1783 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1785 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1786 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1787 target_cmsg_start
= target_cmsg
;
1789 return -TARGET_EFAULT
;
1791 while (cmsg
&& target_cmsg
) {
1792 void *data
= CMSG_DATA(cmsg
);
1793 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1795 int len
= tswapal(target_cmsg
->cmsg_len
)
1796 - sizeof(struct target_cmsghdr
);
1798 space
+= CMSG_SPACE(len
);
1799 if (space
> msgh
->msg_controllen
) {
1800 space
-= CMSG_SPACE(len
);
1801 /* This is a QEMU bug, since we allocated the payload
1802 * area ourselves (unlike overflow in host-to-target
1803 * conversion, which is just the guest giving us a buffer
1804 * that's too small). It can't happen for the payload types
1805 * we currently support; if it becomes an issue in future
1806 * we would need to improve our allocation strategy to
1807 * something more intelligent than "twice the size of the
1808 * target buffer we're reading from".
1810 qemu_log_mask(LOG_UNIMP
,
1811 ("Unsupported ancillary data %d/%d: "
1812 "unhandled msg size\n"),
1813 tswap32(target_cmsg
->cmsg_level
),
1814 tswap32(target_cmsg
->cmsg_type
));
1818 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1819 cmsg
->cmsg_level
= SOL_SOCKET
;
1821 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1823 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1824 cmsg
->cmsg_len
= CMSG_LEN(len
);
1826 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1827 int *fd
= (int *)data
;
1828 int *target_fd
= (int *)target_data
;
1829 int i
, numfds
= len
/ sizeof(int);
1831 for (i
= 0; i
< numfds
; i
++) {
1832 __get_user(fd
[i
], target_fd
+ i
);
1834 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1835 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1836 struct ucred
*cred
= (struct ucred
*)data
;
1837 struct target_ucred
*target_cred
=
1838 (struct target_ucred
*)target_data
;
1840 __get_user(cred
->pid
, &target_cred
->pid
);
1841 __get_user(cred
->uid
, &target_cred
->uid
);
1842 __get_user(cred
->gid
, &target_cred
->gid
);
1843 } else if (cmsg
->cmsg_level
== SOL_ALG
) {
1844 uint32_t *dst
= (uint32_t *)data
;
1846 memcpy(dst
, target_data
, len
);
1847 /* fix endianess of first 32-bit word */
1848 if (len
>= sizeof(uint32_t)) {
1849 *dst
= tswap32(*dst
);
1852 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1853 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1854 memcpy(data
, target_data
, len
);
1857 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1858 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1861 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1863 msgh
->msg_controllen
= space
;
1867 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1868 struct msghdr
*msgh
)
1870 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1871 abi_long msg_controllen
;
1872 abi_ulong target_cmsg_addr
;
1873 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1874 socklen_t space
= 0;
1876 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1877 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1879 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1880 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1881 target_cmsg_start
= target_cmsg
;
1883 return -TARGET_EFAULT
;
1885 while (cmsg
&& target_cmsg
) {
1886 void *data
= CMSG_DATA(cmsg
);
1887 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1889 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1890 int tgt_len
, tgt_space
;
1892 /* We never copy a half-header but may copy half-data;
1893 * this is Linux's behaviour in put_cmsg(). Note that
1894 * truncation here is a guest problem (which we report
1895 * to the guest via the CTRUNC bit), unlike truncation
1896 * in target_to_host_cmsg, which is a QEMU bug.
1898 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1899 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1903 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1904 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1906 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1908 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1910 /* Payload types which need a different size of payload on
1911 * the target must adjust tgt_len here.
1914 switch (cmsg
->cmsg_level
) {
1916 switch (cmsg
->cmsg_type
) {
1918 tgt_len
= sizeof(struct target_timeval
);
1928 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1929 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1930 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1933 /* We must now copy-and-convert len bytes of payload
1934 * into tgt_len bytes of destination space. Bear in mind
1935 * that in both source and destination we may be dealing
1936 * with a truncated value!
1938 switch (cmsg
->cmsg_level
) {
1940 switch (cmsg
->cmsg_type
) {
1943 int *fd
= (int *)data
;
1944 int *target_fd
= (int *)target_data
;
1945 int i
, numfds
= tgt_len
/ sizeof(int);
1947 for (i
= 0; i
< numfds
; i
++) {
1948 __put_user(fd
[i
], target_fd
+ i
);
1954 struct timeval
*tv
= (struct timeval
*)data
;
1955 struct target_timeval
*target_tv
=
1956 (struct target_timeval
*)target_data
;
1958 if (len
!= sizeof(struct timeval
) ||
1959 tgt_len
!= sizeof(struct target_timeval
)) {
1963 /* copy struct timeval to target */
1964 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1965 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1968 case SCM_CREDENTIALS
:
1970 struct ucred
*cred
= (struct ucred
*)data
;
1971 struct target_ucred
*target_cred
=
1972 (struct target_ucred
*)target_data
;
1974 __put_user(cred
->pid
, &target_cred
->pid
);
1975 __put_user(cred
->uid
, &target_cred
->uid
);
1976 __put_user(cred
->gid
, &target_cred
->gid
);
1985 switch (cmsg
->cmsg_type
) {
1988 uint32_t *v
= (uint32_t *)data
;
1989 uint32_t *t_int
= (uint32_t *)target_data
;
1991 if (len
!= sizeof(uint32_t) ||
1992 tgt_len
!= sizeof(uint32_t)) {
1995 __put_user(*v
, t_int
);
2001 struct sock_extended_err ee
;
2002 struct sockaddr_in offender
;
2004 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2005 struct errhdr_t
*target_errh
=
2006 (struct errhdr_t
*)target_data
;
2008 if (len
!= sizeof(struct errhdr_t
) ||
2009 tgt_len
!= sizeof(struct errhdr_t
)) {
2012 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2013 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2014 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2015 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2016 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2017 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2018 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2019 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2020 (void *) &errh
->offender
, sizeof(errh
->offender
));
2029 switch (cmsg
->cmsg_type
) {
2032 uint32_t *v
= (uint32_t *)data
;
2033 uint32_t *t_int
= (uint32_t *)target_data
;
2035 if (len
!= sizeof(uint32_t) ||
2036 tgt_len
!= sizeof(uint32_t)) {
2039 __put_user(*v
, t_int
);
2045 struct sock_extended_err ee
;
2046 struct sockaddr_in6 offender
;
2048 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2049 struct errhdr6_t
*target_errh
=
2050 (struct errhdr6_t
*)target_data
;
2052 if (len
!= sizeof(struct errhdr6_t
) ||
2053 tgt_len
!= sizeof(struct errhdr6_t
)) {
2056 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2057 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2058 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2059 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2060 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2061 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2062 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2063 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2064 (void *) &errh
->offender
, sizeof(errh
->offender
));
2074 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2075 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2076 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2077 if (tgt_len
> len
) {
2078 memset(target_data
+ len
, 0, tgt_len
- len
);
2082 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2083 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2084 if (msg_controllen
< tgt_space
) {
2085 tgt_space
= msg_controllen
;
2087 msg_controllen
-= tgt_space
;
2089 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2090 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2093 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2095 target_msgh
->msg_controllen
= tswapal(space
);
2099 /* do_setsockopt() Must return target values and target errnos. */
2100 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2101 abi_ulong optval_addr
, socklen_t optlen
)
2105 struct ip_mreqn
*ip_mreq
;
2106 struct ip_mreq_source
*ip_mreq_source
;
2111 /* TCP and UDP options all take an 'int' value. */
2112 if (optlen
< sizeof(uint32_t))
2113 return -TARGET_EINVAL
;
2115 if (get_user_u32(val
, optval_addr
))
2116 return -TARGET_EFAULT
;
2117 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2124 case IP_ROUTER_ALERT
:
2128 case IP_MTU_DISCOVER
:
2135 case IP_MULTICAST_TTL
:
2136 case IP_MULTICAST_LOOP
:
2138 if (optlen
>= sizeof(uint32_t)) {
2139 if (get_user_u32(val
, optval_addr
))
2140 return -TARGET_EFAULT
;
2141 } else if (optlen
>= 1) {
2142 if (get_user_u8(val
, optval_addr
))
2143 return -TARGET_EFAULT
;
2145 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2147 case IP_ADD_MEMBERSHIP
:
2148 case IP_DROP_MEMBERSHIP
:
2149 if (optlen
< sizeof (struct target_ip_mreq
) ||
2150 optlen
> sizeof (struct target_ip_mreqn
))
2151 return -TARGET_EINVAL
;
2153 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2154 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2155 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2158 case IP_BLOCK_SOURCE
:
2159 case IP_UNBLOCK_SOURCE
:
2160 case IP_ADD_SOURCE_MEMBERSHIP
:
2161 case IP_DROP_SOURCE_MEMBERSHIP
:
2162 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2163 return -TARGET_EINVAL
;
2165 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2166 if (!ip_mreq_source
) {
2167 return -TARGET_EFAULT
;
2169 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2170 unlock_user (ip_mreq_source
, optval_addr
, 0);
2179 case IPV6_MTU_DISCOVER
:
2182 case IPV6_RECVPKTINFO
:
2183 case IPV6_UNICAST_HOPS
:
2184 case IPV6_MULTICAST_HOPS
:
2185 case IPV6_MULTICAST_LOOP
:
2187 case IPV6_RECVHOPLIMIT
:
2188 case IPV6_2292HOPLIMIT
:
2191 case IPV6_2292PKTINFO
:
2192 case IPV6_RECVTCLASS
:
2193 case IPV6_RECVRTHDR
:
2194 case IPV6_2292RTHDR
:
2195 case IPV6_RECVHOPOPTS
:
2196 case IPV6_2292HOPOPTS
:
2197 case IPV6_RECVDSTOPTS
:
2198 case IPV6_2292DSTOPTS
:
2200 case IPV6_ADDR_PREFERENCES
:
2201 #ifdef IPV6_RECVPATHMTU
2202 case IPV6_RECVPATHMTU
:
2204 #ifdef IPV6_TRANSPARENT
2205 case IPV6_TRANSPARENT
:
2207 #ifdef IPV6_FREEBIND
2210 #ifdef IPV6_RECVORIGDSTADDR
2211 case IPV6_RECVORIGDSTADDR
:
2214 if (optlen
< sizeof(uint32_t)) {
2215 return -TARGET_EINVAL
;
2217 if (get_user_u32(val
, optval_addr
)) {
2218 return -TARGET_EFAULT
;
2220 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2221 &val
, sizeof(val
)));
2225 struct in6_pktinfo pki
;
2227 if (optlen
< sizeof(pki
)) {
2228 return -TARGET_EINVAL
;
2231 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2232 return -TARGET_EFAULT
;
2235 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2237 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2238 &pki
, sizeof(pki
)));
2241 case IPV6_ADD_MEMBERSHIP
:
2242 case IPV6_DROP_MEMBERSHIP
:
2244 struct ipv6_mreq ipv6mreq
;
2246 if (optlen
< sizeof(ipv6mreq
)) {
2247 return -TARGET_EINVAL
;
2250 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2251 return -TARGET_EFAULT
;
2254 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2256 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2257 &ipv6mreq
, sizeof(ipv6mreq
)));
2268 struct icmp6_filter icmp6f
;
2270 if (optlen
> sizeof(icmp6f
)) {
2271 optlen
= sizeof(icmp6f
);
2274 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2275 return -TARGET_EFAULT
;
2278 for (val
= 0; val
< 8; val
++) {
2279 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2282 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2294 /* those take an u32 value */
2295 if (optlen
< sizeof(uint32_t)) {
2296 return -TARGET_EINVAL
;
2299 if (get_user_u32(val
, optval_addr
)) {
2300 return -TARGET_EFAULT
;
2302 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2303 &val
, sizeof(val
)));
2310 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2315 char *alg_key
= g_malloc(optlen
);
2318 return -TARGET_ENOMEM
;
2320 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2322 return -TARGET_EFAULT
;
2324 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2329 case ALG_SET_AEAD_AUTHSIZE
:
2331 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2340 case TARGET_SOL_SOCKET
:
2342 case TARGET_SO_RCVTIMEO
:
2346 optname
= SO_RCVTIMEO
;
2349 if (optlen
!= sizeof(struct target_timeval
)) {
2350 return -TARGET_EINVAL
;
2353 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2354 return -TARGET_EFAULT
;
2357 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2361 case TARGET_SO_SNDTIMEO
:
2362 optname
= SO_SNDTIMEO
;
2364 case TARGET_SO_ATTACH_FILTER
:
2366 struct target_sock_fprog
*tfprog
;
2367 struct target_sock_filter
*tfilter
;
2368 struct sock_fprog fprog
;
2369 struct sock_filter
*filter
;
2372 if (optlen
!= sizeof(*tfprog
)) {
2373 return -TARGET_EINVAL
;
2375 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2376 return -TARGET_EFAULT
;
2378 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2379 tswapal(tfprog
->filter
), 0)) {
2380 unlock_user_struct(tfprog
, optval_addr
, 1);
2381 return -TARGET_EFAULT
;
2384 fprog
.len
= tswap16(tfprog
->len
);
2385 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2386 if (filter
== NULL
) {
2387 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2388 unlock_user_struct(tfprog
, optval_addr
, 1);
2389 return -TARGET_ENOMEM
;
2391 for (i
= 0; i
< fprog
.len
; i
++) {
2392 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2393 filter
[i
].jt
= tfilter
[i
].jt
;
2394 filter
[i
].jf
= tfilter
[i
].jf
;
2395 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2397 fprog
.filter
= filter
;
2399 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2400 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2403 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2404 unlock_user_struct(tfprog
, optval_addr
, 1);
2407 case TARGET_SO_BINDTODEVICE
:
2409 char *dev_ifname
, *addr_ifname
;
2411 if (optlen
> IFNAMSIZ
- 1) {
2412 optlen
= IFNAMSIZ
- 1;
2414 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2416 return -TARGET_EFAULT
;
2418 optname
= SO_BINDTODEVICE
;
2419 addr_ifname
= alloca(IFNAMSIZ
);
2420 memcpy(addr_ifname
, dev_ifname
, optlen
);
2421 addr_ifname
[optlen
] = 0;
2422 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2423 addr_ifname
, optlen
));
2424 unlock_user (dev_ifname
, optval_addr
, 0);
2427 case TARGET_SO_LINGER
:
2430 struct target_linger
*tlg
;
2432 if (optlen
!= sizeof(struct target_linger
)) {
2433 return -TARGET_EINVAL
;
2435 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2436 return -TARGET_EFAULT
;
2438 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2439 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2440 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2442 unlock_user_struct(tlg
, optval_addr
, 0);
2445 /* Options with 'int' argument. */
2446 case TARGET_SO_DEBUG
:
2449 case TARGET_SO_REUSEADDR
:
2450 optname
= SO_REUSEADDR
;
2453 case TARGET_SO_REUSEPORT
:
2454 optname
= SO_REUSEPORT
;
2457 case TARGET_SO_TYPE
:
2460 case TARGET_SO_ERROR
:
2463 case TARGET_SO_DONTROUTE
:
2464 optname
= SO_DONTROUTE
;
2466 case TARGET_SO_BROADCAST
:
2467 optname
= SO_BROADCAST
;
2469 case TARGET_SO_SNDBUF
:
2470 optname
= SO_SNDBUF
;
2472 case TARGET_SO_SNDBUFFORCE
:
2473 optname
= SO_SNDBUFFORCE
;
2475 case TARGET_SO_RCVBUF
:
2476 optname
= SO_RCVBUF
;
2478 case TARGET_SO_RCVBUFFORCE
:
2479 optname
= SO_RCVBUFFORCE
;
2481 case TARGET_SO_KEEPALIVE
:
2482 optname
= SO_KEEPALIVE
;
2484 case TARGET_SO_OOBINLINE
:
2485 optname
= SO_OOBINLINE
;
2487 case TARGET_SO_NO_CHECK
:
2488 optname
= SO_NO_CHECK
;
2490 case TARGET_SO_PRIORITY
:
2491 optname
= SO_PRIORITY
;
2494 case TARGET_SO_BSDCOMPAT
:
2495 optname
= SO_BSDCOMPAT
;
2498 case TARGET_SO_PASSCRED
:
2499 optname
= SO_PASSCRED
;
2501 case TARGET_SO_PASSSEC
:
2502 optname
= SO_PASSSEC
;
2504 case TARGET_SO_TIMESTAMP
:
2505 optname
= SO_TIMESTAMP
;
2507 case TARGET_SO_RCVLOWAT
:
2508 optname
= SO_RCVLOWAT
;
2513 if (optlen
< sizeof(uint32_t))
2514 return -TARGET_EINVAL
;
2516 if (get_user_u32(val
, optval_addr
))
2517 return -TARGET_EFAULT
;
2518 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2523 case NETLINK_PKTINFO
:
2524 case NETLINK_ADD_MEMBERSHIP
:
2525 case NETLINK_DROP_MEMBERSHIP
:
2526 case NETLINK_BROADCAST_ERROR
:
2527 case NETLINK_NO_ENOBUFS
:
2528 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2529 case NETLINK_LISTEN_ALL_NSID
:
2530 case NETLINK_CAP_ACK
:
2531 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2532 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2533 case NETLINK_EXT_ACK
:
2534 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2535 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2536 case NETLINK_GET_STRICT_CHK
:
2537 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2543 if (optlen
< sizeof(uint32_t)) {
2544 return -TARGET_EINVAL
;
2546 if (get_user_u32(val
, optval_addr
)) {
2547 return -TARGET_EFAULT
;
2549 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2552 #endif /* SOL_NETLINK */
2555 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2557 ret
= -TARGET_ENOPROTOOPT
;
2562 /* do_getsockopt() Must return target values and target errnos. */
2563 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2564 abi_ulong optval_addr
, abi_ulong optlen
)
2571 case TARGET_SOL_SOCKET
:
2574 /* These don't just return a single integer */
2575 case TARGET_SO_PEERNAME
:
2577 case TARGET_SO_RCVTIMEO
: {
2581 optname
= SO_RCVTIMEO
;
2584 if (get_user_u32(len
, optlen
)) {
2585 return -TARGET_EFAULT
;
2588 return -TARGET_EINVAL
;
2592 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2597 if (len
> sizeof(struct target_timeval
)) {
2598 len
= sizeof(struct target_timeval
);
2600 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2601 return -TARGET_EFAULT
;
2603 if (put_user_u32(len
, optlen
)) {
2604 return -TARGET_EFAULT
;
2608 case TARGET_SO_SNDTIMEO
:
2609 optname
= SO_SNDTIMEO
;
2611 case TARGET_SO_PEERCRED
: {
2614 struct target_ucred
*tcr
;
2616 if (get_user_u32(len
, optlen
)) {
2617 return -TARGET_EFAULT
;
2620 return -TARGET_EINVAL
;
2624 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2632 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2633 return -TARGET_EFAULT
;
2635 __put_user(cr
.pid
, &tcr
->pid
);
2636 __put_user(cr
.uid
, &tcr
->uid
);
2637 __put_user(cr
.gid
, &tcr
->gid
);
2638 unlock_user_struct(tcr
, optval_addr
, 1);
2639 if (put_user_u32(len
, optlen
)) {
2640 return -TARGET_EFAULT
;
2644 case TARGET_SO_PEERSEC
: {
2647 if (get_user_u32(len
, optlen
)) {
2648 return -TARGET_EFAULT
;
2651 return -TARGET_EINVAL
;
2653 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2655 return -TARGET_EFAULT
;
2658 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2660 if (put_user_u32(lv
, optlen
)) {
2661 ret
= -TARGET_EFAULT
;
2663 unlock_user(name
, optval_addr
, lv
);
2666 case TARGET_SO_LINGER
:
2670 struct target_linger
*tlg
;
2672 if (get_user_u32(len
, optlen
)) {
2673 return -TARGET_EFAULT
;
2676 return -TARGET_EINVAL
;
2680 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2688 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2689 return -TARGET_EFAULT
;
2691 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2692 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2693 unlock_user_struct(tlg
, optval_addr
, 1);
2694 if (put_user_u32(len
, optlen
)) {
2695 return -TARGET_EFAULT
;
2699 /* Options with 'int' argument. */
2700 case TARGET_SO_DEBUG
:
2703 case TARGET_SO_REUSEADDR
:
2704 optname
= SO_REUSEADDR
;
2707 case TARGET_SO_REUSEPORT
:
2708 optname
= SO_REUSEPORT
;
2711 case TARGET_SO_TYPE
:
2714 case TARGET_SO_ERROR
:
2717 case TARGET_SO_DONTROUTE
:
2718 optname
= SO_DONTROUTE
;
2720 case TARGET_SO_BROADCAST
:
2721 optname
= SO_BROADCAST
;
2723 case TARGET_SO_SNDBUF
:
2724 optname
= SO_SNDBUF
;
2726 case TARGET_SO_RCVBUF
:
2727 optname
= SO_RCVBUF
;
2729 case TARGET_SO_KEEPALIVE
:
2730 optname
= SO_KEEPALIVE
;
2732 case TARGET_SO_OOBINLINE
:
2733 optname
= SO_OOBINLINE
;
2735 case TARGET_SO_NO_CHECK
:
2736 optname
= SO_NO_CHECK
;
2738 case TARGET_SO_PRIORITY
:
2739 optname
= SO_PRIORITY
;
2742 case TARGET_SO_BSDCOMPAT
:
2743 optname
= SO_BSDCOMPAT
;
2746 case TARGET_SO_PASSCRED
:
2747 optname
= SO_PASSCRED
;
2749 case TARGET_SO_TIMESTAMP
:
2750 optname
= SO_TIMESTAMP
;
2752 case TARGET_SO_RCVLOWAT
:
2753 optname
= SO_RCVLOWAT
;
2755 case TARGET_SO_ACCEPTCONN
:
2756 optname
= SO_ACCEPTCONN
;
2758 case TARGET_SO_PROTOCOL
:
2759 optname
= SO_PROTOCOL
;
2761 case TARGET_SO_DOMAIN
:
2762 optname
= SO_DOMAIN
;
2770 /* TCP and UDP options all take an 'int' value. */
2772 if (get_user_u32(len
, optlen
))
2773 return -TARGET_EFAULT
;
2775 return -TARGET_EINVAL
;
2777 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2782 val
= host_to_target_sock_type(val
);
2785 val
= host_to_target_errno(val
);
2791 if (put_user_u32(val
, optval_addr
))
2792 return -TARGET_EFAULT
;
2794 if (put_user_u8(val
, optval_addr
))
2795 return -TARGET_EFAULT
;
2797 if (put_user_u32(len
, optlen
))
2798 return -TARGET_EFAULT
;
2805 case IP_ROUTER_ALERT
:
2809 case IP_MTU_DISCOVER
:
2815 case IP_MULTICAST_TTL
:
2816 case IP_MULTICAST_LOOP
:
2817 if (get_user_u32(len
, optlen
))
2818 return -TARGET_EFAULT
;
2820 return -TARGET_EINVAL
;
2822 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2825 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2827 if (put_user_u32(len
, optlen
)
2828 || put_user_u8(val
, optval_addr
))
2829 return -TARGET_EFAULT
;
2831 if (len
> sizeof(int))
2833 if (put_user_u32(len
, optlen
)
2834 || put_user_u32(val
, optval_addr
))
2835 return -TARGET_EFAULT
;
2839 ret
= -TARGET_ENOPROTOOPT
;
2845 case IPV6_MTU_DISCOVER
:
2848 case IPV6_RECVPKTINFO
:
2849 case IPV6_UNICAST_HOPS
:
2850 case IPV6_MULTICAST_HOPS
:
2851 case IPV6_MULTICAST_LOOP
:
2853 case IPV6_RECVHOPLIMIT
:
2854 case IPV6_2292HOPLIMIT
:
2857 case IPV6_2292PKTINFO
:
2858 case IPV6_RECVTCLASS
:
2859 case IPV6_RECVRTHDR
:
2860 case IPV6_2292RTHDR
:
2861 case IPV6_RECVHOPOPTS
:
2862 case IPV6_2292HOPOPTS
:
2863 case IPV6_RECVDSTOPTS
:
2864 case IPV6_2292DSTOPTS
:
2866 case IPV6_ADDR_PREFERENCES
:
2867 #ifdef IPV6_RECVPATHMTU
2868 case IPV6_RECVPATHMTU
:
2870 #ifdef IPV6_TRANSPARENT
2871 case IPV6_TRANSPARENT
:
2873 #ifdef IPV6_FREEBIND
2876 #ifdef IPV6_RECVORIGDSTADDR
2877 case IPV6_RECVORIGDSTADDR
:
2879 if (get_user_u32(len
, optlen
))
2880 return -TARGET_EFAULT
;
2882 return -TARGET_EINVAL
;
2884 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2887 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2889 if (put_user_u32(len
, optlen
)
2890 || put_user_u8(val
, optval_addr
))
2891 return -TARGET_EFAULT
;
2893 if (len
> sizeof(int))
2895 if (put_user_u32(len
, optlen
)
2896 || put_user_u32(val
, optval_addr
))
2897 return -TARGET_EFAULT
;
2901 ret
= -TARGET_ENOPROTOOPT
;
2908 case NETLINK_PKTINFO
:
2909 case NETLINK_BROADCAST_ERROR
:
2910 case NETLINK_NO_ENOBUFS
:
2911 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2912 case NETLINK_LISTEN_ALL_NSID
:
2913 case NETLINK_CAP_ACK
:
2914 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2915 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2916 case NETLINK_EXT_ACK
:
2917 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2919 case NETLINK_GET_STRICT_CHK
:
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2921 if (get_user_u32(len
, optlen
)) {
2922 return -TARGET_EFAULT
;
2924 if (len
!= sizeof(val
)) {
2925 return -TARGET_EINVAL
;
2928 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2932 if (put_user_u32(lv
, optlen
)
2933 || put_user_u32(val
, optval_addr
)) {
2934 return -TARGET_EFAULT
;
2937 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2938 case NETLINK_LIST_MEMBERSHIPS
:
2942 if (get_user_u32(len
, optlen
)) {
2943 return -TARGET_EFAULT
;
2946 return -TARGET_EINVAL
;
2948 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2949 if (!results
&& len
> 0) {
2950 return -TARGET_EFAULT
;
2953 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2955 unlock_user(results
, optval_addr
, 0);
2958 /* swap host endianess to target endianess. */
2959 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2960 results
[i
] = tswap32(results
[i
]);
2962 if (put_user_u32(lv
, optlen
)) {
2963 return -TARGET_EFAULT
;
2965 unlock_user(results
, optval_addr
, 0);
2968 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2973 #endif /* SOL_NETLINK */
2976 qemu_log_mask(LOG_UNIMP
,
2977 "getsockopt level=%d optname=%d not yet supported\n",
2979 ret
= -TARGET_EOPNOTSUPP
;
2985 /* Convert target low/high pair representing file offset into the host
2986 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2987 * as the kernel doesn't handle them either.
2989 static void target_to_host_low_high(abi_ulong tlow
,
2991 unsigned long *hlow
,
2992 unsigned long *hhigh
)
2994 uint64_t off
= tlow
|
2995 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2996 TARGET_LONG_BITS
/ 2;
2999 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3002 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3003 abi_ulong count
, int copy
)
3005 struct target_iovec
*target_vec
;
3007 abi_ulong total_len
, max_len
;
3010 bool bad_address
= false;
3016 if (count
> IOV_MAX
) {
3021 vec
= g_try_new0(struct iovec
, count
);
3027 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3028 count
* sizeof(struct target_iovec
), 1);
3029 if (target_vec
== NULL
) {
3034 /* ??? If host page size > target page size, this will result in a
3035 value larger than what we can actually support. */
3036 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3039 for (i
= 0; i
< count
; i
++) {
3040 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3041 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3046 } else if (len
== 0) {
3047 /* Zero length pointer is ignored. */
3048 vec
[i
].iov_base
= 0;
3050 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3051 /* If the first buffer pointer is bad, this is a fault. But
3052 * subsequent bad buffers will result in a partial write; this
3053 * is realized by filling the vector with null pointers and
3055 if (!vec
[i
].iov_base
) {
3066 if (len
> max_len
- total_len
) {
3067 len
= max_len
- total_len
;
3070 vec
[i
].iov_len
= len
;
3074 unlock_user(target_vec
, target_addr
, 0);
3079 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3080 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3083 unlock_user(target_vec
, target_addr
, 0);
3090 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3091 abi_ulong count
, int copy
)
3093 struct target_iovec
*target_vec
;
3096 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3097 count
* sizeof(struct target_iovec
), 1);
3099 for (i
= 0; i
< count
; i
++) {
3100 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3101 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3105 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3107 unlock_user(target_vec
, target_addr
, 0);
3113 static inline int target_to_host_sock_type(int *type
)
3116 int target_type
= *type
;
3118 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3119 case TARGET_SOCK_DGRAM
:
3120 host_type
= SOCK_DGRAM
;
3122 case TARGET_SOCK_STREAM
:
3123 host_type
= SOCK_STREAM
;
3126 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3129 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3130 #if defined(SOCK_CLOEXEC)
3131 host_type
|= SOCK_CLOEXEC
;
3133 return -TARGET_EINVAL
;
3136 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3137 #if defined(SOCK_NONBLOCK)
3138 host_type
|= SOCK_NONBLOCK
;
3139 #elif !defined(O_NONBLOCK)
3140 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.  On hosts
 * without SOCK_NONBLOCK, fall back to fcntl(O_NONBLOCK); on failure the
 * new fd is closed and -TARGET_EINVAL is returned, otherwise the fd.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3162 /* do_socket() Must return target values and target errnos. */
3163 static abi_long
do_socket(int domain
, int type
, int protocol
)
3165 int target_type
= type
;
3168 ret
= target_to_host_sock_type(&type
);
3173 if (domain
== PF_NETLINK
&& !(
3174 #ifdef CONFIG_RTNETLINK
3175 protocol
== NETLINK_ROUTE
||
3177 protocol
== NETLINK_KOBJECT_UEVENT
||
3178 protocol
== NETLINK_AUDIT
)) {
3179 return -TARGET_EPROTONOSUPPORT
;
3182 if (domain
== AF_PACKET
||
3183 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3184 protocol
= tswap16(protocol
);
3187 ret
= get_errno(socket(domain
, type
, protocol
));
3189 ret
= sock_flags_fixup(ret
, target_type
);
3190 if (type
== SOCK_PACKET
) {
3191 /* Manage an obsolete case :
3192 * if socket type is SOCK_PACKET, bind by name
3194 fd_trans_register(ret
, &target_packet_trans
);
3195 } else if (domain
== PF_NETLINK
) {
3197 #ifdef CONFIG_RTNETLINK
3199 fd_trans_register(ret
, &target_netlink_route_trans
);
3202 case NETLINK_KOBJECT_UEVENT
:
3203 /* nothing to do: messages are strings */
3206 fd_trans_register(ret
, &target_netlink_audit_trans
);
3209 g_assert_not_reached();
3216 /* do_bind() Must return target values and target errnos. */
3217 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3223 if ((int)addrlen
< 0) {
3224 return -TARGET_EINVAL
;
3227 addr
= alloca(addrlen
+1);
3229 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3233 return get_errno(bind(sockfd
, addr
, addrlen
));
3236 /* do_connect() Must return target values and target errnos. */
3237 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3243 if ((int)addrlen
< 0) {
3244 return -TARGET_EINVAL
;
3247 addr
= alloca(addrlen
+1);
3249 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3253 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3256 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3257 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3258 int flags
, int send
)
3264 abi_ulong target_vec
;
3266 if (msgp
->msg_name
) {
3267 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3268 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3269 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3270 tswapal(msgp
->msg_name
),
3272 if (ret
== -TARGET_EFAULT
) {
3273 /* For connected sockets msg_name and msg_namelen must
3274 * be ignored, so returning EFAULT immediately is wrong.
3275 * Instead, pass a bad msg_name to the host kernel, and
3276 * let it decide whether to return EFAULT or not.
3278 msg
.msg_name
= (void *)-1;
3283 msg
.msg_name
= NULL
;
3284 msg
.msg_namelen
= 0;
3286 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3287 msg
.msg_control
= alloca(msg
.msg_controllen
);
3288 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3290 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3292 count
= tswapal(msgp
->msg_iovlen
);
3293 target_vec
= tswapal(msgp
->msg_iov
);
3295 if (count
> IOV_MAX
) {
3296 /* sendrcvmsg returns a different errno for this condition than
3297 * readv/writev, so we must catch it here before lock_iovec() does.
3299 ret
= -TARGET_EMSGSIZE
;
3303 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3304 target_vec
, count
, send
);
3306 ret
= -host_to_target_errno(errno
);
3307 /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3312 msg
.msg_iovlen
= count
;
3316 if (fd_trans_target_to_host_data(fd
)) {
3319 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3320 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3321 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3322 msg
.msg_iov
->iov_len
);
3324 msg
.msg_iov
->iov_base
= host_msg
;
3325 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3329 ret
= target_to_host_cmsg(&msg
, msgp
);
3331 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3335 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3336 if (!is_error(ret
)) {
3338 if (fd_trans_host_to_target_data(fd
)) {
3339 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3340 MIN(msg
.msg_iov
->iov_len
, len
));
3342 if (!is_error(ret
)) {
3343 ret
= host_to_target_cmsg(msgp
, &msg
);
3345 if (!is_error(ret
)) {
3346 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3347 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3348 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3349 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3350 msg
.msg_name
, msg
.msg_namelen
);
3363 unlock_iovec(vec
, target_vec
, count
, !send
);
3369 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3370 int flags
, int send
)
3373 struct target_msghdr
*msgp
;
3375 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3379 return -TARGET_EFAULT
;
3381 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3382 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3386 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3387 * so it might not have this *mmsg-specific flag either.
3389 #ifndef MSG_WAITFORONE
3390 #define MSG_WAITFORONE 0x10000
3393 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3394 unsigned int vlen
, unsigned int flags
,
3397 struct target_mmsghdr
*mmsgp
;
3401 if (vlen
> UIO_MAXIOV
) {
3405 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3407 return -TARGET_EFAULT
;
3410 for (i
= 0; i
< vlen
; i
++) {
3411 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3412 if (is_error(ret
)) {
3415 mmsgp
[i
].msg_len
= tswap32(ret
);
3416 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3417 if (flags
& MSG_WAITFORONE
) {
3418 flags
|= MSG_DONTWAIT
;
3422 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3424 /* Return number of datagrams sent if we sent any at all;
3425 * otherwise return the error.
3433 /* do_accept4() Must return target values and target errnos. */
3434 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3435 abi_ulong target_addrlen_addr
, int flags
)
3437 socklen_t addrlen
, ret_addrlen
;
3442 if (flags
& ~(TARGET_SOCK_CLOEXEC
| TARGET_SOCK_NONBLOCK
)) {
3443 return -TARGET_EINVAL
;
3447 if (flags
& TARGET_SOCK_NONBLOCK
) {
3448 host_flags
|= SOCK_NONBLOCK
;
3450 if (flags
& TARGET_SOCK_CLOEXEC
) {
3451 host_flags
|= SOCK_CLOEXEC
;
3454 if (target_addr
== 0) {
3455 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3458 /* linux returns EFAULT if addrlen pointer is invalid */
3459 if (get_user_u32(addrlen
, target_addrlen_addr
))
3460 return -TARGET_EFAULT
;
3462 if ((int)addrlen
< 0) {
3463 return -TARGET_EINVAL
;
3466 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3467 return -TARGET_EFAULT
;
3470 addr
= alloca(addrlen
);
3472 ret_addrlen
= addrlen
;
3473 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3474 if (!is_error(ret
)) {
3475 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3476 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3477 ret
= -TARGET_EFAULT
;
3483 /* do_getpeername() Must return target values and target errnos. */
3484 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3485 abi_ulong target_addrlen_addr
)
3487 socklen_t addrlen
, ret_addrlen
;
3491 if (get_user_u32(addrlen
, target_addrlen_addr
))
3492 return -TARGET_EFAULT
;
3494 if ((int)addrlen
< 0) {
3495 return -TARGET_EINVAL
;
3498 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3499 return -TARGET_EFAULT
;
3502 addr
= alloca(addrlen
);
3504 ret_addrlen
= addrlen
;
3505 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3506 if (!is_error(ret
)) {
3507 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3508 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3509 ret
= -TARGET_EFAULT
;
3515 /* do_getsockname() Must return target values and target errnos. */
3516 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3517 abi_ulong target_addrlen_addr
)
3519 socklen_t addrlen
, ret_addrlen
;
3523 if (get_user_u32(addrlen
, target_addrlen_addr
))
3524 return -TARGET_EFAULT
;
3526 if ((int)addrlen
< 0) {
3527 return -TARGET_EINVAL
;
3530 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3531 return -TARGET_EFAULT
;
3534 addr
= alloca(addrlen
);
3536 ret_addrlen
= addrlen
;
3537 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3538 if (!is_error(ret
)) {
3539 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3540 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3541 ret
= -TARGET_EFAULT
;
3547 /* do_socketpair() Must return target values and target errnos. */
3548 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3549 abi_ulong target_tab_addr
)
3554 target_to_host_sock_type(&type
);
3556 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3557 if (!is_error(ret
)) {
3558 if (put_user_s32(tab
[0], target_tab_addr
)
3559 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3560 ret
= -TARGET_EFAULT
;
3565 /* do_sendto() Must return target values and target errnos. */
3566 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3567 abi_ulong target_addr
, socklen_t addrlen
)
3571 void *copy_msg
= NULL
;
3574 if ((int)addrlen
< 0) {
3575 return -TARGET_EINVAL
;
3578 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3580 return -TARGET_EFAULT
;
3581 if (fd_trans_target_to_host_data(fd
)) {
3582 copy_msg
= host_msg
;
3583 host_msg
= g_malloc(len
);
3584 memcpy(host_msg
, copy_msg
, len
);
3585 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3591 addr
= alloca(addrlen
+1);
3592 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3596 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3598 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3603 host_msg
= copy_msg
;
3605 unlock_user(host_msg
, msg
, 0);
3609 /* do_recvfrom() Must return target values and target errnos. */
3610 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3611 abi_ulong target_addr
,
3612 abi_ulong target_addrlen
)
3614 socklen_t addrlen
, ret_addrlen
;
3622 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3624 return -TARGET_EFAULT
;
3628 if (get_user_u32(addrlen
, target_addrlen
)) {
3629 ret
= -TARGET_EFAULT
;
3632 if ((int)addrlen
< 0) {
3633 ret
= -TARGET_EINVAL
;
3636 addr
= alloca(addrlen
);
3637 ret_addrlen
= addrlen
;
3638 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3639 addr
, &ret_addrlen
));
3641 addr
= NULL
; /* To keep compiler quiet. */
3642 addrlen
= 0; /* To keep compiler quiet. */
3643 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3645 if (!is_error(ret
)) {
3646 if (fd_trans_host_to_target_data(fd
)) {
3648 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3649 if (is_error(trans
)) {
3655 host_to_target_sockaddr(target_addr
, addr
,
3656 MIN(addrlen
, ret_addrlen
));
3657 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3658 ret
= -TARGET_EFAULT
;
3662 unlock_user(host_msg
, msg
, len
);
3665 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3763 #define N_SHM_REGIONS 32
3765 static struct shm_region
{
3769 } shm_regions
[N_SHM_REGIONS
];
3771 #ifndef TARGET_SEMID64_DS
3772 /* asm-generic version of this struct */
3773 struct target_semid64_ds
3775 struct target_ipc_perm sem_perm
;
3776 abi_ulong sem_otime
;
3777 #if TARGET_ABI_BITS == 32
3778 abi_ulong __unused1
;
3780 abi_ulong sem_ctime
;
3781 #if TARGET_ABI_BITS == 32
3782 abi_ulong __unused2
;
3784 abi_ulong sem_nsems
;
3785 abi_ulong __unused3
;
3786 abi_ulong __unused4
;
3790 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3791 abi_ulong target_addr
)
3793 struct target_ipc_perm
*target_ip
;
3794 struct target_semid64_ds
*target_sd
;
3796 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3797 return -TARGET_EFAULT
;
3798 target_ip
= &(target_sd
->sem_perm
);
3799 host_ip
->__key
= tswap32(target_ip
->__key
);
3800 host_ip
->uid
= tswap32(target_ip
->uid
);
3801 host_ip
->gid
= tswap32(target_ip
->gid
);
3802 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3803 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3804 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3805 host_ip
->mode
= tswap32(target_ip
->mode
);
3807 host_ip
->mode
= tswap16(target_ip
->mode
);
3809 #if defined(TARGET_PPC)
3810 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3812 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3814 unlock_user_struct(target_sd
, target_addr
, 0);
3818 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3819 struct ipc_perm
*host_ip
)
3821 struct target_ipc_perm
*target_ip
;
3822 struct target_semid64_ds
*target_sd
;
3824 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3825 return -TARGET_EFAULT
;
3826 target_ip
= &(target_sd
->sem_perm
);
3827 target_ip
->__key
= tswap32(host_ip
->__key
);
3828 target_ip
->uid
= tswap32(host_ip
->uid
);
3829 target_ip
->gid
= tswap32(host_ip
->gid
);
3830 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3831 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3832 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3833 target_ip
->mode
= tswap32(host_ip
->mode
);
3835 target_ip
->mode
= tswap16(host_ip
->mode
);
3837 #if defined(TARGET_PPC)
3838 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3840 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3842 unlock_user_struct(target_sd
, target_addr
, 1);
3846 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3847 abi_ulong target_addr
)
3849 struct target_semid64_ds
*target_sd
;
3851 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3852 return -TARGET_EFAULT
;
3853 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3854 return -TARGET_EFAULT
;
3855 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3856 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3857 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3858 unlock_user_struct(target_sd
, target_addr
, 0);
3862 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3863 struct semid_ds
*host_sd
)
3865 struct target_semid64_ds
*target_sd
;
3867 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3868 return -TARGET_EFAULT
;
3869 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3870 return -TARGET_EFAULT
;
3871 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3872 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3873 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3874 unlock_user_struct(target_sd
, target_addr
, 1);
3878 struct target_seminfo
{
3891 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3892 struct seminfo
*host_seminfo
)
3894 struct target_seminfo
*target_seminfo
;
3895 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3896 return -TARGET_EFAULT
;
3897 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3898 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3899 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3900 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3901 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3902 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3903 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3904 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3905 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3906 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3907 unlock_user_struct(target_seminfo
, target_addr
, 1);
3913 struct semid_ds
*buf
;
3914 unsigned short *array
;
3915 struct seminfo
*__buf
;
3918 union target_semun
{
3925 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3926 abi_ulong target_addr
)
3929 unsigned short *array
;
3931 struct semid_ds semid_ds
;
3934 semun
.buf
= &semid_ds
;
3936 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3938 return get_errno(ret
);
3940 nsems
= semid_ds
.sem_nsems
;
3942 *host_array
= g_try_new(unsigned short, nsems
);
3944 return -TARGET_ENOMEM
;
3946 array
= lock_user(VERIFY_READ
, target_addr
,
3947 nsems
*sizeof(unsigned short), 1);
3949 g_free(*host_array
);
3950 return -TARGET_EFAULT
;
3953 for(i
=0; i
<nsems
; i
++) {
3954 __get_user((*host_array
)[i
], &array
[i
]);
3956 unlock_user(array
, target_addr
, 0);
3961 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3962 unsigned short **host_array
)
3965 unsigned short *array
;
3967 struct semid_ds semid_ds
;
3970 semun
.buf
= &semid_ds
;
3972 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3974 return get_errno(ret
);
3976 nsems
= semid_ds
.sem_nsems
;
3978 array
= lock_user(VERIFY_WRITE
, target_addr
,
3979 nsems
*sizeof(unsigned short), 0);
3981 return -TARGET_EFAULT
;
3983 for(i
=0; i
<nsems
; i
++) {
3984 __put_user((*host_array
)[i
], &array
[i
]);
3986 g_free(*host_array
);
3987 unlock_user(array
, target_addr
, 1);
3992 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3993 abi_ulong target_arg
)
3995 union target_semun target_su
= { .buf
= target_arg
};
3997 struct semid_ds dsarg
;
3998 unsigned short *array
= NULL
;
3999 struct seminfo seminfo
;
4000 abi_long ret
= -TARGET_EINVAL
;
4007 /* In 64 bit cross-endian situations, we will erroneously pick up
4008 * the wrong half of the union for the "val" element. To rectify
4009 * this, the entire 8-byte structure is byteswapped, followed by
4010 * a swap of the 4 byte val field. In other cases, the data is
4011 * already in proper host byte order. */
4012 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4013 target_su
.buf
= tswapal(target_su
.buf
);
4014 arg
.val
= tswap32(target_su
.val
);
4016 arg
.val
= target_su
.val
;
4018 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4022 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4026 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4027 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4034 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4038 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4039 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4045 arg
.__buf
= &seminfo
;
4046 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4047 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4055 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4062 struct target_sembuf
{
4063 unsigned short sem_num
;
4068 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4069 abi_ulong target_addr
,
4072 struct target_sembuf
*target_sembuf
;
4075 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4076 nsops
*sizeof(struct target_sembuf
), 1);
4078 return -TARGET_EFAULT
;
4080 for(i
=0; i
<nsops
; i
++) {
4081 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4082 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4083 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4086 unlock_user(target_sembuf
, target_addr
, 0);
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semtimedop(2)/semop(2).  timeout is a guest pointer (0 = none);
 * time64 selects the 64-bit timespec layout.  Falls back to the host
 * sys_ipc multiplexer when __NR_semtimedop is unavailable.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4154 struct target_msqid_ds
4156 struct target_ipc_perm msg_perm
;
4157 abi_ulong msg_stime
;
4158 #if TARGET_ABI_BITS == 32
4159 abi_ulong __unused1
;
4161 abi_ulong msg_rtime
;
4162 #if TARGET_ABI_BITS == 32
4163 abi_ulong __unused2
;
4165 abi_ulong msg_ctime
;
4166 #if TARGET_ABI_BITS == 32
4167 abi_ulong __unused3
;
4169 abi_ulong __msg_cbytes
;
4171 abi_ulong msg_qbytes
;
4172 abi_ulong msg_lspid
;
4173 abi_ulong msg_lrpid
;
4174 abi_ulong __unused4
;
4175 abi_ulong __unused5
;
4178 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4179 abi_ulong target_addr
)
4181 struct target_msqid_ds
*target_md
;
4183 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4184 return -TARGET_EFAULT
;
4185 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4186 return -TARGET_EFAULT
;
4187 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4188 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4189 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4190 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4191 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4192 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4193 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4194 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4195 unlock_user_struct(target_md
, target_addr
, 0);
4199 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4200 struct msqid_ds
*host_md
)
4202 struct target_msqid_ds
*target_md
;
4204 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4205 return -TARGET_EFAULT
;
4206 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4207 return -TARGET_EFAULT
;
4208 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4209 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4210 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4211 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4212 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4213 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4214 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4215 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4216 unlock_user_struct(target_md
, target_addr
, 1);
4220 struct target_msginfo
{
4228 unsigned short int msgseg
;
4231 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4232 struct msginfo
*host_msginfo
)
4234 struct target_msginfo
*target_msginfo
;
4235 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4236 return -TARGET_EFAULT
;
4237 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4238 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4239 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4240 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4241 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4242 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4243 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4244 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4245 unlock_user_struct(target_msginfo
, target_addr
, 1);
4249 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4251 struct msqid_ds dsarg
;
4252 struct msginfo msginfo
;
4253 abi_long ret
= -TARGET_EINVAL
;
4261 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4262 return -TARGET_EFAULT
;
4263 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4264 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4265 return -TARGET_EFAULT
;
4268 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4272 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4273 if (host_to_target_msginfo(ptr
, &msginfo
))
4274 return -TARGET_EFAULT
;
4281 struct target_msgbuf
{
4286 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4287 ssize_t msgsz
, int msgflg
)
4289 struct target_msgbuf
*target_mb
;
4290 struct msgbuf
*host_mb
;
4294 return -TARGET_EINVAL
;
4297 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4298 return -TARGET_EFAULT
;
4299 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4301 unlock_user_struct(target_mb
, msgp
, 0);
4302 return -TARGET_ENOMEM
;
4304 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4305 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4306 ret
= -TARGET_ENOSYS
;
4308 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4311 if (ret
== -TARGET_ENOSYS
) {
4313 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4316 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4322 unlock_user_struct(target_mb
, msgp
, 0);
4328 #if defined(__sparc__)
4329 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4330 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4331 #elif defined(__s390x__)
4332 /* The s390 sys_ipc variant has only five parameters. */
4333 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4334 ((long int[]){(long int)__msgp, __msgtyp})
4336 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4337 ((long int[]){(long int)__msgp, __msgtyp}), 0
4341 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4342 ssize_t msgsz
, abi_long msgtyp
,
4345 struct target_msgbuf
*target_mb
;
4347 struct msgbuf
*host_mb
;
4351 return -TARGET_EINVAL
;
4354 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4355 return -TARGET_EFAULT
;
4357 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4359 ret
= -TARGET_ENOMEM
;
4362 ret
= -TARGET_ENOSYS
;
4364 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4367 if (ret
== -TARGET_ENOSYS
) {
4368 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4369 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4374 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4375 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4376 if (!target_mtext
) {
4377 ret
= -TARGET_EFAULT
;
4380 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4381 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4384 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4388 unlock_user_struct(target_mb
, msgp
, 1);
4393 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4394 abi_ulong target_addr
)
4396 struct target_shmid_ds
*target_sd
;
4398 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4399 return -TARGET_EFAULT
;
4400 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4401 return -TARGET_EFAULT
;
4402 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4403 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4404 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4405 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4406 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4407 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4408 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4409 unlock_user_struct(target_sd
, target_addr
, 0);
4413 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4414 struct shmid_ds
*host_sd
)
4416 struct target_shmid_ds
*target_sd
;
4418 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4419 return -TARGET_EFAULT
;
4420 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4421 return -TARGET_EFAULT
;
4422 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4423 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4424 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4425 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4426 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4427 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4428 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4429 unlock_user_struct(target_sd
, target_addr
, 1);
4433 struct target_shminfo
{
4441 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4442 struct shminfo
*host_shminfo
)
4444 struct target_shminfo
*target_shminfo
;
4445 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4446 return -TARGET_EFAULT
;
4447 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4448 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4449 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4450 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4451 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4452 unlock_user_struct(target_shminfo
, target_addr
, 1);
4456 struct target_shm_info
{
4461 abi_ulong swap_attempts
;
4462 abi_ulong swap_successes
;
4465 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4466 struct shm_info
*host_shm_info
)
4468 struct target_shm_info
*target_shm_info
;
4469 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4470 return -TARGET_EFAULT
;
4471 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4472 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4473 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4474 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4475 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4476 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4477 unlock_user_struct(target_shm_info
, target_addr
, 1);
4481 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4483 struct shmid_ds dsarg
;
4484 struct shminfo shminfo
;
4485 struct shm_info shm_info
;
4486 abi_long ret
= -TARGET_EINVAL
;
4494 if (target_to_host_shmid_ds(&dsarg
, buf
))
4495 return -TARGET_EFAULT
;
4496 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4497 if (host_to_target_shmid_ds(buf
, &dsarg
))
4498 return -TARGET_EFAULT
;
4501 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4502 if (host_to_target_shminfo(buf
, &shminfo
))
4503 return -TARGET_EFAULT
;
4506 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4507 if (host_to_target_shm_info(buf
, &shm_info
))
4508 return -TARGET_EFAULT
;
4513 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4520 #ifndef TARGET_FORCE_SHMLBA
4521 /* For most architectures, SHMLBA is the same as the page size;
4522 * some architectures have larger values, in which case they should
4523 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4524 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4525 * and defining its own value for SHMLBA.
4527 * The kernel also permits SHMLBA to be set by the architecture to a
4528 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4529 * this means that addresses are rounded to the large size if
4530 * SHM_RND is set but addresses not aligned to that size are not rejected
4531 * as long as they are at least page-aligned. Since the only architecture
4532 * which uses this is ia64 this code doesn't provide for that oddity.
4534 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4536 return TARGET_PAGE_SIZE
;
4540 static abi_ulong
do_shmat(CPUArchState
*cpu_env
, int shmid
,
4541 abi_ulong shmaddr
, int shmflg
)
4543 CPUState
*cpu
= env_cpu(cpu_env
);
4546 struct shmid_ds shm_info
;
4550 /* shmat pointers are always untagged */
4552 /* find out the length of the shared memory segment */
4553 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4554 if (is_error(ret
)) {
4555 /* can't get length, bail out */
4559 shmlba
= target_shmlba(cpu_env
);
4561 if (shmaddr
& (shmlba
- 1)) {
4562 if (shmflg
& SHM_RND
) {
4563 shmaddr
&= ~(shmlba
- 1);
4565 return -TARGET_EINVAL
;
4568 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4569 return -TARGET_EINVAL
;
4575 * We're mapping shared memory, so ensure we generate code for parallel
4576 * execution and flush old translations. This will work up to the level
4577 * supported by the host -- anything that requires EXCP_ATOMIC will not
4578 * be atomic with respect to an external process.
4580 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4581 cpu
->tcg_cflags
|= CF_PARALLEL
;
4586 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4588 abi_ulong mmap_start
;
4590 /* In order to use the host shmat, we need to honor host SHMLBA. */
4591 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4593 if (mmap_start
== -1) {
4595 host_raddr
= (void *)-1;
4597 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4598 shmflg
| SHM_REMAP
);
4601 if (host_raddr
== (void *)-1) {
4603 return get_errno((intptr_t)host_raddr
);
4605 raddr
= h2g((uintptr_t)host_raddr
);
4607 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
- 1,
4608 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4609 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4611 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4612 if (!shm_regions
[i
].in_use
) {
4613 shm_regions
[i
].in_use
= true;
4614 shm_regions
[i
].start
= raddr
;
4615 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4624 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4629 /* shmdt pointers are always untagged */
4633 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4634 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4635 shm_regions
[i
].in_use
= false;
4636 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
- 1, 0);
4640 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4768 /* kernel structure types definitions */
4770 #define STRUCT(name, ...) STRUCT_ ## name,
4771 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4773 #include "syscall_types.h"
4777 #undef STRUCT_SPECIAL
4779 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4780 #define STRUCT_SPECIAL(name)
4781 #include "syscall_types.h"
4783 #undef STRUCT_SPECIAL
4785 #define MAX_STRUCT_SIZE 4096
4787 #ifdef CONFIG_FIEMAP
4788 /* So fiemap access checks don't overflow on 32 bit systems.
4789 * This is very slightly smaller than the limit imposed by
4790 * the underlying kernel.
4792 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4793 / sizeof(struct fiemap_extent))
4795 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4796 int fd
, int cmd
, abi_long arg
)
4798 /* The parameter for this ioctl is a struct fiemap followed
4799 * by an array of struct fiemap_extent whose size is set
4800 * in fiemap->fm_extent_count. The array is filled in by the
4803 int target_size_in
, target_size_out
;
4805 const argtype
*arg_type
= ie
->arg_type
;
4806 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4809 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4813 assert(arg_type
[0] == TYPE_PTR
);
4814 assert(ie
->access
== IOC_RW
);
4816 target_size_in
= thunk_type_size(arg_type
, 0);
4817 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4819 return -TARGET_EFAULT
;
4821 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4822 unlock_user(argptr
, arg
, 0);
4823 fm
= (struct fiemap
*)buf_temp
;
4824 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4825 return -TARGET_EINVAL
;
4828 outbufsz
= sizeof (*fm
) +
4829 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4831 if (outbufsz
> MAX_STRUCT_SIZE
) {
4832 /* We can't fit all the extents into the fixed size buffer.
4833 * Allocate one that is large enough and use it instead.
4835 fm
= g_try_malloc(outbufsz
);
4837 return -TARGET_ENOMEM
;
4839 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4842 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4843 if (!is_error(ret
)) {
4844 target_size_out
= target_size_in
;
4845 /* An extent_count of 0 means we were only counting the extents
4846 * so there are no structs to copy
4848 if (fm
->fm_extent_count
!= 0) {
4849 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4851 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4853 ret
= -TARGET_EFAULT
;
4855 /* Convert the struct fiemap */
4856 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4857 if (fm
->fm_extent_count
!= 0) {
4858 p
= argptr
+ target_size_in
;
4859 /* ...and then all the struct fiemap_extents */
4860 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4861 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4866 unlock_user(argptr
, arg
, target_size_out
);
4876 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4877 int fd
, int cmd
, abi_long arg
)
4879 const argtype
*arg_type
= ie
->arg_type
;
4883 struct ifconf
*host_ifconf
;
4885 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4886 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4887 int target_ifreq_size
;
4892 abi_long target_ifc_buf
;
4896 assert(arg_type
[0] == TYPE_PTR
);
4897 assert(ie
->access
== IOC_RW
);
4900 target_size
= thunk_type_size(arg_type
, 0);
4902 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4904 return -TARGET_EFAULT
;
4905 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4906 unlock_user(argptr
, arg
, 0);
4908 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4909 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4910 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4912 if (target_ifc_buf
!= 0) {
4913 target_ifc_len
= host_ifconf
->ifc_len
;
4914 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4915 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4917 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4918 if (outbufsz
> MAX_STRUCT_SIZE
) {
4920 * We can't fit all the extents into the fixed size buffer.
4921 * Allocate one that is large enough and use it instead.
4923 host_ifconf
= g_try_malloc(outbufsz
);
4925 return -TARGET_ENOMEM
;
4927 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4930 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4932 host_ifconf
->ifc_len
= host_ifc_len
;
4934 host_ifc_buf
= NULL
;
4936 host_ifconf
->ifc_buf
= host_ifc_buf
;
4938 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4939 if (!is_error(ret
)) {
4940 /* convert host ifc_len to target ifc_len */
4942 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4943 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4944 host_ifconf
->ifc_len
= target_ifc_len
;
4946 /* restore target ifc_buf */
4948 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4950 /* copy struct ifconf to target user */
4952 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4954 return -TARGET_EFAULT
;
4955 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4956 unlock_user(argptr
, arg
, target_size
);
4958 if (target_ifc_buf
!= 0) {
4959 /* copy ifreq[] to target user */
4960 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4961 for (i
= 0; i
< nb_ifreq
; i
++) {
4962 thunk_convert(argptr
+ i
* target_ifreq_size
,
4963 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4964 ifreq_arg_type
, THUNK_TARGET
);
4966 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4971 g_free(host_ifconf
);
4977 #if defined(CONFIG_USBFS)
4978 #if HOST_LONG_BITS > 64
4979 #error USBDEVFS thunks do not support >64 bit hosts yet.
4982 uint64_t target_urb_adr
;
4983 uint64_t target_buf_adr
;
4984 char *target_buf_ptr
;
4985 struct usbdevfs_urb host_urb
;
4988 static GHashTable
*usbdevfs_urb_hashtable(void)
4990 static GHashTable
*urb_hashtable
;
4992 if (!urb_hashtable
) {
4993 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4995 return urb_hashtable
;
4998 static void urb_hashtable_insert(struct live_urb
*urb
)
5000 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5001 g_hash_table_insert(urb_hashtable
, urb
, urb
);
5004 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
5006 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5007 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
5010 static void urb_hashtable_remove(struct live_urb
*urb
)
5012 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
5013 g_hash_table_remove(urb_hashtable
, urb
);
5017 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5018 int fd
, int cmd
, abi_long arg
)
5020 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
5021 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
5022 struct live_urb
*lurb
;
5026 uintptr_t target_urb_adr
;
5029 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
5031 memset(buf_temp
, 0, sizeof(uint64_t));
5032 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5033 if (is_error(ret
)) {
5037 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
5038 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
5039 if (!lurb
->target_urb_adr
) {
5040 return -TARGET_EFAULT
;
5042 urb_hashtable_remove(lurb
);
5043 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5044 lurb
->host_urb
.buffer_length
);
5045 lurb
->target_buf_ptr
= NULL
;
5047 /* restore the guest buffer pointer */
5048 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5050 /* update the guest urb struct */
5051 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5054 return -TARGET_EFAULT
;
5056 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5057 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5059 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5060 /* write back the urb handle */
5061 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5064 return -TARGET_EFAULT
;
5067 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5068 target_urb_adr
= lurb
->target_urb_adr
;
5069 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5070 unlock_user(argptr
, arg
, target_size
);
5077 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5078 uint8_t *buf_temp
__attribute__((unused
)),
5079 int fd
, int cmd
, abi_long arg
)
5081 struct live_urb
*lurb
;
5083 /* map target address back to host URB with metadata. */
5084 lurb
= urb_hashtable_lookup(arg
);
5086 return -TARGET_EFAULT
;
5088 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5092 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5093 int fd
, int cmd
, abi_long arg
)
5095 const argtype
*arg_type
= ie
->arg_type
;
5100 struct live_urb
*lurb
;
5103 * each submitted URB needs to map to a unique ID for the
5104 * kernel, and that unique ID needs to be a pointer to
5105 * host memory. hence, we need to malloc for each URB.
5106 * isochronous transfers have a variable length struct.
5109 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5111 /* construct host copy of urb and metadata */
5112 lurb
= g_try_new0(struct live_urb
, 1);
5114 return -TARGET_ENOMEM
;
5117 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5120 return -TARGET_EFAULT
;
5122 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5123 unlock_user(argptr
, arg
, 0);
5125 lurb
->target_urb_adr
= arg
;
5126 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5128 /* buffer space used depends on endpoint type so lock the entire buffer */
5129 /* control type urbs should check the buffer contents for true direction */
5130 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5131 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5132 lurb
->host_urb
.buffer_length
, 1);
5133 if (lurb
->target_buf_ptr
== NULL
) {
5135 return -TARGET_EFAULT
;
5138 /* update buffer pointer in host copy */
5139 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5141 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5142 if (is_error(ret
)) {
5143 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5146 urb_hashtable_insert(lurb
);
5151 #endif /* CONFIG_USBFS */
5153 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5154 int cmd
, abi_long arg
)
5157 struct dm_ioctl
*host_dm
;
5158 abi_long guest_data
;
5159 uint32_t guest_data_size
;
5161 const argtype
*arg_type
= ie
->arg_type
;
5163 void *big_buf
= NULL
;
5167 target_size
= thunk_type_size(arg_type
, 0);
5168 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5170 ret
= -TARGET_EFAULT
;
5173 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5174 unlock_user(argptr
, arg
, 0);
5176 /* buf_temp is too small, so fetch things into a bigger buffer */
5177 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5178 memcpy(big_buf
, buf_temp
, target_size
);
5182 guest_data
= arg
+ host_dm
->data_start
;
5183 if ((guest_data
- arg
) < 0) {
5184 ret
= -TARGET_EINVAL
;
5187 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5188 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5190 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5192 ret
= -TARGET_EFAULT
;
5196 switch (ie
->host_cmd
) {
5198 case DM_LIST_DEVICES
:
5201 case DM_DEV_SUSPEND
:
5204 case DM_TABLE_STATUS
:
5205 case DM_TABLE_CLEAR
:
5207 case DM_LIST_VERSIONS
:
5211 case DM_DEV_SET_GEOMETRY
:
5212 /* data contains only strings */
5213 memcpy(host_data
, argptr
, guest_data_size
);
5216 memcpy(host_data
, argptr
, guest_data_size
);
5217 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5221 void *gspec
= argptr
;
5222 void *cur_data
= host_data
;
5223 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5224 int spec_size
= thunk_type_size(arg_type
, 0);
5227 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5228 struct dm_target_spec
*spec
= cur_data
;
5232 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5233 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5235 spec
->next
= sizeof(*spec
) + slen
;
5236 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5238 cur_data
+= spec
->next
;
5243 ret
= -TARGET_EINVAL
;
5244 unlock_user(argptr
, guest_data
, 0);
5247 unlock_user(argptr
, guest_data
, 0);
5249 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5250 if (!is_error(ret
)) {
5251 guest_data
= arg
+ host_dm
->data_start
;
5252 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5253 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5254 switch (ie
->host_cmd
) {
5259 case DM_DEV_SUSPEND
:
5262 case DM_TABLE_CLEAR
:
5264 case DM_DEV_SET_GEOMETRY
:
5265 /* no return data */
5267 case DM_LIST_DEVICES
:
5269 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5270 uint32_t remaining_data
= guest_data_size
;
5271 void *cur_data
= argptr
;
5272 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5273 int nl_size
= 12; /* can't use thunk_size due to alignment */
5276 uint32_t next
= nl
->next
;
5278 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5280 if (remaining_data
< nl
->next
) {
5281 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5284 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5285 strcpy(cur_data
+ nl_size
, nl
->name
);
5286 cur_data
+= nl
->next
;
5287 remaining_data
-= nl
->next
;
5291 nl
= (void*)nl
+ next
;
5296 case DM_TABLE_STATUS
:
5298 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5299 void *cur_data
= argptr
;
5300 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5301 int spec_size
= thunk_type_size(arg_type
, 0);
5304 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5305 uint32_t next
= spec
->next
;
5306 int slen
= strlen((char*)&spec
[1]) + 1;
5307 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5308 if (guest_data_size
< spec
->next
) {
5309 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5312 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5313 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5314 cur_data
= argptr
+ spec
->next
;
5315 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5321 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5322 int count
= *(uint32_t*)hdata
;
5323 uint64_t *hdev
= hdata
+ 8;
5324 uint64_t *gdev
= argptr
+ 8;
5327 *(uint32_t*)argptr
= tswap32(count
);
5328 for (i
= 0; i
< count
; i
++) {
5329 *gdev
= tswap64(*hdev
);
5335 case DM_LIST_VERSIONS
:
5337 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5338 uint32_t remaining_data
= guest_data_size
;
5339 void *cur_data
= argptr
;
5340 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5341 int vers_size
= thunk_type_size(arg_type
, 0);
5344 uint32_t next
= vers
->next
;
5346 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5348 if (remaining_data
< vers
->next
) {
5349 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5352 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5353 strcpy(cur_data
+ vers_size
, vers
->name
);
5354 cur_data
+= vers
->next
;
5355 remaining_data
-= vers
->next
;
5359 vers
= (void*)vers
+ next
;
5364 unlock_user(argptr
, guest_data
, 0);
5365 ret
= -TARGET_EINVAL
;
5368 unlock_user(argptr
, guest_data
, guest_data_size
);
5370 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5372 ret
= -TARGET_EFAULT
;
5375 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5376 unlock_user(argptr
, arg
, target_size
);
5383 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5384 int cmd
, abi_long arg
)
5388 const argtype
*arg_type
= ie
->arg_type
;
5389 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5392 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5393 struct blkpg_partition host_part
;
5395 /* Read and convert blkpg */
5397 target_size
= thunk_type_size(arg_type
, 0);
5398 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5400 ret
= -TARGET_EFAULT
;
5403 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5404 unlock_user(argptr
, arg
, 0);
5406 switch (host_blkpg
->op
) {
5407 case BLKPG_ADD_PARTITION
:
5408 case BLKPG_DEL_PARTITION
:
5409 /* payload is struct blkpg_partition */
5412 /* Unknown opcode */
5413 ret
= -TARGET_EINVAL
;
5417 /* Read and convert blkpg->data */
5418 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5419 target_size
= thunk_type_size(part_arg_type
, 0);
5420 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5422 ret
= -TARGET_EFAULT
;
5425 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5426 unlock_user(argptr
, arg
, 0);
5428 /* Swizzle the data pointer to our local copy and call! */
5429 host_blkpg
->data
= &host_part
;
5430 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5436 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5437 int fd
, int cmd
, abi_long arg
)
5439 const argtype
*arg_type
= ie
->arg_type
;
5440 const StructEntry
*se
;
5441 const argtype
*field_types
;
5442 const int *dst_offsets
, *src_offsets
;
5445 abi_ulong
*target_rt_dev_ptr
= NULL
;
5446 unsigned long *host_rt_dev_ptr
= NULL
;
5450 assert(ie
->access
== IOC_W
);
5451 assert(*arg_type
== TYPE_PTR
);
5453 assert(*arg_type
== TYPE_STRUCT
);
5454 target_size
= thunk_type_size(arg_type
, 0);
5455 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5457 return -TARGET_EFAULT
;
5460 assert(*arg_type
== (int)STRUCT_rtentry
);
5461 se
= struct_entries
+ *arg_type
++;
5462 assert(se
->convert
[0] == NULL
);
5463 /* convert struct here to be able to catch rt_dev string */
5464 field_types
= se
->field_types
;
5465 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5466 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5467 for (i
= 0; i
< se
->nb_fields
; i
++) {
5468 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5469 assert(*field_types
== TYPE_PTRVOID
);
5470 target_rt_dev_ptr
= argptr
+ src_offsets
[i
];
5471 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5472 if (*target_rt_dev_ptr
!= 0) {
5473 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5474 tswapal(*target_rt_dev_ptr
));
5475 if (!*host_rt_dev_ptr
) {
5476 unlock_user(argptr
, arg
, 0);
5477 return -TARGET_EFAULT
;
5480 *host_rt_dev_ptr
= 0;
5485 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5486 argptr
+ src_offsets
[i
],
5487 field_types
, THUNK_HOST
);
5489 unlock_user(argptr
, arg
, 0);
5491 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5493 assert(host_rt_dev_ptr
!= NULL
);
5494 assert(target_rt_dev_ptr
!= NULL
);
5495 if (*host_rt_dev_ptr
!= 0) {
5496 unlock_user((void *)*host_rt_dev_ptr
,
5497 *target_rt_dev_ptr
, 0);
5502 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5503 int fd
, int cmd
, abi_long arg
)
5505 int sig
= target_to_host_signal(arg
);
5506 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5509 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5510 int fd
, int cmd
, abi_long arg
)
5515 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5516 if (is_error(ret
)) {
5520 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5521 if (copy_to_user_timeval(arg
, &tv
)) {
5522 return -TARGET_EFAULT
;
5525 if (copy_to_user_timeval64(arg
, &tv
)) {
5526 return -TARGET_EFAULT
;
5533 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5534 int fd
, int cmd
, abi_long arg
)
5539 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5540 if (is_error(ret
)) {
5544 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5545 if (host_to_target_timespec(arg
, &ts
)) {
5546 return -TARGET_EFAULT
;
5549 if (host_to_target_timespec64(arg
, &ts
)) {
5550 return -TARGET_EFAULT
;
5558 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5559 int fd
, int cmd
, abi_long arg
)
5561 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5562 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5568 static void unlock_drm_version(struct drm_version
*host_ver
,
5569 struct target_drm_version
*target_ver
,
5572 unlock_user(host_ver
->name
, target_ver
->name
,
5573 copy
? host_ver
->name_len
: 0);
5574 unlock_user(host_ver
->date
, target_ver
->date
,
5575 copy
? host_ver
->date_len
: 0);
5576 unlock_user(host_ver
->desc
, target_ver
->desc
,
5577 copy
? host_ver
->desc_len
: 0);
5580 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5581 struct target_drm_version
*target_ver
)
5583 memset(host_ver
, 0, sizeof(*host_ver
));
5585 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5586 if (host_ver
->name_len
) {
5587 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5588 target_ver
->name_len
, 0);
5589 if (!host_ver
->name
) {
5594 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5595 if (host_ver
->date_len
) {
5596 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5597 target_ver
->date_len
, 0);
5598 if (!host_ver
->date
) {
5603 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5604 if (host_ver
->desc_len
) {
5605 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5606 target_ver
->desc_len
, 0);
5607 if (!host_ver
->desc
) {
5614 unlock_drm_version(host_ver
, target_ver
, false);
5618 static inline void host_to_target_drmversion(
5619 struct target_drm_version
*target_ver
,
5620 struct drm_version
*host_ver
)
5622 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5623 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5624 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5625 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5626 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5627 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5628 unlock_drm_version(host_ver
, target_ver
, true);
5631 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5632 int fd
, int cmd
, abi_long arg
)
5634 struct drm_version
*ver
;
5635 struct target_drm_version
*target_ver
;
5638 switch (ie
->host_cmd
) {
5639 case DRM_IOCTL_VERSION
:
5640 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5641 return -TARGET_EFAULT
;
5643 ver
= (struct drm_version
*)buf_temp
;
5644 ret
= target_to_host_drmversion(ver
, target_ver
);
5645 if (!is_error(ret
)) {
5646 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5647 if (is_error(ret
)) {
5648 unlock_drm_version(ver
, target_ver
, false);
5650 host_to_target_drmversion(target_ver
, ver
);
5653 unlock_user_struct(target_ver
, arg
, 0);
5656 return -TARGET_ENOSYS
;
5659 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5660 struct drm_i915_getparam
*gparam
,
5661 int fd
, abi_long arg
)
5665 struct target_drm_i915_getparam
*target_gparam
;
5667 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5668 return -TARGET_EFAULT
;
5671 __get_user(gparam
->param
, &target_gparam
->param
);
5672 gparam
->value
= &value
;
5673 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5674 put_user_s32(value
, target_gparam
->value
);
5676 unlock_user_struct(target_gparam
, arg
, 0);
5680 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5681 int fd
, int cmd
, abi_long arg
)
5683 switch (ie
->host_cmd
) {
5684 case DRM_IOCTL_I915_GETPARAM
:
5685 return do_ioctl_drm_i915_getparam(ie
,
5686 (struct drm_i915_getparam
*)buf_temp
,
5689 return -TARGET_ENOSYS
;
5695 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5696 int fd
, int cmd
, abi_long arg
)
5698 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5699 struct tun_filter
*target_filter
;
5702 assert(ie
->access
== IOC_W
);
5704 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5705 if (!target_filter
) {
5706 return -TARGET_EFAULT
;
5708 filter
->flags
= tswap16(target_filter
->flags
);
5709 filter
->count
= tswap16(target_filter
->count
);
5710 unlock_user(target_filter
, arg
, 0);
5712 if (filter
->count
) {
5713 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5715 return -TARGET_EFAULT
;
5718 target_addr
= lock_user(VERIFY_READ
,
5719 arg
+ offsetof(struct tun_filter
, addr
),
5720 filter
->count
* ETH_ALEN
, 1);
5722 return -TARGET_EFAULT
;
5724 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5725 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5728 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5731 IOCTLEntry ioctl_entries
[] = {
5732 #define IOCTL(cmd, access, ...) \
5733 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5734 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5735 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5736 #define IOCTL_IGNORE(cmd) \
5737 { TARGET_ ## cmd, 0, #cmd },
5742 /* ??? Implement proper locking for ioctls. */
5743 /* do_ioctl() Must return target values and target errnos. */
5744 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5746 const IOCTLEntry
*ie
;
5747 const argtype
*arg_type
;
5749 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5755 if (ie
->target_cmd
== 0) {
5757 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5758 return -TARGET_ENOTTY
;
5760 if (ie
->target_cmd
== cmd
)
5764 arg_type
= ie
->arg_type
;
5766 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5767 } else if (!ie
->host_cmd
) {
5768 /* Some architectures define BSD ioctls in their headers
5769 that are not implemented in Linux. */
5770 return -TARGET_ENOTTY
;
5773 switch(arg_type
[0]) {
5776 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5782 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5786 target_size
= thunk_type_size(arg_type
, 0);
5787 switch(ie
->access
) {
5789 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5790 if (!is_error(ret
)) {
5791 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5793 return -TARGET_EFAULT
;
5794 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5795 unlock_user(argptr
, arg
, target_size
);
5799 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5801 return -TARGET_EFAULT
;
5802 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5803 unlock_user(argptr
, arg
, 0);
5804 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5808 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5810 return -TARGET_EFAULT
;
5811 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5812 unlock_user(argptr
, arg
, 0);
5813 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5814 if (!is_error(ret
)) {
5815 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5817 return -TARGET_EFAULT
;
5818 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5819 unlock_user(argptr
, arg
, target_size
);
5825 qemu_log_mask(LOG_UNIMP
,
5826 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5827 (long)cmd
, arg_type
[0]);
5828 ret
= -TARGET_ENOTTY
;
5834 static const bitmask_transtbl iflag_tbl
[] = {
5835 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5836 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5837 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5838 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5839 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5840 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5841 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5842 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5843 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5844 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5845 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5846 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5847 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5848 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5849 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5853 static const bitmask_transtbl oflag_tbl
[] = {
5854 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5855 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5856 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5857 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5858 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5859 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5860 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5861 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5862 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5863 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5864 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5865 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5866 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5867 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5868 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5869 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5870 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5871 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5872 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5873 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5874 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5875 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5876 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5877 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5881 static const bitmask_transtbl cflag_tbl
[] = {
5882 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5883 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5884 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5885 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5886 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5887 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5888 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5889 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5890 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5891 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5892 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5893 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5894 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5895 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5896 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5897 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5898 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5899 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5900 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5901 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5902 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5903 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5904 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5905 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5906 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5907 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5908 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5909 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5910 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5911 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5912 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5916 static const bitmask_transtbl lflag_tbl
[] = {
5917 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5918 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5919 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5920 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5921 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5922 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5923 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5924 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5925 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5926 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5927 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5928 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5929 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5930 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5931 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5932 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5936 static void target_to_host_termios (void *dst
, const void *src
)
5938 struct host_termios
*host
= dst
;
5939 const struct target_termios
*target
= src
;
5942 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5944 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5946 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5948 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5949 host
->c_line
= target
->c_line
;
5951 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5952 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5953 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5954 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5955 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5956 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5957 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5958 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5959 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5960 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5961 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5962 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5963 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5964 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5965 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5966 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5967 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5968 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5971 static void host_to_target_termios (void *dst
, const void *src
)
5973 struct target_termios
*target
= dst
;
5974 const struct host_termios
*host
= src
;
5977 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5979 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5981 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5983 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5984 target
->c_line
= host
->c_line
;
5986 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5987 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5988 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5989 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5990 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5991 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5992 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5993 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5994 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5995 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5996 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5997 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5998 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5999 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6000 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6001 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6002 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6003 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6006 static const StructEntry struct_termios_def
= {
6007 .convert
= { host_to_target_termios
, target_to_host_termios
},
6008 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6009 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6010 .print
= print_termios
,
6013 /* If the host does not provide these bits, they may be safely discarded. */
6017 #ifndef MAP_UNINITIALIZED
6018 #define MAP_UNINITIALIZED 0
6021 static const bitmask_transtbl mmap_flags_tbl
[] = {
6022 { TARGET_MAP_TYPE
, TARGET_MAP_SHARED
, MAP_TYPE
, MAP_SHARED
},
6023 { TARGET_MAP_TYPE
, TARGET_MAP_PRIVATE
, MAP_TYPE
, MAP_PRIVATE
},
6024 { TARGET_MAP_TYPE
, TARGET_MAP_SHARED_VALIDATE
,
6025 MAP_TYPE
, MAP_SHARED_VALIDATE
},
6026 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6027 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6028 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6029 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6030 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6031 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6032 MAP_DENYWRITE
, MAP_DENYWRITE
},
6033 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6034 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6035 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6036 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6037 MAP_NORESERVE
, MAP_NORESERVE
},
6038 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6039 /* MAP_STACK had been ignored by the kernel for quite some time.
6040 Recognize it for the target insofar as we do not want to pass
6041 it through to the host. */
6042 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6043 { TARGET_MAP_SYNC
, TARGET_MAP_SYNC
, MAP_SYNC
, MAP_SYNC
},
6044 { TARGET_MAP_NONBLOCK
, TARGET_MAP_NONBLOCK
, MAP_NONBLOCK
, MAP_NONBLOCK
},
6045 { TARGET_MAP_POPULATE
, TARGET_MAP_POPULATE
, MAP_POPULATE
, MAP_POPULATE
},
6046 { TARGET_MAP_FIXED_NOREPLACE
, TARGET_MAP_FIXED_NOREPLACE
,
6047 MAP_FIXED_NOREPLACE
, MAP_FIXED_NOREPLACE
},
6048 { TARGET_MAP_UNINITIALIZED
, TARGET_MAP_UNINITIALIZED
,
6049 MAP_UNINITIALIZED
, MAP_UNINITIALIZED
},
6054 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6055 * TARGET_I386 is defined if TARGET_X86_64 is defined
6057 #if defined(TARGET_I386)
6059 /* NOTE: there is really one LDT for all the threads */
6060 static uint8_t *ldt_table
;
6062 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6069 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6070 if (size
> bytecount
)
6072 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6074 return -TARGET_EFAULT
;
6075 /* ??? Should this by byteswapped? */
6076 memcpy(p
, ldt_table
, size
);
6077 unlock_user(p
, ptr
, size
);
6081 /* XXX: add locking support */
6082 static abi_long
write_ldt(CPUX86State
*env
,
6083 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6085 struct target_modify_ldt_ldt_s ldt_info
;
6086 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6087 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6088 int seg_not_present
, useable
, lm
;
6089 uint32_t *lp
, entry_1
, entry_2
;
6091 if (bytecount
!= sizeof(ldt_info
))
6092 return -TARGET_EINVAL
;
6093 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6094 return -TARGET_EFAULT
;
6095 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6096 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6097 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6098 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6099 unlock_user_struct(target_ldt_info
, ptr
, 0);
6101 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6102 return -TARGET_EINVAL
;
6103 seg_32bit
= ldt_info
.flags
& 1;
6104 contents
= (ldt_info
.flags
>> 1) & 3;
6105 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6106 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6107 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6108 useable
= (ldt_info
.flags
>> 6) & 1;
6112 lm
= (ldt_info
.flags
>> 7) & 1;
6114 if (contents
== 3) {
6116 return -TARGET_EINVAL
;
6117 if (seg_not_present
== 0)
6118 return -TARGET_EINVAL
;
6120 /* allocate the LDT */
6122 env
->ldt
.base
= target_mmap(0,
6123 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6124 PROT_READ
|PROT_WRITE
,
6125 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6126 if (env
->ldt
.base
== -1)
6127 return -TARGET_ENOMEM
;
6128 memset(g2h_untagged(env
->ldt
.base
), 0,
6129 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6130 env
->ldt
.limit
= 0xffff;
6131 ldt_table
= g2h_untagged(env
->ldt
.base
);
6134 /* NOTE: same code as Linux kernel */
6135 /* Allow LDTs to be cleared by the user. */
6136 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6139 read_exec_only
== 1 &&
6141 limit_in_pages
== 0 &&
6142 seg_not_present
== 1 &&
6150 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6151 (ldt_info
.limit
& 0x0ffff);
6152 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6153 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6154 (ldt_info
.limit
& 0xf0000) |
6155 ((read_exec_only
^ 1) << 9) |
6157 ((seg_not_present
^ 1) << 15) |
6159 (limit_in_pages
<< 23) |
6163 entry_2
|= (useable
<< 20);
6165 /* Install the new entry ... */
6167 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6168 lp
[0] = tswap32(entry_1
);
6169 lp
[1] = tswap32(entry_2
);
6173 /* specific and weird i386 syscalls */
6174 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6175 unsigned long bytecount
)
6181 ret
= read_ldt(ptr
, bytecount
);
6184 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6187 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6190 ret
= -TARGET_ENOSYS
;
6196 #if defined(TARGET_ABI32)
6197 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6199 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6200 struct target_modify_ldt_ldt_s ldt_info
;
6201 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6202 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6203 int seg_not_present
, useable
, lm
;
6204 uint32_t *lp
, entry_1
, entry_2
;
6207 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6208 if (!target_ldt_info
)
6209 return -TARGET_EFAULT
;
6210 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6211 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6212 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6213 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6214 if (ldt_info
.entry_number
== -1) {
6215 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6216 if (gdt_table
[i
] == 0) {
6217 ldt_info
.entry_number
= i
;
6218 target_ldt_info
->entry_number
= tswap32(i
);
6223 unlock_user_struct(target_ldt_info
, ptr
, 1);
6225 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6226 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6227 return -TARGET_EINVAL
;
6228 seg_32bit
= ldt_info
.flags
& 1;
6229 contents
= (ldt_info
.flags
>> 1) & 3;
6230 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6231 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6232 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6233 useable
= (ldt_info
.flags
>> 6) & 1;
6237 lm
= (ldt_info
.flags
>> 7) & 1;
6240 if (contents
== 3) {
6241 if (seg_not_present
== 0)
6242 return -TARGET_EINVAL
;
6245 /* NOTE: same code as Linux kernel */
6246 /* Allow LDTs to be cleared by the user. */
6247 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6248 if ((contents
== 0 &&
6249 read_exec_only
== 1 &&
6251 limit_in_pages
== 0 &&
6252 seg_not_present
== 1 &&
6260 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6261 (ldt_info
.limit
& 0x0ffff);
6262 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6263 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6264 (ldt_info
.limit
& 0xf0000) |
6265 ((read_exec_only
^ 1) << 9) |
6267 ((seg_not_present
^ 1) << 15) |
6269 (limit_in_pages
<< 23) |
6274 /* Install the new entry ... */
6276 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6277 lp
[0] = tswap32(entry_1
);
6278 lp
[1] = tswap32(entry_2
);
6282 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6284 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6285 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6286 uint32_t base_addr
, limit
, flags
;
6287 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6288 int seg_not_present
, useable
, lm
;
6289 uint32_t *lp
, entry_1
, entry_2
;
6291 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6292 if (!target_ldt_info
)
6293 return -TARGET_EFAULT
;
6294 idx
= tswap32(target_ldt_info
->entry_number
);
6295 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6296 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6297 unlock_user_struct(target_ldt_info
, ptr
, 1);
6298 return -TARGET_EINVAL
;
6300 lp
= (uint32_t *)(gdt_table
+ idx
);
6301 entry_1
= tswap32(lp
[0]);
6302 entry_2
= tswap32(lp
[1]);
6304 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6305 contents
= (entry_2
>> 10) & 3;
6306 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6307 seg_32bit
= (entry_2
>> 22) & 1;
6308 limit_in_pages
= (entry_2
>> 23) & 1;
6309 useable
= (entry_2
>> 20) & 1;
6313 lm
= (entry_2
>> 21) & 1;
6315 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6316 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6317 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6318 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6319 base_addr
= (entry_1
>> 16) |
6320 (entry_2
& 0xff000000) |
6321 ((entry_2
& 0xff) << 16);
6322 target_ldt_info
->base_addr
= tswapal(base_addr
);
6323 target_ldt_info
->limit
= tswap32(limit
);
6324 target_ldt_info
->flags
= tswap32(flags
);
6325 unlock_user_struct(target_ldt_info
, ptr
, 1);
6329 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6331 return -TARGET_ENOSYS
;
6334 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6341 case TARGET_ARCH_SET_GS
:
6342 case TARGET_ARCH_SET_FS
:
6343 if (code
== TARGET_ARCH_SET_GS
)
6347 cpu_x86_load_seg(env
, idx
, 0);
6348 env
->segs
[idx
].base
= addr
;
6350 case TARGET_ARCH_GET_GS
:
6351 case TARGET_ARCH_GET_FS
:
6352 if (code
== TARGET_ARCH_GET_GS
)
6356 val
= env
->segs
[idx
].base
;
6357 if (put_user(val
, addr
, abi_ulong
))
6358 ret
= -TARGET_EFAULT
;
6361 ret
= -TARGET_EINVAL
;
6366 #endif /* defined(TARGET_ABI32 */
6367 #endif /* defined(TARGET_I386) */
6370 * These constants are generic. Supply any that are missing from the host.
6373 # define PR_SET_NAME 15
6374 # define PR_GET_NAME 16
6376 #ifndef PR_SET_FP_MODE
6377 # define PR_SET_FP_MODE 45
6378 # define PR_GET_FP_MODE 46
6379 # define PR_FP_MODE_FR (1 << 0)
6380 # define PR_FP_MODE_FRE (1 << 1)
6382 #ifndef PR_SVE_SET_VL
6383 # define PR_SVE_SET_VL 50
6384 # define PR_SVE_GET_VL 51
6385 # define PR_SVE_VL_LEN_MASK 0xffff
6386 # define PR_SVE_VL_INHERIT (1 << 17)
6388 #ifndef PR_PAC_RESET_KEYS
6389 # define PR_PAC_RESET_KEYS 54
6390 # define PR_PAC_APIAKEY (1 << 0)
6391 # define PR_PAC_APIBKEY (1 << 1)
6392 # define PR_PAC_APDAKEY (1 << 2)
6393 # define PR_PAC_APDBKEY (1 << 3)
6394 # define PR_PAC_APGAKEY (1 << 4)
6396 #ifndef PR_SET_TAGGED_ADDR_CTRL
6397 # define PR_SET_TAGGED_ADDR_CTRL 55
6398 # define PR_GET_TAGGED_ADDR_CTRL 56
6399 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6401 #ifndef PR_MTE_TCF_SHIFT
6402 # define PR_MTE_TCF_SHIFT 1
6403 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6404 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6405 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6406 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6407 # define PR_MTE_TAG_SHIFT 3
6408 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6410 #ifndef PR_SET_IO_FLUSHER
6411 # define PR_SET_IO_FLUSHER 57
6412 # define PR_GET_IO_FLUSHER 58
6414 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6415 # define PR_SET_SYSCALL_USER_DISPATCH 59
6417 #ifndef PR_SME_SET_VL
6418 # define PR_SME_SET_VL 63
6419 # define PR_SME_GET_VL 64
6420 # define PR_SME_VL_LEN_MASK 0xffff
6421 # define PR_SME_VL_INHERIT (1 << 17)
6424 #include "target_prctl.h"
6426 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6428 return -TARGET_EINVAL
;
6431 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6433 return -TARGET_EINVAL
;
6436 #ifndef do_prctl_get_fp_mode
6437 #define do_prctl_get_fp_mode do_prctl_inval0
6439 #ifndef do_prctl_set_fp_mode
6440 #define do_prctl_set_fp_mode do_prctl_inval1
6442 #ifndef do_prctl_sve_get_vl
6443 #define do_prctl_sve_get_vl do_prctl_inval0
6445 #ifndef do_prctl_sve_set_vl
6446 #define do_prctl_sve_set_vl do_prctl_inval1
6448 #ifndef do_prctl_reset_keys
6449 #define do_prctl_reset_keys do_prctl_inval1
6451 #ifndef do_prctl_set_tagged_addr_ctrl
6452 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6454 #ifndef do_prctl_get_tagged_addr_ctrl
6455 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6457 #ifndef do_prctl_get_unalign
6458 #define do_prctl_get_unalign do_prctl_inval1
6460 #ifndef do_prctl_set_unalign
6461 #define do_prctl_set_unalign do_prctl_inval1
6463 #ifndef do_prctl_sme_get_vl
6464 #define do_prctl_sme_get_vl do_prctl_inval0
6466 #ifndef do_prctl_sme_set_vl
6467 #define do_prctl_sme_set_vl do_prctl_inval1
6470 static abi_long
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6471 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6476 case PR_GET_PDEATHSIG
:
6479 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6481 if (!is_error(ret
) &&
6482 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6483 return -TARGET_EFAULT
;
6487 case PR_SET_PDEATHSIG
:
6488 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6492 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6494 return -TARGET_EFAULT
;
6496 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6498 unlock_user(name
, arg2
, 16);
6503 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6505 return -TARGET_EFAULT
;
6507 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6509 unlock_user(name
, arg2
, 0);
6512 case PR_GET_FP_MODE
:
6513 return do_prctl_get_fp_mode(env
);
6514 case PR_SET_FP_MODE
:
6515 return do_prctl_set_fp_mode(env
, arg2
);
6517 return do_prctl_sve_get_vl(env
);
6519 return do_prctl_sve_set_vl(env
, arg2
);
6521 return do_prctl_sme_get_vl(env
);
6523 return do_prctl_sme_set_vl(env
, arg2
);
6524 case PR_PAC_RESET_KEYS
:
6525 if (arg3
|| arg4
|| arg5
) {
6526 return -TARGET_EINVAL
;
6528 return do_prctl_reset_keys(env
, arg2
);
6529 case PR_SET_TAGGED_ADDR_CTRL
:
6530 if (arg3
|| arg4
|| arg5
) {
6531 return -TARGET_EINVAL
;
6533 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6534 case PR_GET_TAGGED_ADDR_CTRL
:
6535 if (arg2
|| arg3
|| arg4
|| arg5
) {
6536 return -TARGET_EINVAL
;
6538 return do_prctl_get_tagged_addr_ctrl(env
);
6540 case PR_GET_UNALIGN
:
6541 return do_prctl_get_unalign(env
, arg2
);
6542 case PR_SET_UNALIGN
:
6543 return do_prctl_set_unalign(env
, arg2
);
6545 case PR_CAP_AMBIENT
:
6546 case PR_CAPBSET_READ
:
6547 case PR_CAPBSET_DROP
:
6548 case PR_GET_DUMPABLE
:
6549 case PR_SET_DUMPABLE
:
6550 case PR_GET_KEEPCAPS
:
6551 case PR_SET_KEEPCAPS
:
6552 case PR_GET_SECUREBITS
:
6553 case PR_SET_SECUREBITS
:
6556 case PR_GET_TIMERSLACK
:
6557 case PR_SET_TIMERSLACK
:
6559 case PR_MCE_KILL_GET
:
6560 case PR_GET_NO_NEW_PRIVS
:
6561 case PR_SET_NO_NEW_PRIVS
:
6562 case PR_GET_IO_FLUSHER
:
6563 case PR_SET_IO_FLUSHER
:
6564 /* Some prctl options have no pointer arguments and we can pass on. */
6565 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6567 case PR_GET_CHILD_SUBREAPER
:
6568 case PR_SET_CHILD_SUBREAPER
:
6569 case PR_GET_SPECULATION_CTRL
:
6570 case PR_SET_SPECULATION_CTRL
:
6571 case PR_GET_TID_ADDRESS
:
6573 return -TARGET_EINVAL
;
6577 /* Was used for SPE on PowerPC. */
6578 return -TARGET_EINVAL
;
6585 case PR_GET_SECCOMP
:
6586 case PR_SET_SECCOMP
:
6587 case PR_SET_SYSCALL_USER_DISPATCH
:
6588 case PR_GET_THP_DISABLE
:
6589 case PR_SET_THP_DISABLE
:
6592 /* Disable to prevent the target disabling stuff we need. */
6593 return -TARGET_EINVAL
;
6596 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6598 return -TARGET_EINVAL
;
6602 #define NEW_STACK_SIZE 0x40000
6605 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6608 pthread_mutex_t mutex
;
6609 pthread_cond_t cond
;
6612 abi_ulong child_tidptr
;
6613 abi_ulong parent_tidptr
;
6617 static void *clone_func(void *arg
)
6619 new_thread_info
*info
= arg
;
6624 rcu_register_thread();
6625 tcg_register_thread();
6629 ts
= (TaskState
*)cpu
->opaque
;
6630 info
->tid
= sys_gettid();
6632 if (info
->child_tidptr
)
6633 put_user_u32(info
->tid
, info
->child_tidptr
);
6634 if (info
->parent_tidptr
)
6635 put_user_u32(info
->tid
, info
->parent_tidptr
);
6636 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6637 /* Enable signals. */
6638 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6639 /* Signal to the parent that we're ready. */
6640 pthread_mutex_lock(&info
->mutex
);
6641 pthread_cond_broadcast(&info
->cond
);
6642 pthread_mutex_unlock(&info
->mutex
);
6643 /* Wait until the parent has finished initializing the tls state. */
6644 pthread_mutex_lock(&clone_lock
);
6645 pthread_mutex_unlock(&clone_lock
);
6651 /* do_fork() Must return host values and target errnos (unlike most
6652 do_*() functions). */
6653 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6654 abi_ulong parent_tidptr
, target_ulong newtls
,
6655 abi_ulong child_tidptr
)
6657 CPUState
*cpu
= env_cpu(env
);
6661 CPUArchState
*new_env
;
6664 flags
&= ~CLONE_IGNORED_FLAGS
;
6666 /* Emulate vfork() with fork() */
6667 if (flags
& CLONE_VFORK
)
6668 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6670 if (flags
& CLONE_VM
) {
6671 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6672 new_thread_info info
;
6673 pthread_attr_t attr
;
6675 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6676 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6677 return -TARGET_EINVAL
;
6680 ts
= g_new0(TaskState
, 1);
6681 init_task_state(ts
);
6683 /* Grab a mutex so that thread setup appears atomic. */
6684 pthread_mutex_lock(&clone_lock
);
6687 * If this is our first additional thread, we need to ensure we
6688 * generate code for parallel execution and flush old translations.
6689 * Do this now so that the copy gets CF_PARALLEL too.
6691 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6692 cpu
->tcg_cflags
|= CF_PARALLEL
;
6696 /* we create a new CPU instance. */
6697 new_env
= cpu_copy(env
);
6698 /* Init regs that differ from the parent. */
6699 cpu_clone_regs_child(new_env
, newsp
, flags
);
6700 cpu_clone_regs_parent(env
, flags
);
6701 new_cpu
= env_cpu(new_env
);
6702 new_cpu
->opaque
= ts
;
6703 ts
->bprm
= parent_ts
->bprm
;
6704 ts
->info
= parent_ts
->info
;
6705 ts
->signal_mask
= parent_ts
->signal_mask
;
6707 if (flags
& CLONE_CHILD_CLEARTID
) {
6708 ts
->child_tidptr
= child_tidptr
;
6711 if (flags
& CLONE_SETTLS
) {
6712 cpu_set_tls (new_env
, newtls
);
6715 memset(&info
, 0, sizeof(info
));
6716 pthread_mutex_init(&info
.mutex
, NULL
);
6717 pthread_mutex_lock(&info
.mutex
);
6718 pthread_cond_init(&info
.cond
, NULL
);
6720 if (flags
& CLONE_CHILD_SETTID
) {
6721 info
.child_tidptr
= child_tidptr
;
6723 if (flags
& CLONE_PARENT_SETTID
) {
6724 info
.parent_tidptr
= parent_tidptr
;
6727 ret
= pthread_attr_init(&attr
);
6728 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6729 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6730 /* It is not safe to deliver signals until the child has finished
6731 initializing, so temporarily block all signals. */
6732 sigfillset(&sigmask
);
6733 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6734 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6736 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6737 /* TODO: Free new CPU state if thread creation failed. */
6739 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6740 pthread_attr_destroy(&attr
);
6742 /* Wait for the child to initialize. */
6743 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6748 pthread_mutex_unlock(&info
.mutex
);
6749 pthread_cond_destroy(&info
.cond
);
6750 pthread_mutex_destroy(&info
.mutex
);
6751 pthread_mutex_unlock(&clone_lock
);
6753 /* if no CLONE_VM, we consider it is a fork */
6754 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6755 return -TARGET_EINVAL
;
6758 /* We can't support custom termination signals */
6759 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6760 return -TARGET_EINVAL
;
6763 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6764 if (flags
& CLONE_PIDFD
) {
6765 return -TARGET_EINVAL
;
6769 /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6770 if ((flags
& CLONE_PIDFD
) && (flags
& CLONE_PARENT_SETTID
)) {
6771 return -TARGET_EINVAL
;
6774 if (block_signals()) {
6775 return -QEMU_ERESTARTSYS
;
6781 /* Child Process. */
6782 cpu_clone_regs_child(env
, newsp
, flags
);
6784 /* There is a race condition here. The parent process could
6785 theoretically read the TID in the child process before the child
6786 tid is set. This would require using either ptrace
6787 (not implemented) or having *_tidptr to point at a shared memory
6788 mapping. We can't repeat the spinlock hack used above because
6789 the child process gets its own copy of the lock. */
6790 if (flags
& CLONE_CHILD_SETTID
)
6791 put_user_u32(sys_gettid(), child_tidptr
);
6792 if (flags
& CLONE_PARENT_SETTID
)
6793 put_user_u32(sys_gettid(), parent_tidptr
);
6794 ts
= (TaskState
*)cpu
->opaque
;
6795 if (flags
& CLONE_SETTLS
)
6796 cpu_set_tls (env
, newtls
);
6797 if (flags
& CLONE_CHILD_CLEARTID
)
6798 ts
->child_tidptr
= child_tidptr
;
6800 cpu_clone_regs_parent(env
, flags
);
6801 if (flags
& CLONE_PIDFD
) {
6803 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6804 int pid_child
= ret
;
6805 pid_fd
= pidfd_open(pid_child
, 0);
6807 fcntl(pid_fd
, F_SETFD
, fcntl(pid_fd
, F_GETFL
)
6813 put_user_u32(pid_fd
, parent_tidptr
);
6817 g_assert(!cpu_in_exclusive_context(cpu
));
6822 /* warning : doesn't handle linux specific flags... */
6823 static int target_to_host_fcntl_cmd(int cmd
)
6828 case TARGET_F_DUPFD
:
6829 case TARGET_F_GETFD
:
6830 case TARGET_F_SETFD
:
6831 case TARGET_F_GETFL
:
6832 case TARGET_F_SETFL
:
6833 case TARGET_F_OFD_GETLK
:
6834 case TARGET_F_OFD_SETLK
:
6835 case TARGET_F_OFD_SETLKW
:
6838 case TARGET_F_GETLK
:
6841 case TARGET_F_SETLK
:
6844 case TARGET_F_SETLKW
:
6847 case TARGET_F_GETOWN
:
6850 case TARGET_F_SETOWN
:
6853 case TARGET_F_GETSIG
:
6856 case TARGET_F_SETSIG
:
6859 #if TARGET_ABI_BITS == 32
6860 case TARGET_F_GETLK64
:
6863 case TARGET_F_SETLK64
:
6866 case TARGET_F_SETLKW64
:
6870 case TARGET_F_SETLEASE
:
6873 case TARGET_F_GETLEASE
:
6876 #ifdef F_DUPFD_CLOEXEC
6877 case TARGET_F_DUPFD_CLOEXEC
:
6878 ret
= F_DUPFD_CLOEXEC
;
6881 case TARGET_F_NOTIFY
:
6885 case TARGET_F_GETOWN_EX
:
6890 case TARGET_F_SETOWN_EX
:
6895 case TARGET_F_SETPIPE_SZ
:
6898 case TARGET_F_GETPIPE_SZ
:
6903 case TARGET_F_ADD_SEALS
:
6906 case TARGET_F_GET_SEALS
:
6911 ret
= -TARGET_EINVAL
;
6915 #if defined(__powerpc64__)
6916 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6917 * is not supported by kernel. The glibc fcntl call actually adjusts
6918 * them to 5, 6 and 7 before making the syscall(). Since we make the
6919 * syscall directly, adjust to what is supported by the kernel.
6921 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6922 ret
-= F_GETLK64
- 5;
6929 #define FLOCK_TRANSTBL \
6931 TRANSTBL_CONVERT(F_RDLCK); \
6932 TRANSTBL_CONVERT(F_WRLCK); \
6933 TRANSTBL_CONVERT(F_UNLCK); \
6936 static int target_to_host_flock(int type
)
6938 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6940 #undef TRANSTBL_CONVERT
6941 return -TARGET_EINVAL
;
6944 static int host_to_target_flock(int type
)
6946 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6948 #undef TRANSTBL_CONVERT
6949 /* if we don't know how to convert the value coming
6950 * from the host we copy to the target field as-is
6955 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6956 abi_ulong target_flock_addr
)
6958 struct target_flock
*target_fl
;
6961 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6962 return -TARGET_EFAULT
;
6965 __get_user(l_type
, &target_fl
->l_type
);
6966 l_type
= target_to_host_flock(l_type
);
6970 fl
->l_type
= l_type
;
6971 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6972 __get_user(fl
->l_start
, &target_fl
->l_start
);
6973 __get_user(fl
->l_len
, &target_fl
->l_len
);
6974 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6975 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6979 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6980 const struct flock64
*fl
)
6982 struct target_flock
*target_fl
;
6985 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6986 return -TARGET_EFAULT
;
6989 l_type
= host_to_target_flock(fl
->l_type
);
6990 __put_user(l_type
, &target_fl
->l_type
);
6991 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6992 __put_user(fl
->l_start
, &target_fl
->l_start
);
6993 __put_user(fl
->l_len
, &target_fl
->l_len
);
6994 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6995 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6999 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
7000 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
7002 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
7003 struct target_oabi_flock64
{
7011 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
7012 abi_ulong target_flock_addr
)
7014 struct target_oabi_flock64
*target_fl
;
7017 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
7018 return -TARGET_EFAULT
;
7021 __get_user(l_type
, &target_fl
->l_type
);
7022 l_type
= target_to_host_flock(l_type
);
7026 fl
->l_type
= l_type
;
7027 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
7028 __get_user(fl
->l_start
, &target_fl
->l_start
);
7029 __get_user(fl
->l_len
, &target_fl
->l_len
);
7030 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
7031 unlock_user_struct(target_fl
, target_flock_addr
, 0);
7035 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
7036 const struct flock64
*fl
)
7038 struct target_oabi_flock64
*target_fl
;
7041 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
7042 return -TARGET_EFAULT
;
7045 l_type
= host_to_target_flock(fl
->l_type
);
7046 __put_user(l_type
, &target_fl
->l_type
);
7047 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7048 __put_user(fl
->l_start
, &target_fl
->l_start
);
7049 __put_user(fl
->l_len
, &target_fl
->l_len
);
7050 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7051 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7056 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
7057 abi_ulong target_flock_addr
)
7059 struct target_flock64
*target_fl
;
7062 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
7063 return -TARGET_EFAULT
;
7066 __get_user(l_type
, &target_fl
->l_type
);
7067 l_type
= target_to_host_flock(l_type
);
7071 fl
->l_type
= l_type
;
7072 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
7073 __get_user(fl
->l_start
, &target_fl
->l_start
);
7074 __get_user(fl
->l_len
, &target_fl
->l_len
);
7075 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
7076 unlock_user_struct(target_fl
, target_flock_addr
, 0);
7080 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
7081 const struct flock64
*fl
)
7083 struct target_flock64
*target_fl
;
7086 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
7087 return -TARGET_EFAULT
;
7090 l_type
= host_to_target_flock(fl
->l_type
);
7091 __put_user(l_type
, &target_fl
->l_type
);
7092 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7093 __put_user(fl
->l_start
, &target_fl
->l_start
);
7094 __put_user(fl
->l_len
, &target_fl
->l_len
);
7095 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7096 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7100 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
7102 struct flock64 fl64
;
7104 struct f_owner_ex fox
;
7105 struct target_f_owner_ex
*target_fox
;
7108 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
7110 if (host_cmd
== -TARGET_EINVAL
)
7114 case TARGET_F_GETLK
:
7115 ret
= copy_from_user_flock(&fl64
, arg
);
7119 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7121 ret
= copy_to_user_flock(arg
, &fl64
);
7125 case TARGET_F_SETLK
:
7126 case TARGET_F_SETLKW
:
7127 ret
= copy_from_user_flock(&fl64
, arg
);
7131 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7134 case TARGET_F_GETLK64
:
7135 case TARGET_F_OFD_GETLK
:
7136 ret
= copy_from_user_flock64(&fl64
, arg
);
7140 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7142 ret
= copy_to_user_flock64(arg
, &fl64
);
7145 case TARGET_F_SETLK64
:
7146 case TARGET_F_SETLKW64
:
7147 case TARGET_F_OFD_SETLK
:
7148 case TARGET_F_OFD_SETLKW
:
7149 ret
= copy_from_user_flock64(&fl64
, arg
);
7153 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7156 case TARGET_F_GETFL
:
7157 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7159 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
7160 /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7161 if (O_LARGEFILE
== 0 && HOST_LONG_BITS
== 64) {
7162 ret
|= TARGET_O_LARGEFILE
;
7167 case TARGET_F_SETFL
:
7168 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
7169 target_to_host_bitmask(arg
,
7174 case TARGET_F_GETOWN_EX
:
7175 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7177 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
7178 return -TARGET_EFAULT
;
7179 target_fox
->type
= tswap32(fox
.type
);
7180 target_fox
->pid
= tswap32(fox
.pid
);
7181 unlock_user_struct(target_fox
, arg
, 1);
7187 case TARGET_F_SETOWN_EX
:
7188 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
7189 return -TARGET_EFAULT
;
7190 fox
.type
= tswap32(target_fox
->type
);
7191 fox
.pid
= tswap32(target_fox
->pid
);
7192 unlock_user_struct(target_fox
, arg
, 0);
7193 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7197 case TARGET_F_SETSIG
:
7198 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
7201 case TARGET_F_GETSIG
:
7202 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
7205 case TARGET_F_SETOWN
:
7206 case TARGET_F_GETOWN
:
7207 case TARGET_F_SETLEASE
:
7208 case TARGET_F_GETLEASE
:
7209 case TARGET_F_SETPIPE_SZ
:
7210 case TARGET_F_GETPIPE_SZ
:
7211 case TARGET_F_ADD_SEALS
:
7212 case TARGET_F_GET_SEALS
:
7213 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7217 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
7225 static inline int high2lowuid(int uid
)
7233 static inline int high2lowgid(int gid
)
7241 static inline int low2highuid(int uid
)
7243 if ((int16_t)uid
== -1)
7249 static inline int low2highgid(int gid
)
7251 if ((int16_t)gid
== -1)
7256 static inline int tswapid(int id
)
7261 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7263 #else /* !USE_UID16 */
7264 static inline int high2lowuid(int uid
)
7268 static inline int high2lowgid(int gid
)
7272 static inline int low2highuid(int uid
)
7276 static inline int low2highgid(int gid
)
7280 static inline int tswapid(int id
)
7285 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7287 #endif /* USE_UID16 */
7289 /* We must do direct syscalls for setting UID/GID, because we want to
7290 * implement the Linux system call semantics of "change only for this thread",
7291 * not the libc/POSIX semantics of "change for all threads in process".
7292 * (See http://ewontfix.com/17/ for more details.)
7293 * We use the 32-bit version of the syscalls if present; if it is not
7294 * then either the host architecture supports 32-bit UIDs natively with
7295 * the standard syscall, or the 16-bit UID is the best we can do.
7297 #ifdef __NR_setuid32
7298 #define __NR_sys_setuid __NR_setuid32
7300 #define __NR_sys_setuid __NR_setuid
7302 #ifdef __NR_setgid32
7303 #define __NR_sys_setgid __NR_setgid32
7305 #define __NR_sys_setgid __NR_setgid
7307 #ifdef __NR_setresuid32
7308 #define __NR_sys_setresuid __NR_setresuid32
7310 #define __NR_sys_setresuid __NR_setresuid
7312 #ifdef __NR_setresgid32
7313 #define __NR_sys_setresgid __NR_setresgid32
7315 #define __NR_sys_setresgid __NR_setresgid
7318 _syscall1(int, sys_setuid
, uid_t
, uid
)
7319 _syscall1(int, sys_setgid
, gid_t
, gid
)
7320 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7321 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7323 void syscall_init(void)
7326 const argtype
*arg_type
;
7329 thunk_init(STRUCT_MAX
);
7331 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7332 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7333 #include "syscall_types.h"
7335 #undef STRUCT_SPECIAL
7337 /* we patch the ioctl size if necessary. We rely on the fact that
7338 no ioctl has all the bits at '1' in the size field */
7340 while (ie
->target_cmd
!= 0) {
7341 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7342 TARGET_IOC_SIZEMASK
) {
7343 arg_type
= ie
->arg_type
;
7344 if (arg_type
[0] != TYPE_PTR
) {
7345 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7350 size
= thunk_type_size(arg_type
, 0);
7351 ie
->target_cmd
= (ie
->target_cmd
&
7352 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7353 (size
<< TARGET_IOC_SIZESHIFT
);
7356 /* automatic consistency check if same arch */
7357 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7358 (defined(__x86_64__) && defined(TARGET_X86_64))
7359 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7360 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7361 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7368 #ifdef TARGET_NR_truncate64
7369 static inline abi_long
target_truncate64(CPUArchState
*cpu_env
, const char *arg1
,
7374 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7378 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7382 #ifdef TARGET_NR_ftruncate64
7383 static inline abi_long
target_ftruncate64(CPUArchState
*cpu_env
, abi_long arg1
,
7388 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7392 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7396 #if defined(TARGET_NR_timer_settime) || \
7397 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7398 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
7399 abi_ulong target_addr
)
7401 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
7402 offsetof(struct target_itimerspec
,
7404 target_to_host_timespec(&host_its
->it_value
, target_addr
+
7405 offsetof(struct target_itimerspec
,
7407 return -TARGET_EFAULT
;
7414 #if defined(TARGET_NR_timer_settime64) || \
7415 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7416 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
7417 abi_ulong target_addr
)
7419 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
7420 offsetof(struct target__kernel_itimerspec
,
7422 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
7423 offsetof(struct target__kernel_itimerspec
,
7425 return -TARGET_EFAULT
;
7432 #if ((defined(TARGET_NR_timerfd_gettime) || \
7433 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7434 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7435 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7436 struct itimerspec
*host_its
)
7438 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7440 &host_its
->it_interval
) ||
7441 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7443 &host_its
->it_value
)) {
7444 return -TARGET_EFAULT
;
7450 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7451 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7452 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7453 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
7454 struct itimerspec
*host_its
)
7456 if (host_to_target_timespec64(target_addr
+
7457 offsetof(struct target__kernel_itimerspec
,
7459 &host_its
->it_interval
) ||
7460 host_to_target_timespec64(target_addr
+
7461 offsetof(struct target__kernel_itimerspec
,
7463 &host_its
->it_value
)) {
7464 return -TARGET_EFAULT
;
7470 #if defined(TARGET_NR_adjtimex) || \
7471 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7472 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7473 abi_long target_addr
)
7475 struct target_timex
*target_tx
;
7477 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7478 return -TARGET_EFAULT
;
7481 __get_user(host_tx
->modes
, &target_tx
->modes
);
7482 __get_user(host_tx
->offset
, &target_tx
->offset
);
7483 __get_user(host_tx
->freq
, &target_tx
->freq
);
7484 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7485 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7486 __get_user(host_tx
->status
, &target_tx
->status
);
7487 __get_user(host_tx
->constant
, &target_tx
->constant
);
7488 __get_user(host_tx
->precision
, &target_tx
->precision
);
7489 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7490 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7491 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7492 __get_user(host_tx
->tick
, &target_tx
->tick
);
7493 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7494 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7495 __get_user(host_tx
->shift
, &target_tx
->shift
);
7496 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7497 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7498 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7499 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7500 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7501 __get_user(host_tx
->tai
, &target_tx
->tai
);
7503 unlock_user_struct(target_tx
, target_addr
, 0);
7507 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7508 struct timex
*host_tx
)
7510 struct target_timex
*target_tx
;
7512 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7513 return -TARGET_EFAULT
;
7516 __put_user(host_tx
->modes
, &target_tx
->modes
);
7517 __put_user(host_tx
->offset
, &target_tx
->offset
);
7518 __put_user(host_tx
->freq
, &target_tx
->freq
);
7519 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7520 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7521 __put_user(host_tx
->status
, &target_tx
->status
);
7522 __put_user(host_tx
->constant
, &target_tx
->constant
);
7523 __put_user(host_tx
->precision
, &target_tx
->precision
);
7524 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7525 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7526 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7527 __put_user(host_tx
->tick
, &target_tx
->tick
);
7528 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7529 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7530 __put_user(host_tx
->shift
, &target_tx
->shift
);
7531 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7532 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7533 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7534 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7535 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7536 __put_user(host_tx
->tai
, &target_tx
->tai
);
7538 unlock_user_struct(target_tx
, target_addr
, 1);
7544 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7545 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7546 abi_long target_addr
)
7548 struct target__kernel_timex
*target_tx
;
7550 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7551 offsetof(struct target__kernel_timex
,
7553 return -TARGET_EFAULT
;
7556 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7557 return -TARGET_EFAULT
;
7560 __get_user(host_tx
->modes
, &target_tx
->modes
);
7561 __get_user(host_tx
->offset
, &target_tx
->offset
);
7562 __get_user(host_tx
->freq
, &target_tx
->freq
);
7563 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7564 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7565 __get_user(host_tx
->status
, &target_tx
->status
);
7566 __get_user(host_tx
->constant
, &target_tx
->constant
);
7567 __get_user(host_tx
->precision
, &target_tx
->precision
);
7568 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7569 __get_user(host_tx
->tick
, &target_tx
->tick
);
7570 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7571 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7572 __get_user(host_tx
->shift
, &target_tx
->shift
);
7573 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7574 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7575 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7576 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7577 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7578 __get_user(host_tx
->tai
, &target_tx
->tai
);
7580 unlock_user_struct(target_tx
, target_addr
, 0);
7584 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7585 struct timex
*host_tx
)
7587 struct target__kernel_timex
*target_tx
;
7589 if (copy_to_user_timeval64(target_addr
+
7590 offsetof(struct target__kernel_timex
, time
),
7592 return -TARGET_EFAULT
;
7595 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7596 return -TARGET_EFAULT
;
7599 __put_user(host_tx
->modes
, &target_tx
->modes
);
7600 __put_user(host_tx
->offset
, &target_tx
->offset
);
7601 __put_user(host_tx
->freq
, &target_tx
->freq
);
7602 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7603 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7604 __put_user(host_tx
->status
, &target_tx
->status
);
7605 __put_user(host_tx
->constant
, &target_tx
->constant
);
7606 __put_user(host_tx
->precision
, &target_tx
->precision
);
7607 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7608 __put_user(host_tx
->tick
, &target_tx
->tick
);
7609 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7610 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7611 __put_user(host_tx
->shift
, &target_tx
->shift
);
7612 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7613 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7614 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7615 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7616 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7617 __put_user(host_tx
->tai
, &target_tx
->tai
);
7619 unlock_user_struct(target_tx
, target_addr
, 1);
7624 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7625 #define sigev_notify_thread_id _sigev_un._tid
/*
 * target_to_host_sigevent: copy a guest struct sigevent at target_addr
 * into *host_sevp, byte-swapping each field and translating the signal
 * number to its host value.  Returns -TARGET_EFAULT if the guest
 * address cannot be locked.
 * NOTE(review): this chunk is a garbled extraction; interior lines
 * (e.g. the success return) are missing from view.
 */
7628 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7629 abi_ulong target_addr
)
7631 struct target_sigevent
*target_sevp
;
/* Map (and validate) the guest struct for reading. */
7633 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7634 return -TARGET_EFAULT
;
7637 /* This union is awkward on 64 bit systems because it has a 32 bit
7638 * integer and a pointer in it; we follow the conversion approach
7639 * used for handling sigval types in signal.c so the guest should get
7640 * the correct value back even if we did a 64 bit byteswap and it's
7641 * using the 32 bit integer.
7643 host_sevp
->sigev_value
.sival_ptr
=
7644 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
/* Signal numbers differ between guest and host ABIs; translate. */
7645 host_sevp
->sigev_signo
=
7646 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7647 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7648 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7650 unlock_user_struct(target_sevp
, target_addr
, 1);
7654 #if defined(TARGET_NR_mlockall)
/*
 * target_to_host_mlockall_arg: translate the guest's mlockall() flag
 * bits (TARGET_MCL_*) into the corresponding host MCL_* bits.
 * NOTE(review): the declaration/initialisation of `result` and the
 * final return fall in lines missing from this garbled extraction.
 */
7655 static inline int target_to_host_mlockall_arg(int arg
)
7659 if (arg
& TARGET_MCL_CURRENT
) {
7660 result
|= MCL_CURRENT
;
7662 if (arg
& TARGET_MCL_FUTURE
) {
7663 result
|= MCL_FUTURE
;
7666 if (arg
& TARGET_MCL_ONFAULT
) {
7667 result
|= MCL_ONFAULT
;
/*
 * target_to_host_msync_arg: map the three guest msync() flags
 * (TARGET_MS_ASYNC/INVALIDATE/SYNC) to their host MS_* values.
 * Any bits outside those three are passed through unchanged, so the
 * host syscall can reject unknown flags itself.
 */
7675 static inline int target_to_host_msync_arg(abi_long arg
)
7677 return ((arg
& TARGET_MS_ASYNC
) ? MS_ASYNC
: 0) |
7678 ((arg
& TARGET_MS_INVALIDATE
) ? MS_INVALIDATE
: 0) |
7679 ((arg
& TARGET_MS_SYNC
) ? MS_SYNC
: 0) |
7680 (arg
& ~(TARGET_MS_ASYNC
| TARGET_MS_INVALIDATE
| TARGET_MS_SYNC
));
7683 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7684 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7685 defined(TARGET_NR_newfstatat))
7686 static inline abi_long
host_to_target_stat64(CPUArchState
*cpu_env
,
7687 abi_ulong target_addr
,
7688 struct stat
*host_st
)
7690 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7691 if (cpu_env
->eabi
) {
7692 struct target_eabi_stat64
*target_st
;
7694 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7695 return -TARGET_EFAULT
;
7696 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7697 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7698 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7699 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7700 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7702 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7703 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7704 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7705 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7706 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7707 __put_user(host_st
->st_size
, &target_st
->st_size
);
7708 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7709 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7710 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7711 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7712 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7713 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7714 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7715 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7716 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7718 unlock_user_struct(target_st
, target_addr
, 1);
7722 #if defined(TARGET_HAS_STRUCT_STAT64)
7723 struct target_stat64
*target_st
;
7725 struct target_stat
*target_st
;
7728 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7729 return -TARGET_EFAULT
;
7730 memset(target_st
, 0, sizeof(*target_st
));
7731 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7732 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7733 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7734 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7736 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7737 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7738 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7739 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7740 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7741 /* XXX: better use of kernel struct */
7742 __put_user(host_st
->st_size
, &target_st
->st_size
);
7743 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7744 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7745 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7746 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7747 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7748 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7749 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7750 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7751 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7753 unlock_user_struct(target_st
, target_addr
, 1);
7760 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7761 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7762 abi_ulong target_addr
)
7764 struct target_statx
*target_stx
;
7766 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7767 return -TARGET_EFAULT
;
7769 memset(target_stx
, 0, sizeof(*target_stx
));
7771 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7772 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7773 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7774 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7775 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7776 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7777 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7778 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7779 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7780 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7781 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7782 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7783 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7784 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7785 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7786 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7787 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7788 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7789 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7790 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7791 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7792 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7793 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7795 unlock_user_struct(target_stx
, target_addr
, 1);
7801 static int do_sys_futex(int *uaddr
, int op
, int val
,
7802 const struct timespec
*timeout
, int *uaddr2
,
7805 #if HOST_LONG_BITS == 64
7806 #if defined(__NR_futex)
7807 /* always a 64-bit time_t, it doesn't define _time64 version */
7808 return sys_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
);
7811 #else /* HOST_LONG_BITS == 64 */
7812 #if defined(__NR_futex_time64)
7813 if (sizeof(timeout
->tv_sec
) == 8) {
7814 /* _time64 function on 32bit arch */
7815 return sys_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
, val3
);
7818 #if defined(__NR_futex)
7819 /* old function on 32bit arch */
7820 return sys_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
);
7822 #endif /* HOST_LONG_BITS == 64 */
7823 g_assert_not_reached();
7826 static int do_safe_futex(int *uaddr
, int op
, int val
,
7827 const struct timespec
*timeout
, int *uaddr2
,
7830 #if HOST_LONG_BITS == 64
7831 #if defined(__NR_futex)
7832 /* always a 64-bit time_t, it doesn't define _time64 version */
7833 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7835 #else /* HOST_LONG_BITS == 64 */
7836 #if defined(__NR_futex_time64)
7837 if (sizeof(timeout
->tv_sec
) == 8) {
7838 /* _time64 function on 32bit arch */
7839 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7843 #if defined(__NR_futex)
7844 /* old function on 32bit arch */
7845 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7847 #endif /* HOST_LONG_BITS == 64 */
7848 return -TARGET_ENOSYS
;
7851 /* ??? Using host futex calls even when target atomic operations
7852 are not really atomic probably breaks things. However implementing
7853 futexes locally would make futexes shared between multiple processes
7854 tricky. However they're probably useless because guest atomic
7855 operations won't work either. */
7856 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7857 static int do_futex(CPUState
*cpu
, bool time64
, target_ulong uaddr
,
7858 int op
, int val
, target_ulong timeout
,
7859 target_ulong uaddr2
, int val3
)
7861 struct timespec ts
, *pts
= NULL
;
7862 void *haddr2
= NULL
;
7865 /* We assume FUTEX_* constants are the same on both host and target. */
7866 #ifdef FUTEX_CMD_MASK
7867 base_op
= op
& FUTEX_CMD_MASK
;
7873 case FUTEX_WAIT_BITSET
:
7876 case FUTEX_WAIT_REQUEUE_PI
:
7878 haddr2
= g2h(cpu
, uaddr2
);
7881 case FUTEX_LOCK_PI2
:
7884 case FUTEX_WAKE_BITSET
:
7885 case FUTEX_TRYLOCK_PI
:
7886 case FUTEX_UNLOCK_PI
:
7890 val
= target_to_host_signal(val
);
7893 case FUTEX_CMP_REQUEUE
:
7894 case FUTEX_CMP_REQUEUE_PI
:
7895 val3
= tswap32(val3
);
7900 * For these, the 4th argument is not TIMEOUT, but VAL2.
7901 * But the prototype of do_safe_futex takes a pointer, so
7902 * insert casts to satisfy the compiler. We do not need
7903 * to tswap VAL2 since it's not compared to guest memory.
7905 pts
= (struct timespec
*)(uintptr_t)timeout
;
7907 haddr2
= g2h(cpu
, uaddr2
);
7910 return -TARGET_ENOSYS
;
7915 ? target_to_host_timespec64(pts
, timeout
)
7916 : target_to_host_timespec(pts
, timeout
)) {
7917 return -TARGET_EFAULT
;
7920 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, haddr2
, val3
);
7924 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7925 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7926 abi_long handle
, abi_long mount_id
,
7929 struct file_handle
*target_fh
;
7930 struct file_handle
*fh
;
7934 unsigned int size
, total_size
;
7936 if (get_user_s32(size
, handle
)) {
7937 return -TARGET_EFAULT
;
7940 name
= lock_user_string(pathname
);
7942 return -TARGET_EFAULT
;
7945 total_size
= sizeof(struct file_handle
) + size
;
7946 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7948 unlock_user(name
, pathname
, 0);
7949 return -TARGET_EFAULT
;
7952 fh
= g_malloc0(total_size
);
7953 fh
->handle_bytes
= size
;
7955 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7956 unlock_user(name
, pathname
, 0);
7958 /* man name_to_handle_at(2):
7959 * Other than the use of the handle_bytes field, the caller should treat
7960 * the file_handle structure as an opaque data type
7963 memcpy(target_fh
, fh
, total_size
);
7964 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7965 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7967 unlock_user(target_fh
, handle
, total_size
);
7969 if (put_user_s32(mid
, mount_id
)) {
7970 return -TARGET_EFAULT
;
7978 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7979 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7982 struct file_handle
*target_fh
;
7983 struct file_handle
*fh
;
7984 unsigned int size
, total_size
;
7987 if (get_user_s32(size
, handle
)) {
7988 return -TARGET_EFAULT
;
7991 total_size
= sizeof(struct file_handle
) + size
;
7992 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7994 return -TARGET_EFAULT
;
7997 fh
= g_memdup(target_fh
, total_size
);
7998 fh
->handle_bytes
= size
;
7999 fh
->handle_type
= tswap32(target_fh
->handle_type
);
8001 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
8002 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
8006 unlock_user(target_fh
, handle
, total_size
);
8012 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * do_signalfd4: implement the guest signalfd()/signalfd4() syscalls.
 * Rejects flags other than the guest's O_NONBLOCK/O_CLOEXEC with
 * -TARGET_EINVAL, converts the guest sigset and flag bits to host
 * form, calls the host signalfd(), and registers the resulting fd
 * with the signalfd fd translator.
 * NOTE(review): garbled extraction; local declarations and the final
 * return of `ret` are in lines missing from this view.
 */
8014 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
8017 target_sigset_t
*target_mask
;
/* Only the nonblock/cloexec bits are valid guest flags. */
8021 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
8022 return -TARGET_EINVAL
;
8024 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
8025 return -TARGET_EFAULT
;
8028 target_to_host_sigset(&host_mask
, target_mask
);
8030 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
8032 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
/* Reads from this fd need byte-swapping of signalfd_siginfo. */
8034 fd_trans_register(ret
, &target_signalfd_trans
);
8037 unlock_user_struct(target_mask
, mask
, 0);
8043 /* Map host to target signal numbers for the wait family of syscalls.
8044 Assume all other status bits are the same. */
8045 int host_to_target_waitstatus(int status
)
/* Terminated by signal: translate the low 7 bits (WTERMSIG). */
8047 if (WIFSIGNALED(status
)) {
8048 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
/* Stopped: the stop signal lives in bits 8..15 (WSTOPSIG). */
8050 if (WIFSTOPPED(status
)) {
8051 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
/*
 * open_self_cmdline: emulate /proc/self/cmdline by writing each
 * saved argv[] string (including its NUL terminator, hence the +1)
 * to fd.  The argument vector comes from the binprm saved in the
 * task state at exec time.
 * NOTE(review): garbled extraction; the error-return path of the
 * short-write check is in lines missing from this view.
 */
8057 static int open_self_cmdline(CPUArchState
*cpu_env
, int fd
)
8059 CPUState
*cpu
= env_cpu(cpu_env
);
8060 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
8063 for (i
= 0; i
< bprm
->argc
; i
++) {
/* +1 so the NUL separator between arguments is written too. */
8064 size_t len
= strlen(bprm
->argv
[i
]) + 1;
8066 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
/*
 * show_smaps: print the per-mapping detail stanza used by the
 * emulated /proc/self/smaps.  Sizes are reported in kB (>> 10);
 * all page-accounting counters QEMU cannot track are fixed at 0 kB.
 */
8074 static void show_smaps(int fd
, unsigned long size
)
8076 unsigned long page_size_kb
= TARGET_PAGE_SIZE
>> 10;
8077 unsigned long size_kb
= size
>> 10;
8079 dprintf(fd
, "Size: %lu kB\n"
8080 "KernelPageSize: %lu kB\n"
8081 "MMUPageSize: %lu kB\n"
8085 "Shared_Clean: 0 kB\n"
8086 "Shared_Dirty: 0 kB\n"
8087 "Private_Clean: 0 kB\n"
8088 "Private_Dirty: 0 kB\n"
8089 "Referenced: 0 kB\n"
8092 "AnonHugePages: 0 kB\n"
8093 "ShmemPmdMapped: 0 kB\n"
8094 "FilePmdMapped: 0 kB\n"
8095 "Shared_Hugetlb: 0 kB\n"
8096 "Private_Hugetlb: 0 kB\n"
8100 "THPeligible: 0\n", size_kb
, page_size_kb
, page_size_kb
);
8103 static int open_self_maps_1(CPUArchState
*cpu_env
, int fd
, bool smaps
)
8105 CPUState
*cpu
= env_cpu(cpu_env
);
8106 TaskState
*ts
= cpu
->opaque
;
8107 GSList
*map_info
= read_self_maps();
8111 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
8112 MapInfo
*e
= (MapInfo
*) s
->data
;
8114 if (h2g_valid(e
->start
)) {
8115 unsigned long min
= e
->start
;
8116 unsigned long max
= e
->end
;
8117 int flags
= page_get_flags(h2g(min
));
8120 max
= h2g_valid(max
- 1) ?
8121 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
8123 if (!page_check_range(h2g(min
), max
- min
, flags
)) {
8128 if (h2g(max
) == ts
->info
->stack_limit
) {
8130 if (h2g(min
) == ts
->info
->stack_limit
) {
8137 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8138 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
8139 h2g(min
), h2g(max
- 1) + 1,
8140 (flags
& PAGE_READ
) ? 'r' : '-',
8141 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8142 (flags
& PAGE_EXEC
) ? 'x' : '-',
8143 e
->is_priv
? 'p' : 's',
8144 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
8146 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8151 show_smaps(fd
, max
- min
);
8152 dprintf(fd
, "VmFlags:%s%s%s%s%s%s%s%s\n",
8153 (flags
& PAGE_READ
) ? " rd" : "",
8154 (flags
& PAGE_WRITE_ORG
) ? " wr" : "",
8155 (flags
& PAGE_EXEC
) ? " ex" : "",
8156 e
->is_priv
? "" : " sh",
8157 (flags
& PAGE_READ
) ? " mr" : "",
8158 (flags
& PAGE_WRITE_ORG
) ? " mw" : "",
8159 (flags
& PAGE_EXEC
) ? " me" : "",
8160 e
->is_priv
? "" : " ms");
8165 free_self_maps(map_info
);
8167 #ifdef TARGET_VSYSCALL_PAGE
8169 * We only support execution from the vsyscall page.
8170 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8172 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
8173 " --xp 00000000 00:00 0",
8174 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
8175 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
8177 show_smaps(fd
, TARGET_PAGE_SIZE
);
8178 dprintf(fd
, "VmFlags: ex\n");
8185 static int open_self_maps(CPUArchState
*cpu_env
, int fd
)
8187 return open_self_maps_1(cpu_env
, fd
, false);
8190 static int open_self_smaps(CPUArchState
*cpu_env
, int fd
)
8192 return open_self_maps_1(cpu_env
, fd
, true);
8195 static int open_self_stat(CPUArchState
*cpu_env
, int fd
)
8197 CPUState
*cpu
= env_cpu(cpu_env
);
8198 TaskState
*ts
= cpu
->opaque
;
8199 g_autoptr(GString
) buf
= g_string_new(NULL
);
8202 for (i
= 0; i
< 44; i
++) {
8205 g_string_printf(buf
, FMT_pid
" ", getpid());
8206 } else if (i
== 1) {
8208 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8209 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8210 g_string_printf(buf
, "(%.15s) ", bin
);
8211 } else if (i
== 2) {
8213 g_string_assign(buf
, "R "); /* we are running right now */
8214 } else if (i
== 3) {
8216 g_string_printf(buf
, FMT_pid
" ", getppid());
8217 } else if (i
== 21) {
8219 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8220 } else if (i
== 27) {
8222 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8224 /* for the rest, there is MasterCard */
8225 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8228 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
8236 static int open_self_auxv(CPUArchState
*cpu_env
, int fd
)
8238 CPUState
*cpu
= env_cpu(cpu_env
);
8239 TaskState
*ts
= cpu
->opaque
;
8240 abi_ulong auxv
= ts
->info
->saved_auxv
;
8241 abi_ulong len
= ts
->info
->auxv_len
;
8245 * Auxiliary vector is stored in target process stack.
8246 * read in whole auxv vector and copy it to file
8248 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
8252 r
= write(fd
, ptr
, len
);
8259 lseek(fd
, 0, SEEK_SET
);
8260 unlock_user(ptr
, auxv
, len
);
/*
 * is_proc_myself: does `filename` name the entry `entry` of the
 * current process's /proc directory?  Accepts both "/proc/self/<entry>"
 * and "/proc/<our-pid>/<entry>".
 * NOTE(review): garbled extraction; the `myself` buffer declaration
 * and the return statements are in lines missing from this view.
 */
8266 static int is_proc_myself(const char *filename
, const char *entry
)
8268 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
8269 filename
+= strlen("/proc/");
8270 if (!strncmp(filename
, "self/", strlen("self/"))) {
8271 filename
+= strlen("self/");
8272 } else if (*filename
>= '1' && *filename
<= '9') {
/* Numeric directory: compare against our own pid. */
8274 snprintf(myself
, sizeof(myself
), "%d/", getpid());
8275 if (!strncmp(filename
, myself
, strlen(myself
))) {
8276 filename
+= strlen(myself
);
/* Finally the remaining path component must equal `entry`. */
8283 if (!strcmp(filename
, entry
)) {
/*
 * excp_dump_file: write a guest-exception crash report to `logfile`:
 * the formatted message (fmt/code), the failing executable path,
 * the full CPU register state, and the process memory map.
 * The fmt argument is trusted (supplied by internal callers only).
 */
8290 static void excp_dump_file(FILE *logfile
, CPUArchState
*env
,
8291 const char *fmt
, int code
)
8294 CPUState
*cs
= env_cpu(env
);
8296 fprintf(logfile
, fmt
, code
);
8297 fprintf(logfile
, "Failing executable: %s\n", exec_path
);
8298 cpu_dump_state(cs
, logfile
, 0);
/* Reuse the /proc/self/maps emulation to dump the memory layout. */
8299 open_self_maps(env
, fileno(logfile
));
/*
 * target_exception_dump: report a fatal guest exception both to
 * stderr and, when a separate QEMU log file is configured, to that
 * log file (taken via qemu_log_trylock/qemu_log_unlock).
 */
8303 void target_exception_dump(CPUArchState
*env
, const char *fmt
, int code
)
8305 /* dump to console */
8306 excp_dump_file(stderr
, env
, fmt
, code
);
8308 /* dump to log file */
8309 if (qemu_log_separate()) {
8310 FILE *logfile
= qemu_log_trylock();
8312 excp_dump_file(logfile
, env
, fmt
, code
);
8313 qemu_log_unlock(logfile
);
8317 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8318 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8319 defined(TARGET_RISCV) || defined(TARGET_S390X)
/*
 * is_proc: exact-match predicate for the fake_open table entries
 * that name an absolute /proc path (contrast is_proc_myself, which
 * matches entries of the current process's /proc directory).
 */
8320 static int is_proc(const char *filename
, const char *entry
)
8322 return strcmp(filename
, entry
) == 0;
8326 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8327 static int open_net_route(CPUArchState
*cpu_env
, int fd
)
8334 fp
= fopen("/proc/net/route", "r");
8341 read
= getline(&line
, &len
, fp
);
8342 dprintf(fd
, "%s", line
);
8346 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8348 uint32_t dest
, gw
, mask
;
8349 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8352 fields
= sscanf(line
,
8353 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8354 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8355 &mask
, &mtu
, &window
, &irtt
);
8359 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8360 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8361 metric
, tswap32(mask
), mtu
, window
, irtt
);
8371 #if defined(TARGET_SPARC)
/*
 * open_cpuinfo (SPARC): minimal /proc/cpuinfo emulation — reports a
 * fixed "sun4u" machine type only.
 */
8372 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8374 dprintf(fd
, "type\t\t: sun4u\n");
8379 #if defined(TARGET_HPPA)
8380 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8384 num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8385 for (i
= 0; i
< num_cpus
; i
++) {
8386 dprintf(fd
, "processor\t: %d\n", i
);
8387 dprintf(fd
, "cpu family\t: PA-RISC 1.1e\n");
8388 dprintf(fd
, "cpu\t\t: PA7300LC (PCX-L2)\n");
8389 dprintf(fd
, "capabilities\t: os32\n");
8390 dprintf(fd
, "model\t\t: 9000/778/B160L - "
8391 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8397 #if defined(TARGET_RISCV)
8398 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8401 int num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8402 RISCVCPU
*cpu
= env_archcpu(cpu_env
);
8403 const RISCVCPUConfig
*cfg
= riscv_cpu_cfg((CPURISCVState
*) cpu_env
);
8404 char *isa_string
= riscv_isa_string(cpu
);
8408 mmu
= (cpu_env
->xl
== MXL_RV32
) ? "sv32" : "sv48";
8413 for (i
= 0; i
< num_cpus
; i
++) {
8414 dprintf(fd
, "processor\t: %d\n", i
);
8415 dprintf(fd
, "hart\t\t: %d\n", i
);
8416 dprintf(fd
, "isa\t\t: %s\n", isa_string
);
8417 dprintf(fd
, "mmu\t\t: %s\n", mmu
);
8418 dprintf(fd
, "uarch\t\t: qemu\n\n");
8426 #if defined(TARGET_S390X)
8428 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8429 * show in /proc/cpuinfo.
8431 * Skip the following in order to match the missing support in op_ecag():
8432 * - show_cacheinfo().
8433 * - show_cpu_topology().
8436 * Use fixed values for certain fields:
8437 * - bogomips per cpu - from a qemu-system-s390x run.
8438 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8440 * Keep the code structure close to arch/s390/kernel/processor.c.
8443 static void show_facilities(int fd
)
8445 size_t sizeof_stfl_bytes
= 2048;
8446 g_autofree
uint8_t *stfl_bytes
= g_new0(uint8_t, sizeof_stfl_bytes
);
8449 dprintf(fd
, "facilities :");
8450 s390_get_feat_block(S390_FEAT_TYPE_STFL
, stfl_bytes
);
8451 for (bit
= 0; bit
< sizeof_stfl_bytes
* 8; bit
++) {
8452 if (test_be_bit(bit
, stfl_bytes
)) {
8453 dprintf(fd
, " %d", bit
);
8459 static int cpu_ident(unsigned long n
)
8461 return deposit32(0, CPU_ID_BITS
- CPU_PHYS_ADDR_BITS
, CPU_PHYS_ADDR_BITS
,
8465 static void show_cpu_summary(CPUArchState
*cpu_env
, int fd
)
8467 S390CPUModel
*model
= env_archcpu(cpu_env
)->model
;
8468 int num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8469 uint32_t elf_hwcap
= get_elf_hwcap();
8470 const char *hwcap_str
;
8473 dprintf(fd
, "vendor_id : IBM/S390\n"
8474 "# processors : %i\n"
8475 "bogomips per cpu: 13370.00\n",
8477 dprintf(fd
, "max thread id : 0\n");
8478 dprintf(fd
, "features\t: ");
8479 for (i
= 0; i
< sizeof(elf_hwcap
) * 8; i
++) {
8480 if (!(elf_hwcap
& (1 << i
))) {
8483 hwcap_str
= elf_hwcap_str(i
);
8485 dprintf(fd
, "%s ", hwcap_str
);
8489 show_facilities(fd
);
8490 for (i
= 0; i
< num_cpus
; i
++) {
8491 dprintf(fd
, "processor %d: "
8493 "identification = %06X, "
8495 i
, model
->cpu_ver
, cpu_ident(i
), model
->def
->type
);
8499 static void show_cpu_ids(CPUArchState
*cpu_env
, int fd
, unsigned long n
)
8501 S390CPUModel
*model
= env_archcpu(cpu_env
)->model
;
8503 dprintf(fd
, "version : %02X\n", model
->cpu_ver
);
8504 dprintf(fd
, "identification : %06X\n", cpu_ident(n
));
8505 dprintf(fd
, "machine : %04X\n", model
->def
->type
);
8508 static void show_cpuinfo(CPUArchState
*cpu_env
, int fd
, unsigned long n
)
8510 dprintf(fd
, "\ncpu number : %ld\n", n
);
8511 show_cpu_ids(cpu_env
, fd
, n
);
8514 static int open_cpuinfo(CPUArchState
*cpu_env
, int fd
)
8516 int num_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
8519 show_cpu_summary(cpu_env
, fd
);
8520 for (i
= 0; i
< num_cpus
; i
++) {
8521 show_cpuinfo(cpu_env
, fd
, i
);
8527 #if defined(TARGET_M68K)
/*
 * open_hardware (M68K): emulate /proc/hardware with a fixed model
 * string identifying the QEMU m68k machine.
 */
8528 static int open_hardware(CPUArchState
*cpu_env
, int fd
)
8530 dprintf(fd
, "Model:\t\tqemu-m68k\n");
8535 int do_guest_openat(CPUArchState
*cpu_env
, int dirfd
, const char *pathname
,
8536 int flags
, mode_t mode
, bool safe
)
8539 const char *filename
;
8540 int (*fill
)(CPUArchState
*cpu_env
, int fd
);
8541 int (*cmp
)(const char *s1
, const char *s2
);
8543 const struct fake_open
*fake_open
;
8544 static const struct fake_open fakes
[] = {
8545 { "maps", open_self_maps
, is_proc_myself
},
8546 { "smaps", open_self_smaps
, is_proc_myself
},
8547 { "stat", open_self_stat
, is_proc_myself
},
8548 { "auxv", open_self_auxv
, is_proc_myself
},
8549 { "cmdline", open_self_cmdline
, is_proc_myself
},
8550 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8551 { "/proc/net/route", open_net_route
, is_proc
},
8553 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8554 defined(TARGET_RISCV) || defined(TARGET_S390X)
8555 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8557 #if defined(TARGET_M68K)
8558 { "/proc/hardware", open_hardware
, is_proc
},
8560 { NULL
, NULL
, NULL
}
8563 if (is_proc_myself(pathname
, "exe")) {
8565 return safe_openat(dirfd
, exec_path
, flags
, mode
);
8567 return openat(dirfd
, exec_path
, flags
, mode
);
8571 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8572 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8577 if (fake_open
->filename
) {
8579 char filename
[PATH_MAX
];
8582 fd
= memfd_create("qemu-open", 0);
8584 if (errno
!= ENOSYS
) {
8587 /* create temporary file to map stat to */
8588 tmpdir
= getenv("TMPDIR");
8591 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8592 fd
= mkstemp(filename
);
8599 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8605 lseek(fd
, 0, SEEK_SET
);
8611 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8613 return openat(dirfd
, path(pathname
), flags
, mode
);
8617 ssize_t
do_guest_readlink(const char *pathname
, char *buf
, size_t bufsiz
)
8621 if (!pathname
|| !buf
) {
8627 /* Short circuit this for the magic exe check. */
8632 if (is_proc_myself((const char *)pathname
, "exe")) {
8634 * Don't worry about sign mismatch as earlier mapping
8635 * logic would have thrown a bad address error.
8637 ret
= MIN(strlen(exec_path
), bufsiz
);
8638 /* We cannot NUL terminate the string. */
8639 memcpy(buf
, exec_path
, ret
);
8641 ret
= readlink(path(pathname
), buf
, bufsiz
);
8647 static int do_execv(CPUArchState
*cpu_env
, int dirfd
,
8648 abi_long pathname
, abi_long guest_argp
,
8649 abi_long guest_envp
, int flags
, bool is_execveat
)
8652 char **argp
, **envp
;
8661 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8662 if (get_user_ual(addr
, gp
)) {
8663 return -TARGET_EFAULT
;
8671 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8672 if (get_user_ual(addr
, gp
)) {
8673 return -TARGET_EFAULT
;
8681 argp
= g_new0(char *, argc
+ 1);
8682 envp
= g_new0(char *, envc
+ 1);
8684 for (gp
= guest_argp
, q
= argp
; gp
; gp
+= sizeof(abi_ulong
), q
++) {
8685 if (get_user_ual(addr
, gp
)) {
8691 *q
= lock_user_string(addr
);
8698 for (gp
= guest_envp
, q
= envp
; gp
; gp
+= sizeof(abi_ulong
), q
++) {
8699 if (get_user_ual(addr
, gp
)) {
8705 *q
= lock_user_string(addr
);
8713 * Although execve() is not an interruptible syscall it is
8714 * a special case where we must use the safe_syscall wrapper:
8715 * if we allow a signal to happen before we make the host
8716 * syscall then we will 'lose' it, because at the point of
8717 * execve the process leaves QEMU's control. So we use the
8718 * safe syscall wrapper to ensure that we either take the
8719 * signal as a guest signal, or else it does not happen
8720 * before the execve completes and makes it the other
8721 * program's problem.
8723 p
= lock_user_string(pathname
);
8728 const char *exe
= p
;
8729 if (is_proc_myself(p
, "exe")) {
8733 ? safe_execveat(dirfd
, exe
, argp
, envp
, flags
)
8734 : safe_execve(exe
, argp
, envp
);
8735 ret
= get_errno(ret
);
8737 unlock_user(p
, pathname
, 0);
8742 ret
= -TARGET_EFAULT
;
8745 for (gp
= guest_argp
, q
= argp
; *q
; gp
+= sizeof(abi_ulong
), q
++) {
8746 if (get_user_ual(addr
, gp
) || !addr
) {
8749 unlock_user(*q
, addr
, 0);
8751 for (gp
= guest_envp
, q
= envp
; *q
; gp
+= sizeof(abi_ulong
), q
++) {
8752 if (get_user_ual(addr
, gp
) || !addr
) {
8755 unlock_user(*q
, addr
, 0);
8763 #define TIMER_MAGIC 0x0caf0000
8764 #define TIMER_MAGIC_MASK 0xffff0000
8766 /* Convert QEMU provided timer ID back to internal 16bit index format */
8767 static target_timer_t
get_timer_id(abi_long arg
)
8769 target_timer_t timerid
= arg
;
8771 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8772 return -TARGET_EINVAL
;
8777 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8778 return -TARGET_EINVAL
;
8784 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8786 abi_ulong target_addr
,
8789 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8790 unsigned host_bits
= sizeof(*host_mask
) * 8;
8791 abi_ulong
*target_mask
;
8794 assert(host_size
>= target_size
);
8796 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8798 return -TARGET_EFAULT
;
8800 memset(host_mask
, 0, host_size
);
8802 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8803 unsigned bit
= i
* target_bits
;
8806 __get_user(val
, &target_mask
[i
]);
8807 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8808 if (val
& (1UL << j
)) {
8809 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8814 unlock_user(target_mask
, target_addr
, 0);
8818 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8820 abi_ulong target_addr
,
8823 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8824 unsigned host_bits
= sizeof(*host_mask
) * 8;
8825 abi_ulong
*target_mask
;
8828 assert(host_size
>= target_size
);
8830 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8832 return -TARGET_EFAULT
;
8835 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8836 unsigned bit
= i
* target_bits
;
8839 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8840 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8844 __put_user(val
, &target_mask
[i
]);
8847 unlock_user(target_mask
, target_addr
, target_size
);
#ifdef TARGET_NR_getdents
/*
 * Implement the legacy getdents(2) syscall for the guest: read host
 * directory entries into a scratch buffer, then repack each record into
 * the guest's struct target_dirent layout (swapping fields and moving
 * the d_type byte to the record tail). If the repacked stream would
 * overflow the guest buffer, the host file position is rewound to the
 * first record not returned so nothing is lost. Returns the number of
 * bytes written to the guest buffer or a -TARGET_* error.
 *
 * NOTE(review): restored from extraction-mangled text; verify against
 * upstream syscall.c.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: trailing NUL plus the relocated d_type byte */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement getdents64(2) for the guest: read host dirent64 records and
 * repack them into the guest's struct target_dirent64 layout (field
 * swaps only; d_type keeps its position). On guest-buffer overflow the
 * host file position is rewound to the first record not returned.
 * Returns bytes written to the guest buffer or a -TARGET_* error.
 *
 * NOTE(review): restored from extraction-mangled text; verify against
 * upstream syscall.c.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
9004 #if defined(TARGET_NR_riscv_hwprobe)
9006 #define RISCV_HWPROBE_KEY_MVENDORID 0
9007 #define RISCV_HWPROBE_KEY_MARCHID 1
9008 #define RISCV_HWPROBE_KEY_MIMPID 2
9010 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9011 #define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9013 #define RISCV_HWPROBE_KEY_IMA_EXT_0 4
9014 #define RISCV_HWPROBE_IMA_FD (1 << 0)
9015 #define RISCV_HWPROBE_IMA_C (1 << 1)
9017 #define RISCV_HWPROBE_KEY_CPUPERF_0 5
9018 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
9019 #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
9020 #define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
9021 #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
9022 #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9023 #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
9025 struct riscv_hwprobe
{
9030 static void risc_hwprobe_fill_pairs(CPURISCVState
*env
,
9031 struct riscv_hwprobe
*pair
,
9034 const RISCVCPUConfig
*cfg
= riscv_cpu_cfg(env
);
9036 for (; pair_count
> 0; pair_count
--, pair
++) {
9039 __put_user(0, &pair
->value
);
9040 __get_user(key
, &pair
->key
);
9042 case RISCV_HWPROBE_KEY_MVENDORID
:
9043 __put_user(cfg
->mvendorid
, &pair
->value
);
9045 case RISCV_HWPROBE_KEY_MARCHID
:
9046 __put_user(cfg
->marchid
, &pair
->value
);
9048 case RISCV_HWPROBE_KEY_MIMPID
:
9049 __put_user(cfg
->mimpid
, &pair
->value
);
9051 case RISCV_HWPROBE_KEY_BASE_BEHAVIOR
:
9052 value
= riscv_has_ext(env
, RVI
) &&
9053 riscv_has_ext(env
, RVM
) &&
9054 riscv_has_ext(env
, RVA
) ?
9055 RISCV_HWPROBE_BASE_BEHAVIOR_IMA
: 0;
9056 __put_user(value
, &pair
->value
);
9058 case RISCV_HWPROBE_KEY_IMA_EXT_0
:
9059 value
= riscv_has_ext(env
, RVF
) &&
9060 riscv_has_ext(env
, RVD
) ?
9061 RISCV_HWPROBE_IMA_FD
: 0;
9062 value
|= riscv_has_ext(env
, RVC
) ?
9063 RISCV_HWPROBE_IMA_C
: pair
->value
;
9064 __put_user(value
, &pair
->value
);
9066 case RISCV_HWPROBE_KEY_CPUPERF_0
:
9067 __put_user(RISCV_HWPROBE_MISALIGNED_FAST
, &pair
->value
);
9070 __put_user(-1, &pair
->key
);
9076 static int cpu_set_valid(abi_long arg3
, abi_long arg4
)
9079 size_t host_mask_size
, target_mask_size
;
9080 unsigned long *host_mask
;
9083 * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9084 * arg3 contains the cpu count.
9086 tmp
= (8 * sizeof(abi_ulong
));
9087 target_mask_size
= ((arg3
+ tmp
- 1) / tmp
) * sizeof(abi_ulong
);
9088 host_mask_size
= (target_mask_size
+ (sizeof(*host_mask
) - 1)) &
9089 ~(sizeof(*host_mask
) - 1);
9091 host_mask
= alloca(host_mask_size
);
9093 ret
= target_to_host_cpu_mask(host_mask
, host_mask_size
,
9094 arg4
, target_mask_size
);
9099 for (i
= 0 ; i
< host_mask_size
/ sizeof(*host_mask
); i
++) {
9100 if (host_mask
[i
] != 0) {
9104 return -TARGET_EINVAL
;
9107 static abi_long
do_riscv_hwprobe(CPUArchState
*cpu_env
, abi_long arg1
,
9108 abi_long arg2
, abi_long arg3
,
9109 abi_long arg4
, abi_long arg5
)
9112 struct riscv_hwprobe
*host_pairs
;
9114 /* flags must be 0 */
9116 return -TARGET_EINVAL
;
9121 ret
= cpu_set_valid(arg3
, arg4
);
9125 } else if (arg4
!= 0) {
9126 return -TARGET_EINVAL
;
9134 host_pairs
= lock_user(VERIFY_WRITE
, arg1
,
9135 sizeof(*host_pairs
) * (size_t)arg2
, 0);
9136 if (host_pairs
== NULL
) {
9137 return -TARGET_EFAULT
;
9139 risc_hwprobe_fill_pairs(cpu_env
, host_pairs
, arg2
);
9140 unlock_user(host_pairs
, arg1
, sizeof(*host_pairs
) * (size_t)arg2
);
9143 #endif /* TARGET_NR_riscv_hwprobe */
9145 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9146 _syscall2(int, pivot_root
, const char *, new_root
, const char *, put_old
)
9149 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9150 #define __NR_sys_open_tree __NR_open_tree
9151 _syscall3(int, sys_open_tree
, int, __dfd
, const char *, __filename
,
9152 unsigned int, __flags
)
9155 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9156 #define __NR_sys_move_mount __NR_move_mount
9157 _syscall5(int, sys_move_mount
, int, __from_dfd
, const char *, __from_pathname
,
9158 int, __to_dfd
, const char *, __to_pathname
, unsigned int, flag
)
9161 /* This is an internal helper for do_syscall so that it is easier
9162 * to have a single return point, so that actions, such as logging
9163 * of syscall results, can be performed.
9164 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9166 static abi_long
do_syscall1(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
9167 abi_long arg2
, abi_long arg3
, abi_long arg4
,
9168 abi_long arg5
, abi_long arg6
, abi_long arg7
,
9171 CPUState
*cpu
= env_cpu(cpu_env
);
9173 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9174 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9175 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9176 || defined(TARGET_NR_statx)
9179 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9180 || defined(TARGET_NR_fstatfs)
9186 case TARGET_NR_exit
:
9187 /* In old applications this may be used to implement _exit(2).
9188 However in threaded applications it is used for thread termination,
9189 and _exit_group is used for application termination.
9190 Do thread termination if we have more then one thread. */
9192 if (block_signals()) {
9193 return -QEMU_ERESTARTSYS
;
9196 pthread_mutex_lock(&clone_lock
);
9198 if (CPU_NEXT(first_cpu
)) {
9199 TaskState
*ts
= cpu
->opaque
;
9201 if (ts
->child_tidptr
) {
9202 put_user_u32(0, ts
->child_tidptr
);
9203 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
9204 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
9207 object_unparent(OBJECT(cpu
));
9208 object_unref(OBJECT(cpu
));
9210 * At this point the CPU should be unrealized and removed
9211 * from cpu lists. We can clean-up the rest of the thread
9212 * data without the lock held.
9215 pthread_mutex_unlock(&clone_lock
);
9219 rcu_unregister_thread();
9223 pthread_mutex_unlock(&clone_lock
);
9224 preexit_cleanup(cpu_env
, arg1
);
9226 return 0; /* avoid warning */
9227 case TARGET_NR_read
:
9228 if (arg2
== 0 && arg3
== 0) {
9229 return get_errno(safe_read(arg1
, 0, 0));
9231 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9232 return -TARGET_EFAULT
;
9233 ret
= get_errno(safe_read(arg1
, p
, arg3
));
9235 fd_trans_host_to_target_data(arg1
)) {
9236 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
9238 unlock_user(p
, arg2
, ret
);
9241 case TARGET_NR_write
:
9242 if (arg2
== 0 && arg3
== 0) {
9243 return get_errno(safe_write(arg1
, 0, 0));
9245 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
9246 return -TARGET_EFAULT
;
9247 if (fd_trans_target_to_host_data(arg1
)) {
9248 void *copy
= g_malloc(arg3
);
9249 memcpy(copy
, p
, arg3
);
9250 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
9252 ret
= get_errno(safe_write(arg1
, copy
, ret
));
9256 ret
= get_errno(safe_write(arg1
, p
, arg3
));
9258 unlock_user(p
, arg2
, 0);
9261 #ifdef TARGET_NR_open
9262 case TARGET_NR_open
:
9263 if (!(p
= lock_user_string(arg1
)))
9264 return -TARGET_EFAULT
;
9265 ret
= get_errno(do_guest_openat(cpu_env
, AT_FDCWD
, p
,
9266 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
9268 fd_trans_unregister(ret
);
9269 unlock_user(p
, arg1
, 0);
9272 case TARGET_NR_openat
:
9273 if (!(p
= lock_user_string(arg2
)))
9274 return -TARGET_EFAULT
;
9275 ret
= get_errno(do_guest_openat(cpu_env
, arg1
, p
,
9276 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
9278 fd_trans_unregister(ret
);
9279 unlock_user(p
, arg2
, 0);
9281 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9282 case TARGET_NR_name_to_handle_at
:
9283 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
9286 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9287 case TARGET_NR_open_by_handle_at
:
9288 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
9289 fd_trans_unregister(ret
);
9292 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9293 case TARGET_NR_pidfd_open
:
9294 return get_errno(pidfd_open(arg1
, arg2
));
9296 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9297 case TARGET_NR_pidfd_send_signal
:
9299 siginfo_t uinfo
, *puinfo
;
9302 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9304 return -TARGET_EFAULT
;
9306 target_to_host_siginfo(&uinfo
, p
);
9307 unlock_user(p
, arg3
, 0);
9312 ret
= get_errno(pidfd_send_signal(arg1
, target_to_host_signal(arg2
),
9317 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9318 case TARGET_NR_pidfd_getfd
:
9319 return get_errno(pidfd_getfd(arg1
, arg2
, arg3
));
9321 case TARGET_NR_close
:
9322 fd_trans_unregister(arg1
);
9323 return get_errno(close(arg1
));
9324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9325 case TARGET_NR_close_range
:
9326 ret
= get_errno(sys_close_range(arg1
, arg2
, arg3
));
9327 if (ret
== 0 && !(arg3
& CLOSE_RANGE_CLOEXEC
)) {
9329 maxfd
= MIN(arg2
, target_fd_max
);
9330 for (fd
= arg1
; fd
< maxfd
; fd
++) {
9331 fd_trans_unregister(fd
);
9338 return do_brk(arg1
);
9339 #ifdef TARGET_NR_fork
9340 case TARGET_NR_fork
:
9341 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
9343 #ifdef TARGET_NR_waitpid
9344 case TARGET_NR_waitpid
:
9347 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
9348 if (!is_error(ret
) && arg2
&& ret
9349 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
9350 return -TARGET_EFAULT
;
9354 #ifdef TARGET_NR_waitid
9355 case TARGET_NR_waitid
:
9359 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
9360 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
9361 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
9362 return -TARGET_EFAULT
;
9363 host_to_target_siginfo(p
, &info
);
9364 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
9369 #ifdef TARGET_NR_creat /* not on alpha */
9370 case TARGET_NR_creat
:
9371 if (!(p
= lock_user_string(arg1
)))
9372 return -TARGET_EFAULT
;
9373 ret
= get_errno(creat(p
, arg2
));
9374 fd_trans_unregister(ret
);
9375 unlock_user(p
, arg1
, 0);
9378 #ifdef TARGET_NR_link
9379 case TARGET_NR_link
:
9382 p
= lock_user_string(arg1
);
9383 p2
= lock_user_string(arg2
);
9385 ret
= -TARGET_EFAULT
;
9387 ret
= get_errno(link(p
, p2
));
9388 unlock_user(p2
, arg2
, 0);
9389 unlock_user(p
, arg1
, 0);
9393 #if defined(TARGET_NR_linkat)
9394 case TARGET_NR_linkat
:
9398 return -TARGET_EFAULT
;
9399 p
= lock_user_string(arg2
);
9400 p2
= lock_user_string(arg4
);
9402 ret
= -TARGET_EFAULT
;
9404 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
9405 unlock_user(p
, arg2
, 0);
9406 unlock_user(p2
, arg4
, 0);
9410 #ifdef TARGET_NR_unlink
9411 case TARGET_NR_unlink
:
9412 if (!(p
= lock_user_string(arg1
)))
9413 return -TARGET_EFAULT
;
9414 ret
= get_errno(unlink(p
));
9415 unlock_user(p
, arg1
, 0);
9418 #if defined(TARGET_NR_unlinkat)
9419 case TARGET_NR_unlinkat
:
9420 if (!(p
= lock_user_string(arg2
)))
9421 return -TARGET_EFAULT
;
9422 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
9423 unlock_user(p
, arg2
, 0);
9426 case TARGET_NR_execveat
:
9427 return do_execv(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, true);
9428 case TARGET_NR_execve
:
9429 return do_execv(cpu_env
, AT_FDCWD
, arg1
, arg2
, arg3
, 0, false);
9430 case TARGET_NR_chdir
:
9431 if (!(p
= lock_user_string(arg1
)))
9432 return -TARGET_EFAULT
;
9433 ret
= get_errno(chdir(p
));
9434 unlock_user(p
, arg1
, 0);
9436 #ifdef TARGET_NR_time
9437 case TARGET_NR_time
:
9440 ret
= get_errno(time(&host_time
));
9443 && put_user_sal(host_time
, arg1
))
9444 return -TARGET_EFAULT
;
9448 #ifdef TARGET_NR_mknod
9449 case TARGET_NR_mknod
:
9450 if (!(p
= lock_user_string(arg1
)))
9451 return -TARGET_EFAULT
;
9452 ret
= get_errno(mknod(p
, arg2
, arg3
));
9453 unlock_user(p
, arg1
, 0);
9456 #if defined(TARGET_NR_mknodat)
9457 case TARGET_NR_mknodat
:
9458 if (!(p
= lock_user_string(arg2
)))
9459 return -TARGET_EFAULT
;
9460 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
9461 unlock_user(p
, arg2
, 0);
9464 #ifdef TARGET_NR_chmod
9465 case TARGET_NR_chmod
:
9466 if (!(p
= lock_user_string(arg1
)))
9467 return -TARGET_EFAULT
;
9468 ret
= get_errno(chmod(p
, arg2
));
9469 unlock_user(p
, arg1
, 0);
9472 #ifdef TARGET_NR_lseek
9473 case TARGET_NR_lseek
:
9474 return get_errno(lseek(arg1
, arg2
, arg3
));
9476 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9477 /* Alpha specific */
9478 case TARGET_NR_getxpid
:
9479 cpu_env
->ir
[IR_A4
] = getppid();
9480 return get_errno(getpid());
9482 #ifdef TARGET_NR_getpid
9483 case TARGET_NR_getpid
:
9484 return get_errno(getpid());
9486 case TARGET_NR_mount
:
9488 /* need to look at the data field */
9492 p
= lock_user_string(arg1
);
9494 return -TARGET_EFAULT
;
9500 p2
= lock_user_string(arg2
);
9503 unlock_user(p
, arg1
, 0);
9505 return -TARGET_EFAULT
;
9509 p3
= lock_user_string(arg3
);
9512 unlock_user(p
, arg1
, 0);
9514 unlock_user(p2
, arg2
, 0);
9515 return -TARGET_EFAULT
;
9521 /* FIXME - arg5 should be locked, but it isn't clear how to
9522 * do that since it's not guaranteed to be a NULL-terminated
9526 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
9528 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
9530 ret
= get_errno(ret
);
9533 unlock_user(p
, arg1
, 0);
9535 unlock_user(p2
, arg2
, 0);
9537 unlock_user(p3
, arg3
, 0);
9541 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9542 #if defined(TARGET_NR_umount)
9543 case TARGET_NR_umount
:
9545 #if defined(TARGET_NR_oldumount)
9546 case TARGET_NR_oldumount
:
9548 if (!(p
= lock_user_string(arg1
)))
9549 return -TARGET_EFAULT
;
9550 ret
= get_errno(umount(p
));
9551 unlock_user(p
, arg1
, 0);
9554 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9555 case TARGET_NR_move_mount
:
9559 if (!arg2
|| !arg4
) {
9560 return -TARGET_EFAULT
;
9563 p2
= lock_user_string(arg2
);
9565 return -TARGET_EFAULT
;
9568 p4
= lock_user_string(arg4
);
9570 unlock_user(p2
, arg2
, 0);
9571 return -TARGET_EFAULT
;
9573 ret
= get_errno(sys_move_mount(arg1
, p2
, arg3
, p4
, arg5
));
9575 unlock_user(p2
, arg2
, 0);
9576 unlock_user(p4
, arg4
, 0);
9581 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9582 case TARGET_NR_open_tree
:
9588 return -TARGET_EFAULT
;
9591 p2
= lock_user_string(arg2
);
9593 return -TARGET_EFAULT
;
9596 host_flags
= arg3
& ~TARGET_O_CLOEXEC
;
9597 if (arg3
& TARGET_O_CLOEXEC
) {
9598 host_flags
|= O_CLOEXEC
;
9601 ret
= get_errno(sys_open_tree(arg1
, p2
, host_flags
));
9603 unlock_user(p2
, arg2
, 0);
9608 #ifdef TARGET_NR_stime /* not on alpha */
9609 case TARGET_NR_stime
:
9613 if (get_user_sal(ts
.tv_sec
, arg1
)) {
9614 return -TARGET_EFAULT
;
9616 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
9619 #ifdef TARGET_NR_alarm /* not on alpha */
9620 case TARGET_NR_alarm
:
9623 #ifdef TARGET_NR_pause /* not on alpha */
9624 case TARGET_NR_pause
:
9625 if (!block_signals()) {
9626 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
9628 return -TARGET_EINTR
;
9630 #ifdef TARGET_NR_utime
9631 case TARGET_NR_utime
:
9633 struct utimbuf tbuf
, *host_tbuf
;
9634 struct target_utimbuf
*target_tbuf
;
9636 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9637 return -TARGET_EFAULT
;
9638 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9639 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9640 unlock_user_struct(target_tbuf
, arg2
, 0);
9645 if (!(p
= lock_user_string(arg1
)))
9646 return -TARGET_EFAULT
;
9647 ret
= get_errno(utime(p
, host_tbuf
));
9648 unlock_user(p
, arg1
, 0);
9652 #ifdef TARGET_NR_utimes
9653 case TARGET_NR_utimes
:
9655 struct timeval
*tvp
, tv
[2];
9657 if (copy_from_user_timeval(&tv
[0], arg2
)
9658 || copy_from_user_timeval(&tv
[1],
9659 arg2
+ sizeof(struct target_timeval
)))
9660 return -TARGET_EFAULT
;
9665 if (!(p
= lock_user_string(arg1
)))
9666 return -TARGET_EFAULT
;
9667 ret
= get_errno(utimes(p
, tvp
));
9668 unlock_user(p
, arg1
, 0);
9672 #if defined(TARGET_NR_futimesat)
9673 case TARGET_NR_futimesat
:
9675 struct timeval
*tvp
, tv
[2];
9677 if (copy_from_user_timeval(&tv
[0], arg3
)
9678 || copy_from_user_timeval(&tv
[1],
9679 arg3
+ sizeof(struct target_timeval
)))
9680 return -TARGET_EFAULT
;
9685 if (!(p
= lock_user_string(arg2
))) {
9686 return -TARGET_EFAULT
;
9688 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9689 unlock_user(p
, arg2
, 0);
9693 #ifdef TARGET_NR_access
9694 case TARGET_NR_access
:
9695 if (!(p
= lock_user_string(arg1
))) {
9696 return -TARGET_EFAULT
;
9698 ret
= get_errno(access(path(p
), arg2
));
9699 unlock_user(p
, arg1
, 0);
9702 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9703 case TARGET_NR_faccessat
:
9704 if (!(p
= lock_user_string(arg2
))) {
9705 return -TARGET_EFAULT
;
9707 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9708 unlock_user(p
, arg2
, 0);
9711 #if defined(TARGET_NR_faccessat2)
9712 case TARGET_NR_faccessat2
:
9713 if (!(p
= lock_user_string(arg2
))) {
9714 return -TARGET_EFAULT
;
9716 ret
= get_errno(faccessat(arg1
, p
, arg3
, arg4
));
9717 unlock_user(p
, arg2
, 0);
9720 #ifdef TARGET_NR_nice /* not on alpha */
9721 case TARGET_NR_nice
:
9722 return get_errno(nice(arg1
));
9724 case TARGET_NR_sync
:
9727 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9728 case TARGET_NR_syncfs
:
9729 return get_errno(syncfs(arg1
));
9731 case TARGET_NR_kill
:
9732 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9733 #ifdef TARGET_NR_rename
9734 case TARGET_NR_rename
:
9737 p
= lock_user_string(arg1
);
9738 p2
= lock_user_string(arg2
);
9740 ret
= -TARGET_EFAULT
;
9742 ret
= get_errno(rename(p
, p2
));
9743 unlock_user(p2
, arg2
, 0);
9744 unlock_user(p
, arg1
, 0);
9748 #if defined(TARGET_NR_renameat)
9749 case TARGET_NR_renameat
:
9752 p
= lock_user_string(arg2
);
9753 p2
= lock_user_string(arg4
);
9755 ret
= -TARGET_EFAULT
;
9757 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9758 unlock_user(p2
, arg4
, 0);
9759 unlock_user(p
, arg2
, 0);
9763 #if defined(TARGET_NR_renameat2)
9764 case TARGET_NR_renameat2
:
9767 p
= lock_user_string(arg2
);
9768 p2
= lock_user_string(arg4
);
9770 ret
= -TARGET_EFAULT
;
9772 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9774 unlock_user(p2
, arg4
, 0);
9775 unlock_user(p
, arg2
, 0);
9779 #ifdef TARGET_NR_mkdir
9780 case TARGET_NR_mkdir
:
9781 if (!(p
= lock_user_string(arg1
)))
9782 return -TARGET_EFAULT
;
9783 ret
= get_errno(mkdir(p
, arg2
));
9784 unlock_user(p
, arg1
, 0);
9787 #if defined(TARGET_NR_mkdirat)
9788 case TARGET_NR_mkdirat
:
9789 if (!(p
= lock_user_string(arg2
)))
9790 return -TARGET_EFAULT
;
9791 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9792 unlock_user(p
, arg2
, 0);
9795 #ifdef TARGET_NR_rmdir
9796 case TARGET_NR_rmdir
:
9797 if (!(p
= lock_user_string(arg1
)))
9798 return -TARGET_EFAULT
;
9799 ret
= get_errno(rmdir(p
));
9800 unlock_user(p
, arg1
, 0);
9804 ret
= get_errno(dup(arg1
));
9806 fd_trans_dup(arg1
, ret
);
9809 #ifdef TARGET_NR_pipe
9810 case TARGET_NR_pipe
:
9811 return do_pipe(cpu_env
, arg1
, 0, 0);
9813 #ifdef TARGET_NR_pipe2
9814 case TARGET_NR_pipe2
:
9815 return do_pipe(cpu_env
, arg1
,
9816 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9818 case TARGET_NR_times
:
9820 struct target_tms
*tmsp
;
9822 ret
= get_errno(times(&tms
));
9824 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9826 return -TARGET_EFAULT
;
9827 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9828 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9829 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9830 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9833 ret
= host_to_target_clock_t(ret
);
9836 case TARGET_NR_acct
:
9838 ret
= get_errno(acct(NULL
));
9840 if (!(p
= lock_user_string(arg1
))) {
9841 return -TARGET_EFAULT
;
9843 ret
= get_errno(acct(path(p
)));
9844 unlock_user(p
, arg1
, 0);
9847 #ifdef TARGET_NR_umount2
9848 case TARGET_NR_umount2
:
9849 if (!(p
= lock_user_string(arg1
)))
9850 return -TARGET_EFAULT
;
9851 ret
= get_errno(umount2(p
, arg2
));
9852 unlock_user(p
, arg1
, 0);
9855 case TARGET_NR_ioctl
:
9856 return do_ioctl(arg1
, arg2
, arg3
);
9857 #ifdef TARGET_NR_fcntl
9858 case TARGET_NR_fcntl
:
9859 return do_fcntl(arg1
, arg2
, arg3
);
9861 case TARGET_NR_setpgid
:
9862 return get_errno(setpgid(arg1
, arg2
));
9863 case TARGET_NR_umask
:
9864 return get_errno(umask(arg1
));
9865 case TARGET_NR_chroot
:
9866 if (!(p
= lock_user_string(arg1
)))
9867 return -TARGET_EFAULT
;
9868 ret
= get_errno(chroot(p
));
9869 unlock_user(p
, arg1
, 0);
9871 #ifdef TARGET_NR_dup2
9872 case TARGET_NR_dup2
:
9873 ret
= get_errno(dup2(arg1
, arg2
));
9875 fd_trans_dup(arg1
, arg2
);
9879 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9880 case TARGET_NR_dup3
:
9884 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9887 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9888 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9890 fd_trans_dup(arg1
, arg2
);
9895 #ifdef TARGET_NR_getppid /* not on alpha */
9896 case TARGET_NR_getppid
:
9897 return get_errno(getppid());
9899 #ifdef TARGET_NR_getpgrp
9900 case TARGET_NR_getpgrp
:
9901 return get_errno(getpgrp());
9903 case TARGET_NR_setsid
:
9904 return get_errno(setsid());
9905 #ifdef TARGET_NR_sigaction
9906 case TARGET_NR_sigaction
:
9908 #if defined(TARGET_MIPS)
9909 struct target_sigaction act
, oact
, *pact
, *old_act
;
9912 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9913 return -TARGET_EFAULT
;
9914 act
._sa_handler
= old_act
->_sa_handler
;
9915 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9916 act
.sa_flags
= old_act
->sa_flags
;
9917 unlock_user_struct(old_act
, arg2
, 0);
9923 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9925 if (!is_error(ret
) && arg3
) {
9926 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9927 return -TARGET_EFAULT
;
9928 old_act
->_sa_handler
= oact
._sa_handler
;
9929 old_act
->sa_flags
= oact
.sa_flags
;
9930 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9931 old_act
->sa_mask
.sig
[1] = 0;
9932 old_act
->sa_mask
.sig
[2] = 0;
9933 old_act
->sa_mask
.sig
[3] = 0;
9934 unlock_user_struct(old_act
, arg3
, 1);
9937 struct target_old_sigaction
*old_act
;
9938 struct target_sigaction act
, oact
, *pact
;
9940 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9941 return -TARGET_EFAULT
;
9942 act
._sa_handler
= old_act
->_sa_handler
;
9943 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9944 act
.sa_flags
= old_act
->sa_flags
;
9945 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9946 act
.sa_restorer
= old_act
->sa_restorer
;
9948 unlock_user_struct(old_act
, arg2
, 0);
9953 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9954 if (!is_error(ret
) && arg3
) {
9955 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9956 return -TARGET_EFAULT
;
9957 old_act
->_sa_handler
= oact
._sa_handler
;
9958 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9959 old_act
->sa_flags
= oact
.sa_flags
;
9960 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9961 old_act
->sa_restorer
= oact
.sa_restorer
;
9963 unlock_user_struct(old_act
, arg3
, 1);
9969 case TARGET_NR_rt_sigaction
:
9972 * For Alpha and SPARC this is a 5 argument syscall, with
9973 * a 'restorer' parameter which must be copied into the
9974 * sa_restorer field of the sigaction struct.
9975 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9976 * and arg5 is the sigsetsize.
9978 #if defined(TARGET_ALPHA)
9979 target_ulong sigsetsize
= arg4
;
9980 target_ulong restorer
= arg5
;
9981 #elif defined(TARGET_SPARC)
9982 target_ulong restorer
= arg4
;
9983 target_ulong sigsetsize
= arg5
;
9985 target_ulong sigsetsize
= arg4
;
9986 target_ulong restorer
= 0;
9988 struct target_sigaction
*act
= NULL
;
9989 struct target_sigaction
*oact
= NULL
;
9991 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9992 return -TARGET_EINVAL
;
9994 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9995 return -TARGET_EFAULT
;
9997 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9998 ret
= -TARGET_EFAULT
;
10000 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
10002 unlock_user_struct(oact
, arg3
, 1);
10006 unlock_user_struct(act
, arg2
, 0);
10010 #ifdef TARGET_NR_sgetmask /* not on alpha */
10011 case TARGET_NR_sgetmask
:
10014 abi_ulong target_set
;
10015 ret
= do_sigprocmask(0, NULL
, &cur_set
);
10017 host_to_target_old_sigset(&target_set
, &cur_set
);
10023 #ifdef TARGET_NR_ssetmask /* not on alpha */
10024 case TARGET_NR_ssetmask
:
10026 sigset_t set
, oset
;
10027 abi_ulong target_set
= arg1
;
10028 target_to_host_old_sigset(&set
, &target_set
);
10029 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
10031 host_to_target_old_sigset(&target_set
, &oset
);
10037 #ifdef TARGET_NR_sigprocmask
10038 case TARGET_NR_sigprocmask
:
10040 #if defined(TARGET_ALPHA)
10041 sigset_t set
, oldset
;
10046 case TARGET_SIG_BLOCK
:
10049 case TARGET_SIG_UNBLOCK
:
10052 case TARGET_SIG_SETMASK
:
10056 return -TARGET_EINVAL
;
10059 target_to_host_old_sigset(&set
, &mask
);
10061 ret
= do_sigprocmask(how
, &set
, &oldset
);
10062 if (!is_error(ret
)) {
10063 host_to_target_old_sigset(&mask
, &oldset
);
10065 cpu_env
->ir
[IR_V0
] = 0; /* force no error */
10068 sigset_t set
, oldset
, *set_ptr
;
10072 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
10074 return -TARGET_EFAULT
;
10076 target_to_host_old_sigset(&set
, p
);
10077 unlock_user(p
, arg2
, 0);
10080 case TARGET_SIG_BLOCK
:
10083 case TARGET_SIG_UNBLOCK
:
10086 case TARGET_SIG_SETMASK
:
10090 return -TARGET_EINVAL
;
10096 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
10097 if (!is_error(ret
) && arg3
) {
10098 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
10099 return -TARGET_EFAULT
;
10100 host_to_target_old_sigset(p
, &oldset
);
10101 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
10107 case TARGET_NR_rt_sigprocmask
:
10110 sigset_t set
, oldset
, *set_ptr
;
10112 if (arg4
!= sizeof(target_sigset_t
)) {
10113 return -TARGET_EINVAL
;
10117 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
10119 return -TARGET_EFAULT
;
10121 target_to_host_sigset(&set
, p
);
10122 unlock_user(p
, arg2
, 0);
10125 case TARGET_SIG_BLOCK
:
10128 case TARGET_SIG_UNBLOCK
:
10131 case TARGET_SIG_SETMASK
:
10135 return -TARGET_EINVAL
;
10141 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
10142 if (!is_error(ret
) && arg3
) {
10143 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
10144 return -TARGET_EFAULT
;
10145 host_to_target_sigset(p
, &oldset
);
10146 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
10150 #ifdef TARGET_NR_sigpending
10151 case TARGET_NR_sigpending
:
10154 ret
= get_errno(sigpending(&set
));
10155 if (!is_error(ret
)) {
10156 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
10157 return -TARGET_EFAULT
;
10158 host_to_target_old_sigset(p
, &set
);
10159 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
10164 case TARGET_NR_rt_sigpending
:
10168 /* Yes, this check is >, not != like most. We follow the kernel's
10169 * logic and it does it like this because it implements
10170 * NR_sigpending through the same code path, and in that case
10171 * the old_sigset_t is smaller in size.
10173 if (arg2
> sizeof(target_sigset_t
)) {
10174 return -TARGET_EINVAL
;
10177 ret
= get_errno(sigpending(&set
));
10178 if (!is_error(ret
)) {
10179 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
10180 return -TARGET_EFAULT
;
10181 host_to_target_sigset(p
, &set
);
10182 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
10186 #ifdef TARGET_NR_sigsuspend
10187 case TARGET_NR_sigsuspend
:
10191 #if defined(TARGET_ALPHA)
10192 TaskState
*ts
= cpu
->opaque
;
10193 /* target_to_host_old_sigset will bswap back */
10194 abi_ulong mask
= tswapal(arg1
);
10195 set
= &ts
->sigsuspend_mask
;
10196 target_to_host_old_sigset(set
, &mask
);
10198 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
10203 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
10204 finish_sigsuspend_mask(ret
);
10208 case TARGET_NR_rt_sigsuspend
:
10212 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
10216 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
10217 finish_sigsuspend_mask(ret
);
10220 #ifdef TARGET_NR_rt_sigtimedwait
10221 case TARGET_NR_rt_sigtimedwait
:
10224 struct timespec uts
, *puts
;
10227 if (arg4
!= sizeof(target_sigset_t
)) {
10228 return -TARGET_EINVAL
;
10231 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
10232 return -TARGET_EFAULT
;
10233 target_to_host_sigset(&set
, p
);
10234 unlock_user(p
, arg1
, 0);
10237 if (target_to_host_timespec(puts
, arg3
)) {
10238 return -TARGET_EFAULT
;
10243 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
10245 if (!is_error(ret
)) {
10247 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
10250 return -TARGET_EFAULT
;
10252 host_to_target_siginfo(p
, &uinfo
);
10253 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
10255 ret
= host_to_target_signal(ret
);
10260 #ifdef TARGET_NR_rt_sigtimedwait_time64
10261 case TARGET_NR_rt_sigtimedwait_time64
:
10264 struct timespec uts
, *puts
;
10267 if (arg4
!= sizeof(target_sigset_t
)) {
10268 return -TARGET_EINVAL
;
10271 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
10273 return -TARGET_EFAULT
;
10275 target_to_host_sigset(&set
, p
);
10276 unlock_user(p
, arg1
, 0);
10279 if (target_to_host_timespec64(puts
, arg3
)) {
10280 return -TARGET_EFAULT
;
10285 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
10287 if (!is_error(ret
)) {
10289 p
= lock_user(VERIFY_WRITE
, arg2
,
10290 sizeof(target_siginfo_t
), 0);
10292 return -TARGET_EFAULT
;
10294 host_to_target_siginfo(p
, &uinfo
);
10295 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
10297 ret
= host_to_target_signal(ret
);
10302 case TARGET_NR_rt_sigqueueinfo
:
10306 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
10308 return -TARGET_EFAULT
;
10310 target_to_host_siginfo(&uinfo
, p
);
10311 unlock_user(p
, arg3
, 0);
10312 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, target_to_host_signal(arg2
), &uinfo
));
10315 case TARGET_NR_rt_tgsigqueueinfo
:
10319 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
10321 return -TARGET_EFAULT
;
10323 target_to_host_siginfo(&uinfo
, p
);
10324 unlock_user(p
, arg4
, 0);
10325 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, target_to_host_signal(arg3
), &uinfo
));
10328 #ifdef TARGET_NR_sigreturn
10329 case TARGET_NR_sigreturn
:
10330 if (block_signals()) {
10331 return -QEMU_ERESTARTSYS
;
10333 return do_sigreturn(cpu_env
);
10335 case TARGET_NR_rt_sigreturn
:
10336 if (block_signals()) {
10337 return -QEMU_ERESTARTSYS
;
10339 return do_rt_sigreturn(cpu_env
);
10340 case TARGET_NR_sethostname
:
10341 if (!(p
= lock_user_string(arg1
)))
10342 return -TARGET_EFAULT
;
10343 ret
= get_errno(sethostname(p
, arg2
));
10344 unlock_user(p
, arg1
, 0);
10346 #ifdef TARGET_NR_setrlimit
10347 case TARGET_NR_setrlimit
:
10349 int resource
= target_to_host_resource(arg1
);
10350 struct target_rlimit
*target_rlim
;
10351 struct rlimit rlim
;
10352 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
10353 return -TARGET_EFAULT
;
10354 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
10355 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
10356 unlock_user_struct(target_rlim
, arg2
, 0);
10358 * If we just passed through resource limit settings for memory then
10359 * they would also apply to QEMU's own allocations, and QEMU will
10360 * crash or hang or die if its allocations fail. Ideally we would
10361 * track the guest allocations in QEMU and apply the limits ourselves.
10362 * For now, just tell the guest the call succeeded but don't actually
10365 if (resource
!= RLIMIT_AS
&&
10366 resource
!= RLIMIT_DATA
&&
10367 resource
!= RLIMIT_STACK
) {
10368 return get_errno(setrlimit(resource
, &rlim
));
10374 #ifdef TARGET_NR_getrlimit
10375 case TARGET_NR_getrlimit
:
10377 int resource
= target_to_host_resource(arg1
);
10378 struct target_rlimit
*target_rlim
;
10379 struct rlimit rlim
;
10381 ret
= get_errno(getrlimit(resource
, &rlim
));
10382 if (!is_error(ret
)) {
10383 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10384 return -TARGET_EFAULT
;
10385 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10386 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10387 unlock_user_struct(target_rlim
, arg2
, 1);
10392 case TARGET_NR_getrusage
:
10394 struct rusage rusage
;
10395 ret
= get_errno(getrusage(arg1
, &rusage
));
10396 if (!is_error(ret
)) {
10397 ret
= host_to_target_rusage(arg2
, &rusage
);
10401 #if defined(TARGET_NR_gettimeofday)
10402 case TARGET_NR_gettimeofday
:
10405 struct timezone tz
;
10407 ret
= get_errno(gettimeofday(&tv
, &tz
));
10408 if (!is_error(ret
)) {
10409 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
10410 return -TARGET_EFAULT
;
10412 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
10413 return -TARGET_EFAULT
;
10419 #if defined(TARGET_NR_settimeofday)
10420 case TARGET_NR_settimeofday
:
10422 struct timeval tv
, *ptv
= NULL
;
10423 struct timezone tz
, *ptz
= NULL
;
10426 if (copy_from_user_timeval(&tv
, arg1
)) {
10427 return -TARGET_EFAULT
;
10433 if (copy_from_user_timezone(&tz
, arg2
)) {
10434 return -TARGET_EFAULT
;
10439 return get_errno(settimeofday(ptv
, ptz
));
10442 #if defined(TARGET_NR_select)
10443 case TARGET_NR_select
:
10444 #if defined(TARGET_WANT_NI_OLD_SELECT)
10445 /* some architectures used to have old_select here
10446 * but now ENOSYS it.
10448 ret
= -TARGET_ENOSYS
;
10449 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10450 ret
= do_old_select(arg1
);
10452 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10456 #ifdef TARGET_NR_pselect6
10457 case TARGET_NR_pselect6
:
10458 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
10460 #ifdef TARGET_NR_pselect6_time64
10461 case TARGET_NR_pselect6_time64
:
10462 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
10464 #ifdef TARGET_NR_symlink
10465 case TARGET_NR_symlink
:
10468 p
= lock_user_string(arg1
);
10469 p2
= lock_user_string(arg2
);
10471 ret
= -TARGET_EFAULT
;
10473 ret
= get_errno(symlink(p
, p2
));
10474 unlock_user(p2
, arg2
, 0);
10475 unlock_user(p
, arg1
, 0);
10479 #if defined(TARGET_NR_symlinkat)
10480 case TARGET_NR_symlinkat
:
10483 p
= lock_user_string(arg1
);
10484 p2
= lock_user_string(arg3
);
10486 ret
= -TARGET_EFAULT
;
10488 ret
= get_errno(symlinkat(p
, arg2
, p2
));
10489 unlock_user(p2
, arg3
, 0);
10490 unlock_user(p
, arg1
, 0);
10494 #ifdef TARGET_NR_readlink
10495 case TARGET_NR_readlink
:
10498 p
= lock_user_string(arg1
);
10499 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10500 ret
= get_errno(do_guest_readlink(p
, p2
, arg3
));
10501 unlock_user(p2
, arg2
, ret
);
10502 unlock_user(p
, arg1
, 0);
10506 #if defined(TARGET_NR_readlinkat)
10507 case TARGET_NR_readlinkat
:
10510 p
= lock_user_string(arg2
);
10511 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10513 ret
= -TARGET_EFAULT
;
10514 } else if (!arg4
) {
10515 /* Short circuit this for the magic exe check. */
10516 ret
= -TARGET_EINVAL
;
10517 } else if (is_proc_myself((const char *)p
, "exe")) {
10519 * Don't worry about sign mismatch as earlier mapping
10520 * logic would have thrown a bad address error.
10522 ret
= MIN(strlen(exec_path
), arg4
);
10523 /* We cannot NUL terminate the string. */
10524 memcpy(p2
, exec_path
, ret
);
10526 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
10528 unlock_user(p2
, arg3
, ret
);
10529 unlock_user(p
, arg2
, 0);
10533 #ifdef TARGET_NR_swapon
10534 case TARGET_NR_swapon
:
10535 if (!(p
= lock_user_string(arg1
)))
10536 return -TARGET_EFAULT
;
10537 ret
= get_errno(swapon(p
, arg2
));
10538 unlock_user(p
, arg1
, 0);
10541 case TARGET_NR_reboot
:
10542 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
10543 /* arg4 must be ignored in all other cases */
10544 p
= lock_user_string(arg4
);
10546 return -TARGET_EFAULT
;
10548 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
10549 unlock_user(p
, arg4
, 0);
10551 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
10554 #ifdef TARGET_NR_mmap
10555 case TARGET_NR_mmap
:
10556 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10557 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10558 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10559 || defined(TARGET_S390X)
10562 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
10563 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
10564 return -TARGET_EFAULT
;
10565 v1
= tswapal(v
[0]);
10566 v2
= tswapal(v
[1]);
10567 v3
= tswapal(v
[2]);
10568 v4
= tswapal(v
[3]);
10569 v5
= tswapal(v
[4]);
10570 v6
= tswapal(v
[5]);
10571 unlock_user(v
, arg1
, 0);
10572 ret
= get_errno(target_mmap(v1
, v2
, v3
,
10573 target_to_host_bitmask(v4
, mmap_flags_tbl
),
10577 /* mmap pointers are always untagged */
10578 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
10579 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10585 #ifdef TARGET_NR_mmap2
10586 case TARGET_NR_mmap2
:
10588 #define MMAP_SHIFT 12
10590 ret
= target_mmap(arg1
, arg2
, arg3
,
10591 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10592 arg5
, (off_t
)(abi_ulong
)arg6
<< MMAP_SHIFT
);
10593 return get_errno(ret
);
10595 case TARGET_NR_munmap
:
10596 arg1
= cpu_untagged_addr(cpu
, arg1
);
10597 return get_errno(target_munmap(arg1
, arg2
));
10598 case TARGET_NR_mprotect
:
10599 arg1
= cpu_untagged_addr(cpu
, arg1
);
10601 TaskState
*ts
= cpu
->opaque
;
10602 /* Special hack to detect libc making the stack executable. */
10603 if ((arg3
& PROT_GROWSDOWN
)
10604 && arg1
>= ts
->info
->stack_limit
10605 && arg1
<= ts
->info
->start_stack
) {
10606 arg3
&= ~PROT_GROWSDOWN
;
10607 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
10608 arg1
= ts
->info
->stack_limit
;
10611 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
10612 #ifdef TARGET_NR_mremap
10613 case TARGET_NR_mremap
:
10614 arg1
= cpu_untagged_addr(cpu
, arg1
);
10615 /* mremap new_addr (arg5) is always untagged */
10616 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
10618 /* ??? msync/mlock/munlock are broken for softmmu. */
10619 #ifdef TARGET_NR_msync
10620 case TARGET_NR_msync
:
10621 return get_errno(msync(g2h(cpu
, arg1
), arg2
,
10622 target_to_host_msync_arg(arg3
)));
10624 #ifdef TARGET_NR_mlock
10625 case TARGET_NR_mlock
:
10626 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
10628 #ifdef TARGET_NR_munlock
10629 case TARGET_NR_munlock
:
10630 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10632 #ifdef TARGET_NR_mlockall
10633 case TARGET_NR_mlockall
:
10634 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10636 #ifdef TARGET_NR_munlockall
10637 case TARGET_NR_munlockall
:
10638 return get_errno(munlockall());
10640 #ifdef TARGET_NR_truncate
10641 case TARGET_NR_truncate
:
10642 if (!(p
= lock_user_string(arg1
)))
10643 return -TARGET_EFAULT
;
10644 ret
= get_errno(truncate(p
, arg2
));
10645 unlock_user(p
, arg1
, 0);
10648 #ifdef TARGET_NR_ftruncate
10649 case TARGET_NR_ftruncate
:
10650 return get_errno(ftruncate(arg1
, arg2
));
10652 case TARGET_NR_fchmod
:
10653 return get_errno(fchmod(arg1
, arg2
));
10654 #if defined(TARGET_NR_fchmodat)
10655 case TARGET_NR_fchmodat
:
10656 if (!(p
= lock_user_string(arg2
)))
10657 return -TARGET_EFAULT
;
10658 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10659 unlock_user(p
, arg2
, 0);
10662 case TARGET_NR_getpriority
:
10663 /* Note that negative values are valid for getpriority, so we must
10664 differentiate based on errno settings. */
10666 ret
= getpriority(arg1
, arg2
);
10667 if (ret
== -1 && errno
!= 0) {
10668 return -host_to_target_errno(errno
);
10670 #ifdef TARGET_ALPHA
10671 /* Return value is the unbiased priority. Signal no error. */
10672 cpu_env
->ir
[IR_V0
] = 0;
10674 /* Return value is a biased priority to avoid negative numbers. */
10678 case TARGET_NR_setpriority
:
10679 return get_errno(setpriority(arg1
, arg2
, arg3
));
10680 #ifdef TARGET_NR_statfs
10681 case TARGET_NR_statfs
:
10682 if (!(p
= lock_user_string(arg1
))) {
10683 return -TARGET_EFAULT
;
10685 ret
= get_errno(statfs(path(p
), &stfs
));
10686 unlock_user(p
, arg1
, 0);
10688 if (!is_error(ret
)) {
10689 struct target_statfs
*target_stfs
;
10691 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10692 return -TARGET_EFAULT
;
10693 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10694 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10695 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10696 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10697 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10698 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10699 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10700 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10701 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10702 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10703 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10704 #ifdef _STATFS_F_FLAGS
10705 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10707 __put_user(0, &target_stfs
->f_flags
);
10709 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10710 unlock_user_struct(target_stfs
, arg2
, 1);
10714 #ifdef TARGET_NR_fstatfs
10715 case TARGET_NR_fstatfs
:
10716 ret
= get_errno(fstatfs(arg1
, &stfs
));
10717 goto convert_statfs
;
10719 #ifdef TARGET_NR_statfs64
10720 case TARGET_NR_statfs64
:
10721 if (!(p
= lock_user_string(arg1
))) {
10722 return -TARGET_EFAULT
;
10724 ret
= get_errno(statfs(path(p
), &stfs
));
10725 unlock_user(p
, arg1
, 0);
10727 if (!is_error(ret
)) {
10728 struct target_statfs64
*target_stfs
;
10730 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10731 return -TARGET_EFAULT
;
10732 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10733 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10734 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10735 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10736 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10737 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10738 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10739 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10740 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10741 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10742 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10743 #ifdef _STATFS_F_FLAGS
10744 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10746 __put_user(0, &target_stfs
->f_flags
);
10748 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10749 unlock_user_struct(target_stfs
, arg3
, 1);
10752 case TARGET_NR_fstatfs64
:
10753 ret
= get_errno(fstatfs(arg1
, &stfs
));
10754 goto convert_statfs64
;
10756 #ifdef TARGET_NR_socketcall
10757 case TARGET_NR_socketcall
:
10758 return do_socketcall(arg1
, arg2
);
10760 #ifdef TARGET_NR_accept
10761 case TARGET_NR_accept
:
10762 return do_accept4(arg1
, arg2
, arg3
, 0);
10764 #ifdef TARGET_NR_accept4
10765 case TARGET_NR_accept4
:
10766 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10768 #ifdef TARGET_NR_bind
10769 case TARGET_NR_bind
:
10770 return do_bind(arg1
, arg2
, arg3
);
10772 #ifdef TARGET_NR_connect
10773 case TARGET_NR_connect
:
10774 return do_connect(arg1
, arg2
, arg3
);
10776 #ifdef TARGET_NR_getpeername
10777 case TARGET_NR_getpeername
:
10778 return do_getpeername(arg1
, arg2
, arg3
);
10780 #ifdef TARGET_NR_getsockname
10781 case TARGET_NR_getsockname
:
10782 return do_getsockname(arg1
, arg2
, arg3
);
10784 #ifdef TARGET_NR_getsockopt
10785 case TARGET_NR_getsockopt
:
10786 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10788 #ifdef TARGET_NR_listen
10789 case TARGET_NR_listen
:
10790 return get_errno(listen(arg1
, arg2
));
10792 #ifdef TARGET_NR_recv
10793 case TARGET_NR_recv
:
10794 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10796 #ifdef TARGET_NR_recvfrom
10797 case TARGET_NR_recvfrom
:
10798 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10800 #ifdef TARGET_NR_recvmsg
10801 case TARGET_NR_recvmsg
:
10802 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10804 #ifdef TARGET_NR_send
10805 case TARGET_NR_send
:
10806 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10808 #ifdef TARGET_NR_sendmsg
10809 case TARGET_NR_sendmsg
:
10810 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10812 #ifdef TARGET_NR_sendmmsg
10813 case TARGET_NR_sendmmsg
:
10814 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10816 #ifdef TARGET_NR_recvmmsg
10817 case TARGET_NR_recvmmsg
:
10818 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10820 #ifdef TARGET_NR_sendto
10821 case TARGET_NR_sendto
:
10822 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10824 #ifdef TARGET_NR_shutdown
10825 case TARGET_NR_shutdown
:
10826 return get_errno(shutdown(arg1
, arg2
));
10828 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10829 case TARGET_NR_getrandom
:
10830 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10832 return -TARGET_EFAULT
;
10834 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10835 unlock_user(p
, arg1
, ret
);
10838 #ifdef TARGET_NR_socket
10839 case TARGET_NR_socket
:
10840 return do_socket(arg1
, arg2
, arg3
);
10842 #ifdef TARGET_NR_socketpair
10843 case TARGET_NR_socketpair
:
10844 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10846 #ifdef TARGET_NR_setsockopt
10847 case TARGET_NR_setsockopt
:
10848 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10850 #if defined(TARGET_NR_syslog)
10851 case TARGET_NR_syslog
:
10856 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10857 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10858 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10859 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10860 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10861 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10862 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10863 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10864 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10865 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10866 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10867 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10870 return -TARGET_EINVAL
;
10875 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10877 return -TARGET_EFAULT
;
10879 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10880 unlock_user(p
, arg2
, arg3
);
10884 return -TARGET_EINVAL
;
10889 case TARGET_NR_setitimer
:
10891 struct itimerval value
, ovalue
, *pvalue
;
10895 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10896 || copy_from_user_timeval(&pvalue
->it_value
,
10897 arg2
+ sizeof(struct target_timeval
)))
10898 return -TARGET_EFAULT
;
10902 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10903 if (!is_error(ret
) && arg3
) {
10904 if (copy_to_user_timeval(arg3
,
10905 &ovalue
.it_interval
)
10906 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10908 return -TARGET_EFAULT
;
10912 case TARGET_NR_getitimer
:
10914 struct itimerval value
;
10916 ret
= get_errno(getitimer(arg1
, &value
));
10917 if (!is_error(ret
) && arg2
) {
10918 if (copy_to_user_timeval(arg2
,
10919 &value
.it_interval
)
10920 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10922 return -TARGET_EFAULT
;
10926 #ifdef TARGET_NR_stat
10927 case TARGET_NR_stat
:
10928 if (!(p
= lock_user_string(arg1
))) {
10929 return -TARGET_EFAULT
;
10931 ret
= get_errno(stat(path(p
), &st
));
10932 unlock_user(p
, arg1
, 0);
10935 #ifdef TARGET_NR_lstat
10936 case TARGET_NR_lstat
:
10937 if (!(p
= lock_user_string(arg1
))) {
10938 return -TARGET_EFAULT
;
10940 ret
= get_errno(lstat(path(p
), &st
));
10941 unlock_user(p
, arg1
, 0);
10944 #ifdef TARGET_NR_fstat
10945 case TARGET_NR_fstat
:
10947 ret
= get_errno(fstat(arg1
, &st
));
10948 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10951 if (!is_error(ret
)) {
10952 struct target_stat
*target_st
;
10954 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10955 return -TARGET_EFAULT
;
10956 memset(target_st
, 0, sizeof(*target_st
));
10957 __put_user(st
.st_dev
, &target_st
->st_dev
);
10958 __put_user(st
.st_ino
, &target_st
->st_ino
);
10959 __put_user(st
.st_mode
, &target_st
->st_mode
);
10960 __put_user(st
.st_uid
, &target_st
->st_uid
);
10961 __put_user(st
.st_gid
, &target_st
->st_gid
);
10962 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10963 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10964 __put_user(st
.st_size
, &target_st
->st_size
);
10965 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10966 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10967 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10968 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10969 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10970 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10971 __put_user(st
.st_atim
.tv_nsec
,
10972 &target_st
->target_st_atime_nsec
);
10973 __put_user(st
.st_mtim
.tv_nsec
,
10974 &target_st
->target_st_mtime_nsec
);
10975 __put_user(st
.st_ctim
.tv_nsec
,
10976 &target_st
->target_st_ctime_nsec
);
10978 unlock_user_struct(target_st
, arg2
, 1);
10983 case TARGET_NR_vhangup
:
10984 return get_errno(vhangup());
10985 #ifdef TARGET_NR_syscall
10986 case TARGET_NR_syscall
:
10987 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10988 arg6
, arg7
, arg8
, 0);
10990 #if defined(TARGET_NR_wait4)
10991 case TARGET_NR_wait4
:
10994 abi_long status_ptr
= arg2
;
10995 struct rusage rusage
, *rusage_ptr
;
10996 abi_ulong target_rusage
= arg4
;
10997 abi_long rusage_err
;
10999 rusage_ptr
= &rusage
;
11002 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
11003 if (!is_error(ret
)) {
11004 if (status_ptr
&& ret
) {
11005 status
= host_to_target_waitstatus(status
);
11006 if (put_user_s32(status
, status_ptr
))
11007 return -TARGET_EFAULT
;
11009 if (target_rusage
) {
11010 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
11019 #ifdef TARGET_NR_swapoff
11020 case TARGET_NR_swapoff
:
11021 if (!(p
= lock_user_string(arg1
)))
11022 return -TARGET_EFAULT
;
11023 ret
= get_errno(swapoff(p
));
11024 unlock_user(p
, arg1
, 0);
11027 case TARGET_NR_sysinfo
:
11029 struct target_sysinfo
*target_value
;
11030 struct sysinfo value
;
11031 ret
= get_errno(sysinfo(&value
));
11032 if (!is_error(ret
) && arg1
)
11034 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
11035 return -TARGET_EFAULT
;
11036 __put_user(value
.uptime
, &target_value
->uptime
);
11037 __put_user(value
.loads
[0], &target_value
->loads
[0]);
11038 __put_user(value
.loads
[1], &target_value
->loads
[1]);
11039 __put_user(value
.loads
[2], &target_value
->loads
[2]);
11040 __put_user(value
.totalram
, &target_value
->totalram
);
11041 __put_user(value
.freeram
, &target_value
->freeram
);
11042 __put_user(value
.sharedram
, &target_value
->sharedram
);
11043 __put_user(value
.bufferram
, &target_value
->bufferram
);
11044 __put_user(value
.totalswap
, &target_value
->totalswap
);
11045 __put_user(value
.freeswap
, &target_value
->freeswap
);
11046 __put_user(value
.procs
, &target_value
->procs
);
11047 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
11048 __put_user(value
.freehigh
, &target_value
->freehigh
);
11049 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
11050 unlock_user_struct(target_value
, arg1
, 1);
11054 #ifdef TARGET_NR_ipc
11055 case TARGET_NR_ipc
:
11056 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11058 #ifdef TARGET_NR_semget
11059 case TARGET_NR_semget
:
11060 return get_errno(semget(arg1
, arg2
, arg3
));
11062 #ifdef TARGET_NR_semop
11063 case TARGET_NR_semop
:
11064 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
11066 #ifdef TARGET_NR_semtimedop
11067 case TARGET_NR_semtimedop
:
11068 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
11070 #ifdef TARGET_NR_semtimedop_time64
11071 case TARGET_NR_semtimedop_time64
:
11072 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
11074 #ifdef TARGET_NR_semctl
11075 case TARGET_NR_semctl
:
11076 return do_semctl(arg1
, arg2
, arg3
, arg4
);
11078 #ifdef TARGET_NR_msgctl
11079 case TARGET_NR_msgctl
:
11080 return do_msgctl(arg1
, arg2
, arg3
);
11082 #ifdef TARGET_NR_msgget
11083 case TARGET_NR_msgget
:
11084 return get_errno(msgget(arg1
, arg2
));
11086 #ifdef TARGET_NR_msgrcv
11087 case TARGET_NR_msgrcv
:
11088 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
11090 #ifdef TARGET_NR_msgsnd
11091 case TARGET_NR_msgsnd
:
11092 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
11094 #ifdef TARGET_NR_shmget
11095 case TARGET_NR_shmget
:
11096 return get_errno(shmget(arg1
, arg2
, arg3
));
11098 #ifdef TARGET_NR_shmctl
11099 case TARGET_NR_shmctl
:
11100 return do_shmctl(arg1
, arg2
, arg3
);
11102 #ifdef TARGET_NR_shmat
11103 case TARGET_NR_shmat
:
11104 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
11106 #ifdef TARGET_NR_shmdt
11107 case TARGET_NR_shmdt
:
11108 return do_shmdt(arg1
);
11110 case TARGET_NR_fsync
:
11111 return get_errno(fsync(arg1
));
11112 case TARGET_NR_clone
:
11113 /* Linux manages to have three different orderings for its
11114 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11115 * match the kernel's CONFIG_CLONE_* settings.
11116 * Microblaze is further special in that it uses a sixth
11117 * implicit argument to clone for the TLS pointer.
11119 #if defined(TARGET_MICROBLAZE)
11120 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
11121 #elif defined(TARGET_CLONE_BACKWARDS)
11122 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
11123 #elif defined(TARGET_CLONE_BACKWARDS2)
11124 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
11126 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
11129 #ifdef __NR_exit_group
11130 /* new thread calls */
11131 case TARGET_NR_exit_group
:
11132 preexit_cleanup(cpu_env
, arg1
);
11133 return get_errno(exit_group(arg1
));
11135 case TARGET_NR_setdomainname
:
11136 if (!(p
= lock_user_string(arg1
)))
11137 return -TARGET_EFAULT
;
11138 ret
= get_errno(setdomainname(p
, arg2
));
11139 unlock_user(p
, arg1
, 0);
11141 case TARGET_NR_uname
:
11142 /* no need to transcode because we use the linux syscall */
11144 struct new_utsname
* buf
;
11146 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
11147 return -TARGET_EFAULT
;
11148 ret
= get_errno(sys_uname(buf
));
11149 if (!is_error(ret
)) {
11150 /* Overwrite the native machine name with whatever is being
11152 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
11153 sizeof(buf
->machine
));
11154 /* Allow the user to override the reported release. */
11155 if (qemu_uname_release
&& *qemu_uname_release
) {
11156 g_strlcpy(buf
->release
, qemu_uname_release
,
11157 sizeof(buf
->release
));
11160 unlock_user_struct(buf
, arg1
, 1);
11164 case TARGET_NR_modify_ldt
:
11165 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
11166 #if !defined(TARGET_X86_64)
11167 case TARGET_NR_vm86
:
11168 return do_vm86(cpu_env
, arg1
, arg2
);
11171 #if defined(TARGET_NR_adjtimex)
11172 case TARGET_NR_adjtimex
:
11174 struct timex host_buf
;
11176 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
11177 return -TARGET_EFAULT
;
11179 ret
= get_errno(adjtimex(&host_buf
));
11180 if (!is_error(ret
)) {
11181 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
11182 return -TARGET_EFAULT
;
11188 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11189 case TARGET_NR_clock_adjtime
:
11193 if (target_to_host_timex(&htx
, arg2
) != 0) {
11194 return -TARGET_EFAULT
;
11196 ret
= get_errno(clock_adjtime(arg1
, &htx
));
11197 if (!is_error(ret
) && host_to_target_timex(arg2
, &htx
)) {
11198 return -TARGET_EFAULT
;
11203 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11204 case TARGET_NR_clock_adjtime64
:
11208 if (target_to_host_timex64(&htx
, arg2
) != 0) {
11209 return -TARGET_EFAULT
;
11211 ret
= get_errno(clock_adjtime(arg1
, &htx
));
11212 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
11213 return -TARGET_EFAULT
;
11218 case TARGET_NR_getpgid
:
11219 return get_errno(getpgid(arg1
));
11220 case TARGET_NR_fchdir
:
11221 return get_errno(fchdir(arg1
));
11222 case TARGET_NR_personality
:
11223 return get_errno(personality(arg1
));
11224 #ifdef TARGET_NR__llseek /* Not on alpha */
11225 case TARGET_NR__llseek
:
11228 #if !defined(__NR_llseek)
11229 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
11231 ret
= get_errno(res
);
11236 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
11238 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
11239 return -TARGET_EFAULT
;
11244 #ifdef TARGET_NR_getdents
11245 case TARGET_NR_getdents
:
11246 return do_getdents(arg1
, arg2
, arg3
);
11247 #endif /* TARGET_NR_getdents */
11248 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11249 case TARGET_NR_getdents64
:
11250 return do_getdents64(arg1
, arg2
, arg3
);
11251 #endif /* TARGET_NR_getdents64 */
11252 #if defined(TARGET_NR__newselect)
11253 case TARGET_NR__newselect
:
11254 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
11256 #ifdef TARGET_NR_poll
11257 case TARGET_NR_poll
:
11258 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
11260 #ifdef TARGET_NR_ppoll
11261 case TARGET_NR_ppoll
:
11262 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
11264 #ifdef TARGET_NR_ppoll_time64
11265 case TARGET_NR_ppoll_time64
:
11266 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
11268 case TARGET_NR_flock
:
11269 /* NOTE: the flock constant seems to be the same for every
11271 return get_errno(safe_flock(arg1
, arg2
));
11272 case TARGET_NR_readv
:
11274 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
11276 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
11277 unlock_iovec(vec
, arg2
, arg3
, 1);
11279 ret
= -host_to_target_errno(errno
);
11283 case TARGET_NR_writev
:
11285 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11287 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
11288 unlock_iovec(vec
, arg2
, arg3
, 0);
11290 ret
= -host_to_target_errno(errno
);
11294 #if defined(TARGET_NR_preadv)
11295 case TARGET_NR_preadv
:
11297 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
11299 unsigned long low
, high
;
11301 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
11302 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
11303 unlock_iovec(vec
, arg2
, arg3
, 1);
11305 ret
= -host_to_target_errno(errno
);
11310 #if defined(TARGET_NR_pwritev)
11311 case TARGET_NR_pwritev
:
11313 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11315 unsigned long low
, high
;
11317 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
11318 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
11319 unlock_iovec(vec
, arg2
, arg3
, 0);
11321 ret
= -host_to_target_errno(errno
);
11326 case TARGET_NR_getsid
:
11327 return get_errno(getsid(arg1
));
11328 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11329 case TARGET_NR_fdatasync
:
11330 return get_errno(fdatasync(arg1
));
11332 case TARGET_NR_sched_getaffinity
:
11334 unsigned int mask_size
;
11335 unsigned long *mask
;
11338 * sched_getaffinity needs multiples of ulong, so need to take
11339 * care of mismatches between target ulong and host ulong sizes.
11341 if (arg2
& (sizeof(abi_ulong
) - 1)) {
11342 return -TARGET_EINVAL
;
11344 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
11346 mask
= alloca(mask_size
);
11347 memset(mask
, 0, mask_size
);
11348 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
11350 if (!is_error(ret
)) {
11352 /* More data returned than the caller's buffer will fit.
11353 * This only happens if sizeof(abi_long) < sizeof(long)
11354 * and the caller passed us a buffer holding an odd number
11355 * of abi_longs. If the host kernel is actually using the
11356 * extra 4 bytes then fail EINVAL; otherwise we can just
11357 * ignore them and only copy the interesting part.
11359 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
11360 if (numcpus
> arg2
* 8) {
11361 return -TARGET_EINVAL
;
11366 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
11367 return -TARGET_EFAULT
;
11372 case TARGET_NR_sched_setaffinity
:
11374 unsigned int mask_size
;
11375 unsigned long *mask
;
11378 * sched_setaffinity needs multiples of ulong, so need to take
11379 * care of mismatches between target ulong and host ulong sizes.
11381 if (arg2
& (sizeof(abi_ulong
) - 1)) {
11382 return -TARGET_EINVAL
;
11384 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
11385 mask
= alloca(mask_size
);
11387 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
11392 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
11394 case TARGET_NR_getcpu
:
11396 unsigned cpu
, node
;
11397 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
11398 arg2
? &node
: NULL
,
11400 if (is_error(ret
)) {
11403 if (arg1
&& put_user_u32(cpu
, arg1
)) {
11404 return -TARGET_EFAULT
;
11406 if (arg2
&& put_user_u32(node
, arg2
)) {
11407 return -TARGET_EFAULT
;
11411 case TARGET_NR_sched_setparam
:
11413 struct target_sched_param
*target_schp
;
11414 struct sched_param schp
;
11417 return -TARGET_EINVAL
;
11419 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
11420 return -TARGET_EFAULT
;
11422 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
11423 unlock_user_struct(target_schp
, arg2
, 0);
11424 return get_errno(sys_sched_setparam(arg1
, &schp
));
11426 case TARGET_NR_sched_getparam
:
11428 struct target_sched_param
*target_schp
;
11429 struct sched_param schp
;
11432 return -TARGET_EINVAL
;
11434 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
11435 if (!is_error(ret
)) {
11436 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
11437 return -TARGET_EFAULT
;
11439 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
11440 unlock_user_struct(target_schp
, arg2
, 1);
11444 case TARGET_NR_sched_setscheduler
:
11446 struct target_sched_param
*target_schp
;
11447 struct sched_param schp
;
11449 return -TARGET_EINVAL
;
11451 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
11452 return -TARGET_EFAULT
;
11454 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
11455 unlock_user_struct(target_schp
, arg3
, 0);
11456 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
11458 case TARGET_NR_sched_getscheduler
:
11459 return get_errno(sys_sched_getscheduler(arg1
));
11460 case TARGET_NR_sched_getattr
:
11462 struct target_sched_attr
*target_scha
;
11463 struct sched_attr scha
;
11465 return -TARGET_EINVAL
;
11467 if (arg3
> sizeof(scha
)) {
11468 arg3
= sizeof(scha
);
11470 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
11471 if (!is_error(ret
)) {
11472 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11473 if (!target_scha
) {
11474 return -TARGET_EFAULT
;
11476 target_scha
->size
= tswap32(scha
.size
);
11477 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
11478 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
11479 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
11480 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
11481 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
11482 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
11483 target_scha
->sched_period
= tswap64(scha
.sched_period
);
11484 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
11485 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
11486 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
11488 unlock_user(target_scha
, arg2
, arg3
);
11492 case TARGET_NR_sched_setattr
:
11494 struct target_sched_attr
*target_scha
;
11495 struct sched_attr scha
;
11499 return -TARGET_EINVAL
;
11501 if (get_user_u32(size
, arg2
)) {
11502 return -TARGET_EFAULT
;
11505 size
= offsetof(struct target_sched_attr
, sched_util_min
);
11507 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
11508 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
11509 return -TARGET_EFAULT
;
11511 return -TARGET_E2BIG
;
11514 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
11517 } else if (zeroed
== 0) {
11518 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
11519 return -TARGET_EFAULT
;
11521 return -TARGET_E2BIG
;
11523 if (size
> sizeof(struct target_sched_attr
)) {
11524 size
= sizeof(struct target_sched_attr
);
11527 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
11528 if (!target_scha
) {
11529 return -TARGET_EFAULT
;
11532 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
11533 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
11534 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
11535 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
11536 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
11537 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
11538 scha
.sched_period
= tswap64(target_scha
->sched_period
);
11539 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
11540 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
11541 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
11543 unlock_user(target_scha
, arg2
, 0);
11544 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
11546 case TARGET_NR_sched_yield
:
11547 return get_errno(sched_yield());
11548 case TARGET_NR_sched_get_priority_max
:
11549 return get_errno(sched_get_priority_max(arg1
));
11550 case TARGET_NR_sched_get_priority_min
:
11551 return get_errno(sched_get_priority_min(arg1
));
11552 #ifdef TARGET_NR_sched_rr_get_interval
11553 case TARGET_NR_sched_rr_get_interval
:
11555 struct timespec ts
;
11556 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11557 if (!is_error(ret
)) {
11558 ret
= host_to_target_timespec(arg2
, &ts
);
11563 #ifdef TARGET_NR_sched_rr_get_interval_time64
11564 case TARGET_NR_sched_rr_get_interval_time64
:
11566 struct timespec ts
;
11567 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
11568 if (!is_error(ret
)) {
11569 ret
= host_to_target_timespec64(arg2
, &ts
);
11574 #if defined(TARGET_NR_nanosleep)
11575 case TARGET_NR_nanosleep
:
11577 struct timespec req
, rem
;
11578 target_to_host_timespec(&req
, arg1
);
11579 ret
= get_errno(safe_nanosleep(&req
, &rem
));
11580 if (is_error(ret
) && arg2
) {
11581 host_to_target_timespec(arg2
, &rem
);
11586 case TARGET_NR_prctl
:
11587 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
11589 #ifdef TARGET_NR_arch_prctl
11590 case TARGET_NR_arch_prctl
:
11591 return do_arch_prctl(cpu_env
, arg1
, arg2
);
11593 #ifdef TARGET_NR_pread64
11594 case TARGET_NR_pread64
:
11595 if (regpairs_aligned(cpu_env
, num
)) {
11599 if (arg2
== 0 && arg3
== 0) {
11600 /* Special-case NULL buffer and zero length, which should succeed */
11603 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11605 return -TARGET_EFAULT
;
11608 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11609 unlock_user(p
, arg2
, ret
);
11611 case TARGET_NR_pwrite64
:
11612 if (regpairs_aligned(cpu_env
, num
)) {
11616 if (arg2
== 0 && arg3
== 0) {
11617 /* Special-case NULL buffer and zero length, which should succeed */
11620 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11622 return -TARGET_EFAULT
;
11625 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11626 unlock_user(p
, arg2
, 0);
11629 case TARGET_NR_getcwd
:
11630 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11631 return -TARGET_EFAULT
;
11632 ret
= get_errno(sys_getcwd1(p
, arg2
));
11633 unlock_user(p
, arg1
, ret
);
11635 case TARGET_NR_capget
:
11636 case TARGET_NR_capset
:
11638 struct target_user_cap_header
*target_header
;
11639 struct target_user_cap_data
*target_data
= NULL
;
11640 struct __user_cap_header_struct header
;
11641 struct __user_cap_data_struct data
[2];
11642 struct __user_cap_data_struct
*dataptr
= NULL
;
11643 int i
, target_datalen
;
11644 int data_items
= 1;
11646 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11647 return -TARGET_EFAULT
;
11649 header
.version
= tswap32(target_header
->version
);
11650 header
.pid
= tswap32(target_header
->pid
);
11652 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11653 /* Version 2 and up takes pointer to two user_data structs */
11657 target_datalen
= sizeof(*target_data
) * data_items
;
11660 if (num
== TARGET_NR_capget
) {
11661 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11663 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11665 if (!target_data
) {
11666 unlock_user_struct(target_header
, arg1
, 0);
11667 return -TARGET_EFAULT
;
11670 if (num
== TARGET_NR_capset
) {
11671 for (i
= 0; i
< data_items
; i
++) {
11672 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11673 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11674 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11681 if (num
== TARGET_NR_capget
) {
11682 ret
= get_errno(capget(&header
, dataptr
));
11684 ret
= get_errno(capset(&header
, dataptr
));
11687 /* The kernel always updates version for both capget and capset */
11688 target_header
->version
= tswap32(header
.version
);
11689 unlock_user_struct(target_header
, arg1
, 1);
11692 if (num
== TARGET_NR_capget
) {
11693 for (i
= 0; i
< data_items
; i
++) {
11694 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11695 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11696 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11698 unlock_user(target_data
, arg2
, target_datalen
);
11700 unlock_user(target_data
, arg2
, 0);
11705 case TARGET_NR_sigaltstack
:
11706 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11708 #ifdef CONFIG_SENDFILE
11709 #ifdef TARGET_NR_sendfile
11710 case TARGET_NR_sendfile
:
11712 off_t
*offp
= NULL
;
11715 ret
= get_user_sal(off
, arg3
);
11716 if (is_error(ret
)) {
11721 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11722 if (!is_error(ret
) && arg3
) {
11723 abi_long ret2
= put_user_sal(off
, arg3
);
11724 if (is_error(ret2
)) {
11731 #ifdef TARGET_NR_sendfile64
11732 case TARGET_NR_sendfile64
:
11734 off_t
*offp
= NULL
;
11737 ret
= get_user_s64(off
, arg3
);
11738 if (is_error(ret
)) {
11743 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11744 if (!is_error(ret
) && arg3
) {
11745 abi_long ret2
= put_user_s64(off
, arg3
);
11746 if (is_error(ret2
)) {
11754 #ifdef TARGET_NR_vfork
11755 case TARGET_NR_vfork
:
11756 return get_errno(do_fork(cpu_env
,
11757 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11760 #ifdef TARGET_NR_ugetrlimit
11761 case TARGET_NR_ugetrlimit
:
11763 struct rlimit rlim
;
11764 int resource
= target_to_host_resource(arg1
);
11765 ret
= get_errno(getrlimit(resource
, &rlim
));
11766 if (!is_error(ret
)) {
11767 struct target_rlimit
*target_rlim
;
11768 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11769 return -TARGET_EFAULT
;
11770 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11771 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11772 unlock_user_struct(target_rlim
, arg2
, 1);
11777 #ifdef TARGET_NR_truncate64
11778 case TARGET_NR_truncate64
:
11779 if (!(p
= lock_user_string(arg1
)))
11780 return -TARGET_EFAULT
;
11781 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11782 unlock_user(p
, arg1
, 0);
11785 #ifdef TARGET_NR_ftruncate64
11786 case TARGET_NR_ftruncate64
:
11787 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11789 #ifdef TARGET_NR_stat64
11790 case TARGET_NR_stat64
:
11791 if (!(p
= lock_user_string(arg1
))) {
11792 return -TARGET_EFAULT
;
11794 ret
= get_errno(stat(path(p
), &st
));
11795 unlock_user(p
, arg1
, 0);
11796 if (!is_error(ret
))
11797 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11800 #ifdef TARGET_NR_lstat64
11801 case TARGET_NR_lstat64
:
11802 if (!(p
= lock_user_string(arg1
))) {
11803 return -TARGET_EFAULT
;
11805 ret
= get_errno(lstat(path(p
), &st
));
11806 unlock_user(p
, arg1
, 0);
11807 if (!is_error(ret
))
11808 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11811 #ifdef TARGET_NR_fstat64
11812 case TARGET_NR_fstat64
:
11813 ret
= get_errno(fstat(arg1
, &st
));
11814 if (!is_error(ret
))
11815 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11818 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11819 #ifdef TARGET_NR_fstatat64
11820 case TARGET_NR_fstatat64
:
11822 #ifdef TARGET_NR_newfstatat
11823 case TARGET_NR_newfstatat
:
11825 if (!(p
= lock_user_string(arg2
))) {
11826 return -TARGET_EFAULT
;
11828 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11829 unlock_user(p
, arg2
, 0);
11830 if (!is_error(ret
))
11831 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11834 #if defined(TARGET_NR_statx)
11835 case TARGET_NR_statx
:
11837 struct target_statx
*target_stx
;
11841 p
= lock_user_string(arg2
);
11843 return -TARGET_EFAULT
;
11845 #if defined(__NR_statx)
11848 * It is assumed that struct statx is architecture independent.
11850 struct target_statx host_stx
;
11853 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11854 if (!is_error(ret
)) {
11855 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11856 unlock_user(p
, arg2
, 0);
11857 return -TARGET_EFAULT
;
11861 if (ret
!= -TARGET_ENOSYS
) {
11862 unlock_user(p
, arg2
, 0);
11867 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11868 unlock_user(p
, arg2
, 0);
11870 if (!is_error(ret
)) {
11871 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11872 return -TARGET_EFAULT
;
11874 memset(target_stx
, 0, sizeof(*target_stx
));
11875 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11876 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11877 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11878 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11879 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11880 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11881 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11882 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11883 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11884 __put_user(st
.st_size
, &target_stx
->stx_size
);
11885 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11886 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11887 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11888 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11889 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11890 unlock_user_struct(target_stx
, arg5
, 1);
11895 #ifdef TARGET_NR_lchown
11896 case TARGET_NR_lchown
:
11897 if (!(p
= lock_user_string(arg1
)))
11898 return -TARGET_EFAULT
;
11899 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11900 unlock_user(p
, arg1
, 0);
11903 #ifdef TARGET_NR_getuid
11904 case TARGET_NR_getuid
:
11905 return get_errno(high2lowuid(getuid()));
11907 #ifdef TARGET_NR_getgid
11908 case TARGET_NR_getgid
:
11909 return get_errno(high2lowgid(getgid()));
11911 #ifdef TARGET_NR_geteuid
11912 case TARGET_NR_geteuid
:
11913 return get_errno(high2lowuid(geteuid()));
11915 #ifdef TARGET_NR_getegid
11916 case TARGET_NR_getegid
:
11917 return get_errno(high2lowgid(getegid()));
11919 case TARGET_NR_setreuid
:
11920 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11921 case TARGET_NR_setregid
:
11922 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11923 case TARGET_NR_getgroups
:
11924 { /* the same code as for TARGET_NR_getgroups32 */
11925 int gidsetsize
= arg1
;
11926 target_id
*target_grouplist
;
11927 g_autofree gid_t
*grouplist
= NULL
;
11930 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
11931 return -TARGET_EINVAL
;
11933 if (gidsetsize
> 0) {
11934 grouplist
= g_try_new(gid_t
, gidsetsize
);
11936 return -TARGET_ENOMEM
;
11939 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11940 if (!is_error(ret
) && gidsetsize
> 0) {
11941 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
,
11942 gidsetsize
* sizeof(target_id
), 0);
11943 if (!target_grouplist
) {
11944 return -TARGET_EFAULT
;
11946 for (i
= 0; i
< ret
; i
++) {
11947 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11949 unlock_user(target_grouplist
, arg2
,
11950 gidsetsize
* sizeof(target_id
));
11954 case TARGET_NR_setgroups
:
11955 { /* the same code as for TARGET_NR_setgroups32 */
11956 int gidsetsize
= arg1
;
11957 target_id
*target_grouplist
;
11958 g_autofree gid_t
*grouplist
= NULL
;
11961 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
11962 return -TARGET_EINVAL
;
11964 if (gidsetsize
> 0) {
11965 grouplist
= g_try_new(gid_t
, gidsetsize
);
11967 return -TARGET_ENOMEM
;
11969 target_grouplist
= lock_user(VERIFY_READ
, arg2
,
11970 gidsetsize
* sizeof(target_id
), 1);
11971 if (!target_grouplist
) {
11972 return -TARGET_EFAULT
;
11974 for (i
= 0; i
< gidsetsize
; i
++) {
11975 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11977 unlock_user(target_grouplist
, arg2
,
11978 gidsetsize
* sizeof(target_id
));
11980 return get_errno(setgroups(gidsetsize
, grouplist
));
11982 case TARGET_NR_fchown
:
11983 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11984 #if defined(TARGET_NR_fchownat)
11985 case TARGET_NR_fchownat
:
11986 if (!(p
= lock_user_string(arg2
)))
11987 return -TARGET_EFAULT
;
11988 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11989 low2highgid(arg4
), arg5
));
11990 unlock_user(p
, arg2
, 0);
11993 #ifdef TARGET_NR_setresuid
11994 case TARGET_NR_setresuid
:
11995 return get_errno(sys_setresuid(low2highuid(arg1
),
11997 low2highuid(arg3
)));
11999 #ifdef TARGET_NR_getresuid
12000 case TARGET_NR_getresuid
:
12002 uid_t ruid
, euid
, suid
;
12003 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
12004 if (!is_error(ret
)) {
12005 if (put_user_id(high2lowuid(ruid
), arg1
)
12006 || put_user_id(high2lowuid(euid
), arg2
)
12007 || put_user_id(high2lowuid(suid
), arg3
))
12008 return -TARGET_EFAULT
;
12013 #ifdef TARGET_NR_getresgid
12014 case TARGET_NR_setresgid
:
12015 return get_errno(sys_setresgid(low2highgid(arg1
),
12017 low2highgid(arg3
)));
12019 #ifdef TARGET_NR_getresgid
12020 case TARGET_NR_getresgid
:
12022 gid_t rgid
, egid
, sgid
;
12023 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
12024 if (!is_error(ret
)) {
12025 if (put_user_id(high2lowgid(rgid
), arg1
)
12026 || put_user_id(high2lowgid(egid
), arg2
)
12027 || put_user_id(high2lowgid(sgid
), arg3
))
12028 return -TARGET_EFAULT
;
12033 #ifdef TARGET_NR_chown
12034 case TARGET_NR_chown
:
12035 if (!(p
= lock_user_string(arg1
)))
12036 return -TARGET_EFAULT
;
12037 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
12038 unlock_user(p
, arg1
, 0);
12041 case TARGET_NR_setuid
:
12042 return get_errno(sys_setuid(low2highuid(arg1
)));
12043 case TARGET_NR_setgid
:
12044 return get_errno(sys_setgid(low2highgid(arg1
)));
12045 case TARGET_NR_setfsuid
:
12046 return get_errno(setfsuid(arg1
));
12047 case TARGET_NR_setfsgid
:
12048 return get_errno(setfsgid(arg1
));
12050 #ifdef TARGET_NR_lchown32
12051 case TARGET_NR_lchown32
:
12052 if (!(p
= lock_user_string(arg1
)))
12053 return -TARGET_EFAULT
;
12054 ret
= get_errno(lchown(p
, arg2
, arg3
));
12055 unlock_user(p
, arg1
, 0);
12058 #ifdef TARGET_NR_getuid32
12059 case TARGET_NR_getuid32
:
12060 return get_errno(getuid());
12063 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12064 /* Alpha specific */
12065 case TARGET_NR_getxuid
:
12069 cpu_env
->ir
[IR_A4
]=euid
;
12071 return get_errno(getuid());
12073 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12074 /* Alpha specific */
12075 case TARGET_NR_getxgid
:
12079 cpu_env
->ir
[IR_A4
]=egid
;
12081 return get_errno(getgid());
12083 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12084 /* Alpha specific */
12085 case TARGET_NR_osf_getsysinfo
:
12086 ret
= -TARGET_EOPNOTSUPP
;
12088 case TARGET_GSI_IEEE_FP_CONTROL
:
12090 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
12091 uint64_t swcr
= cpu_env
->swcr
;
12093 swcr
&= ~SWCR_STATUS_MASK
;
12094 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
12096 if (put_user_u64 (swcr
, arg2
))
12097 return -TARGET_EFAULT
;
12102 /* case GSI_IEEE_STATE_AT_SIGNAL:
12103 -- Not implemented in linux kernel.
12105 -- Retrieves current unaligned access state; not much used.
12106 case GSI_PROC_TYPE:
12107 -- Retrieves implver information; surely not used.
12108 case GSI_GET_HWRPB:
12109 -- Grabs a copy of the HWRPB; surely not used.
12114 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12115 /* Alpha specific */
12116 case TARGET_NR_osf_setsysinfo
:
12117 ret
= -TARGET_EOPNOTSUPP
;
12119 case TARGET_SSI_IEEE_FP_CONTROL
:
12121 uint64_t swcr
, fpcr
;
12123 if (get_user_u64 (swcr
, arg2
)) {
12124 return -TARGET_EFAULT
;
12128 * The kernel calls swcr_update_status to update the
12129 * status bits from the fpcr at every point that it
12130 * could be queried. Therefore, we store the status
12131 * bits only in FPCR.
12133 cpu_env
->swcr
= swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
12135 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
12136 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
12137 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
12138 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
12143 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
12145 uint64_t exc
, fpcr
, fex
;
12147 if (get_user_u64(exc
, arg2
)) {
12148 return -TARGET_EFAULT
;
12150 exc
&= SWCR_STATUS_MASK
;
12151 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
12153 /* Old exceptions are not signaled. */
12154 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
12156 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
12157 fex
&= (cpu_env
)->swcr
;
12159 /* Update the hardware fpcr. */
12160 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
12161 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
12164 int si_code
= TARGET_FPE_FLTUNK
;
12165 target_siginfo_t info
;
12167 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
12168 si_code
= TARGET_FPE_FLTUND
;
12170 if (fex
& SWCR_TRAP_ENABLE_INE
) {
12171 si_code
= TARGET_FPE_FLTRES
;
12173 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
12174 si_code
= TARGET_FPE_FLTUND
;
12176 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
12177 si_code
= TARGET_FPE_FLTOVF
;
12179 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
12180 si_code
= TARGET_FPE_FLTDIV
;
12182 if (fex
& SWCR_TRAP_ENABLE_INV
) {
12183 si_code
= TARGET_FPE_FLTINV
;
12186 info
.si_signo
= SIGFPE
;
12188 info
.si_code
= si_code
;
12189 info
._sifields
._sigfault
._addr
= (cpu_env
)->pc
;
12190 queue_signal(cpu_env
, info
.si_signo
,
12191 QEMU_SI_FAULT
, &info
);
12197 /* case SSI_NVPAIRS:
12198 -- Used with SSIN_UACPROC to enable unaligned accesses.
12199 case SSI_IEEE_STATE_AT_SIGNAL:
12200 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12201 -- Not implemented in linux kernel
12206 #ifdef TARGET_NR_osf_sigprocmask
12207 /* Alpha specific. */
12208 case TARGET_NR_osf_sigprocmask
:
12212 sigset_t set
, oldset
;
12215 case TARGET_SIG_BLOCK
:
12218 case TARGET_SIG_UNBLOCK
:
12221 case TARGET_SIG_SETMASK
:
12225 return -TARGET_EINVAL
;
12228 target_to_host_old_sigset(&set
, &mask
);
12229 ret
= do_sigprocmask(how
, &set
, &oldset
);
12231 host_to_target_old_sigset(&mask
, &oldset
);
12238 #ifdef TARGET_NR_getgid32
12239 case TARGET_NR_getgid32
:
12240 return get_errno(getgid());
12242 #ifdef TARGET_NR_geteuid32
12243 case TARGET_NR_geteuid32
:
12244 return get_errno(geteuid());
12246 #ifdef TARGET_NR_getegid32
12247 case TARGET_NR_getegid32
:
12248 return get_errno(getegid());
12250 #ifdef TARGET_NR_setreuid32
12251 case TARGET_NR_setreuid32
:
12252 return get_errno(setreuid(arg1
, arg2
));
12254 #ifdef TARGET_NR_setregid32
12255 case TARGET_NR_setregid32
:
12256 return get_errno(setregid(arg1
, arg2
));
12258 #ifdef TARGET_NR_getgroups32
12259 case TARGET_NR_getgroups32
:
12260 { /* the same code as for TARGET_NR_getgroups */
12261 int gidsetsize
= arg1
;
12262 uint32_t *target_grouplist
;
12263 g_autofree gid_t
*grouplist
= NULL
;
12266 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
12267 return -TARGET_EINVAL
;
12269 if (gidsetsize
> 0) {
12270 grouplist
= g_try_new(gid_t
, gidsetsize
);
12272 return -TARGET_ENOMEM
;
12275 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
12276 if (!is_error(ret
) && gidsetsize
> 0) {
12277 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
,
12278 gidsetsize
* 4, 0);
12279 if (!target_grouplist
) {
12280 return -TARGET_EFAULT
;
12282 for (i
= 0; i
< ret
; i
++) {
12283 target_grouplist
[i
] = tswap32(grouplist
[i
]);
12285 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
12290 #ifdef TARGET_NR_setgroups32
12291 case TARGET_NR_setgroups32
:
12292 { /* the same code as for TARGET_NR_setgroups */
12293 int gidsetsize
= arg1
;
12294 uint32_t *target_grouplist
;
12295 g_autofree gid_t
*grouplist
= NULL
;
12298 if (gidsetsize
> NGROUPS_MAX
|| gidsetsize
< 0) {
12299 return -TARGET_EINVAL
;
12301 if (gidsetsize
> 0) {
12302 grouplist
= g_try_new(gid_t
, gidsetsize
);
12304 return -TARGET_ENOMEM
;
12306 target_grouplist
= lock_user(VERIFY_READ
, arg2
,
12307 gidsetsize
* 4, 1);
12308 if (!target_grouplist
) {
12309 return -TARGET_EFAULT
;
12311 for (i
= 0; i
< gidsetsize
; i
++) {
12312 grouplist
[i
] = tswap32(target_grouplist
[i
]);
12314 unlock_user(target_grouplist
, arg2
, 0);
12316 return get_errno(setgroups(gidsetsize
, grouplist
));
12319 #ifdef TARGET_NR_fchown32
12320 case TARGET_NR_fchown32
:
12321 return get_errno(fchown(arg1
, arg2
, arg3
));
12323 #ifdef TARGET_NR_setresuid32
12324 case TARGET_NR_setresuid32
:
12325 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
12327 #ifdef TARGET_NR_getresuid32
12328 case TARGET_NR_getresuid32
:
12330 uid_t ruid
, euid
, suid
;
12331 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
12332 if (!is_error(ret
)) {
12333 if (put_user_u32(ruid
, arg1
)
12334 || put_user_u32(euid
, arg2
)
12335 || put_user_u32(suid
, arg3
))
12336 return -TARGET_EFAULT
;
12341 #ifdef TARGET_NR_setresgid32
12342 case TARGET_NR_setresgid32
:
12343 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
12345 #ifdef TARGET_NR_getresgid32
12346 case TARGET_NR_getresgid32
:
12348 gid_t rgid
, egid
, sgid
;
12349 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
12350 if (!is_error(ret
)) {
12351 if (put_user_u32(rgid
, arg1
)
12352 || put_user_u32(egid
, arg2
)
12353 || put_user_u32(sgid
, arg3
))
12354 return -TARGET_EFAULT
;
12359 #ifdef TARGET_NR_chown32
12360 case TARGET_NR_chown32
:
12361 if (!(p
= lock_user_string(arg1
)))
12362 return -TARGET_EFAULT
;
12363 ret
= get_errno(chown(p
, arg2
, arg3
));
12364 unlock_user(p
, arg1
, 0);
12367 #ifdef TARGET_NR_setuid32
12368 case TARGET_NR_setuid32
:
12369 return get_errno(sys_setuid(arg1
));
12371 #ifdef TARGET_NR_setgid32
12372 case TARGET_NR_setgid32
:
12373 return get_errno(sys_setgid(arg1
));
12375 #ifdef TARGET_NR_setfsuid32
12376 case TARGET_NR_setfsuid32
:
12377 return get_errno(setfsuid(arg1
));
12379 #ifdef TARGET_NR_setfsgid32
12380 case TARGET_NR_setfsgid32
:
12381 return get_errno(setfsgid(arg1
));
12383 #ifdef TARGET_NR_mincore
12384 case TARGET_NR_mincore
:
12386 void *a
= lock_user(VERIFY_NONE
, arg1
, arg2
, 0);
12388 return -TARGET_ENOMEM
;
12390 p
= lock_user_string(arg3
);
12392 ret
= -TARGET_EFAULT
;
12394 ret
= get_errno(mincore(a
, arg2
, p
));
12395 unlock_user(p
, arg3
, ret
);
12397 unlock_user(a
, arg1
, 0);
12401 #ifdef TARGET_NR_arm_fadvise64_64
12402 case TARGET_NR_arm_fadvise64_64
:
12403 /* arm_fadvise64_64 looks like fadvise64_64 but
12404 * with different argument order: fd, advice, offset, len
12405 * rather than the usual fd, offset, len, advice.
12406 * Note that offset and len are both 64-bit so appear as
12407 * pairs of 32-bit registers.
12409 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
12410 target_offset64(arg5
, arg6
), arg2
);
12411 return -host_to_target_errno(ret
);
12414 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12416 #ifdef TARGET_NR_fadvise64_64
12417 case TARGET_NR_fadvise64_64
:
12418 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12419 /* 6 args: fd, advice, offset (high, low), len (high, low) */
12427 /* 6 args: fd, offset (high, low), len (high, low), advice */
12428 if (regpairs_aligned(cpu_env
, num
)) {
12429 /* offset is in (3,4), len in (5,6) and advice in 7 */
12437 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
12438 target_offset64(arg4
, arg5
), arg6
);
12439 return -host_to_target_errno(ret
);
12442 #ifdef TARGET_NR_fadvise64
12443 case TARGET_NR_fadvise64
:
12444 /* 5 args: fd, offset (high, low), len, advice */
12445 if (regpairs_aligned(cpu_env
, num
)) {
12446 /* offset is in (3,4), len in 5 and advice in 6 */
12452 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
12453 return -host_to_target_errno(ret
);
12456 #else /* not a 32-bit ABI */
12457 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12458 #ifdef TARGET_NR_fadvise64_64
12459 case TARGET_NR_fadvise64_64
:
12461 #ifdef TARGET_NR_fadvise64
12462 case TARGET_NR_fadvise64
:
12464 #ifdef TARGET_S390X
12466 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
12467 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
12468 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
12469 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
12473 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
12475 #endif /* end of 64-bit ABI fadvise handling */
12477 #ifdef TARGET_NR_madvise
12478 case TARGET_NR_madvise
:
12479 return target_madvise(arg1
, arg2
, arg3
);
12481 #ifdef TARGET_NR_fcntl64
12482 case TARGET_NR_fcntl64
:
12486 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
12487 to_flock64_fn
*copyto
= copy_to_user_flock64
;
12490 if (!cpu_env
->eabi
) {
12491 copyfrom
= copy_from_user_oabi_flock64
;
12492 copyto
= copy_to_user_oabi_flock64
;
12496 cmd
= target_to_host_fcntl_cmd(arg2
);
12497 if (cmd
== -TARGET_EINVAL
) {
12502 case TARGET_F_GETLK64
:
12503 ret
= copyfrom(&fl
, arg3
);
12507 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
12509 ret
= copyto(arg3
, &fl
);
12513 case TARGET_F_SETLK64
:
12514 case TARGET_F_SETLKW64
:
12515 ret
= copyfrom(&fl
, arg3
);
12519 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
12522 ret
= do_fcntl(arg1
, arg2
, arg3
);
12528 #ifdef TARGET_NR_cacheflush
12529 case TARGET_NR_cacheflush
:
12530 /* self-modifying code is handled automatically, so nothing needed */
12533 #ifdef TARGET_NR_getpagesize
12534 case TARGET_NR_getpagesize
:
12535 return TARGET_PAGE_SIZE
;
12537 case TARGET_NR_gettid
:
12538 return get_errno(sys_gettid());
12539 #ifdef TARGET_NR_readahead
12540 case TARGET_NR_readahead
:
12541 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12542 if (regpairs_aligned(cpu_env
, num
)) {
12547 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
12549 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
12554 #ifdef TARGET_NR_setxattr
12555 case TARGET_NR_listxattr
:
12556 case TARGET_NR_llistxattr
:
12560 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12562 return -TARGET_EFAULT
;
12565 p
= lock_user_string(arg1
);
12567 if (num
== TARGET_NR_listxattr
) {
12568 ret
= get_errno(listxattr(p
, b
, arg3
));
12570 ret
= get_errno(llistxattr(p
, b
, arg3
));
12573 ret
= -TARGET_EFAULT
;
12575 unlock_user(p
, arg1
, 0);
12576 unlock_user(b
, arg2
, arg3
);
12579 case TARGET_NR_flistxattr
:
12583 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
12585 return -TARGET_EFAULT
;
12588 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
12589 unlock_user(b
, arg2
, arg3
);
12592 case TARGET_NR_setxattr
:
12593 case TARGET_NR_lsetxattr
:
12595 void *p
, *n
, *v
= 0;
12597 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12599 return -TARGET_EFAULT
;
12602 p
= lock_user_string(arg1
);
12603 n
= lock_user_string(arg2
);
12605 if (num
== TARGET_NR_setxattr
) {
12606 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
12608 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
12611 ret
= -TARGET_EFAULT
;
12613 unlock_user(p
, arg1
, 0);
12614 unlock_user(n
, arg2
, 0);
12615 unlock_user(v
, arg3
, 0);
12618 case TARGET_NR_fsetxattr
:
12622 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12624 return -TARGET_EFAULT
;
12627 n
= lock_user_string(arg2
);
12629 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12631 ret
= -TARGET_EFAULT
;
12633 unlock_user(n
, arg2
, 0);
12634 unlock_user(v
, arg3
, 0);
12637 case TARGET_NR_getxattr
:
12638 case TARGET_NR_lgetxattr
:
12640 void *p
, *n
, *v
= 0;
12642 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12644 return -TARGET_EFAULT
;
12647 p
= lock_user_string(arg1
);
12648 n
= lock_user_string(arg2
);
12650 if (num
== TARGET_NR_getxattr
) {
12651 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12653 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12656 ret
= -TARGET_EFAULT
;
12658 unlock_user(p
, arg1
, 0);
12659 unlock_user(n
, arg2
, 0);
12660 unlock_user(v
, arg3
, arg4
);
12663 case TARGET_NR_fgetxattr
:
12667 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12669 return -TARGET_EFAULT
;
12672 n
= lock_user_string(arg2
);
12674 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12676 ret
= -TARGET_EFAULT
;
12678 unlock_user(n
, arg2
, 0);
12679 unlock_user(v
, arg3
, arg4
);
12682 case TARGET_NR_removexattr
:
12683 case TARGET_NR_lremovexattr
:
12686 p
= lock_user_string(arg1
);
12687 n
= lock_user_string(arg2
);
12689 if (num
== TARGET_NR_removexattr
) {
12690 ret
= get_errno(removexattr(p
, n
));
12692 ret
= get_errno(lremovexattr(p
, n
));
12695 ret
= -TARGET_EFAULT
;
12697 unlock_user(p
, arg1
, 0);
12698 unlock_user(n
, arg2
, 0);
12701 case TARGET_NR_fremovexattr
:
12704 n
= lock_user_string(arg2
);
12706 ret
= get_errno(fremovexattr(arg1
, n
));
12708 ret
= -TARGET_EFAULT
;
12710 unlock_user(n
, arg2
, 0);
12714 #endif /* CONFIG_ATTR */
12715 #ifdef TARGET_NR_set_thread_area
12716 case TARGET_NR_set_thread_area
:
12717 #if defined(TARGET_MIPS)
12718 cpu_env
->active_tc
.CP0_UserLocal
= arg1
;
12720 #elif defined(TARGET_CRIS)
12722 ret
= -TARGET_EINVAL
;
12724 cpu_env
->pregs
[PR_PID
] = arg1
;
12728 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12729 return do_set_thread_area(cpu_env
, arg1
);
12730 #elif defined(TARGET_M68K)
12732 TaskState
*ts
= cpu
->opaque
;
12733 ts
->tp_value
= arg1
;
12737 return -TARGET_ENOSYS
;
12740 #ifdef TARGET_NR_get_thread_area
12741 case TARGET_NR_get_thread_area
:
12742 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12743 return do_get_thread_area(cpu_env
, arg1
);
12744 #elif defined(TARGET_M68K)
12746 TaskState
*ts
= cpu
->opaque
;
12747 return ts
->tp_value
;
12750 return -TARGET_ENOSYS
;
12753 #ifdef TARGET_NR_getdomainname
12754 case TARGET_NR_getdomainname
:
12755 return -TARGET_ENOSYS
;
12758 #ifdef TARGET_NR_clock_settime
12759 case TARGET_NR_clock_settime
:
12761 struct timespec ts
;
12763 ret
= target_to_host_timespec(&ts
, arg2
);
12764 if (!is_error(ret
)) {
12765 ret
= get_errno(clock_settime(arg1
, &ts
));
12770 #ifdef TARGET_NR_clock_settime64
12771 case TARGET_NR_clock_settime64
:
12773 struct timespec ts
;
12775 ret
= target_to_host_timespec64(&ts
, arg2
);
12776 if (!is_error(ret
)) {
12777 ret
= get_errno(clock_settime(arg1
, &ts
));
12782 #ifdef TARGET_NR_clock_gettime
12783 case TARGET_NR_clock_gettime
:
12785 struct timespec ts
;
12786 ret
= get_errno(clock_gettime(arg1
, &ts
));
12787 if (!is_error(ret
)) {
12788 ret
= host_to_target_timespec(arg2
, &ts
);
12793 #ifdef TARGET_NR_clock_gettime64
12794 case TARGET_NR_clock_gettime64
:
12796 struct timespec ts
;
12797 ret
= get_errno(clock_gettime(arg1
, &ts
));
12798 if (!is_error(ret
)) {
12799 ret
= host_to_target_timespec64(arg2
, &ts
);
12804 #ifdef TARGET_NR_clock_getres
12805 case TARGET_NR_clock_getres
:
12807 struct timespec ts
;
12808 ret
= get_errno(clock_getres(arg1
, &ts
));
12809 if (!is_error(ret
)) {
12810 host_to_target_timespec(arg2
, &ts
);
12815 #ifdef TARGET_NR_clock_getres_time64
12816 case TARGET_NR_clock_getres_time64
:
12818 struct timespec ts
;
12819 ret
= get_errno(clock_getres(arg1
, &ts
));
12820 if (!is_error(ret
)) {
12821 host_to_target_timespec64(arg2
, &ts
);
12826 #ifdef TARGET_NR_clock_nanosleep
12827 case TARGET_NR_clock_nanosleep
:
12829 struct timespec ts
;
12830 if (target_to_host_timespec(&ts
, arg3
)) {
12831 return -TARGET_EFAULT
;
12833 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12834 &ts
, arg4
? &ts
: NULL
));
12836 * if the call is interrupted by a signal handler, it fails
12837 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12838 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12840 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12841 host_to_target_timespec(arg4
, &ts
)) {
12842 return -TARGET_EFAULT
;
12848 #ifdef TARGET_NR_clock_nanosleep_time64
12849 case TARGET_NR_clock_nanosleep_time64
:
12851 struct timespec ts
;
12853 if (target_to_host_timespec64(&ts
, arg3
)) {
12854 return -TARGET_EFAULT
;
12857 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12858 &ts
, arg4
? &ts
: NULL
));
12860 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12861 host_to_target_timespec64(arg4
, &ts
)) {
12862 return -TARGET_EFAULT
;
12868 #if defined(TARGET_NR_set_tid_address)
12869 case TARGET_NR_set_tid_address
:
12871 TaskState
*ts
= cpu
->opaque
;
12872 ts
->child_tidptr
= arg1
;
12873 /* do not call host set_tid_address() syscall, instead return tid() */
12874 return get_errno(sys_gettid());
12878 case TARGET_NR_tkill
:
12879 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12881 case TARGET_NR_tgkill
:
12882 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12883 target_to_host_signal(arg3
)));
12885 #ifdef TARGET_NR_set_robust_list
12886 case TARGET_NR_set_robust_list
:
12887 case TARGET_NR_get_robust_list
:
12888 /* The ABI for supporting robust futexes has userspace pass
12889 * the kernel a pointer to a linked list which is updated by
12890 * userspace after the syscall; the list is walked by the kernel
12891 * when the thread exits. Since the linked list in QEMU guest
12892 * memory isn't a valid linked list for the host and we have
12893 * no way to reliably intercept the thread-death event, we can't
12894 * support these. Silently return ENOSYS so that guest userspace
12895 * falls back to a non-robust futex implementation (which should
12896 * be OK except in the corner case of the guest crashing while
12897 * holding a mutex that is shared with another process via
12900 return -TARGET_ENOSYS
;
12903 #if defined(TARGET_NR_utimensat)
12904 case TARGET_NR_utimensat
:
12906 struct timespec
*tsp
, ts
[2];
12910 if (target_to_host_timespec(ts
, arg3
)) {
12911 return -TARGET_EFAULT
;
12913 if (target_to_host_timespec(ts
+ 1, arg3
+
12914 sizeof(struct target_timespec
))) {
12915 return -TARGET_EFAULT
;
12920 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12922 if (!(p
= lock_user_string(arg2
))) {
12923 return -TARGET_EFAULT
;
12925 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12926 unlock_user(p
, arg2
, 0);
12931 #ifdef TARGET_NR_utimensat_time64
12932 case TARGET_NR_utimensat_time64
:
12934 struct timespec
*tsp
, ts
[2];
12938 if (target_to_host_timespec64(ts
, arg3
)) {
12939 return -TARGET_EFAULT
;
12941 if (target_to_host_timespec64(ts
+ 1, arg3
+
12942 sizeof(struct target__kernel_timespec
))) {
12943 return -TARGET_EFAULT
;
12948 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12950 p
= lock_user_string(arg2
);
12952 return -TARGET_EFAULT
;
12954 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12955 unlock_user(p
, arg2
, 0);
12960 #ifdef TARGET_NR_futex
12961 case TARGET_NR_futex
:
12962 return do_futex(cpu
, false, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12964 #ifdef TARGET_NR_futex_time64
12965 case TARGET_NR_futex_time64
:
12966 return do_futex(cpu
, true, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12968 #ifdef CONFIG_INOTIFY
12969 #if defined(TARGET_NR_inotify_init)
12970 case TARGET_NR_inotify_init
:
12971 ret
= get_errno(inotify_init());
12973 fd_trans_register(ret
, &target_inotify_trans
);
12977 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12978 case TARGET_NR_inotify_init1
:
12979 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12980 fcntl_flags_tbl
)));
12982 fd_trans_register(ret
, &target_inotify_trans
);
12986 #if defined(TARGET_NR_inotify_add_watch)
12987 case TARGET_NR_inotify_add_watch
:
12988 p
= lock_user_string(arg2
);
12989 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12990 unlock_user(p
, arg2
, 0);
12993 #if defined(TARGET_NR_inotify_rm_watch)
12994 case TARGET_NR_inotify_rm_watch
:
12995 return get_errno(inotify_rm_watch(arg1
, arg2
));
12999 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13000 case TARGET_NR_mq_open
:
13002 struct mq_attr posix_mq_attr
;
13003 struct mq_attr
*pposix_mq_attr
;
13006 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
13007 pposix_mq_attr
= NULL
;
13009 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
13010 return -TARGET_EFAULT
;
13012 pposix_mq_attr
= &posix_mq_attr
;
13014 p
= lock_user_string(arg1
- 1);
13016 return -TARGET_EFAULT
;
13018 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
13019 unlock_user (p
, arg1
, 0);
13023 case TARGET_NR_mq_unlink
:
13024 p
= lock_user_string(arg1
- 1);
13026 return -TARGET_EFAULT
;
13028 ret
= get_errno(mq_unlink(p
));
13029 unlock_user (p
, arg1
, 0);
13032 #ifdef TARGET_NR_mq_timedsend
13033 case TARGET_NR_mq_timedsend
:
13035 struct timespec ts
;
13037 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
13039 if (target_to_host_timespec(&ts
, arg5
)) {
13040 return -TARGET_EFAULT
;
13042 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
13043 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
13044 return -TARGET_EFAULT
;
13047 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
13049 unlock_user (p
, arg2
, arg3
);
13053 #ifdef TARGET_NR_mq_timedsend_time64
13054 case TARGET_NR_mq_timedsend_time64
:
13056 struct timespec ts
;
13058 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
13060 if (target_to_host_timespec64(&ts
, arg5
)) {
13061 return -TARGET_EFAULT
;
13063 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
13064 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
13065 return -TARGET_EFAULT
;
13068 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
13070 unlock_user(p
, arg2
, arg3
);
13075 #ifdef TARGET_NR_mq_timedreceive
13076 case TARGET_NR_mq_timedreceive
:
13078 struct timespec ts
;
13081 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
13083 if (target_to_host_timespec(&ts
, arg5
)) {
13084 return -TARGET_EFAULT
;
13086 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13088 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
13089 return -TARGET_EFAULT
;
13092 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13095 unlock_user (p
, arg2
, arg3
);
13097 put_user_u32(prio
, arg4
);
13101 #ifdef TARGET_NR_mq_timedreceive_time64
13102 case TARGET_NR_mq_timedreceive_time64
:
13104 struct timespec ts
;
13107 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
13109 if (target_to_host_timespec64(&ts
, arg5
)) {
13110 return -TARGET_EFAULT
;
13112 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13114 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
13115 return -TARGET_EFAULT
;
13118 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13121 unlock_user(p
, arg2
, arg3
);
13123 put_user_u32(prio
, arg4
);
13129 /* Not implemented for now... */
13130 /* case TARGET_NR_mq_notify: */
13133 case TARGET_NR_mq_getsetattr
:
13135 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
13138 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
13139 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
13140 &posix_mq_attr_out
));
13141 } else if (arg3
!= 0) {
13142 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
13144 if (ret
== 0 && arg3
!= 0) {
13145 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
13151 #ifdef CONFIG_SPLICE
13152 #ifdef TARGET_NR_tee
13153 case TARGET_NR_tee
:
13155 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
13159 #ifdef TARGET_NR_splice
13160 case TARGET_NR_splice
:
13162 loff_t loff_in
, loff_out
;
13163 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
13165 if (get_user_u64(loff_in
, arg2
)) {
13166 return -TARGET_EFAULT
;
13168 ploff_in
= &loff_in
;
13171 if (get_user_u64(loff_out
, arg4
)) {
13172 return -TARGET_EFAULT
;
13174 ploff_out
= &loff_out
;
13176 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
13178 if (put_user_u64(loff_in
, arg2
)) {
13179 return -TARGET_EFAULT
;
13183 if (put_user_u64(loff_out
, arg4
)) {
13184 return -TARGET_EFAULT
;
13190 #ifdef TARGET_NR_vmsplice
13191 case TARGET_NR_vmsplice
:
13193 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
13195 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
13196 unlock_iovec(vec
, arg2
, arg3
, 0);
13198 ret
= -host_to_target_errno(errno
);
13203 #endif /* CONFIG_SPLICE */
13204 #ifdef CONFIG_EVENTFD
13205 #if defined(TARGET_NR_eventfd)
13206 case TARGET_NR_eventfd
:
13207 ret
= get_errno(eventfd(arg1
, 0));
13209 fd_trans_register(ret
, &target_eventfd_trans
);
13213 #if defined(TARGET_NR_eventfd2)
13214 case TARGET_NR_eventfd2
:
13216 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
13217 if (arg2
& TARGET_O_NONBLOCK
) {
13218 host_flags
|= O_NONBLOCK
;
13220 if (arg2
& TARGET_O_CLOEXEC
) {
13221 host_flags
|= O_CLOEXEC
;
13223 ret
= get_errno(eventfd(arg1
, host_flags
));
13225 fd_trans_register(ret
, &target_eventfd_trans
);
13230 #endif /* CONFIG_EVENTFD */
13231 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13232 case TARGET_NR_fallocate
:
13233 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13234 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
13235 target_offset64(arg5
, arg6
)));
13237 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
13241 #if defined(CONFIG_SYNC_FILE_RANGE)
13242 #if defined(TARGET_NR_sync_file_range)
13243 case TARGET_NR_sync_file_range
:
13244 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13245 #if defined(TARGET_MIPS)
13246 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
13247 target_offset64(arg5
, arg6
), arg7
));
13249 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
13250 target_offset64(arg4
, arg5
), arg6
));
13251 #endif /* !TARGET_MIPS */
13253 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
13257 #if defined(TARGET_NR_sync_file_range2) || \
13258 defined(TARGET_NR_arm_sync_file_range)
13259 #if defined(TARGET_NR_sync_file_range2)
13260 case TARGET_NR_sync_file_range2
:
13262 #if defined(TARGET_NR_arm_sync_file_range)
13263 case TARGET_NR_arm_sync_file_range
:
13265 /* This is like sync_file_range but the arguments are reordered */
13266 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13267 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
13268 target_offset64(arg5
, arg6
), arg2
));
13270 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
13275 #if defined(TARGET_NR_signalfd4)
13276 case TARGET_NR_signalfd4
:
13277 return do_signalfd4(arg1
, arg2
, arg4
);
13279 #if defined(TARGET_NR_signalfd)
13280 case TARGET_NR_signalfd
:
13281 return do_signalfd4(arg1
, arg2
, 0);
13283 #if defined(CONFIG_EPOLL)
13284 #if defined(TARGET_NR_epoll_create)
13285 case TARGET_NR_epoll_create
:
13286 return get_errno(epoll_create(arg1
));
13288 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13289 case TARGET_NR_epoll_create1
:
13290 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
13292 #if defined(TARGET_NR_epoll_ctl)
13293 case TARGET_NR_epoll_ctl
:
13295 struct epoll_event ep
;
13296 struct epoll_event
*epp
= 0;
13298 if (arg2
!= EPOLL_CTL_DEL
) {
13299 struct target_epoll_event
*target_ep
;
13300 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
13301 return -TARGET_EFAULT
;
13303 ep
.events
= tswap32(target_ep
->events
);
13305 * The epoll_data_t union is just opaque data to the kernel,
13306 * so we transfer all 64 bits across and need not worry what
13307 * actual data type it is.
13309 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
13310 unlock_user_struct(target_ep
, arg4
, 0);
13313 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13314 * non-null pointer, even though this argument is ignored.
13319 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
13323 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13324 #if defined(TARGET_NR_epoll_wait)
13325 case TARGET_NR_epoll_wait
:
13327 #if defined(TARGET_NR_epoll_pwait)
13328 case TARGET_NR_epoll_pwait
:
13331 struct target_epoll_event
*target_ep
;
13332 struct epoll_event
*ep
;
13334 int maxevents
= arg3
;
13335 int timeout
= arg4
;
13337 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
13338 return -TARGET_EINVAL
;
13341 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
13342 maxevents
* sizeof(struct target_epoll_event
), 1);
13344 return -TARGET_EFAULT
;
13347 ep
= g_try_new(struct epoll_event
, maxevents
);
13349 unlock_user(target_ep
, arg2
, 0);
13350 return -TARGET_ENOMEM
;
13354 #if defined(TARGET_NR_epoll_pwait)
13355 case TARGET_NR_epoll_pwait
:
13357 sigset_t
*set
= NULL
;
13360 ret
= process_sigsuspend_mask(&set
, arg5
, arg6
);
13366 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
13367 set
, SIGSET_T_SIZE
));
13370 finish_sigsuspend_mask(ret
);
13375 #if defined(TARGET_NR_epoll_wait)
13376 case TARGET_NR_epoll_wait
:
13377 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
13382 ret
= -TARGET_ENOSYS
;
13384 if (!is_error(ret
)) {
13386 for (i
= 0; i
< ret
; i
++) {
13387 target_ep
[i
].events
= tswap32(ep
[i
].events
);
13388 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
13390 unlock_user(target_ep
, arg2
,
13391 ret
* sizeof(struct target_epoll_event
));
13393 unlock_user(target_ep
, arg2
, 0);
13400 #ifdef TARGET_NR_prlimit64
13401 case TARGET_NR_prlimit64
:
13403 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13404 struct target_rlimit64
*target_rnew
, *target_rold
;
13405 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
13406 int resource
= target_to_host_resource(arg2
);
13408 if (arg3
&& (resource
!= RLIMIT_AS
&&
13409 resource
!= RLIMIT_DATA
&&
13410 resource
!= RLIMIT_STACK
)) {
13411 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
13412 return -TARGET_EFAULT
;
13414 __get_user(rnew
.rlim_cur
, &target_rnew
->rlim_cur
);
13415 __get_user(rnew
.rlim_max
, &target_rnew
->rlim_max
);
13416 unlock_user_struct(target_rnew
, arg3
, 0);
13420 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
13421 if (!is_error(ret
) && arg4
) {
13422 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
13423 return -TARGET_EFAULT
;
13425 __put_user(rold
.rlim_cur
, &target_rold
->rlim_cur
);
13426 __put_user(rold
.rlim_max
, &target_rold
->rlim_max
);
13427 unlock_user_struct(target_rold
, arg4
, 1);
13432 #ifdef TARGET_NR_gethostname
13433 case TARGET_NR_gethostname
:
13435 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
13437 ret
= get_errno(gethostname(name
, arg2
));
13438 unlock_user(name
, arg1
, arg2
);
13440 ret
= -TARGET_EFAULT
;
13445 #ifdef TARGET_NR_atomic_cmpxchg_32
13446 case TARGET_NR_atomic_cmpxchg_32
:
13448 /* should use start_exclusive from main.c */
13449 abi_ulong mem_value
;
13450 if (get_user_u32(mem_value
, arg6
)) {
13451 target_siginfo_t info
;
13452 info
.si_signo
= SIGSEGV
;
13454 info
.si_code
= TARGET_SEGV_MAPERR
;
13455 info
._sifields
._sigfault
._addr
= arg6
;
13456 queue_signal(cpu_env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
13460 if (mem_value
== arg2
)
13461 put_user_u32(arg1
, arg6
);
13465 #ifdef TARGET_NR_atomic_barrier
13466 case TARGET_NR_atomic_barrier
:
13467 /* Like the kernel implementation and the
13468 qemu arm barrier, no-op this? */
13472 #ifdef TARGET_NR_timer_create
13473 case TARGET_NR_timer_create
:
13475 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13477 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
13480 int timer_index
= next_free_host_timer();
13482 if (timer_index
< 0) {
13483 ret
= -TARGET_EAGAIN
;
13485 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
13488 phost_sevp
= &host_sevp
;
13489 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
13491 free_host_timer_slot(timer_index
);
13496 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
13498 free_host_timer_slot(timer_index
);
13500 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
13501 timer_delete(*phtimer
);
13502 free_host_timer_slot(timer_index
);
13503 return -TARGET_EFAULT
;
13511 #ifdef TARGET_NR_timer_settime
13512 case TARGET_NR_timer_settime
:
13514 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13515 * struct itimerspec * old_value */
13516 target_timer_t timerid
= get_timer_id(arg1
);
13520 } else if (arg3
== 0) {
13521 ret
= -TARGET_EINVAL
;
13523 timer_t htimer
= g_posix_timers
[timerid
];
13524 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
13526 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
13527 return -TARGET_EFAULT
;
13530 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13531 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
13532 return -TARGET_EFAULT
;
13539 #ifdef TARGET_NR_timer_settime64
13540 case TARGET_NR_timer_settime64
:
13542 target_timer_t timerid
= get_timer_id(arg1
);
13546 } else if (arg3
== 0) {
13547 ret
= -TARGET_EINVAL
;
13549 timer_t htimer
= g_posix_timers
[timerid
];
13550 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
13552 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
13553 return -TARGET_EFAULT
;
13556 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
13557 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
13558 return -TARGET_EFAULT
;
13565 #ifdef TARGET_NR_timer_gettime
13566 case TARGET_NR_timer_gettime
:
13568 /* args: timer_t timerid, struct itimerspec *curr_value */
13569 target_timer_t timerid
= get_timer_id(arg1
);
13573 } else if (!arg2
) {
13574 ret
= -TARGET_EFAULT
;
13576 timer_t htimer
= g_posix_timers
[timerid
];
13577 struct itimerspec hspec
;
13578 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13580 if (host_to_target_itimerspec(arg2
, &hspec
)) {
13581 ret
= -TARGET_EFAULT
;
13588 #ifdef TARGET_NR_timer_gettime64
13589 case TARGET_NR_timer_gettime64
:
13591 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13592 target_timer_t timerid
= get_timer_id(arg1
);
13596 } else if (!arg2
) {
13597 ret
= -TARGET_EFAULT
;
13599 timer_t htimer
= g_posix_timers
[timerid
];
13600 struct itimerspec hspec
;
13601 ret
= get_errno(timer_gettime(htimer
, &hspec
));
13603 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
13604 ret
= -TARGET_EFAULT
;
13611 #ifdef TARGET_NR_timer_getoverrun
13612 case TARGET_NR_timer_getoverrun
:
13614 /* args: timer_t timerid */
13615 target_timer_t timerid
= get_timer_id(arg1
);
13620 timer_t htimer
= g_posix_timers
[timerid
];
13621 ret
= get_errno(timer_getoverrun(htimer
));
13627 #ifdef TARGET_NR_timer_delete
13628 case TARGET_NR_timer_delete
:
13630 /* args: timer_t timerid */
13631 target_timer_t timerid
= get_timer_id(arg1
);
13636 timer_t htimer
= g_posix_timers
[timerid
];
13637 ret
= get_errno(timer_delete(htimer
));
13638 free_host_timer_slot(timerid
);
13644 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13645 case TARGET_NR_timerfd_create
:
13646 ret
= get_errno(timerfd_create(arg1
,
13647 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13649 fd_trans_register(ret
, &target_timerfd_trans
);
13654 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13655 case TARGET_NR_timerfd_gettime
:
13657 struct itimerspec its_curr
;
13659 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13661 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13662 return -TARGET_EFAULT
;
13668 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13669 case TARGET_NR_timerfd_gettime64
:
13671 struct itimerspec its_curr
;
13673 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13675 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13676 return -TARGET_EFAULT
;
13682 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13683 case TARGET_NR_timerfd_settime
:
13685 struct itimerspec its_new
, its_old
, *p_new
;
13688 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13689 return -TARGET_EFAULT
;
13696 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13698 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13699 return -TARGET_EFAULT
;
13705 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13706 case TARGET_NR_timerfd_settime64
:
13708 struct itimerspec its_new
, its_old
, *p_new
;
13711 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13712 return -TARGET_EFAULT
;
13719 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13721 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13722 return -TARGET_EFAULT
;
13728 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13729 case TARGET_NR_ioprio_get
:
13730 return get_errno(ioprio_get(arg1
, arg2
));
13733 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13734 case TARGET_NR_ioprio_set
:
13735 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13738 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13739 case TARGET_NR_setns
:
13740 return get_errno(setns(arg1
, arg2
));
13742 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13743 case TARGET_NR_unshare
:
13744 return get_errno(unshare(arg1
));
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
case TARGET_NR_kcmp:
    /* (pid1, pid2, type, idx1, idx2) — all scalar, no translation. */
    return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
case TARGET_NR_swapcontext:
    /* PowerPC specific. */
    return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
case TARGET_NR_memfd_create:
    /* arg1 is the name string in guest memory; arg2 the MFD_* flags. */
    p = lock_user_string(arg1);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(memfd_create(p, arg2));
    /* A memfd carries no special translation state; drop any stale entry. */
    fd_trans_unregister(ret);
    unlock_user(p, arg1, 0);
    return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
case TARGET_NR_membarrier:
    /* cmd and flags are scalar; the host barrier covers guest threads. */
    return get_errno(membarrier(arg1, arg2));
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
case TARGET_NR_copy_file_range:
    {
        loff_t inoff, outoff;
        loff_t *pinoff = NULL, *poutoff = NULL;

        /*
         * Each offset pointer is optional: read it from guest memory
         * only when non-NULL, and remember which ones were supplied so
         * the updated values can be written back afterwards.
         */
        if (arg2) {
            if (get_user_u64(inoff, arg2)) {
                return -TARGET_EFAULT;
            }
            pinoff = &inoff;
        }
        if (arg4) {
            if (get_user_u64(outoff, arg4)) {
                return -TARGET_EFAULT;
            }
            poutoff = &outoff;
        }
        /* Do not sign-extend the count parameter. */
        ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                             (abi_ulong)arg5, arg6));
        /* On success, propagate the advanced offsets back to the guest. */
        if (!is_error(ret) && ret > 0) {
            if (pinoff) {
                if (put_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (poutoff) {
                if (put_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    }
    return ret;
#endif
#if defined(TARGET_NR_pivot_root)
case TARGET_NR_pivot_root:
    {
        void *p2;

        p = lock_user_string(arg1); /* new_root */
        p2 = lock_user_string(arg2); /* put_old */
        if (!p || !p2) {
            ret = -TARGET_EFAULT;
        } else {
            ret = get_errno(pivot_root(p, p2));
        }
        /* NOTE(review): assumes unlock_user() tolerates a NULL host
         * pointer on the failure path — matches usage elsewhere. */
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
    }
    return ret;
#endif
#if defined(TARGET_NR_riscv_hwprobe)
case TARGET_NR_riscv_hwprobe:
    /* RISC-V specific; all argument handling is inside the helper. */
    return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
#endif
13831 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13832 return -TARGET_ENOSYS
;
13837 abi_long
do_syscall(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
13838 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13839 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13842 CPUState
*cpu
= env_cpu(cpu_env
);
13845 #ifdef DEBUG_ERESTARTSYS
13846 /* Debug-only code for exercising the syscall-restart code paths
13847 * in the per-architecture cpu main loops: restart every syscall
13848 * the guest makes once before letting it through.
13854 return -QEMU_ERESTARTSYS
;
13859 record_syscall_start(cpu
, num
, arg1
,
13860 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13862 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13863 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13866 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13867 arg5
, arg6
, arg7
, arg8
);
13869 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13870 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13871 arg3
, arg4
, arg5
, arg6
);
13874 record_syscall_return(cpu
, num
, ret
);