4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "user-internals.h"
132 #include "signal-common.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
145 #define CLONE_IO 0x80000000 /* Clone io context */
148 /* We can't directly call the host clone syscall, because this will
149 * badly confuse libc (breaking mutexes, for example). So we must
150 * divide clone flags into:
151 * * flag combinations that look like pthread_create()
152 * * flag combinations that look like fork()
153 * * flags we can implement within QEMU itself
154 * * flags we can't support and will return an error for
156 /* For thread creation, all these flags must be present; for
157 * fork, none must be present.
159 #define CLONE_THREAD_FLAGS \
160 (CLONE_VM | CLONE_FS | CLONE_FILES | \
161 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 /* These flags are ignored:
164 * CLONE_DETACHED is now ignored by the kernel;
165 * CLONE_IO is just an optimisation hint to the I/O scheduler
167 #define CLONE_IGNORED_FLAGS \
168 (CLONE_DETACHED | CLONE_IO)
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS \
172 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
173 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS \
177 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
178 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 #define CLONE_INVALID_FORK_FLAGS \
181 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 #define CLONE_INVALID_THREAD_FLAGS \
184 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
185 CLONE_IGNORED_FLAGS))
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188 * have almost all been allocated. We cannot support any of
189 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191 * The checks against the invalid thread masks above will catch these.
192 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196 * once. This exercises the codepaths for restart.
198 //#define DEBUG_ERESTARTSYS
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
214 #define _syscall0(type,name) \
215 static type name (void) \
217 return syscall(__NR_##name); \
220 #define _syscall1(type,name,type1,arg1) \
221 static type name (type1 arg1) \
223 return syscall(__NR_##name, arg1); \
226 #define _syscall2(type,name,type1,arg1,type2,arg2) \
227 static type name (type1 arg1,type2 arg2) \
229 return syscall(__NR_##name, arg1, arg2); \
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
233 static type name (type1 arg1,type2 arg2,type3 arg3) \
235 return syscall(__NR_##name, arg1, arg2, arg3); \
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
253 type5,arg5,type6,arg6) \
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
257 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
275 #define __NR_sys_statx __NR_statx
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid
)
294 /* For the 64-bit guest on 32-bit host case we must emulate
295 * getdents using getdents64, because otherwise the host
296 * might hand us back more dirent records than we can fit
297 * into the guest buffer after structure format conversion.
298 * Otherwise we emulate getdents with getdents if the host has it.
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
307 #if (defined(TARGET_NR_getdents) && \
308 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
314 loff_t
*, res
, uint
, wh
);
316 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
317 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
319 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group
,int,error_code
)
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address
,int *,tidptr
)
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
328 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
332 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
336 unsigned long *, user_mask_ptr
);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
339 unsigned long *, user_mask_ptr
);
340 /* sched_attr is not defined in glibc */
343 uint32_t sched_policy
;
344 uint64_t sched_flags
;
346 uint32_t sched_priority
;
347 uint64_t sched_runtime
;
348 uint64_t sched_deadline
;
349 uint64_t sched_period
;
350 uint32_t sched_util_min
;
351 uint32_t sched_util_max
;
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr
, pid_t
, pid
, struct sched_attr
*, attr
,
355 unsigned int, size
, unsigned int, flags
);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr
, pid_t
, pid
, struct sched_attr
*, attr
,
358 unsigned int, flags
);
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler
, pid_t
, pid
);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler
, pid_t
, pid
, int, policy
,
363 const struct sched_param
*, param
);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam
, pid_t
, pid
,
366 struct sched_param
*, param
);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam
, pid_t
, pid
,
369 const struct sched_param
*, param
);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
372 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
374 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
375 struct __user_cap_data_struct
*, data
);
376 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
377 struct __user_cap_data_struct
*, data
);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get
, int, which
, int, who
)
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
390 unsigned long, idx1
, unsigned long, idx2
)
394 * It is assumed that struct statx is architecture independent.
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
398 unsigned int, mask
, struct target_statx
*, statxbuf
)
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier
, int, cmd
, int, flags
)
404 static const bitmask_transtbl fcntl_flags_tbl
[] = {
405 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
406 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
407 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
408 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
409 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
410 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
411 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
412 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
413 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
414 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
415 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
416 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
417 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
418 #if defined(O_DIRECT)
419 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
421 #if defined(O_NOATIME)
422 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
424 #if defined(O_CLOEXEC)
425 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
428 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
430 #if defined(O_TMPFILE)
431 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
433 /* Don't terminate the list prematurely on 64-bit host+guest. */
434 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
435 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
440 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
442 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
443 #if defined(__NR_utimensat)
444 #define __NR_sys_utimensat __NR_utimensat
445 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
446 const struct timespec
*,tsp
,int,flags
)
448 static int sys_utimensat(int dirfd
, const char *pathname
,
449 const struct timespec times
[2], int flags
)
455 #endif /* TARGET_NR_utimensat */
457 #ifdef TARGET_NR_renameat2
458 #if defined(__NR_renameat2)
459 #define __NR_sys_renameat2 __NR_renameat2
460 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
461 const char *, new, unsigned int, flags
)
463 static int sys_renameat2(int oldfd
, const char *old
,
464 int newfd
, const char *new, int flags
)
467 return renameat(oldfd
, old
, newfd
, new);
473 #endif /* TARGET_NR_renameat2 */
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY */
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be that used by the underlying syscall */
491 struct host_rlimit64
{
495 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
496 const struct host_rlimit64
*, new_limit
,
497 struct host_rlimit64
*, old_limit
)
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers
[32] = { 0, } ;
505 static inline int next_free_host_timer(void)
508 /* FIXME: Does finding the next free slot require a lock? */
509 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
510 if (g_posix_timers
[k
] == 0) {
511 g_posix_timers
[k
] = (timer_t
) 1;
519 static inline int host_to_target_errno(int host_errno
)
521 switch (host_errno
) {
522 #define E(X) case X: return TARGET_##X;
523 #include "errnos.c.inc"
530 static inline int target_to_host_errno(int target_errno
)
532 switch (target_errno
) {
533 #define E(X) case TARGET_##X: return X;
534 #include "errnos.c.inc"
541 static inline abi_long
get_errno(abi_long ret
)
544 return -host_to_target_errno(errno
);
549 const char *target_strerror(int err
)
551 if (err
== QEMU_ERESTARTSYS
) {
552 return "To be restarted";
554 if (err
== QEMU_ESIGRETURN
) {
555 return "Successful exit from sigreturn";
558 return strerror(target_to_host_errno(err
));
561 static int check_zeroed_user(abi_long addr
, size_t ksize
, size_t usize
)
565 if (usize
<= ksize
) {
568 for (i
= ksize
; i
< usize
; i
++) {
569 if (get_user_u8(b
, addr
+ i
)) {
570 return -TARGET_EFAULT
;
579 #define safe_syscall0(type, name) \
580 static type safe_##name(void) \
582 return safe_syscall(__NR_##name); \
585 #define safe_syscall1(type, name, type1, arg1) \
586 static type safe_##name(type1 arg1) \
588 return safe_syscall(__NR_##name, arg1); \
591 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
592 static type safe_##name(type1 arg1, type2 arg2) \
594 return safe_syscall(__NR_##name, arg1, arg2); \
597 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
600 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
603 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
605 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
607 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
610 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
611 type4, arg4, type5, arg5) \
612 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
615 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
618 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
619 type4, arg4, type5, arg5, type6, arg6) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
621 type5 arg5, type6 arg6) \
623 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
626 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
627 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
628 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
629 int, flags
, mode_t
, mode
)
630 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
631 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
632 struct rusage
*, rusage
)
634 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
635 int, options
, struct rusage
*, rusage
)
636 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
637 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
638 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
639 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
640 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
642 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
643 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
644 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
647 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
648 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
650 #if defined(__NR_futex)
651 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
652 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
654 #if defined(__NR_futex_time64)
655 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
656 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
658 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
659 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
660 safe_syscall2(int, tkill
, int, tid
, int, sig
)
661 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
662 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
663 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
664 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
665 unsigned long, pos_l
, unsigned long, pos_h
)
666 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
667 unsigned long, pos_l
, unsigned long, pos_h
)
668 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
670 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
671 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
672 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
673 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
674 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
675 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
676 safe_syscall2(int, flock
, int, fd
, int, operation
)
677 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
678 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
679 const struct timespec
*, uts
, size_t, sigsetsize
)
681 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
683 #if defined(TARGET_NR_nanosleep)
684 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
685 struct timespec
*, rem
)
687 #if defined(TARGET_NR_clock_nanosleep) || \
688 defined(TARGET_NR_clock_nanosleep_time64)
689 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
690 const struct timespec
*, req
, struct timespec
*, rem
)
694 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
697 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
698 void *, ptr
, long, fifth
)
702 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
706 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
707 long, msgtype
, int, flags
)
709 #ifdef __NR_semtimedop
710 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
711 unsigned, nsops
, const struct timespec
*, timeout
)
713 #if defined(TARGET_NR_mq_timedsend) || \
714 defined(TARGET_NR_mq_timedsend_time64)
715 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
716 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
718 #if defined(TARGET_NR_mq_timedreceive) || \
719 defined(TARGET_NR_mq_timedreceive_time64)
720 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
721 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
723 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
724 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
725 int, outfd
, loff_t
*, poutoff
, size_t, length
,
729 /* We do ioctl like this rather than via safe_syscall3 to preserve the
730 * "third argument might be integer or pointer or not present" behaviour of
733 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
734 /* Similarly for fcntl. Note that callers must always:
735 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
736 * use the flock64 struct rather than unsuffixed flock
737 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
740 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
742 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
745 static inline int host_to_target_sock_type(int host_type
)
749 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
751 target_type
= TARGET_SOCK_DGRAM
;
754 target_type
= TARGET_SOCK_STREAM
;
757 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
761 #if defined(SOCK_CLOEXEC)
762 if (host_type
& SOCK_CLOEXEC
) {
763 target_type
|= TARGET_SOCK_CLOEXEC
;
767 #if defined(SOCK_NONBLOCK)
768 if (host_type
& SOCK_NONBLOCK
) {
769 target_type
|= TARGET_SOCK_NONBLOCK
;
776 static abi_ulong target_brk
;
777 static abi_ulong target_original_brk
;
778 static abi_ulong brk_page
;
780 void target_set_brk(abi_ulong new_brk
)
782 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
783 brk_page
= HOST_PAGE_ALIGN(target_brk
);
786 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
787 #define DEBUGF_BRK(message, args...)
789 /* do_brk() must return target values and target errnos. */
790 abi_long
do_brk(abi_ulong new_brk
)
792 abi_long mapped_addr
;
793 abi_ulong new_alloc_size
;
795 /* brk pointers are always untagged */
797 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
800 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
803 if (new_brk
< target_original_brk
) {
804 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
809 /* If the new brk is less than the highest page reserved to the
810 * target heap allocation, set it and we're almost done... */
811 if (new_brk
<= brk_page
) {
812 /* Heap contents are initialized to zero, as for anonymous
814 if (new_brk
> target_brk
) {
815 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
817 target_brk
= new_brk
;
818 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
822 /* We need to allocate more memory after the brk... Note that
823 * we don't use MAP_FIXED because that will map over the top of
824 * any existing mapping (like the one with the host libc or qemu
825 * itself); instead we treat "mapped but at wrong address" as
826 * a failure and unmap again.
828 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
829 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
830 PROT_READ
|PROT_WRITE
,
831 MAP_ANON
|MAP_PRIVATE
, 0, 0));
833 if (mapped_addr
== brk_page
) {
834 /* Heap contents are initialized to zero, as for anonymous
835 * mapped pages. Technically the new pages are already
836 * initialized to zero since they *are* anonymous mapped
837 * pages, however we have to take care with the contents that
838 * come from the remaining part of the previous page: it may
839 * contains garbage data due to a previous heap usage (grown
841 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
843 target_brk
= new_brk
;
844 brk_page
= HOST_PAGE_ALIGN(target_brk
);
845 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
848 } else if (mapped_addr
!= -1) {
849 /* Mapped but at wrong address, meaning there wasn't actually
850 * enough space for this brk.
852 target_munmap(mapped_addr
, new_alloc_size
);
854 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
857 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
860 #if defined(TARGET_ALPHA)
861 /* We (partially) emulate OSF/1 on Alpha, which requires we
862 return a proper errno, not an unchanged brk value. */
863 return -TARGET_ENOMEM
;
865 /* For everything else, return the previous break. */
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
872 abi_ulong target_fds_addr
,
876 abi_ulong b
, *target_fds
;
878 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
879 if (!(target_fds
= lock_user(VERIFY_READ
,
881 sizeof(abi_ulong
) * nw
,
883 return -TARGET_EFAULT
;
887 for (i
= 0; i
< nw
; i
++) {
888 /* grab the abi_ulong */
889 __get_user(b
, &target_fds
[i
]);
890 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
891 /* check the bit inside the abi_ulong */
898 unlock_user(target_fds
, target_fds_addr
, 0);
903 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
904 abi_ulong target_fds_addr
,
907 if (target_fds_addr
) {
908 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
909 return -TARGET_EFAULT
;
917 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
923 abi_ulong
*target_fds
;
925 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
926 if (!(target_fds
= lock_user(VERIFY_WRITE
,
928 sizeof(abi_ulong
) * nw
,
930 return -TARGET_EFAULT
;
933 for (i
= 0; i
< nw
; i
++) {
935 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
936 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
939 __put_user(v
, &target_fds
[i
]);
942 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
948 #if defined(__alpha__)
954 static inline abi_long
host_to_target_clock_t(long ticks
)
956 #if HOST_HZ == TARGET_HZ
959 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
963 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
964 const struct rusage
*rusage
)
966 struct target_rusage
*target_rusage
;
968 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
969 return -TARGET_EFAULT
;
970 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
971 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
972 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
973 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
974 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
975 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
976 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
977 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
978 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
979 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
980 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
981 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
982 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
983 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
984 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
985 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
986 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
987 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
988 unlock_user_struct(target_rusage
, target_addr
, 1);
993 #ifdef TARGET_NR_setrlimit
994 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
996 abi_ulong target_rlim_swap
;
999 target_rlim_swap
= tswapal(target_rlim
);
1000 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1001 return RLIM_INFINITY
;
1003 result
= target_rlim_swap
;
1004 if (target_rlim_swap
!= (rlim_t
)result
)
1005 return RLIM_INFINITY
;
1011 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1012 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1014 abi_ulong target_rlim_swap
;
1017 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1018 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1020 target_rlim_swap
= rlim
;
1021 result
= tswapal(target_rlim_swap
);
1027 static inline int target_to_host_resource(int code
)
1030 case TARGET_RLIMIT_AS
:
1032 case TARGET_RLIMIT_CORE
:
1034 case TARGET_RLIMIT_CPU
:
1036 case TARGET_RLIMIT_DATA
:
1038 case TARGET_RLIMIT_FSIZE
:
1039 return RLIMIT_FSIZE
;
1040 case TARGET_RLIMIT_LOCKS
:
1041 return RLIMIT_LOCKS
;
1042 case TARGET_RLIMIT_MEMLOCK
:
1043 return RLIMIT_MEMLOCK
;
1044 case TARGET_RLIMIT_MSGQUEUE
:
1045 return RLIMIT_MSGQUEUE
;
1046 case TARGET_RLIMIT_NICE
:
1048 case TARGET_RLIMIT_NOFILE
:
1049 return RLIMIT_NOFILE
;
1050 case TARGET_RLIMIT_NPROC
:
1051 return RLIMIT_NPROC
;
1052 case TARGET_RLIMIT_RSS
:
1054 case TARGET_RLIMIT_RTPRIO
:
1055 return RLIMIT_RTPRIO
;
1056 case TARGET_RLIMIT_RTTIME
:
1057 return RLIMIT_RTTIME
;
1058 case TARGET_RLIMIT_SIGPENDING
:
1059 return RLIMIT_SIGPENDING
;
1060 case TARGET_RLIMIT_STACK
:
1061 return RLIMIT_STACK
;
1067 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1068 abi_ulong target_tv_addr
)
1070 struct target_timeval
*target_tv
;
1072 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1073 return -TARGET_EFAULT
;
1076 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1077 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1079 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1084 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1085 const struct timeval
*tv
)
1087 struct target_timeval
*target_tv
;
1089 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1090 return -TARGET_EFAULT
;
1093 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1094 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1096 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1101 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1102 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1103 abi_ulong target_tv_addr
)
1105 struct target__kernel_sock_timeval
*target_tv
;
1107 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1108 return -TARGET_EFAULT
;
1111 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1112 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1114 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1120 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1121 const struct timeval
*tv
)
1123 struct target__kernel_sock_timeval
*target_tv
;
1125 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1126 return -TARGET_EFAULT
;
1129 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1130 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1132 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Copy a struct timespec from guest memory at target_addr into *host_ts.
 * Returns 0 on success or -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Copy a 64-bit (__kernel_timespec) timespec from guest memory at
 * target_addr into *host_ts.
 * Returns 0 on success or -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1189 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1190 struct timespec
*host_ts
)
1192 struct target_timespec
*target_ts
;
1194 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1195 return -TARGET_EFAULT
;
1197 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1198 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1199 unlock_user_struct(target_ts
, target_addr
, 1);
1203 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1204 struct timespec
*host_ts
)
1206 struct target__kernel_timespec
*target_ts
;
1208 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1209 return -TARGET_EFAULT
;
1211 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1212 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1213 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_gettimeofday)
/*
 * Copy *tz out to guest memory at target_tz_addr.
 * Returns 0 on success or -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
/*
 * Copy a struct timezone from guest memory at target_tz_addr into *tz.
 * Returns 0 on success or -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
1255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1258 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1259 abi_ulong target_mq_attr_addr
)
1261 struct target_mq_attr
*target_mq_attr
;
1263 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1264 target_mq_attr_addr
, 1))
1265 return -TARGET_EFAULT
;
1267 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1268 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1269 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1270 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1272 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1277 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1278 const struct mq_attr
*attr
)
1280 struct target_mq_attr
*target_mq_attr
;
1282 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1283 target_mq_attr_addr
, 0))
1284 return -TARGET_EFAULT
;
1286 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1287 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1288 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1289 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1291 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL fd_set pointer. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr)) {
            return -TARGET_EFAULT;
        }
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* select is implemented in terms of pselect6 with a NULL sigmask. */
    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Implement the legacy one-argument select(), where arg1 points to a
 * guest struct sel_arg_struct packing all five arguments.
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6/pselect6_time64. time64 selects whether the guest
 * timespec at arg5 uses the 64-bit layout.
 * Returns target values and target errnos.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll/ppoll/ppoll_time64. 'ppoll' selects the ppoll flavour
 * (timespec + sigmask); 'time64' selects the 64-bit guest timespec
 * layout. Returns target values and target errnos.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Reject counts whose byte size would overflow an int. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1600 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1603 return pipe2(host_pipe
, flags
);
1609 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1610 int flags
, int is_pipe2
)
1614 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1617 return get_errno(ret
);
1619 /* Several targets have special calling conventions for the original
1620 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1622 #if defined(TARGET_ALPHA)
1623 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1624 return host_pipe
[0];
1625 #elif defined(TARGET_MIPS)
1626 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1627 return host_pipe
[0];
1628 #elif defined(TARGET_SH4)
1629 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1630 return host_pipe
[0];
1631 #elif defined(TARGET_SPARC)
1632 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1633 return host_pipe
[0];
1637 if (put_user_s32(host_pipe
[0], pipedes
)
1638 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1639 return -TARGET_EFAULT
;
1640 return get_errno(ret
);
1643 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1644 abi_ulong target_addr
,
1647 struct target_ip_mreqn
*target_smreqn
;
1649 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1651 return -TARGET_EFAULT
;
1652 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1653 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1654 if (len
== sizeof(struct target_ip_mreqn
))
1655 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1656 unlock_user(target_smreqn
, target_addr
, 0);
1661 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1662 abi_ulong target_addr
,
1665 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1666 sa_family_t sa_family
;
1667 struct target_sockaddr
*target_saddr
;
1669 if (fd_trans_target_to_host_addr(fd
)) {
1670 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1673 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1675 return -TARGET_EFAULT
;
1677 sa_family
= tswap16(target_saddr
->sa_family
);
1679 /* Oops. The caller might send a incomplete sun_path; sun_path
1680 * must be terminated by \0 (see the manual page), but
1681 * unfortunately it is quite common to specify sockaddr_un
1682 * length as "strlen(x->sun_path)" while it should be
1683 * "strlen(...) + 1". We'll fix that here if needed.
1684 * Linux kernel has a similar feature.
1687 if (sa_family
== AF_UNIX
) {
1688 if (len
< unix_maxlen
&& len
> 0) {
1689 char *cp
= (char*)target_saddr
;
1691 if ( cp
[len
-1] && !cp
[len
] )
1694 if (len
> unix_maxlen
)
1698 memcpy(addr
, target_saddr
, len
);
1699 addr
->sa_family
= sa_family
;
1700 if (sa_family
== AF_NETLINK
) {
1701 struct sockaddr_nl
*nladdr
;
1703 nladdr
= (struct sockaddr_nl
*)addr
;
1704 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1705 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1706 } else if (sa_family
== AF_PACKET
) {
1707 struct target_sockaddr_ll
*lladdr
;
1709 lladdr
= (struct target_sockaddr_ll
*)addr
;
1710 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1711 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1713 unlock_user(target_saddr
, target_addr
, 0);
1718 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1719 struct sockaddr
*addr
,
1722 struct target_sockaddr
*target_saddr
;
1729 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1731 return -TARGET_EFAULT
;
1732 memcpy(target_saddr
, addr
, len
);
1733 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1734 sizeof(target_saddr
->sa_family
)) {
1735 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1737 if (addr
->sa_family
== AF_NETLINK
&&
1738 len
>= sizeof(struct target_sockaddr_nl
)) {
1739 struct target_sockaddr_nl
*target_nl
=
1740 (struct target_sockaddr_nl
*)target_saddr
;
1741 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1742 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1743 } else if (addr
->sa_family
== AF_PACKET
) {
1744 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1745 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1746 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1747 } else if (addr
->sa_family
== AF_INET6
&&
1748 len
>= sizeof(struct target_sockaddr_in6
)) {
1749 struct target_sockaddr_in6
*target_in6
=
1750 (struct target_sockaddr_in6
*)target_saddr
;
1751 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1753 unlock_user(target_saddr
, target_addr
, len
);
1758 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1759 struct target_msghdr
*target_msgh
)
1761 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1762 abi_long msg_controllen
;
1763 abi_ulong target_cmsg_addr
;
1764 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1765 socklen_t space
= 0;
1767 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1768 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1770 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1771 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1772 target_cmsg_start
= target_cmsg
;
1774 return -TARGET_EFAULT
;
1776 while (cmsg
&& target_cmsg
) {
1777 void *data
= CMSG_DATA(cmsg
);
1778 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1780 int len
= tswapal(target_cmsg
->cmsg_len
)
1781 - sizeof(struct target_cmsghdr
);
1783 space
+= CMSG_SPACE(len
);
1784 if (space
> msgh
->msg_controllen
) {
1785 space
-= CMSG_SPACE(len
);
1786 /* This is a QEMU bug, since we allocated the payload
1787 * area ourselves (unlike overflow in host-to-target
1788 * conversion, which is just the guest giving us a buffer
1789 * that's too small). It can't happen for the payload types
1790 * we currently support; if it becomes an issue in future
1791 * we would need to improve our allocation strategy to
1792 * something more intelligent than "twice the size of the
1793 * target buffer we're reading from".
1795 qemu_log_mask(LOG_UNIMP
,
1796 ("Unsupported ancillary data %d/%d: "
1797 "unhandled msg size\n"),
1798 tswap32(target_cmsg
->cmsg_level
),
1799 tswap32(target_cmsg
->cmsg_type
));
1803 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1804 cmsg
->cmsg_level
= SOL_SOCKET
;
1806 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1808 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1809 cmsg
->cmsg_len
= CMSG_LEN(len
);
1811 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1812 int *fd
= (int *)data
;
1813 int *target_fd
= (int *)target_data
;
1814 int i
, numfds
= len
/ sizeof(int);
1816 for (i
= 0; i
< numfds
; i
++) {
1817 __get_user(fd
[i
], target_fd
+ i
);
1819 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1820 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1821 struct ucred
*cred
= (struct ucred
*)data
;
1822 struct target_ucred
*target_cred
=
1823 (struct target_ucred
*)target_data
;
1825 __get_user(cred
->pid
, &target_cred
->pid
);
1826 __get_user(cred
->uid
, &target_cred
->uid
);
1827 __get_user(cred
->gid
, &target_cred
->gid
);
1829 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1830 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1831 memcpy(data
, target_data
, len
);
1834 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1835 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1838 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1840 msgh
->msg_controllen
= space
;
1844 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1845 struct msghdr
*msgh
)
1847 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1848 abi_long msg_controllen
;
1849 abi_ulong target_cmsg_addr
;
1850 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1851 socklen_t space
= 0;
1853 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1854 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1856 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1857 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1858 target_cmsg_start
= target_cmsg
;
1860 return -TARGET_EFAULT
;
1862 while (cmsg
&& target_cmsg
) {
1863 void *data
= CMSG_DATA(cmsg
);
1864 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1866 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1867 int tgt_len
, tgt_space
;
1869 /* We never copy a half-header but may copy half-data;
1870 * this is Linux's behaviour in put_cmsg(). Note that
1871 * truncation here is a guest problem (which we report
1872 * to the guest via the CTRUNC bit), unlike truncation
1873 * in target_to_host_cmsg, which is a QEMU bug.
1875 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1876 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1880 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1881 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1883 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1885 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1887 /* Payload types which need a different size of payload on
1888 * the target must adjust tgt_len here.
1891 switch (cmsg
->cmsg_level
) {
1893 switch (cmsg
->cmsg_type
) {
1895 tgt_len
= sizeof(struct target_timeval
);
1905 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1906 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1907 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1910 /* We must now copy-and-convert len bytes of payload
1911 * into tgt_len bytes of destination space. Bear in mind
1912 * that in both source and destination we may be dealing
1913 * with a truncated value!
1915 switch (cmsg
->cmsg_level
) {
1917 switch (cmsg
->cmsg_type
) {
1920 int *fd
= (int *)data
;
1921 int *target_fd
= (int *)target_data
;
1922 int i
, numfds
= tgt_len
/ sizeof(int);
1924 for (i
= 0; i
< numfds
; i
++) {
1925 __put_user(fd
[i
], target_fd
+ i
);
1931 struct timeval
*tv
= (struct timeval
*)data
;
1932 struct target_timeval
*target_tv
=
1933 (struct target_timeval
*)target_data
;
1935 if (len
!= sizeof(struct timeval
) ||
1936 tgt_len
!= sizeof(struct target_timeval
)) {
1940 /* copy struct timeval to target */
1941 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1942 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1945 case SCM_CREDENTIALS
:
1947 struct ucred
*cred
= (struct ucred
*)data
;
1948 struct target_ucred
*target_cred
=
1949 (struct target_ucred
*)target_data
;
1951 __put_user(cred
->pid
, &target_cred
->pid
);
1952 __put_user(cred
->uid
, &target_cred
->uid
);
1953 __put_user(cred
->gid
, &target_cred
->gid
);
1962 switch (cmsg
->cmsg_type
) {
1965 uint32_t *v
= (uint32_t *)data
;
1966 uint32_t *t_int
= (uint32_t *)target_data
;
1968 if (len
!= sizeof(uint32_t) ||
1969 tgt_len
!= sizeof(uint32_t)) {
1972 __put_user(*v
, t_int
);
1978 struct sock_extended_err ee
;
1979 struct sockaddr_in offender
;
1981 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1982 struct errhdr_t
*target_errh
=
1983 (struct errhdr_t
*)target_data
;
1985 if (len
!= sizeof(struct errhdr_t
) ||
1986 tgt_len
!= sizeof(struct errhdr_t
)) {
1989 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1990 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1991 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1992 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1993 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1994 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1995 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1996 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1997 (void *) &errh
->offender
, sizeof(errh
->offender
));
2006 switch (cmsg
->cmsg_type
) {
2009 uint32_t *v
= (uint32_t *)data
;
2010 uint32_t *t_int
= (uint32_t *)target_data
;
2012 if (len
!= sizeof(uint32_t) ||
2013 tgt_len
!= sizeof(uint32_t)) {
2016 __put_user(*v
, t_int
);
2022 struct sock_extended_err ee
;
2023 struct sockaddr_in6 offender
;
2025 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2026 struct errhdr6_t
*target_errh
=
2027 (struct errhdr6_t
*)target_data
;
2029 if (len
!= sizeof(struct errhdr6_t
) ||
2030 tgt_len
!= sizeof(struct errhdr6_t
)) {
2033 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2034 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2035 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2036 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2037 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2038 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2039 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2040 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2041 (void *) &errh
->offender
, sizeof(errh
->offender
));
2051 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2052 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2053 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2054 if (tgt_len
> len
) {
2055 memset(target_data
+ len
, 0, tgt_len
- len
);
2059 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2060 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2061 if (msg_controllen
< tgt_space
) {
2062 tgt_space
= msg_controllen
;
2064 msg_controllen
-= tgt_space
;
2066 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2067 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2070 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2072 target_msgh
->msg_controllen
= tswapal(space
);
2076 /* do_setsockopt() Must return target values and target errnos. */
2077 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2078 abi_ulong optval_addr
, socklen_t optlen
)
2082 struct ip_mreqn
*ip_mreq
;
2083 struct ip_mreq_source
*ip_mreq_source
;
2088 /* TCP and UDP options all take an 'int' value. */
2089 if (optlen
< sizeof(uint32_t))
2090 return -TARGET_EINVAL
;
2092 if (get_user_u32(val
, optval_addr
))
2093 return -TARGET_EFAULT
;
2094 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2101 case IP_ROUTER_ALERT
:
2105 case IP_MTU_DISCOVER
:
2112 case IP_MULTICAST_TTL
:
2113 case IP_MULTICAST_LOOP
:
2115 if (optlen
>= sizeof(uint32_t)) {
2116 if (get_user_u32(val
, optval_addr
))
2117 return -TARGET_EFAULT
;
2118 } else if (optlen
>= 1) {
2119 if (get_user_u8(val
, optval_addr
))
2120 return -TARGET_EFAULT
;
2122 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2124 case IP_ADD_MEMBERSHIP
:
2125 case IP_DROP_MEMBERSHIP
:
2126 if (optlen
< sizeof (struct target_ip_mreq
) ||
2127 optlen
> sizeof (struct target_ip_mreqn
))
2128 return -TARGET_EINVAL
;
2130 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2131 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2132 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2135 case IP_BLOCK_SOURCE
:
2136 case IP_UNBLOCK_SOURCE
:
2137 case IP_ADD_SOURCE_MEMBERSHIP
:
2138 case IP_DROP_SOURCE_MEMBERSHIP
:
2139 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2140 return -TARGET_EINVAL
;
2142 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2143 if (!ip_mreq_source
) {
2144 return -TARGET_EFAULT
;
2146 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2147 unlock_user (ip_mreq_source
, optval_addr
, 0);
2156 case IPV6_MTU_DISCOVER
:
2159 case IPV6_RECVPKTINFO
:
2160 case IPV6_UNICAST_HOPS
:
2161 case IPV6_MULTICAST_HOPS
:
2162 case IPV6_MULTICAST_LOOP
:
2164 case IPV6_RECVHOPLIMIT
:
2165 case IPV6_2292HOPLIMIT
:
2168 case IPV6_2292PKTINFO
:
2169 case IPV6_RECVTCLASS
:
2170 case IPV6_RECVRTHDR
:
2171 case IPV6_2292RTHDR
:
2172 case IPV6_RECVHOPOPTS
:
2173 case IPV6_2292HOPOPTS
:
2174 case IPV6_RECVDSTOPTS
:
2175 case IPV6_2292DSTOPTS
:
2177 case IPV6_ADDR_PREFERENCES
:
2178 #ifdef IPV6_RECVPATHMTU
2179 case IPV6_RECVPATHMTU
:
2181 #ifdef IPV6_TRANSPARENT
2182 case IPV6_TRANSPARENT
:
2184 #ifdef IPV6_FREEBIND
2187 #ifdef IPV6_RECVORIGDSTADDR
2188 case IPV6_RECVORIGDSTADDR
:
2191 if (optlen
< sizeof(uint32_t)) {
2192 return -TARGET_EINVAL
;
2194 if (get_user_u32(val
, optval_addr
)) {
2195 return -TARGET_EFAULT
;
2197 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2198 &val
, sizeof(val
)));
2202 struct in6_pktinfo pki
;
2204 if (optlen
< sizeof(pki
)) {
2205 return -TARGET_EINVAL
;
2208 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2209 return -TARGET_EFAULT
;
2212 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2214 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2215 &pki
, sizeof(pki
)));
2218 case IPV6_ADD_MEMBERSHIP
:
2219 case IPV6_DROP_MEMBERSHIP
:
2221 struct ipv6_mreq ipv6mreq
;
2223 if (optlen
< sizeof(ipv6mreq
)) {
2224 return -TARGET_EINVAL
;
2227 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2228 return -TARGET_EFAULT
;
2231 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2233 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2234 &ipv6mreq
, sizeof(ipv6mreq
)));
2245 struct icmp6_filter icmp6f
;
2247 if (optlen
> sizeof(icmp6f
)) {
2248 optlen
= sizeof(icmp6f
);
2251 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2252 return -TARGET_EFAULT
;
2255 for (val
= 0; val
< 8; val
++) {
2256 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2259 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2271 /* those take an u32 value */
2272 if (optlen
< sizeof(uint32_t)) {
2273 return -TARGET_EINVAL
;
2276 if (get_user_u32(val
, optval_addr
)) {
2277 return -TARGET_EFAULT
;
2279 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2280 &val
, sizeof(val
)));
2287 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2292 char *alg_key
= g_malloc(optlen
);
2295 return -TARGET_ENOMEM
;
2297 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2299 return -TARGET_EFAULT
;
2301 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2306 case ALG_SET_AEAD_AUTHSIZE
:
2308 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2317 case TARGET_SOL_SOCKET
:
2319 case TARGET_SO_RCVTIMEO
:
2323 optname
= SO_RCVTIMEO
;
2326 if (optlen
!= sizeof(struct target_timeval
)) {
2327 return -TARGET_EINVAL
;
2330 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2331 return -TARGET_EFAULT
;
2334 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2338 case TARGET_SO_SNDTIMEO
:
2339 optname
= SO_SNDTIMEO
;
2341 case TARGET_SO_ATTACH_FILTER
:
2343 struct target_sock_fprog
*tfprog
;
2344 struct target_sock_filter
*tfilter
;
2345 struct sock_fprog fprog
;
2346 struct sock_filter
*filter
;
2349 if (optlen
!= sizeof(*tfprog
)) {
2350 return -TARGET_EINVAL
;
2352 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2353 return -TARGET_EFAULT
;
2355 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2356 tswapal(tfprog
->filter
), 0)) {
2357 unlock_user_struct(tfprog
, optval_addr
, 1);
2358 return -TARGET_EFAULT
;
2361 fprog
.len
= tswap16(tfprog
->len
);
2362 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2363 if (filter
== NULL
) {
2364 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2365 unlock_user_struct(tfprog
, optval_addr
, 1);
2366 return -TARGET_ENOMEM
;
2368 for (i
= 0; i
< fprog
.len
; i
++) {
2369 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2370 filter
[i
].jt
= tfilter
[i
].jt
;
2371 filter
[i
].jf
= tfilter
[i
].jf
;
2372 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2374 fprog
.filter
= filter
;
2376 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2377 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2380 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2381 unlock_user_struct(tfprog
, optval_addr
, 1);
2384 case TARGET_SO_BINDTODEVICE
:
2386 char *dev_ifname
, *addr_ifname
;
2388 if (optlen
> IFNAMSIZ
- 1) {
2389 optlen
= IFNAMSIZ
- 1;
2391 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2393 return -TARGET_EFAULT
;
2395 optname
= SO_BINDTODEVICE
;
2396 addr_ifname
= alloca(IFNAMSIZ
);
2397 memcpy(addr_ifname
, dev_ifname
, optlen
);
2398 addr_ifname
[optlen
] = 0;
2399 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2400 addr_ifname
, optlen
));
2401 unlock_user (dev_ifname
, optval_addr
, 0);
2404 case TARGET_SO_LINGER
:
2407 struct target_linger
*tlg
;
2409 if (optlen
!= sizeof(struct target_linger
)) {
2410 return -TARGET_EINVAL
;
2412 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2413 return -TARGET_EFAULT
;
2415 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2416 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2417 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2419 unlock_user_struct(tlg
, optval_addr
, 0);
2422 /* Options with 'int' argument. */
2423 case TARGET_SO_DEBUG
:
2426 case TARGET_SO_REUSEADDR
:
2427 optname
= SO_REUSEADDR
;
2430 case TARGET_SO_REUSEPORT
:
2431 optname
= SO_REUSEPORT
;
2434 case TARGET_SO_TYPE
:
2437 case TARGET_SO_ERROR
:
2440 case TARGET_SO_DONTROUTE
:
2441 optname
= SO_DONTROUTE
;
2443 case TARGET_SO_BROADCAST
:
2444 optname
= SO_BROADCAST
;
2446 case TARGET_SO_SNDBUF
:
2447 optname
= SO_SNDBUF
;
2449 case TARGET_SO_SNDBUFFORCE
:
2450 optname
= SO_SNDBUFFORCE
;
2452 case TARGET_SO_RCVBUF
:
2453 optname
= SO_RCVBUF
;
2455 case TARGET_SO_RCVBUFFORCE
:
2456 optname
= SO_RCVBUFFORCE
;
2458 case TARGET_SO_KEEPALIVE
:
2459 optname
= SO_KEEPALIVE
;
2461 case TARGET_SO_OOBINLINE
:
2462 optname
= SO_OOBINLINE
;
2464 case TARGET_SO_NO_CHECK
:
2465 optname
= SO_NO_CHECK
;
2467 case TARGET_SO_PRIORITY
:
2468 optname
= SO_PRIORITY
;
2471 case TARGET_SO_BSDCOMPAT
:
2472 optname
= SO_BSDCOMPAT
;
2475 case TARGET_SO_PASSCRED
:
2476 optname
= SO_PASSCRED
;
2478 case TARGET_SO_PASSSEC
:
2479 optname
= SO_PASSSEC
;
2481 case TARGET_SO_TIMESTAMP
:
2482 optname
= SO_TIMESTAMP
;
2484 case TARGET_SO_RCVLOWAT
:
2485 optname
= SO_RCVLOWAT
;
2490 if (optlen
< sizeof(uint32_t))
2491 return -TARGET_EINVAL
;
2493 if (get_user_u32(val
, optval_addr
))
2494 return -TARGET_EFAULT
;
2495 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2500 case NETLINK_PKTINFO
:
2501 case NETLINK_ADD_MEMBERSHIP
:
2502 case NETLINK_DROP_MEMBERSHIP
:
2503 case NETLINK_BROADCAST_ERROR
:
2504 case NETLINK_NO_ENOBUFS
:
2505 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2506 case NETLINK_LISTEN_ALL_NSID
:
2507 case NETLINK_CAP_ACK
:
2508 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2509 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2510 case NETLINK_EXT_ACK
:
2511 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2512 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2513 case NETLINK_GET_STRICT_CHK
:
2514 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2520 if (optlen
< sizeof(uint32_t)) {
2521 return -TARGET_EINVAL
;
2523 if (get_user_u32(val
, optval_addr
)) {
2524 return -TARGET_EFAULT
;
2526 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2529 #endif /* SOL_NETLINK */
2532 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2534 ret
= -TARGET_ENOPROTOOPT
;
2539 /* do_getsockopt() Must return target values and target errnos. */
2540 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2541 abi_ulong optval_addr
, abi_ulong optlen
)
2548 case TARGET_SOL_SOCKET
:
2551 /* These don't just return a single integer */
2552 case TARGET_SO_PEERNAME
:
2554 case TARGET_SO_RCVTIMEO
: {
2558 optname
= SO_RCVTIMEO
;
2561 if (get_user_u32(len
, optlen
)) {
2562 return -TARGET_EFAULT
;
2565 return -TARGET_EINVAL
;
2569 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2574 if (len
> sizeof(struct target_timeval
)) {
2575 len
= sizeof(struct target_timeval
);
2577 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2578 return -TARGET_EFAULT
;
2580 if (put_user_u32(len
, optlen
)) {
2581 return -TARGET_EFAULT
;
2585 case TARGET_SO_SNDTIMEO
:
2586 optname
= SO_SNDTIMEO
;
2588 case TARGET_SO_PEERCRED
: {
2591 struct target_ucred
*tcr
;
2593 if (get_user_u32(len
, optlen
)) {
2594 return -TARGET_EFAULT
;
2597 return -TARGET_EINVAL
;
2601 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2609 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2610 return -TARGET_EFAULT
;
2612 __put_user(cr
.pid
, &tcr
->pid
);
2613 __put_user(cr
.uid
, &tcr
->uid
);
2614 __put_user(cr
.gid
, &tcr
->gid
);
2615 unlock_user_struct(tcr
, optval_addr
, 1);
2616 if (put_user_u32(len
, optlen
)) {
2617 return -TARGET_EFAULT
;
2621 case TARGET_SO_PEERSEC
: {
2624 if (get_user_u32(len
, optlen
)) {
2625 return -TARGET_EFAULT
;
2628 return -TARGET_EINVAL
;
2630 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2632 return -TARGET_EFAULT
;
2635 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2637 if (put_user_u32(lv
, optlen
)) {
2638 ret
= -TARGET_EFAULT
;
2640 unlock_user(name
, optval_addr
, lv
);
2643 case TARGET_SO_LINGER
:
2647 struct target_linger
*tlg
;
2649 if (get_user_u32(len
, optlen
)) {
2650 return -TARGET_EFAULT
;
2653 return -TARGET_EINVAL
;
2657 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2665 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2666 return -TARGET_EFAULT
;
2668 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2669 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2670 unlock_user_struct(tlg
, optval_addr
, 1);
2671 if (put_user_u32(len
, optlen
)) {
2672 return -TARGET_EFAULT
;
2676 /* Options with 'int' argument. */
2677 case TARGET_SO_DEBUG
:
2680 case TARGET_SO_REUSEADDR
:
2681 optname
= SO_REUSEADDR
;
2684 case TARGET_SO_REUSEPORT
:
2685 optname
= SO_REUSEPORT
;
2688 case TARGET_SO_TYPE
:
2691 case TARGET_SO_ERROR
:
2694 case TARGET_SO_DONTROUTE
:
2695 optname
= SO_DONTROUTE
;
2697 case TARGET_SO_BROADCAST
:
2698 optname
= SO_BROADCAST
;
2700 case TARGET_SO_SNDBUF
:
2701 optname
= SO_SNDBUF
;
2703 case TARGET_SO_RCVBUF
:
2704 optname
= SO_RCVBUF
;
2706 case TARGET_SO_KEEPALIVE
:
2707 optname
= SO_KEEPALIVE
;
2709 case TARGET_SO_OOBINLINE
:
2710 optname
= SO_OOBINLINE
;
2712 case TARGET_SO_NO_CHECK
:
2713 optname
= SO_NO_CHECK
;
2715 case TARGET_SO_PRIORITY
:
2716 optname
= SO_PRIORITY
;
2719 case TARGET_SO_BSDCOMPAT
:
2720 optname
= SO_BSDCOMPAT
;
2723 case TARGET_SO_PASSCRED
:
2724 optname
= SO_PASSCRED
;
2726 case TARGET_SO_TIMESTAMP
:
2727 optname
= SO_TIMESTAMP
;
2729 case TARGET_SO_RCVLOWAT
:
2730 optname
= SO_RCVLOWAT
;
2732 case TARGET_SO_ACCEPTCONN
:
2733 optname
= SO_ACCEPTCONN
;
2735 case TARGET_SO_PROTOCOL
:
2736 optname
= SO_PROTOCOL
;
2738 case TARGET_SO_DOMAIN
:
2739 optname
= SO_DOMAIN
;
2747 /* TCP and UDP options all take an 'int' value. */
2749 if (get_user_u32(len
, optlen
))
2750 return -TARGET_EFAULT
;
2752 return -TARGET_EINVAL
;
2754 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2757 if (optname
== SO_TYPE
) {
2758 val
= host_to_target_sock_type(val
);
2763 if (put_user_u32(val
, optval_addr
))
2764 return -TARGET_EFAULT
;
2766 if (put_user_u8(val
, optval_addr
))
2767 return -TARGET_EFAULT
;
2769 if (put_user_u32(len
, optlen
))
2770 return -TARGET_EFAULT
;
2777 case IP_ROUTER_ALERT
:
2781 case IP_MTU_DISCOVER
:
2787 case IP_MULTICAST_TTL
:
2788 case IP_MULTICAST_LOOP
:
2789 if (get_user_u32(len
, optlen
))
2790 return -TARGET_EFAULT
;
2792 return -TARGET_EINVAL
;
2794 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2797 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2799 if (put_user_u32(len
, optlen
)
2800 || put_user_u8(val
, optval_addr
))
2801 return -TARGET_EFAULT
;
2803 if (len
> sizeof(int))
2805 if (put_user_u32(len
, optlen
)
2806 || put_user_u32(val
, optval_addr
))
2807 return -TARGET_EFAULT
;
2811 ret
= -TARGET_ENOPROTOOPT
;
2817 case IPV6_MTU_DISCOVER
:
2820 case IPV6_RECVPKTINFO
:
2821 case IPV6_UNICAST_HOPS
:
2822 case IPV6_MULTICAST_HOPS
:
2823 case IPV6_MULTICAST_LOOP
:
2825 case IPV6_RECVHOPLIMIT
:
2826 case IPV6_2292HOPLIMIT
:
2829 case IPV6_2292PKTINFO
:
2830 case IPV6_RECVTCLASS
:
2831 case IPV6_RECVRTHDR
:
2832 case IPV6_2292RTHDR
:
2833 case IPV6_RECVHOPOPTS
:
2834 case IPV6_2292HOPOPTS
:
2835 case IPV6_RECVDSTOPTS
:
2836 case IPV6_2292DSTOPTS
:
2838 case IPV6_ADDR_PREFERENCES
:
2839 #ifdef IPV6_RECVPATHMTU
2840 case IPV6_RECVPATHMTU
:
2842 #ifdef IPV6_TRANSPARENT
2843 case IPV6_TRANSPARENT
:
2845 #ifdef IPV6_FREEBIND
2848 #ifdef IPV6_RECVORIGDSTADDR
2849 case IPV6_RECVORIGDSTADDR
:
2851 if (get_user_u32(len
, optlen
))
2852 return -TARGET_EFAULT
;
2854 return -TARGET_EINVAL
;
2856 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2859 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2861 if (put_user_u32(len
, optlen
)
2862 || put_user_u8(val
, optval_addr
))
2863 return -TARGET_EFAULT
;
2865 if (len
> sizeof(int))
2867 if (put_user_u32(len
, optlen
)
2868 || put_user_u32(val
, optval_addr
))
2869 return -TARGET_EFAULT
;
2873 ret
= -TARGET_ENOPROTOOPT
;
2880 case NETLINK_PKTINFO
:
2881 case NETLINK_BROADCAST_ERROR
:
2882 case NETLINK_NO_ENOBUFS
:
2883 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2884 case NETLINK_LISTEN_ALL_NSID
:
2885 case NETLINK_CAP_ACK
:
2886 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2887 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2888 case NETLINK_EXT_ACK
:
2889 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2890 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2891 case NETLINK_GET_STRICT_CHK
:
2892 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2893 if (get_user_u32(len
, optlen
)) {
2894 return -TARGET_EFAULT
;
2896 if (len
!= sizeof(val
)) {
2897 return -TARGET_EINVAL
;
2900 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2904 if (put_user_u32(lv
, optlen
)
2905 || put_user_u32(val
, optval_addr
)) {
2906 return -TARGET_EFAULT
;
2909 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2910 case NETLINK_LIST_MEMBERSHIPS
:
2914 if (get_user_u32(len
, optlen
)) {
2915 return -TARGET_EFAULT
;
2918 return -TARGET_EINVAL
;
2920 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2921 if (!results
&& len
> 0) {
2922 return -TARGET_EFAULT
;
2925 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2927 unlock_user(results
, optval_addr
, 0);
2930 /* swap host endianess to target endianess. */
2931 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2932 results
[i
] = tswap32(results
[i
]);
2934 if (put_user_u32(lv
, optlen
)) {
2935 return -TARGET_EFAULT
;
2937 unlock_user(results
, optval_addr
, 0);
2940 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2945 #endif /* SOL_NETLINK */
2948 qemu_log_mask(LOG_UNIMP
,
2949 "getsockopt level=%d optname=%d not yet supported\n",
2951 ret
= -TARGET_EOPNOTSUPP
;
2957 /* Convert target low/high pair representing file offset into the host
2958 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2959 * as the kernel doesn't handle them either.
2961 static void target_to_host_low_high(abi_ulong tlow
,
2963 unsigned long *hlow
,
2964 unsigned long *hhigh
)
2966 uint64_t off
= tlow
|
2967 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2968 TARGET_LONG_BITS
/ 2;
2971 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2974 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2975 abi_ulong count
, int copy
)
2977 struct target_iovec
*target_vec
;
2979 abi_ulong total_len
, max_len
;
2982 bool bad_address
= false;
2988 if (count
> IOV_MAX
) {
2993 vec
= g_try_new0(struct iovec
, count
);
2999 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3000 count
* sizeof(struct target_iovec
), 1);
3001 if (target_vec
== NULL
) {
3006 /* ??? If host page size > target page size, this will result in a
3007 value larger than what we can actually support. */
3008 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3011 for (i
= 0; i
< count
; i
++) {
3012 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3013 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3018 } else if (len
== 0) {
3019 /* Zero length pointer is ignored. */
3020 vec
[i
].iov_base
= 0;
3022 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3023 /* If the first buffer pointer is bad, this is a fault. But
3024 * subsequent bad buffers will result in a partial write; this
3025 * is realized by filling the vector with null pointers and
3027 if (!vec
[i
].iov_base
) {
3038 if (len
> max_len
- total_len
) {
3039 len
= max_len
- total_len
;
3042 vec
[i
].iov_len
= len
;
3046 unlock_user(target_vec
, target_addr
, 0);
3051 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3052 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3055 unlock_user(target_vec
, target_addr
, 0);
3062 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3063 abi_ulong count
, int copy
)
3065 struct target_iovec
*target_vec
;
3068 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3069 count
* sizeof(struct target_iovec
), 1);
3071 for (i
= 0; i
< count
; i
++) {
3072 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3073 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3077 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3079 unlock_user(target_vec
, target_addr
, 0);
3085 static inline int target_to_host_sock_type(int *type
)
3088 int target_type
= *type
;
3090 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3091 case TARGET_SOCK_DGRAM
:
3092 host_type
= SOCK_DGRAM
;
3094 case TARGET_SOCK_STREAM
:
3095 host_type
= SOCK_STREAM
;
3098 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3101 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3102 #if defined(SOCK_CLOEXEC)
3103 host_type
|= SOCK_CLOEXEC
;
3105 return -TARGET_EINVAL
;
3108 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3109 #if defined(SOCK_NONBLOCK)
3110 host_type
|= SOCK_NONBLOCK
;
3111 #elif !defined(O_NONBLOCK)
3112 return -TARGET_EINVAL
;
3119 /* Try to emulate socket type flags after socket creation. */
3120 static int sock_flags_fixup(int fd
, int target_type
)
3122 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3123 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3124 int flags
= fcntl(fd
, F_GETFL
);
3125 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3127 return -TARGET_EINVAL
;
3134 /* do_socket() Must return target values and target errnos. */
3135 static abi_long
do_socket(int domain
, int type
, int protocol
)
3137 int target_type
= type
;
3140 ret
= target_to_host_sock_type(&type
);
3145 if (domain
== PF_NETLINK
&& !(
3146 #ifdef CONFIG_RTNETLINK
3147 protocol
== NETLINK_ROUTE
||
3149 protocol
== NETLINK_KOBJECT_UEVENT
||
3150 protocol
== NETLINK_AUDIT
)) {
3151 return -TARGET_EPROTONOSUPPORT
;
3154 if (domain
== AF_PACKET
||
3155 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3156 protocol
= tswap16(protocol
);
3159 ret
= get_errno(socket(domain
, type
, protocol
));
3161 ret
= sock_flags_fixup(ret
, target_type
);
3162 if (type
== SOCK_PACKET
) {
3163 /* Manage an obsolete case :
3164 * if socket type is SOCK_PACKET, bind by name
3166 fd_trans_register(ret
, &target_packet_trans
);
3167 } else if (domain
== PF_NETLINK
) {
3169 #ifdef CONFIG_RTNETLINK
3171 fd_trans_register(ret
, &target_netlink_route_trans
);
3174 case NETLINK_KOBJECT_UEVENT
:
3175 /* nothing to do: messages are strings */
3178 fd_trans_register(ret
, &target_netlink_audit_trans
);
3181 g_assert_not_reached();
3188 /* do_bind() Must return target values and target errnos. */
3189 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3195 if ((int)addrlen
< 0) {
3196 return -TARGET_EINVAL
;
3199 addr
= alloca(addrlen
+1);
3201 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3205 return get_errno(bind(sockfd
, addr
, addrlen
));
3208 /* do_connect() Must return target values and target errnos. */
3209 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3215 if ((int)addrlen
< 0) {
3216 return -TARGET_EINVAL
;
3219 addr
= alloca(addrlen
+1);
3221 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3225 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3228 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3229 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3230 int flags
, int send
)
3236 abi_ulong target_vec
;
3238 if (msgp
->msg_name
) {
3239 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3240 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3241 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3242 tswapal(msgp
->msg_name
),
3244 if (ret
== -TARGET_EFAULT
) {
3245 /* For connected sockets msg_name and msg_namelen must
3246 * be ignored, so returning EFAULT immediately is wrong.
3247 * Instead, pass a bad msg_name to the host kernel, and
3248 * let it decide whether to return EFAULT or not.
3250 msg
.msg_name
= (void *)-1;
3255 msg
.msg_name
= NULL
;
3256 msg
.msg_namelen
= 0;
3258 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3259 msg
.msg_control
= alloca(msg
.msg_controllen
);
3260 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3262 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3264 count
= tswapal(msgp
->msg_iovlen
);
3265 target_vec
= tswapal(msgp
->msg_iov
);
3267 if (count
> IOV_MAX
) {
3268 /* sendrcvmsg returns a different errno for this condition than
3269 * readv/writev, so we must catch it here before lock_iovec() does.
3271 ret
= -TARGET_EMSGSIZE
;
3275 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3276 target_vec
, count
, send
);
3278 ret
= -host_to_target_errno(errno
);
3281 msg
.msg_iovlen
= count
;
3285 if (fd_trans_target_to_host_data(fd
)) {
3288 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3289 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3290 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3291 msg
.msg_iov
->iov_len
);
3293 msg
.msg_iov
->iov_base
= host_msg
;
3294 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3298 ret
= target_to_host_cmsg(&msg
, msgp
);
3300 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3304 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3305 if (!is_error(ret
)) {
3307 if (fd_trans_host_to_target_data(fd
)) {
3308 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3309 MIN(msg
.msg_iov
->iov_len
, len
));
3311 ret
= host_to_target_cmsg(msgp
, &msg
);
3313 if (!is_error(ret
)) {
3314 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3315 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3316 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3317 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3318 msg
.msg_name
, msg
.msg_namelen
);
3330 unlock_iovec(vec
, target_vec
, count
, !send
);
3335 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3336 int flags
, int send
)
3339 struct target_msghdr
*msgp
;
3341 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3345 return -TARGET_EFAULT
;
3347 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3348 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3352 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3353 * so it might not have this *mmsg-specific flag either.
3355 #ifndef MSG_WAITFORONE
3356 #define MSG_WAITFORONE 0x10000
3359 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3360 unsigned int vlen
, unsigned int flags
,
3363 struct target_mmsghdr
*mmsgp
;
3367 if (vlen
> UIO_MAXIOV
) {
3371 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3373 return -TARGET_EFAULT
;
3376 for (i
= 0; i
< vlen
; i
++) {
3377 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3378 if (is_error(ret
)) {
3381 mmsgp
[i
].msg_len
= tswap32(ret
);
3382 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3383 if (flags
& MSG_WAITFORONE
) {
3384 flags
|= MSG_DONTWAIT
;
3388 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3390 /* Return number of datagrams sent if we sent any at all;
3391 * otherwise return the error.
3399 /* do_accept4() Must return target values and target errnos. */
3400 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3401 abi_ulong target_addrlen_addr
, int flags
)
3403 socklen_t addrlen
, ret_addrlen
;
3408 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3410 if (target_addr
== 0) {
3411 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3414 /* linux returns EFAULT if addrlen pointer is invalid */
3415 if (get_user_u32(addrlen
, target_addrlen_addr
))
3416 return -TARGET_EFAULT
;
3418 if ((int)addrlen
< 0) {
3419 return -TARGET_EINVAL
;
3422 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3423 return -TARGET_EFAULT
;
3426 addr
= alloca(addrlen
);
3428 ret_addrlen
= addrlen
;
3429 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3430 if (!is_error(ret
)) {
3431 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3432 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3433 ret
= -TARGET_EFAULT
;
3439 /* do_getpeername() Must return target values and target errnos. */
3440 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3441 abi_ulong target_addrlen_addr
)
3443 socklen_t addrlen
, ret_addrlen
;
3447 if (get_user_u32(addrlen
, target_addrlen_addr
))
3448 return -TARGET_EFAULT
;
3450 if ((int)addrlen
< 0) {
3451 return -TARGET_EINVAL
;
3454 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3455 return -TARGET_EFAULT
;
3458 addr
= alloca(addrlen
);
3460 ret_addrlen
= addrlen
;
3461 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3462 if (!is_error(ret
)) {
3463 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3464 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3465 ret
= -TARGET_EFAULT
;
3471 /* do_getsockname() Must return target values and target errnos. */
3472 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3473 abi_ulong target_addrlen_addr
)
3475 socklen_t addrlen
, ret_addrlen
;
3479 if (get_user_u32(addrlen
, target_addrlen_addr
))
3480 return -TARGET_EFAULT
;
3482 if ((int)addrlen
< 0) {
3483 return -TARGET_EINVAL
;
3486 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3487 return -TARGET_EFAULT
;
3490 addr
= alloca(addrlen
);
3492 ret_addrlen
= addrlen
;
3493 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3494 if (!is_error(ret
)) {
3495 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3496 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3497 ret
= -TARGET_EFAULT
;
3503 /* do_socketpair() Must return target values and target errnos. */
3504 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3505 abi_ulong target_tab_addr
)
3510 target_to_host_sock_type(&type
);
3512 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3513 if (!is_error(ret
)) {
3514 if (put_user_s32(tab
[0], target_tab_addr
)
3515 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3516 ret
= -TARGET_EFAULT
;
3521 /* do_sendto() Must return target values and target errnos. */
3522 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3523 abi_ulong target_addr
, socklen_t addrlen
)
3527 void *copy_msg
= NULL
;
3530 if ((int)addrlen
< 0) {
3531 return -TARGET_EINVAL
;
3534 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3536 return -TARGET_EFAULT
;
3537 if (fd_trans_target_to_host_data(fd
)) {
3538 copy_msg
= host_msg
;
3539 host_msg
= g_malloc(len
);
3540 memcpy(host_msg
, copy_msg
, len
);
3541 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3547 addr
= alloca(addrlen
+1);
3548 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3552 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3554 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3559 host_msg
= copy_msg
;
3561 unlock_user(host_msg
, msg
, 0);
3565 /* do_recvfrom() Must return target values and target errnos. */
3566 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3567 abi_ulong target_addr
,
3568 abi_ulong target_addrlen
)
3570 socklen_t addrlen
, ret_addrlen
;
3578 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3580 return -TARGET_EFAULT
;
3584 if (get_user_u32(addrlen
, target_addrlen
)) {
3585 ret
= -TARGET_EFAULT
;
3588 if ((int)addrlen
< 0) {
3589 ret
= -TARGET_EINVAL
;
3592 addr
= alloca(addrlen
);
3593 ret_addrlen
= addrlen
;
3594 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3595 addr
, &ret_addrlen
));
3597 addr
= NULL
; /* To keep compiler quiet. */
3598 addrlen
= 0; /* To keep compiler quiet. */
3599 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3601 if (!is_error(ret
)) {
3602 if (fd_trans_host_to_target_data(fd
)) {
3604 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3605 if (is_error(trans
)) {
3611 host_to_target_sockaddr(target_addr
, addr
,
3612 MIN(addrlen
, ret_addrlen
));
3613 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3614 ret
= -TARGET_EFAULT
;
3618 unlock_user(host_msg
, msg
, len
);
3621 unlock_user(host_msg
, msg
, 0);
3626 #ifdef TARGET_NR_socketcall
3627 /* do_socketcall() must return target values and target errnos. */
3628 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3630 static const unsigned nargs
[] = { /* number of arguments per operation */
3631 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3632 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3633 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3634 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3635 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3636 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3637 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3638 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3639 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3640 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3641 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3642 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3643 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3644 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3645 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3646 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3647 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3648 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3649 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3650 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3652 abi_long a
[6]; /* max 6 args */
3655 /* check the range of the first argument num */
3656 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3657 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3658 return -TARGET_EINVAL
;
3660 /* ensure we have space for args */
3661 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3662 return -TARGET_EINVAL
;
3664 /* collect the arguments in a[] according to nargs[] */
3665 for (i
= 0; i
< nargs
[num
]; ++i
) {
3666 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3667 return -TARGET_EFAULT
;
3670 /* now when we have the args, invoke the appropriate underlying function */
3672 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3673 return do_socket(a
[0], a
[1], a
[2]);
3674 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3675 return do_bind(a
[0], a
[1], a
[2]);
3676 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3677 return do_connect(a
[0], a
[1], a
[2]);
3678 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3679 return get_errno(listen(a
[0], a
[1]));
3680 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3681 return do_accept4(a
[0], a
[1], a
[2], 0);
3682 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3683 return do_getsockname(a
[0], a
[1], a
[2]);
3684 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3685 return do_getpeername(a
[0], a
[1], a
[2]);
3686 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3687 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3688 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3689 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3690 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3691 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3692 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3693 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3694 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3695 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3696 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3697 return get_errno(shutdown(a
[0], a
[1]));
3698 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3699 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3700 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3701 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3702 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3703 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3704 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3705 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3706 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3707 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3708 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3709 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3710 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3711 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3713 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3714 return -TARGET_EINVAL
;
3719 #define N_SHM_REGIONS 32
3721 static struct shm_region
{
3725 } shm_regions
[N_SHM_REGIONS
];
3727 #ifndef TARGET_SEMID64_DS
3728 /* asm-generic version of this struct */
3729 struct target_semid64_ds
3731 struct target_ipc_perm sem_perm
;
3732 abi_ulong sem_otime
;
3733 #if TARGET_ABI_BITS == 32
3734 abi_ulong __unused1
;
3736 abi_ulong sem_ctime
;
3737 #if TARGET_ABI_BITS == 32
3738 abi_ulong __unused2
;
3740 abi_ulong sem_nsems
;
3741 abi_ulong __unused3
;
3742 abi_ulong __unused4
;
3746 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3747 abi_ulong target_addr
)
3749 struct target_ipc_perm
*target_ip
;
3750 struct target_semid64_ds
*target_sd
;
3752 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3753 return -TARGET_EFAULT
;
3754 target_ip
= &(target_sd
->sem_perm
);
3755 host_ip
->__key
= tswap32(target_ip
->__key
);
3756 host_ip
->uid
= tswap32(target_ip
->uid
);
3757 host_ip
->gid
= tswap32(target_ip
->gid
);
3758 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3759 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3760 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3761 host_ip
->mode
= tswap32(target_ip
->mode
);
3763 host_ip
->mode
= tswap16(target_ip
->mode
);
3765 #if defined(TARGET_PPC)
3766 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3768 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3770 unlock_user_struct(target_sd
, target_addr
, 0);
3774 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3775 struct ipc_perm
*host_ip
)
3777 struct target_ipc_perm
*target_ip
;
3778 struct target_semid64_ds
*target_sd
;
3780 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3781 return -TARGET_EFAULT
;
3782 target_ip
= &(target_sd
->sem_perm
);
3783 target_ip
->__key
= tswap32(host_ip
->__key
);
3784 target_ip
->uid
= tswap32(host_ip
->uid
);
3785 target_ip
->gid
= tswap32(host_ip
->gid
);
3786 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3787 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3788 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3789 target_ip
->mode
= tswap32(host_ip
->mode
);
3791 target_ip
->mode
= tswap16(host_ip
->mode
);
3793 #if defined(TARGET_PPC)
3794 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3796 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3798 unlock_user_struct(target_sd
, target_addr
, 1);
3802 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3803 abi_ulong target_addr
)
3805 struct target_semid64_ds
*target_sd
;
3807 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3808 return -TARGET_EFAULT
;
3809 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3810 return -TARGET_EFAULT
;
3811 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3812 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3813 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3814 unlock_user_struct(target_sd
, target_addr
, 0);
3818 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3819 struct semid_ds
*host_sd
)
3821 struct target_semid64_ds
*target_sd
;
3823 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3824 return -TARGET_EFAULT
;
3825 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3826 return -TARGET_EFAULT
;
3827 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3828 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3829 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3830 unlock_user_struct(target_sd
, target_addr
, 1);
/*
 * Guest view of struct seminfo (IPC_INFO/SEM_INFO result).
 * NOTE(review): member list reconstructed from the __put_user calls in
 * host_to_target_seminfo(); confirm types against the original.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3847 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3848 struct seminfo
*host_seminfo
)
3850 struct target_seminfo
*target_seminfo
;
3851 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3852 return -TARGET_EFAULT
;
3853 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3854 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3855 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3856 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3857 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3858 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3859 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3860 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3861 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3862 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3863 unlock_user_struct(target_seminfo
, target_addr
, 1);
/*
 * Host-side semctl() argument union (glibc does not define one).
 * NOTE(review): the int val member is restored from its use in
 * do_semctl(); confirm against the original.
 */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
3874 union target_semun
{
3881 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3882 abi_ulong target_addr
)
3885 unsigned short *array
;
3887 struct semid_ds semid_ds
;
3890 semun
.buf
= &semid_ds
;
3892 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3894 return get_errno(ret
);
3896 nsems
= semid_ds
.sem_nsems
;
3898 *host_array
= g_try_new(unsigned short, nsems
);
3900 return -TARGET_ENOMEM
;
3902 array
= lock_user(VERIFY_READ
, target_addr
,
3903 nsems
*sizeof(unsigned short), 1);
3905 g_free(*host_array
);
3906 return -TARGET_EFAULT
;
3909 for(i
=0; i
<nsems
; i
++) {
3910 __get_user((*host_array
)[i
], &array
[i
]);
3912 unlock_user(array
, target_addr
, 0);
3917 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3918 unsigned short **host_array
)
3921 unsigned short *array
;
3923 struct semid_ds semid_ds
;
3926 semun
.buf
= &semid_ds
;
3928 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3930 return get_errno(ret
);
3932 nsems
= semid_ds
.sem_nsems
;
3934 array
= lock_user(VERIFY_WRITE
, target_addr
,
3935 nsems
*sizeof(unsigned short), 0);
3937 return -TARGET_EFAULT
;
3939 for(i
=0; i
<nsems
; i
++) {
3940 __put_user((*host_array
)[i
], &array
[i
]);
3942 g_free(*host_array
);
3943 unlock_user(array
, target_addr
, 1);
3948 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3949 abi_ulong target_arg
)
3951 union target_semun target_su
= { .buf
= target_arg
};
3953 struct semid_ds dsarg
;
3954 unsigned short *array
= NULL
;
3955 struct seminfo seminfo
;
3956 abi_long ret
= -TARGET_EINVAL
;
3963 /* In 64 bit cross-endian situations, we will erroneously pick up
3964 * the wrong half of the union for the "val" element. To rectify
3965 * this, the entire 8-byte structure is byteswapped, followed by
3966 * a swap of the 4 byte val field. In other cases, the data is
3967 * already in proper host byte order. */
3968 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3969 target_su
.buf
= tswapal(target_su
.buf
);
3970 arg
.val
= tswap32(target_su
.val
);
3972 arg
.val
= target_su
.val
;
3974 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3978 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3982 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3983 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3990 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3994 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3995 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4001 arg
.__buf
= &seminfo
;
4002 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4003 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4011 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/*
 * Guest view of struct sembuf (one semop() operation).
 * NOTE(review): sem_op/sem_flg types reconstructed from the kernel ABI
 * and the __get_user calls in target_to_host_sembuf(); confirm.
 */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in the set */
    short sem_op;             /* operation (signed delta) */
    short sem_flg;            /* IPC_NOWAIT / SEM_UNDO flags */
};
4024 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4025 abi_ulong target_addr
,
4028 struct target_sembuf
*target_sembuf
;
4031 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4032 nsops
*sizeof(struct target_sembuf
), 1);
4034 return -TARGET_EFAULT
;
4036 for(i
=0; i
<nsops
; i
++) {
4037 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4038 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4039 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4042 unlock_user(target_sembuf
, target_addr
, 0);
4047 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4048 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4051 * This macro is required to handle the s390 variants, which passes the
4052 * arguments in a different order than default.
4055 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4056 (__nsops), (__timeout), (__sops)
4058 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4059 (__nsops), 0, (__sops), (__timeout)
4062 static inline abi_long
do_semtimedop(int semid
,
4065 abi_long timeout
, bool time64
)
4067 struct sembuf
*sops
;
4068 struct timespec ts
, *pts
= NULL
;
4074 if (target_to_host_timespec64(pts
, timeout
)) {
4075 return -TARGET_EFAULT
;
4078 if (target_to_host_timespec(pts
, timeout
)) {
4079 return -TARGET_EFAULT
;
4084 if (nsops
> TARGET_SEMOPM
) {
4085 return -TARGET_E2BIG
;
4088 sops
= g_new(struct sembuf
, nsops
);
4090 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4092 return -TARGET_EFAULT
;
4095 ret
= -TARGET_ENOSYS
;
4096 #ifdef __NR_semtimedop
4097 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4100 if (ret
== -TARGET_ENOSYS
) {
4101 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4102 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4110 struct target_msqid_ds
4112 struct target_ipc_perm msg_perm
;
4113 abi_ulong msg_stime
;
4114 #if TARGET_ABI_BITS == 32
4115 abi_ulong __unused1
;
4117 abi_ulong msg_rtime
;
4118 #if TARGET_ABI_BITS == 32
4119 abi_ulong __unused2
;
4121 abi_ulong msg_ctime
;
4122 #if TARGET_ABI_BITS == 32
4123 abi_ulong __unused3
;
4125 abi_ulong __msg_cbytes
;
4127 abi_ulong msg_qbytes
;
4128 abi_ulong msg_lspid
;
4129 abi_ulong msg_lrpid
;
4130 abi_ulong __unused4
;
4131 abi_ulong __unused5
;
4134 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4135 abi_ulong target_addr
)
4137 struct target_msqid_ds
*target_md
;
4139 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4140 return -TARGET_EFAULT
;
4141 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4142 return -TARGET_EFAULT
;
4143 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4144 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4145 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4146 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4147 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4148 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4149 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4150 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4151 unlock_user_struct(target_md
, target_addr
, 0);
4155 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4156 struct msqid_ds
*host_md
)
4158 struct target_msqid_ds
*target_md
;
4160 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4161 return -TARGET_EFAULT
;
4162 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4163 return -TARGET_EFAULT
;
4164 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4165 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4166 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4167 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4168 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4169 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4170 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4171 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4172 unlock_user_struct(target_md
, target_addr
, 1);
/*
 * Guest view of struct msginfo (IPC_INFO/MSG_INFO result).
 * NOTE(review): the int members were lost in extraction and are
 * reconstructed from host_to_target_msginfo()'s field list; confirm.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4187 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4188 struct msginfo
*host_msginfo
)
4190 struct target_msginfo
*target_msginfo
;
4191 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4192 return -TARGET_EFAULT
;
4193 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4194 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4195 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4196 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4197 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4198 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4199 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4200 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4201 unlock_user_struct(target_msginfo
, target_addr
, 1);
4205 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4207 struct msqid_ds dsarg
;
4208 struct msginfo msginfo
;
4209 abi_long ret
= -TARGET_EINVAL
;
4217 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4218 return -TARGET_EFAULT
;
4219 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4220 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4221 return -TARGET_EFAULT
;
4224 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4228 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4229 if (host_to_target_msginfo(ptr
, &msginfo
))
4230 return -TARGET_EFAULT
;
4237 struct target_msgbuf
{
4242 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4243 ssize_t msgsz
, int msgflg
)
4245 struct target_msgbuf
*target_mb
;
4246 struct msgbuf
*host_mb
;
4250 return -TARGET_EINVAL
;
4253 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4254 return -TARGET_EFAULT
;
4255 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4257 unlock_user_struct(target_mb
, msgp
, 0);
4258 return -TARGET_ENOMEM
;
4260 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4261 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4262 ret
= -TARGET_ENOSYS
;
4264 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4267 if (ret
== -TARGET_ENOSYS
) {
4269 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4272 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4278 unlock_user_struct(target_mb
, msgp
, 0);
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
4297 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4298 ssize_t msgsz
, abi_long msgtyp
,
4301 struct target_msgbuf
*target_mb
;
4303 struct msgbuf
*host_mb
;
4307 return -TARGET_EINVAL
;
4310 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4311 return -TARGET_EFAULT
;
4313 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4315 ret
= -TARGET_ENOMEM
;
4318 ret
= -TARGET_ENOSYS
;
4320 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4323 if (ret
== -TARGET_ENOSYS
) {
4324 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4325 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4330 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4331 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4332 if (!target_mtext
) {
4333 ret
= -TARGET_EFAULT
;
4336 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4337 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4340 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4344 unlock_user_struct(target_mb
, msgp
, 1);
4349 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4350 abi_ulong target_addr
)
4352 struct target_shmid_ds
*target_sd
;
4354 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4355 return -TARGET_EFAULT
;
4356 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4357 return -TARGET_EFAULT
;
4358 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4359 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4360 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4361 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4362 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4363 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4364 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4365 unlock_user_struct(target_sd
, target_addr
, 0);
4369 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4370 struct shmid_ds
*host_sd
)
4372 struct target_shmid_ds
*target_sd
;
4374 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4375 return -TARGET_EFAULT
;
4376 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4377 return -TARGET_EFAULT
;
4378 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4379 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4380 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4381 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4382 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4383 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4384 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4385 unlock_user_struct(target_sd
, target_addr
, 1);
4389 struct target_shminfo
{
4397 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4398 struct shminfo
*host_shminfo
)
4400 struct target_shminfo
*target_shminfo
;
4401 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4402 return -TARGET_EFAULT
;
4403 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4404 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4405 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4406 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4407 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4408 unlock_user_struct(target_shminfo
, target_addr
, 1);
4412 struct target_shm_info
{
4417 abi_ulong swap_attempts
;
4418 abi_ulong swap_successes
;
4421 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4422 struct shm_info
*host_shm_info
)
4424 struct target_shm_info
*target_shm_info
;
4425 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4426 return -TARGET_EFAULT
;
4427 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4428 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4429 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4430 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4431 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4432 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4433 unlock_user_struct(target_shm_info
, target_addr
, 1);
4437 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4439 struct shmid_ds dsarg
;
4440 struct shminfo shminfo
;
4441 struct shm_info shm_info
;
4442 abi_long ret
= -TARGET_EINVAL
;
4450 if (target_to_host_shmid_ds(&dsarg
, buf
))
4451 return -TARGET_EFAULT
;
4452 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4453 if (host_to_target_shmid_ds(buf
, &dsarg
))
4454 return -TARGET_EFAULT
;
4457 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4458 if (host_to_target_shminfo(buf
, &shminfo
))
4459 return -TARGET_EFAULT
;
4462 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4463 if (host_to_target_shm_info(buf
, &shm_info
))
4464 return -TARGET_EFAULT
;
4469 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4476 #ifndef TARGET_FORCE_SHMLBA
4477 /* For most architectures, SHMLBA is the same as the page size;
4478 * some architectures have larger values, in which case they should
4479 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4480 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4481 * and defining its own value for SHMLBA.
4483 * The kernel also permits SHMLBA to be set by the architecture to a
4484 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4485 * this means that addresses are rounded to the large size if
4486 * SHM_RND is set but addresses not aligned to that size are not rejected
4487 * as long as they are at least page-aligned. Since the only architecture
4488 * which uses this is ia64 this code doesn't provide for that oddity.
4490 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4492 return TARGET_PAGE_SIZE
;
4496 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4497 int shmid
, abi_ulong shmaddr
, int shmflg
)
4499 CPUState
*cpu
= env_cpu(cpu_env
);
4502 struct shmid_ds shm_info
;
4506 /* shmat pointers are always untagged */
4508 /* find out the length of the shared memory segment */
4509 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4510 if (is_error(ret
)) {
4511 /* can't get length, bail out */
4515 shmlba
= target_shmlba(cpu_env
);
4517 if (shmaddr
& (shmlba
- 1)) {
4518 if (shmflg
& SHM_RND
) {
4519 shmaddr
&= ~(shmlba
- 1);
4521 return -TARGET_EINVAL
;
4524 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4525 return -TARGET_EINVAL
;
4531 * We're mapping shared memory, so ensure we generate code for parallel
4532 * execution and flush old translations. This will work up to the level
4533 * supported by the host -- anything that requires EXCP_ATOMIC will not
4534 * be atomic with respect to an external process.
4536 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4537 cpu
->tcg_cflags
|= CF_PARALLEL
;
4542 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4544 abi_ulong mmap_start
;
4546 /* In order to use the host shmat, we need to honor host SHMLBA. */
4547 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4549 if (mmap_start
== -1) {
4551 host_raddr
= (void *)-1;
4553 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4554 shmflg
| SHM_REMAP
);
4557 if (host_raddr
== (void *)-1) {
4559 return get_errno((long)host_raddr
);
4561 raddr
=h2g((unsigned long)host_raddr
);
4563 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4564 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4565 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4567 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4568 if (!shm_regions
[i
].in_use
) {
4569 shm_regions
[i
].in_use
= true;
4570 shm_regions
[i
].start
= raddr
;
4571 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4581 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4586 /* shmdt pointers are always untagged */
4590 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4591 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4592 shm_regions
[i
].in_use
= false;
4593 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4597 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
4604 #ifdef TARGET_NR_ipc
4605 /* ??? This only works with linear mappings. */
4606 /* do_ipc() must return target values and target errnos. */
4607 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4608 unsigned int call
, abi_long first
,
4609 abi_long second
, abi_long third
,
4610 abi_long ptr
, abi_long fifth
)
4615 version
= call
>> 16;
4620 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4622 case IPCOP_semtimedop
:
4624 * The s390 sys_ipc variant has only five parameters instead of six
4625 * (as for default variant) and the only difference is the handling of
4626 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4627 * to a struct timespec where the generic variant uses fifth parameter.
4629 #if defined(TARGET_S390X)
4630 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4632 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4637 ret
= get_errno(semget(first
, second
, third
));
4640 case IPCOP_semctl
: {
4641 /* The semun argument to semctl is passed by value, so dereference the
4644 get_user_ual(atptr
, ptr
);
4645 ret
= do_semctl(first
, second
, third
, atptr
);
4650 ret
= get_errno(msgget(first
, second
));
4654 ret
= do_msgsnd(first
, ptr
, second
, third
);
4658 ret
= do_msgctl(first
, second
, ptr
);
4665 struct target_ipc_kludge
{
4670 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4671 ret
= -TARGET_EFAULT
;
4675 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4677 unlock_user_struct(tmp
, ptr
, 0);
4681 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4690 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4691 if (is_error(raddr
))
4692 return get_errno(raddr
);
4693 if (put_user_ual(raddr
, third
))
4694 return -TARGET_EFAULT
;
4698 ret
= -TARGET_EINVAL
;
4703 ret
= do_shmdt(ptr
);
4707 /* IPC_* flag values are the same on all linux platforms */
4708 ret
= get_errno(shmget(first
, second
, third
));
4711 /* IPC_* and SHM_* command values are the same on all linux platforms */
4713 ret
= do_shmctl(first
, second
, ptr
);
4716 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4718 ret
= -TARGET_ENOSYS
;
4725 /* kernel structure types definitions */
4727 #define STRUCT(name, ...) STRUCT_ ## name,
4728 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4730 #include "syscall_types.h"
4734 #undef STRUCT_SPECIAL
4736 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4737 #define STRUCT_SPECIAL(name)
4738 #include "syscall_types.h"
4740 #undef STRUCT_SPECIAL
4742 #define MAX_STRUCT_SIZE 4096
4744 #ifdef CONFIG_FIEMAP
4745 /* So fiemap access checks don't overflow on 32 bit systems.
4746 * This is very slightly smaller than the limit imposed by
4747 * the underlying kernel.
4749 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4750 / sizeof(struct fiemap_extent))
4752 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4753 int fd
, int cmd
, abi_long arg
)
4755 /* The parameter for this ioctl is a struct fiemap followed
4756 * by an array of struct fiemap_extent whose size is set
4757 * in fiemap->fm_extent_count. The array is filled in by the
4760 int target_size_in
, target_size_out
;
4762 const argtype
*arg_type
= ie
->arg_type
;
4763 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4766 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4770 assert(arg_type
[0] == TYPE_PTR
);
4771 assert(ie
->access
== IOC_RW
);
4773 target_size_in
= thunk_type_size(arg_type
, 0);
4774 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4776 return -TARGET_EFAULT
;
4778 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4779 unlock_user(argptr
, arg
, 0);
4780 fm
= (struct fiemap
*)buf_temp
;
4781 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4782 return -TARGET_EINVAL
;
4785 outbufsz
= sizeof (*fm
) +
4786 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4788 if (outbufsz
> MAX_STRUCT_SIZE
) {
4789 /* We can't fit all the extents into the fixed size buffer.
4790 * Allocate one that is large enough and use it instead.
4792 fm
= g_try_malloc(outbufsz
);
4794 return -TARGET_ENOMEM
;
4796 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4799 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4800 if (!is_error(ret
)) {
4801 target_size_out
= target_size_in
;
4802 /* An extent_count of 0 means we were only counting the extents
4803 * so there are no structs to copy
4805 if (fm
->fm_extent_count
!= 0) {
4806 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4808 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4810 ret
= -TARGET_EFAULT
;
4812 /* Convert the struct fiemap */
4813 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4814 if (fm
->fm_extent_count
!= 0) {
4815 p
= argptr
+ target_size_in
;
4816 /* ...and then all the struct fiemap_extents */
4817 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4818 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4823 unlock_user(argptr
, arg
, target_size_out
);
4833 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4834 int fd
, int cmd
, abi_long arg
)
4836 const argtype
*arg_type
= ie
->arg_type
;
4840 struct ifconf
*host_ifconf
;
4842 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4843 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4844 int target_ifreq_size
;
4849 abi_long target_ifc_buf
;
4853 assert(arg_type
[0] == TYPE_PTR
);
4854 assert(ie
->access
== IOC_RW
);
4857 target_size
= thunk_type_size(arg_type
, 0);
4859 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4861 return -TARGET_EFAULT
;
4862 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4863 unlock_user(argptr
, arg
, 0);
4865 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4866 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4867 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4869 if (target_ifc_buf
!= 0) {
4870 target_ifc_len
= host_ifconf
->ifc_len
;
4871 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4872 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4874 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4875 if (outbufsz
> MAX_STRUCT_SIZE
) {
4877 * We can't fit all the extents into the fixed size buffer.
4878 * Allocate one that is large enough and use it instead.
4880 host_ifconf
= g_try_malloc(outbufsz
);
4882 return -TARGET_ENOMEM
;
4884 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4887 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4889 host_ifconf
->ifc_len
= host_ifc_len
;
4891 host_ifc_buf
= NULL
;
4893 host_ifconf
->ifc_buf
= host_ifc_buf
;
4895 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4896 if (!is_error(ret
)) {
4897 /* convert host ifc_len to target ifc_len */
4899 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4900 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4901 host_ifconf
->ifc_len
= target_ifc_len
;
4903 /* restore target ifc_buf */
4905 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4907 /* copy struct ifconf to target user */
4909 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4911 return -TARGET_EFAULT
;
4912 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4913 unlock_user(argptr
, arg
, target_size
);
4915 if (target_ifc_buf
!= 0) {
4916 /* copy ifreq[] to target user */
4917 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4918 for (i
= 0; i
< nb_ifreq
; i
++) {
4919 thunk_convert(argptr
+ i
* target_ifreq_size
,
4920 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4921 ifreq_arg_type
, THUNK_TARGET
);
4923 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4928 g_free(host_ifconf
);
4934 #if defined(CONFIG_USBFS)
4935 #if HOST_LONG_BITS > 64
4936 #error USBDEVFS thunks do not support >64 bit hosts yet.
4939 uint64_t target_urb_adr
;
4940 uint64_t target_buf_adr
;
4941 char *target_buf_ptr
;
4942 struct usbdevfs_urb host_urb
;
4945 static GHashTable
*usbdevfs_urb_hashtable(void)
4947 static GHashTable
*urb_hashtable
;
4949 if (!urb_hashtable
) {
4950 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4952 return urb_hashtable
;
4955 static void urb_hashtable_insert(struct live_urb
*urb
)
4957 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4958 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4961 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4963 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4964 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4967 static void urb_hashtable_remove(struct live_urb
*urb
)
4969 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4970 g_hash_table_remove(urb_hashtable
, urb
);
4974 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4975 int fd
, int cmd
, abi_long arg
)
4977 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4978 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4979 struct live_urb
*lurb
;
4983 uintptr_t target_urb_adr
;
4986 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4988 memset(buf_temp
, 0, sizeof(uint64_t));
4989 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4990 if (is_error(ret
)) {
4994 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4995 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4996 if (!lurb
->target_urb_adr
) {
4997 return -TARGET_EFAULT
;
4999 urb_hashtable_remove(lurb
);
5000 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
5001 lurb
->host_urb
.buffer_length
);
5002 lurb
->target_buf_ptr
= NULL
;
5004 /* restore the guest buffer pointer */
5005 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
5007 /* update the guest urb struct */
5008 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5011 return -TARGET_EFAULT
;
5013 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5014 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5016 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5017 /* write back the urb handle */
5018 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5021 return -TARGET_EFAULT
;
5024 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5025 target_urb_adr
= lurb
->target_urb_adr
;
5026 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5027 unlock_user(argptr
, arg
, target_size
);
5034 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5035 uint8_t *buf_temp
__attribute__((unused
)),
5036 int fd
, int cmd
, abi_long arg
)
5038 struct live_urb
*lurb
;
5040 /* map target address back to host URB with metadata. */
5041 lurb
= urb_hashtable_lookup(arg
);
5043 return -TARGET_EFAULT
;
5045 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5049 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5050 int fd
, int cmd
, abi_long arg
)
5052 const argtype
*arg_type
= ie
->arg_type
;
5057 struct live_urb
*lurb
;
5060 * each submitted URB needs to map to a unique ID for the
5061 * kernel, and that unique ID needs to be a pointer to
5062 * host memory. hence, we need to malloc for each URB.
5063 * isochronous transfers have a variable length struct.
5066 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5068 /* construct host copy of urb and metadata */
5069 lurb
= g_try_new0(struct live_urb
, 1);
5071 return -TARGET_ENOMEM
;
5074 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5077 return -TARGET_EFAULT
;
5079 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5080 unlock_user(argptr
, arg
, 0);
5082 lurb
->target_urb_adr
= arg
;
5083 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5085 /* buffer space used depends on endpoint type so lock the entire buffer */
5086 /* control type urbs should check the buffer contents for true direction */
5087 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5088 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5089 lurb
->host_urb
.buffer_length
, 1);
5090 if (lurb
->target_buf_ptr
== NULL
) {
5092 return -TARGET_EFAULT
;
5095 /* update buffer pointer in host copy */
5096 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5098 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5099 if (is_error(ret
)) {
5100 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5103 urb_hashtable_insert(lurb
);
5108 #endif /* CONFIG_USBFS */
5110 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5111 int cmd
, abi_long arg
)
5114 struct dm_ioctl
*host_dm
;
5115 abi_long guest_data
;
5116 uint32_t guest_data_size
;
5118 const argtype
*arg_type
= ie
->arg_type
;
5120 void *big_buf
= NULL
;
5124 target_size
= thunk_type_size(arg_type
, 0);
5125 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5127 ret
= -TARGET_EFAULT
;
5130 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5131 unlock_user(argptr
, arg
, 0);
5133 /* buf_temp is too small, so fetch things into a bigger buffer */
5134 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5135 memcpy(big_buf
, buf_temp
, target_size
);
5139 guest_data
= arg
+ host_dm
->data_start
;
5140 if ((guest_data
- arg
) < 0) {
5141 ret
= -TARGET_EINVAL
;
5144 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5145 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5147 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5149 ret
= -TARGET_EFAULT
;
5153 switch (ie
->host_cmd
) {
5155 case DM_LIST_DEVICES
:
5158 case DM_DEV_SUSPEND
:
5161 case DM_TABLE_STATUS
:
5162 case DM_TABLE_CLEAR
:
5164 case DM_LIST_VERSIONS
:
5168 case DM_DEV_SET_GEOMETRY
:
5169 /* data contains only strings */
5170 memcpy(host_data
, argptr
, guest_data_size
);
5173 memcpy(host_data
, argptr
, guest_data_size
);
5174 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5178 void *gspec
= argptr
;
5179 void *cur_data
= host_data
;
5180 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5181 int spec_size
= thunk_type_size(arg_type
, 0);
5184 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5185 struct dm_target_spec
*spec
= cur_data
;
5189 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5190 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5192 spec
->next
= sizeof(*spec
) + slen
;
5193 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5195 cur_data
+= spec
->next
;
5200 ret
= -TARGET_EINVAL
;
5201 unlock_user(argptr
, guest_data
, 0);
5204 unlock_user(argptr
, guest_data
, 0);
5206 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5207 if (!is_error(ret
)) {
5208 guest_data
= arg
+ host_dm
->data_start
;
5209 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5210 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5211 switch (ie
->host_cmd
) {
5216 case DM_DEV_SUSPEND
:
5219 case DM_TABLE_CLEAR
:
5221 case DM_DEV_SET_GEOMETRY
:
5222 /* no return data */
5224 case DM_LIST_DEVICES
:
5226 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5227 uint32_t remaining_data
= guest_data_size
;
5228 void *cur_data
= argptr
;
5229 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5230 int nl_size
= 12; /* can't use thunk_size due to alignment */
5233 uint32_t next
= nl
->next
;
5235 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5237 if (remaining_data
< nl
->next
) {
5238 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5241 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5242 strcpy(cur_data
+ nl_size
, nl
->name
);
5243 cur_data
+= nl
->next
;
5244 remaining_data
-= nl
->next
;
5248 nl
= (void*)nl
+ next
;
5253 case DM_TABLE_STATUS
:
5255 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5256 void *cur_data
= argptr
;
5257 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5258 int spec_size
= thunk_type_size(arg_type
, 0);
5261 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5262 uint32_t next
= spec
->next
;
5263 int slen
= strlen((char*)&spec
[1]) + 1;
5264 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5265 if (guest_data_size
< spec
->next
) {
5266 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5269 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5270 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5271 cur_data
= argptr
+ spec
->next
;
5272 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5278 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5279 int count
= *(uint32_t*)hdata
;
5280 uint64_t *hdev
= hdata
+ 8;
5281 uint64_t *gdev
= argptr
+ 8;
5284 *(uint32_t*)argptr
= tswap32(count
);
5285 for (i
= 0; i
< count
; i
++) {
5286 *gdev
= tswap64(*hdev
);
5292 case DM_LIST_VERSIONS
:
5294 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5295 uint32_t remaining_data
= guest_data_size
;
5296 void *cur_data
= argptr
;
5297 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5298 int vers_size
= thunk_type_size(arg_type
, 0);
5301 uint32_t next
= vers
->next
;
5303 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5305 if (remaining_data
< vers
->next
) {
5306 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5309 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5310 strcpy(cur_data
+ vers_size
, vers
->name
);
5311 cur_data
+= vers
->next
;
5312 remaining_data
-= vers
->next
;
5316 vers
= (void*)vers
+ next
;
5321 unlock_user(argptr
, guest_data
, 0);
5322 ret
= -TARGET_EINVAL
;
5325 unlock_user(argptr
, guest_data
, guest_data_size
);
5327 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5329 ret
= -TARGET_EFAULT
;
5332 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5333 unlock_user(argptr
, arg
, target_size
);
5340 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5341 int cmd
, abi_long arg
)
5345 const argtype
*arg_type
= ie
->arg_type
;
5346 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5349 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5350 struct blkpg_partition host_part
;
5352 /* Read and convert blkpg */
5354 target_size
= thunk_type_size(arg_type
, 0);
5355 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5357 ret
= -TARGET_EFAULT
;
5360 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5361 unlock_user(argptr
, arg
, 0);
5363 switch (host_blkpg
->op
) {
5364 case BLKPG_ADD_PARTITION
:
5365 case BLKPG_DEL_PARTITION
:
5366 /* payload is struct blkpg_partition */
5369 /* Unknown opcode */
5370 ret
= -TARGET_EINVAL
;
5374 /* Read and convert blkpg->data */
5375 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5376 target_size
= thunk_type_size(part_arg_type
, 0);
5377 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5379 ret
= -TARGET_EFAULT
;
5382 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5383 unlock_user(argptr
, arg
, 0);
5385 /* Swizzle the data pointer to our local copy and call! */
5386 host_blkpg
->data
= &host_part
;
5387 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5393 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5394 int fd
, int cmd
, abi_long arg
)
5396 const argtype
*arg_type
= ie
->arg_type
;
5397 const StructEntry
*se
;
5398 const argtype
*field_types
;
5399 const int *dst_offsets
, *src_offsets
;
5402 abi_ulong
*target_rt_dev_ptr
= NULL
;
5403 unsigned long *host_rt_dev_ptr
= NULL
;
5407 assert(ie
->access
== IOC_W
);
5408 assert(*arg_type
== TYPE_PTR
);
5410 assert(*arg_type
== TYPE_STRUCT
);
5411 target_size
= thunk_type_size(arg_type
, 0);
5412 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5414 return -TARGET_EFAULT
;
5417 assert(*arg_type
== (int)STRUCT_rtentry
);
5418 se
= struct_entries
+ *arg_type
++;
5419 assert(se
->convert
[0] == NULL
);
5420 /* convert struct here to be able to catch rt_dev string */
5421 field_types
= se
->field_types
;
5422 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5423 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5424 for (i
= 0; i
< se
->nb_fields
; i
++) {
5425 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5426 assert(*field_types
== TYPE_PTRVOID
);
5427 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5428 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5429 if (*target_rt_dev_ptr
!= 0) {
5430 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5431 tswapal(*target_rt_dev_ptr
));
5432 if (!*host_rt_dev_ptr
) {
5433 unlock_user(argptr
, arg
, 0);
5434 return -TARGET_EFAULT
;
5437 *host_rt_dev_ptr
= 0;
5442 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5443 argptr
+ src_offsets
[i
],
5444 field_types
, THUNK_HOST
);
5446 unlock_user(argptr
, arg
, 0);
5448 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5450 assert(host_rt_dev_ptr
!= NULL
);
5451 assert(target_rt_dev_ptr
!= NULL
);
5452 if (*host_rt_dev_ptr
!= 0) {
5453 unlock_user((void *)*host_rt_dev_ptr
,
5454 *target_rt_dev_ptr
, 0);
5459 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5460 int fd
, int cmd
, abi_long arg
)
5462 int sig
= target_to_host_signal(arg
);
5463 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5466 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5467 int fd
, int cmd
, abi_long arg
)
5472 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5473 if (is_error(ret
)) {
5477 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5478 if (copy_to_user_timeval(arg
, &tv
)) {
5479 return -TARGET_EFAULT
;
5482 if (copy_to_user_timeval64(arg
, &tv
)) {
5483 return -TARGET_EFAULT
;
5490 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5491 int fd
, int cmd
, abi_long arg
)
5496 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5497 if (is_error(ret
)) {
5501 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5502 if (host_to_target_timespec(arg
, &ts
)) {
5503 return -TARGET_EFAULT
;
5506 if (host_to_target_timespec64(arg
, &ts
)) {
5507 return -TARGET_EFAULT
;
5515 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5516 int fd
, int cmd
, abi_long arg
)
5518 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5519 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5525 static void unlock_drm_version(struct drm_version
*host_ver
,
5526 struct target_drm_version
*target_ver
,
5529 unlock_user(host_ver
->name
, target_ver
->name
,
5530 copy
? host_ver
->name_len
: 0);
5531 unlock_user(host_ver
->date
, target_ver
->date
,
5532 copy
? host_ver
->date_len
: 0);
5533 unlock_user(host_ver
->desc
, target_ver
->desc
,
5534 copy
? host_ver
->desc_len
: 0);
5537 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5538 struct target_drm_version
*target_ver
)
5540 memset(host_ver
, 0, sizeof(*host_ver
));
5542 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5543 if (host_ver
->name_len
) {
5544 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5545 target_ver
->name_len
, 0);
5546 if (!host_ver
->name
) {
5551 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5552 if (host_ver
->date_len
) {
5553 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5554 target_ver
->date_len
, 0);
5555 if (!host_ver
->date
) {
5560 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5561 if (host_ver
->desc_len
) {
5562 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5563 target_ver
->desc_len
, 0);
5564 if (!host_ver
->desc
) {
5571 unlock_drm_version(host_ver
, target_ver
, false);
5575 static inline void host_to_target_drmversion(
5576 struct target_drm_version
*target_ver
,
5577 struct drm_version
*host_ver
)
5579 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5580 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5581 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5582 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5583 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5584 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5585 unlock_drm_version(host_ver
, target_ver
, true);
5588 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5589 int fd
, int cmd
, abi_long arg
)
5591 struct drm_version
*ver
;
5592 struct target_drm_version
*target_ver
;
5595 switch (ie
->host_cmd
) {
5596 case DRM_IOCTL_VERSION
:
5597 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5598 return -TARGET_EFAULT
;
5600 ver
= (struct drm_version
*)buf_temp
;
5601 ret
= target_to_host_drmversion(ver
, target_ver
);
5602 if (!is_error(ret
)) {
5603 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5604 if (is_error(ret
)) {
5605 unlock_drm_version(ver
, target_ver
, false);
5607 host_to_target_drmversion(target_ver
, ver
);
5610 unlock_user_struct(target_ver
, arg
, 0);
5613 return -TARGET_ENOSYS
;
5616 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5617 struct drm_i915_getparam
*gparam
,
5618 int fd
, abi_long arg
)
5622 struct target_drm_i915_getparam
*target_gparam
;
5624 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5625 return -TARGET_EFAULT
;
5628 __get_user(gparam
->param
, &target_gparam
->param
);
5629 gparam
->value
= &value
;
5630 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5631 put_user_s32(value
, target_gparam
->value
);
5633 unlock_user_struct(target_gparam
, arg
, 0);
5637 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5638 int fd
, int cmd
, abi_long arg
)
5640 switch (ie
->host_cmd
) {
5641 case DRM_IOCTL_I915_GETPARAM
:
5642 return do_ioctl_drm_i915_getparam(ie
,
5643 (struct drm_i915_getparam
*)buf_temp
,
5646 return -TARGET_ENOSYS
;
5652 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5653 int fd
, int cmd
, abi_long arg
)
5655 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5656 struct tun_filter
*target_filter
;
5659 assert(ie
->access
== IOC_W
);
5661 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5662 if (!target_filter
) {
5663 return -TARGET_EFAULT
;
5665 filter
->flags
= tswap16(target_filter
->flags
);
5666 filter
->count
= tswap16(target_filter
->count
);
5667 unlock_user(target_filter
, arg
, 0);
5669 if (filter
->count
) {
5670 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5672 return -TARGET_EFAULT
;
5675 target_addr
= lock_user(VERIFY_READ
,
5676 arg
+ offsetof(struct tun_filter
, addr
),
5677 filter
->count
* ETH_ALEN
, 1);
5679 return -TARGET_EFAULT
;
5681 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5682 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5685 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5688 IOCTLEntry ioctl_entries
[] = {
5689 #define IOCTL(cmd, access, ...) \
5690 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5691 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5692 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5693 #define IOCTL_IGNORE(cmd) \
5694 { TARGET_ ## cmd, 0, #cmd },
5699 /* ??? Implement proper locking for ioctls. */
5700 /* do_ioctl() Must return target values and target errnos. */
5701 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5703 const IOCTLEntry
*ie
;
5704 const argtype
*arg_type
;
5706 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5712 if (ie
->target_cmd
== 0) {
5714 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5715 return -TARGET_ENOSYS
;
5717 if (ie
->target_cmd
== cmd
)
5721 arg_type
= ie
->arg_type
;
5723 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5724 } else if (!ie
->host_cmd
) {
5725 /* Some architectures define BSD ioctls in their headers
5726 that are not implemented in Linux. */
5727 return -TARGET_ENOSYS
;
5730 switch(arg_type
[0]) {
5733 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5739 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5743 target_size
= thunk_type_size(arg_type
, 0);
5744 switch(ie
->access
) {
5746 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5747 if (!is_error(ret
)) {
5748 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5750 return -TARGET_EFAULT
;
5751 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5752 unlock_user(argptr
, arg
, target_size
);
5756 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5758 return -TARGET_EFAULT
;
5759 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5760 unlock_user(argptr
, arg
, 0);
5761 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5765 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5767 return -TARGET_EFAULT
;
5768 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5769 unlock_user(argptr
, arg
, 0);
5770 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5771 if (!is_error(ret
)) {
5772 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5774 return -TARGET_EFAULT
;
5775 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5776 unlock_user(argptr
, arg
, target_size
);
5782 qemu_log_mask(LOG_UNIMP
,
5783 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5784 (long)cmd
, arg_type
[0]);
5785 ret
= -TARGET_ENOSYS
;
5791 static const bitmask_transtbl iflag_tbl
[] = {
5792 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5793 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5794 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5795 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5796 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5797 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5798 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5799 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5800 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5801 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5802 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5803 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5804 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5805 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5806 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5810 static const bitmask_transtbl oflag_tbl
[] = {
5811 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5812 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5813 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5814 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5815 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5816 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5817 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5818 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5819 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5820 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5821 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5822 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5823 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5824 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5825 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5826 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5827 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5828 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5829 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5830 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5831 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5832 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5833 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5834 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5838 static const bitmask_transtbl cflag_tbl
[] = {
5839 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5840 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5841 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5842 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5843 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5844 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5845 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5846 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5847 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5848 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5849 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5850 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5851 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5852 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5853 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5854 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5855 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5856 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5857 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5858 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5859 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5860 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5861 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5862 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5863 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5864 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5865 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5866 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5867 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5868 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5869 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5873 static const bitmask_transtbl lflag_tbl
[] = {
5874 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5875 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5876 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5877 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5878 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5879 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5880 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5881 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5882 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5883 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5884 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5885 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5886 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5887 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5888 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5889 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5893 static void target_to_host_termios (void *dst
, const void *src
)
5895 struct host_termios
*host
= dst
;
5896 const struct target_termios
*target
= src
;
5899 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5901 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5903 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5905 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5906 host
->c_line
= target
->c_line
;
5908 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5909 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5910 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5911 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5912 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5913 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5914 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5915 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5916 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5917 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5918 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5919 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5920 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5921 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5922 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5923 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5924 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5925 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5928 static void host_to_target_termios (void *dst
, const void *src
)
5930 struct target_termios
*target
= dst
;
5931 const struct host_termios
*host
= src
;
5934 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5936 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5938 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5940 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5941 target
->c_line
= host
->c_line
;
5943 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5944 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5945 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5946 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5947 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5948 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5949 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5950 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5951 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5952 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5953 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5954 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5955 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5956 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5957 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5958 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5959 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5960 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5963 static const StructEntry struct_termios_def
= {
5964 .convert
= { host_to_target_termios
, target_to_host_termios
},
5965 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5966 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5967 .print
= print_termios
,
5970 static const bitmask_transtbl mmap_flags_tbl
[] = {
5971 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5972 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5973 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5974 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5975 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5976 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5977 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5978 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5979 MAP_DENYWRITE
, MAP_DENYWRITE
},
5980 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5981 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5982 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5983 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5984 MAP_NORESERVE
, MAP_NORESERVE
},
5985 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5986 /* MAP_STACK had been ignored by the kernel for quite some time.
5987 Recognize it for the target insofar as we do not want to pass
5988 it through to the host. */
5989 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5994 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5995 * TARGET_I386 is defined if TARGET_X86_64 is defined
5997 #if defined(TARGET_I386)
5999 /* NOTE: there is really one LDT for all the threads */
6000 static uint8_t *ldt_table
;
6002 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6009 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6010 if (size
> bytecount
)
6012 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6014 return -TARGET_EFAULT
;
6015 /* ??? Should this by byteswapped? */
6016 memcpy(p
, ldt_table
, size
);
6017 unlock_user(p
, ptr
, size
);
6021 /* XXX: add locking support */
6022 static abi_long
write_ldt(CPUX86State
*env
,
6023 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6025 struct target_modify_ldt_ldt_s ldt_info
;
6026 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6027 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6028 int seg_not_present
, useable
, lm
;
6029 uint32_t *lp
, entry_1
, entry_2
;
6031 if (bytecount
!= sizeof(ldt_info
))
6032 return -TARGET_EINVAL
;
6033 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6034 return -TARGET_EFAULT
;
6035 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6036 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6037 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6038 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6039 unlock_user_struct(target_ldt_info
, ptr
, 0);
6041 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6042 return -TARGET_EINVAL
;
6043 seg_32bit
= ldt_info
.flags
& 1;
6044 contents
= (ldt_info
.flags
>> 1) & 3;
6045 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6046 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6047 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6048 useable
= (ldt_info
.flags
>> 6) & 1;
6052 lm
= (ldt_info
.flags
>> 7) & 1;
6054 if (contents
== 3) {
6056 return -TARGET_EINVAL
;
6057 if (seg_not_present
== 0)
6058 return -TARGET_EINVAL
;
6060 /* allocate the LDT */
6062 env
->ldt
.base
= target_mmap(0,
6063 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6064 PROT_READ
|PROT_WRITE
,
6065 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6066 if (env
->ldt
.base
== -1)
6067 return -TARGET_ENOMEM
;
6068 memset(g2h_untagged(env
->ldt
.base
), 0,
6069 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6070 env
->ldt
.limit
= 0xffff;
6071 ldt_table
= g2h_untagged(env
->ldt
.base
);
6074 /* NOTE: same code as Linux kernel */
6075 /* Allow LDTs to be cleared by the user. */
6076 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6079 read_exec_only
== 1 &&
6081 limit_in_pages
== 0 &&
6082 seg_not_present
== 1 &&
6090 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6091 (ldt_info
.limit
& 0x0ffff);
6092 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6093 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6094 (ldt_info
.limit
& 0xf0000) |
6095 ((read_exec_only
^ 1) << 9) |
6097 ((seg_not_present
^ 1) << 15) |
6099 (limit_in_pages
<< 23) |
6103 entry_2
|= (useable
<< 20);
6105 /* Install the new entry ... */
6107 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6108 lp
[0] = tswap32(entry_1
);
6109 lp
[1] = tswap32(entry_2
);
6113 /* specific and weird i386 syscalls */
6114 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6115 unsigned long bytecount
)
6121 ret
= read_ldt(ptr
, bytecount
);
6124 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6127 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6130 ret
= -TARGET_ENOSYS
;
6136 #if defined(TARGET_ABI32)
6137 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6139 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6140 struct target_modify_ldt_ldt_s ldt_info
;
6141 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6142 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6143 int seg_not_present
, useable
, lm
;
6144 uint32_t *lp
, entry_1
, entry_2
;
6147 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6148 if (!target_ldt_info
)
6149 return -TARGET_EFAULT
;
6150 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6151 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6152 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6153 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6154 if (ldt_info
.entry_number
== -1) {
6155 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6156 if (gdt_table
[i
] == 0) {
6157 ldt_info
.entry_number
= i
;
6158 target_ldt_info
->entry_number
= tswap32(i
);
6163 unlock_user_struct(target_ldt_info
, ptr
, 1);
6165 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6166 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6167 return -TARGET_EINVAL
;
6168 seg_32bit
= ldt_info
.flags
& 1;
6169 contents
= (ldt_info
.flags
>> 1) & 3;
6170 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6171 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6172 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6173 useable
= (ldt_info
.flags
>> 6) & 1;
6177 lm
= (ldt_info
.flags
>> 7) & 1;
6180 if (contents
== 3) {
6181 if (seg_not_present
== 0)
6182 return -TARGET_EINVAL
;
6185 /* NOTE: same code as Linux kernel */
6186 /* Allow LDTs to be cleared by the user. */
6187 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6188 if ((contents
== 0 &&
6189 read_exec_only
== 1 &&
6191 limit_in_pages
== 0 &&
6192 seg_not_present
== 1 &&
6200 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6201 (ldt_info
.limit
& 0x0ffff);
6202 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6203 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6204 (ldt_info
.limit
& 0xf0000) |
6205 ((read_exec_only
^ 1) << 9) |
6207 ((seg_not_present
^ 1) << 15) |
6209 (limit_in_pages
<< 23) |
6214 /* Install the new entry ... */
6216 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6217 lp
[0] = tswap32(entry_1
);
6218 lp
[1] = tswap32(entry_2
);
6222 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6224 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6225 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6226 uint32_t base_addr
, limit
, flags
;
6227 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6228 int seg_not_present
, useable
, lm
;
6229 uint32_t *lp
, entry_1
, entry_2
;
6231 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6232 if (!target_ldt_info
)
6233 return -TARGET_EFAULT
;
6234 idx
= tswap32(target_ldt_info
->entry_number
);
6235 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6236 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6237 unlock_user_struct(target_ldt_info
, ptr
, 1);
6238 return -TARGET_EINVAL
;
6240 lp
= (uint32_t *)(gdt_table
+ idx
);
6241 entry_1
= tswap32(lp
[0]);
6242 entry_2
= tswap32(lp
[1]);
6244 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6245 contents
= (entry_2
>> 10) & 3;
6246 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6247 seg_32bit
= (entry_2
>> 22) & 1;
6248 limit_in_pages
= (entry_2
>> 23) & 1;
6249 useable
= (entry_2
>> 20) & 1;
6253 lm
= (entry_2
>> 21) & 1;
6255 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6256 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6257 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6258 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6259 base_addr
= (entry_1
>> 16) |
6260 (entry_2
& 0xff000000) |
6261 ((entry_2
& 0xff) << 16);
6262 target_ldt_info
->base_addr
= tswapal(base_addr
);
6263 target_ldt_info
->limit
= tswap32(limit
);
6264 target_ldt_info
->flags
= tswap32(flags
);
6265 unlock_user_struct(target_ldt_info
, ptr
, 1);
6269 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6271 return -TARGET_ENOSYS
;
6274 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6281 case TARGET_ARCH_SET_GS
:
6282 case TARGET_ARCH_SET_FS
:
6283 if (code
== TARGET_ARCH_SET_GS
)
6287 cpu_x86_load_seg(env
, idx
, 0);
6288 env
->segs
[idx
].base
= addr
;
6290 case TARGET_ARCH_GET_GS
:
6291 case TARGET_ARCH_GET_FS
:
6292 if (code
== TARGET_ARCH_GET_GS
)
6296 val
= env
->segs
[idx
].base
;
6297 if (put_user(val
, addr
, abi_ulong
))
6298 ret
= -TARGET_EFAULT
;
6301 ret
= -TARGET_EINVAL
;
6306 #endif /* defined(TARGET_ABI32 */
6307 #endif /* defined(TARGET_I386) */
6310 * These constants are generic. Supply any that are missing from the host.
6313 # define PR_SET_NAME 15
6314 # define PR_GET_NAME 16
6316 #ifndef PR_SET_FP_MODE
6317 # define PR_SET_FP_MODE 45
6318 # define PR_GET_FP_MODE 46
6319 # define PR_FP_MODE_FR (1 << 0)
6320 # define PR_FP_MODE_FRE (1 << 1)
6322 #ifndef PR_SVE_SET_VL
6323 # define PR_SVE_SET_VL 50
6324 # define PR_SVE_GET_VL 51
6325 # define PR_SVE_VL_LEN_MASK 0xffff
6326 # define PR_SVE_VL_INHERIT (1 << 17)
6328 #ifndef PR_PAC_RESET_KEYS
6329 # define PR_PAC_RESET_KEYS 54
6330 # define PR_PAC_APIAKEY (1 << 0)
6331 # define PR_PAC_APIBKEY (1 << 1)
6332 # define PR_PAC_APDAKEY (1 << 2)
6333 # define PR_PAC_APDBKEY (1 << 3)
6334 # define PR_PAC_APGAKEY (1 << 4)
6336 #ifndef PR_SET_TAGGED_ADDR_CTRL
6337 # define PR_SET_TAGGED_ADDR_CTRL 55
6338 # define PR_GET_TAGGED_ADDR_CTRL 56
6339 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6341 #ifndef PR_MTE_TCF_SHIFT
6342 # define PR_MTE_TCF_SHIFT 1
6343 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6344 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6345 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6346 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6347 # define PR_MTE_TAG_SHIFT 3
6348 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6350 #ifndef PR_SET_IO_FLUSHER
6351 # define PR_SET_IO_FLUSHER 57
6352 # define PR_GET_IO_FLUSHER 58
6354 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6355 # define PR_SET_SYSCALL_USER_DISPATCH 59
6358 #include "target_prctl.h"
6360 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6362 return -TARGET_EINVAL
;
6365 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6367 return -TARGET_EINVAL
;
6370 #ifndef do_prctl_get_fp_mode
6371 #define do_prctl_get_fp_mode do_prctl_inval0
6373 #ifndef do_prctl_set_fp_mode
6374 #define do_prctl_set_fp_mode do_prctl_inval1
6376 #ifndef do_prctl_get_vl
6377 #define do_prctl_get_vl do_prctl_inval0
6379 #ifndef do_prctl_set_vl
6380 #define do_prctl_set_vl do_prctl_inval1
6382 #ifndef do_prctl_reset_keys
6383 #define do_prctl_reset_keys do_prctl_inval1
6385 #ifndef do_prctl_set_tagged_addr_ctrl
6386 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6388 #ifndef do_prctl_get_tagged_addr_ctrl
6389 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6391 #ifndef do_prctl_get_unalign
6392 #define do_prctl_get_unalign do_prctl_inval1
6394 #ifndef do_prctl_set_unalign
6395 #define do_prctl_set_unalign do_prctl_inval1
6398 static abi_long
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6399 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6404 case PR_GET_PDEATHSIG
:
6407 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6409 if (!is_error(ret
) &&
6410 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6411 return -TARGET_EFAULT
;
6415 case PR_SET_PDEATHSIG
:
6416 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6420 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6422 return -TARGET_EFAULT
;
6424 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6426 unlock_user(name
, arg2
, 16);
6431 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6433 return -TARGET_EFAULT
;
6435 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6437 unlock_user(name
, arg2
, 0);
6440 case PR_GET_FP_MODE
:
6441 return do_prctl_get_fp_mode(env
);
6442 case PR_SET_FP_MODE
:
6443 return do_prctl_set_fp_mode(env
, arg2
);
6445 return do_prctl_get_vl(env
);
6447 return do_prctl_set_vl(env
, arg2
);
6448 case PR_PAC_RESET_KEYS
:
6449 if (arg3
|| arg4
|| arg5
) {
6450 return -TARGET_EINVAL
;
6452 return do_prctl_reset_keys(env
, arg2
);
6453 case PR_SET_TAGGED_ADDR_CTRL
:
6454 if (arg3
|| arg4
|| arg5
) {
6455 return -TARGET_EINVAL
;
6457 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6458 case PR_GET_TAGGED_ADDR_CTRL
:
6459 if (arg2
|| arg3
|| arg4
|| arg5
) {
6460 return -TARGET_EINVAL
;
6462 return do_prctl_get_tagged_addr_ctrl(env
);
6464 case PR_GET_UNALIGN
:
6465 return do_prctl_get_unalign(env
, arg2
);
6466 case PR_SET_UNALIGN
:
6467 return do_prctl_set_unalign(env
, arg2
);
6469 case PR_CAP_AMBIENT
:
6470 case PR_CAPBSET_READ
:
6471 case PR_CAPBSET_DROP
:
6472 case PR_GET_DUMPABLE
:
6473 case PR_SET_DUMPABLE
:
6474 case PR_GET_KEEPCAPS
:
6475 case PR_SET_KEEPCAPS
:
6476 case PR_GET_SECUREBITS
:
6477 case PR_SET_SECUREBITS
:
6480 case PR_GET_TIMERSLACK
:
6481 case PR_SET_TIMERSLACK
:
6483 case PR_MCE_KILL_GET
:
6484 case PR_GET_NO_NEW_PRIVS
:
6485 case PR_SET_NO_NEW_PRIVS
:
6486 case PR_GET_IO_FLUSHER
:
6487 case PR_SET_IO_FLUSHER
:
6488 /* Some prctl options have no pointer arguments and we can pass on. */
6489 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6491 case PR_GET_CHILD_SUBREAPER
:
6492 case PR_SET_CHILD_SUBREAPER
:
6493 case PR_GET_SPECULATION_CTRL
:
6494 case PR_SET_SPECULATION_CTRL
:
6495 case PR_GET_TID_ADDRESS
:
6497 return -TARGET_EINVAL
;
6501 /* Was used for SPE on PowerPC. */
6502 return -TARGET_EINVAL
;
6509 case PR_GET_SECCOMP
:
6510 case PR_SET_SECCOMP
:
6511 case PR_SET_SYSCALL_USER_DISPATCH
:
6512 case PR_GET_THP_DISABLE
:
6513 case PR_SET_THP_DISABLE
:
6516 /* Disable to prevent the target disabling stuff we need. */
6517 return -TARGET_EINVAL
;
6520 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6522 return -TARGET_EINVAL
;
6526 #define NEW_STACK_SIZE 0x40000
6529 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6532 pthread_mutex_t mutex
;
6533 pthread_cond_t cond
;
6536 abi_ulong child_tidptr
;
6537 abi_ulong parent_tidptr
;
6541 static void *clone_func(void *arg
)
6543 new_thread_info
*info
= arg
;
6548 rcu_register_thread();
6549 tcg_register_thread();
6553 ts
= (TaskState
*)cpu
->opaque
;
6554 info
->tid
= sys_gettid();
6556 if (info
->child_tidptr
)
6557 put_user_u32(info
->tid
, info
->child_tidptr
);
6558 if (info
->parent_tidptr
)
6559 put_user_u32(info
->tid
, info
->parent_tidptr
);
6560 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6561 /* Enable signals. */
6562 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6563 /* Signal to the parent that we're ready. */
6564 pthread_mutex_lock(&info
->mutex
);
6565 pthread_cond_broadcast(&info
->cond
);
6566 pthread_mutex_unlock(&info
->mutex
);
6567 /* Wait until the parent has finished initializing the tls state. */
6568 pthread_mutex_lock(&clone_lock
);
6569 pthread_mutex_unlock(&clone_lock
);
6575 /* do_fork() Must return host values and target errnos (unlike most
6576 do_*() functions). */
6577 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6578 abi_ulong parent_tidptr
, target_ulong newtls
,
6579 abi_ulong child_tidptr
)
6581 CPUState
*cpu
= env_cpu(env
);
6585 CPUArchState
*new_env
;
6588 flags
&= ~CLONE_IGNORED_FLAGS
;
6590 /* Emulate vfork() with fork() */
6591 if (flags
& CLONE_VFORK
)
6592 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6594 if (flags
& CLONE_VM
) {
6595 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6596 new_thread_info info
;
6597 pthread_attr_t attr
;
6599 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6600 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6601 return -TARGET_EINVAL
;
6604 ts
= g_new0(TaskState
, 1);
6605 init_task_state(ts
);
6607 /* Grab a mutex so that thread setup appears atomic. */
6608 pthread_mutex_lock(&clone_lock
);
6611 * If this is our first additional thread, we need to ensure we
6612 * generate code for parallel execution and flush old translations.
6613 * Do this now so that the copy gets CF_PARALLEL too.
6615 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6616 cpu
->tcg_cflags
|= CF_PARALLEL
;
6620 /* we create a new CPU instance. */
6621 new_env
= cpu_copy(env
);
6622 /* Init regs that differ from the parent. */
6623 cpu_clone_regs_child(new_env
, newsp
, flags
);
6624 cpu_clone_regs_parent(env
, flags
);
6625 new_cpu
= env_cpu(new_env
);
6626 new_cpu
->opaque
= ts
;
6627 ts
->bprm
= parent_ts
->bprm
;
6628 ts
->info
= parent_ts
->info
;
6629 ts
->signal_mask
= parent_ts
->signal_mask
;
6631 if (flags
& CLONE_CHILD_CLEARTID
) {
6632 ts
->child_tidptr
= child_tidptr
;
6635 if (flags
& CLONE_SETTLS
) {
6636 cpu_set_tls (new_env
, newtls
);
6639 memset(&info
, 0, sizeof(info
));
6640 pthread_mutex_init(&info
.mutex
, NULL
);
6641 pthread_mutex_lock(&info
.mutex
);
6642 pthread_cond_init(&info
.cond
, NULL
);
6644 if (flags
& CLONE_CHILD_SETTID
) {
6645 info
.child_tidptr
= child_tidptr
;
6647 if (flags
& CLONE_PARENT_SETTID
) {
6648 info
.parent_tidptr
= parent_tidptr
;
6651 ret
= pthread_attr_init(&attr
);
6652 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6653 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6654 /* It is not safe to deliver signals until the child has finished
6655 initializing, so temporarily block all signals. */
6656 sigfillset(&sigmask
);
6657 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6658 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6660 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6661 /* TODO: Free new CPU state if thread creation failed. */
6663 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6664 pthread_attr_destroy(&attr
);
6666 /* Wait for the child to initialize. */
6667 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6672 pthread_mutex_unlock(&info
.mutex
);
6673 pthread_cond_destroy(&info
.cond
);
6674 pthread_mutex_destroy(&info
.mutex
);
6675 pthread_mutex_unlock(&clone_lock
);
6677 /* if no CLONE_VM, we consider it is a fork */
6678 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6679 return -TARGET_EINVAL
;
6682 /* We can't support custom termination signals */
6683 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6684 return -TARGET_EINVAL
;
6687 if (block_signals()) {
6688 return -QEMU_ERESTARTSYS
;
6694 /* Child Process. */
6695 cpu_clone_regs_child(env
, newsp
, flags
);
6697 /* There is a race condition here. The parent process could
6698 theoretically read the TID in the child process before the child
6699 tid is set. This would require using either ptrace
6700 (not implemented) or having *_tidptr to point at a shared memory
6701 mapping. We can't repeat the spinlock hack used above because
6702 the child process gets its own copy of the lock. */
6703 if (flags
& CLONE_CHILD_SETTID
)
6704 put_user_u32(sys_gettid(), child_tidptr
);
6705 if (flags
& CLONE_PARENT_SETTID
)
6706 put_user_u32(sys_gettid(), parent_tidptr
);
6707 ts
= (TaskState
*)cpu
->opaque
;
6708 if (flags
& CLONE_SETTLS
)
6709 cpu_set_tls (env
, newtls
);
6710 if (flags
& CLONE_CHILD_CLEARTID
)
6711 ts
->child_tidptr
= child_tidptr
;
6713 cpu_clone_regs_parent(env
, flags
);
6720 /* warning : doesn't handle linux specific flags... */
6721 static int target_to_host_fcntl_cmd(int cmd
)
6726 case TARGET_F_DUPFD
:
6727 case TARGET_F_GETFD
:
6728 case TARGET_F_SETFD
:
6729 case TARGET_F_GETFL
:
6730 case TARGET_F_SETFL
:
6731 case TARGET_F_OFD_GETLK
:
6732 case TARGET_F_OFD_SETLK
:
6733 case TARGET_F_OFD_SETLKW
:
6736 case TARGET_F_GETLK
:
6739 case TARGET_F_SETLK
:
6742 case TARGET_F_SETLKW
:
6745 case TARGET_F_GETOWN
:
6748 case TARGET_F_SETOWN
:
6751 case TARGET_F_GETSIG
:
6754 case TARGET_F_SETSIG
:
6757 #if TARGET_ABI_BITS == 32
6758 case TARGET_F_GETLK64
:
6761 case TARGET_F_SETLK64
:
6764 case TARGET_F_SETLKW64
:
6768 case TARGET_F_SETLEASE
:
6771 case TARGET_F_GETLEASE
:
6774 #ifdef F_DUPFD_CLOEXEC
6775 case TARGET_F_DUPFD_CLOEXEC
:
6776 ret
= F_DUPFD_CLOEXEC
;
6779 case TARGET_F_NOTIFY
:
6783 case TARGET_F_GETOWN_EX
:
6788 case TARGET_F_SETOWN_EX
:
6793 case TARGET_F_SETPIPE_SZ
:
6796 case TARGET_F_GETPIPE_SZ
:
6801 case TARGET_F_ADD_SEALS
:
6804 case TARGET_F_GET_SEALS
:
6809 ret
= -TARGET_EINVAL
;
6813 #if defined(__powerpc64__)
6814 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6815 * is not supported by kernel. The glibc fcntl call actually adjusts
6816 * them to 5, 6 and 7 before making the syscall(). Since we make the
6817 * syscall directly, adjust to what is supported by the kernel.
6819 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6820 ret
-= F_GETLK64
- 5;
6827 #define FLOCK_TRANSTBL \
6829 TRANSTBL_CONVERT(F_RDLCK); \
6830 TRANSTBL_CONVERT(F_WRLCK); \
6831 TRANSTBL_CONVERT(F_UNLCK); \
6834 static int target_to_host_flock(int type
)
6836 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6838 #undef TRANSTBL_CONVERT
6839 return -TARGET_EINVAL
;
6842 static int host_to_target_flock(int type
)
6844 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6846 #undef TRANSTBL_CONVERT
6847 /* if we don't know how to convert the value coming
6848 * from the host we copy to the target field as-is
6853 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6854 abi_ulong target_flock_addr
)
6856 struct target_flock
*target_fl
;
6859 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6860 return -TARGET_EFAULT
;
6863 __get_user(l_type
, &target_fl
->l_type
);
6864 l_type
= target_to_host_flock(l_type
);
6868 fl
->l_type
= l_type
;
6869 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6870 __get_user(fl
->l_start
, &target_fl
->l_start
);
6871 __get_user(fl
->l_len
, &target_fl
->l_len
);
6872 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6873 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6877 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6878 const struct flock64
*fl
)
6880 struct target_flock
*target_fl
;
6883 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6884 return -TARGET_EFAULT
;
6887 l_type
= host_to_target_flock(fl
->l_type
);
6888 __put_user(l_type
, &target_fl
->l_type
);
6889 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6890 __put_user(fl
->l_start
, &target_fl
->l_start
);
6891 __put_user(fl
->l_len
, &target_fl
->l_len
);
6892 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6893 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6897 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6898 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6900 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6901 struct target_oabi_flock64
{
6909 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6910 abi_ulong target_flock_addr
)
6912 struct target_oabi_flock64
*target_fl
;
6915 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6916 return -TARGET_EFAULT
;
6919 __get_user(l_type
, &target_fl
->l_type
);
6920 l_type
= target_to_host_flock(l_type
);
6924 fl
->l_type
= l_type
;
6925 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6926 __get_user(fl
->l_start
, &target_fl
->l_start
);
6927 __get_user(fl
->l_len
, &target_fl
->l_len
);
6928 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6929 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6933 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6934 const struct flock64
*fl
)
6936 struct target_oabi_flock64
*target_fl
;
6939 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6940 return -TARGET_EFAULT
;
6943 l_type
= host_to_target_flock(fl
->l_type
);
6944 __put_user(l_type
, &target_fl
->l_type
);
6945 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6946 __put_user(fl
->l_start
, &target_fl
->l_start
);
6947 __put_user(fl
->l_len
, &target_fl
->l_len
);
6948 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6949 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6954 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6955 abi_ulong target_flock_addr
)
6957 struct target_flock64
*target_fl
;
6960 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6961 return -TARGET_EFAULT
;
6964 __get_user(l_type
, &target_fl
->l_type
);
6965 l_type
= target_to_host_flock(l_type
);
6969 fl
->l_type
= l_type
;
6970 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6971 __get_user(fl
->l_start
, &target_fl
->l_start
);
6972 __get_user(fl
->l_len
, &target_fl
->l_len
);
6973 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6974 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6978 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6979 const struct flock64
*fl
)
6981 struct target_flock64
*target_fl
;
6984 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6985 return -TARGET_EFAULT
;
6988 l_type
= host_to_target_flock(fl
->l_type
);
6989 __put_user(l_type
, &target_fl
->l_type
);
6990 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6991 __put_user(fl
->l_start
, &target_fl
->l_start
);
6992 __put_user(fl
->l_len
, &target_fl
->l_len
);
6993 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6994 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6998 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
7000 struct flock64 fl64
;
7002 struct f_owner_ex fox
;
7003 struct target_f_owner_ex
*target_fox
;
7006 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
7008 if (host_cmd
== -TARGET_EINVAL
)
7012 case TARGET_F_GETLK
:
7013 ret
= copy_from_user_flock(&fl64
, arg
);
7017 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7019 ret
= copy_to_user_flock(arg
, &fl64
);
7023 case TARGET_F_SETLK
:
7024 case TARGET_F_SETLKW
:
7025 ret
= copy_from_user_flock(&fl64
, arg
);
7029 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7032 case TARGET_F_GETLK64
:
7033 case TARGET_F_OFD_GETLK
:
7034 ret
= copy_from_user_flock64(&fl64
, arg
);
7038 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7040 ret
= copy_to_user_flock64(arg
, &fl64
);
7043 case TARGET_F_SETLK64
:
7044 case TARGET_F_SETLKW64
:
7045 case TARGET_F_OFD_SETLK
:
7046 case TARGET_F_OFD_SETLKW
:
7047 ret
= copy_from_user_flock64(&fl64
, arg
);
7051 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7054 case TARGET_F_GETFL
:
7055 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7057 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
7061 case TARGET_F_SETFL
:
7062 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
7063 target_to_host_bitmask(arg
,
7068 case TARGET_F_GETOWN_EX
:
7069 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7071 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
7072 return -TARGET_EFAULT
;
7073 target_fox
->type
= tswap32(fox
.type
);
7074 target_fox
->pid
= tswap32(fox
.pid
);
7075 unlock_user_struct(target_fox
, arg
, 1);
7081 case TARGET_F_SETOWN_EX
:
7082 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
7083 return -TARGET_EFAULT
;
7084 fox
.type
= tswap32(target_fox
->type
);
7085 fox
.pid
= tswap32(target_fox
->pid
);
7086 unlock_user_struct(target_fox
, arg
, 0);
7087 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7091 case TARGET_F_SETSIG
:
7092 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
7095 case TARGET_F_GETSIG
:
7096 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
7099 case TARGET_F_SETOWN
:
7100 case TARGET_F_GETOWN
:
7101 case TARGET_F_SETLEASE
:
7102 case TARGET_F_GETLEASE
:
7103 case TARGET_F_SETPIPE_SZ
:
7104 case TARGET_F_GETPIPE_SZ
:
7105 case TARGET_F_ADD_SEALS
:
7106 case TARGET_F_GET_SEALS
:
7107 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7111 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* 16-bit UID ABI (USE_UID16): clamp/expand between 32-bit host IDs
 * and the guest's 16-bit IDs, matching kernel overflowuid behavior. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535) {
        return 65534;
    } else {
        return uid;
    }
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535) {
        return 65534;
    } else {
        return gid;
    }
}

/* -1 (the "unchanged" sentinel) must survive the width conversion. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1) {
        return -1;
    } else {
        return uid;
    }
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1) {
        return -1;
    } else {
        return gid;
    }
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
#define get_user_id(x, gaddr) get_user_u16(x, gaddr)
7157 #else /* !USE_UID16 */
/* 32-bit UID ABI: IDs pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7181 #endif /* USE_UID16 */
7183 /* We must do direct syscalls for setting UID/GID, because we want to
7184 * implement the Linux system call semantics of "change only for this thread",
7185 * not the libc/POSIX semantics of "change for all threads in process".
7186 * (See http://ewontfix.com/17/ for more details.)
7187 * We use the 32-bit version of the syscalls if present; if it is not
7188 * then either the host architecture supports 32-bit UIDs natively with
7189 * the standard syscall, or the 16-bit UID is the best we can do.
7191 #ifdef __NR_setuid32
7192 #define __NR_sys_setuid __NR_setuid32
7194 #define __NR_sys_setuid __NR_setuid
7196 #ifdef __NR_setgid32
7197 #define __NR_sys_setgid __NR_setgid32
7199 #define __NR_sys_setgid __NR_setgid
7201 #ifdef __NR_setresuid32
7202 #define __NR_sys_setresuid __NR_setresuid32
7204 #define __NR_sys_setresuid __NR_setresuid
7206 #ifdef __NR_setresgid32
7207 #define __NR_sys_setresgid __NR_setresgid32
7209 #define __NR_sys_setresgid __NR_setresgid
7212 _syscall1(int, sys_setuid
, uid_t
, uid
)
7213 _syscall1(int, sys_setgid
, gid_t
, gid
)
7214 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7215 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7217 void syscall_init(void)
7220 const argtype
*arg_type
;
7223 thunk_init(STRUCT_MAX
);
7225 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7226 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7227 #include "syscall_types.h"
7229 #undef STRUCT_SPECIAL
7231 /* we patch the ioctl size if necessary. We rely on the fact that
7232 no ioctl has all the bits at '1' in the size field */
7234 while (ie
->target_cmd
!= 0) {
7235 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7236 TARGET_IOC_SIZEMASK
) {
7237 arg_type
= ie
->arg_type
;
7238 if (arg_type
[0] != TYPE_PTR
) {
7239 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7244 size
= thunk_type_size(arg_type
, 0);
7245 ie
->target_cmd
= (ie
->target_cmd
&
7246 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7247 (size
<< TARGET_IOC_SIZESHIFT
);
7250 /* automatic consistency check if same arch */
7251 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7252 (defined(__x86_64__) && defined(TARGET_X86_64))
7253 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7254 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7255 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7262 #ifdef TARGET_NR_truncate64
7263 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
7268 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7272 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7276 #ifdef TARGET_NR_ftruncate64
7277 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
7282 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7286 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7290 #if defined(TARGET_NR_timer_settime) || \
7291 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7292 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
7293 abi_ulong target_addr
)
7295 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
7296 offsetof(struct target_itimerspec
,
7298 target_to_host_timespec(&host_its
->it_value
, target_addr
+
7299 offsetof(struct target_itimerspec
,
7301 return -TARGET_EFAULT
;
7308 #if defined(TARGET_NR_timer_settime64) || \
7309 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7310 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
7311 abi_ulong target_addr
)
7313 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
7314 offsetof(struct target__kernel_itimerspec
,
7316 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
7317 offsetof(struct target__kernel_itimerspec
,
7319 return -TARGET_EFAULT
;
7326 #if ((defined(TARGET_NR_timerfd_gettime) || \
7327 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7328 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7329 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7330 struct itimerspec
*host_its
)
7332 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7334 &host_its
->it_interval
) ||
7335 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7337 &host_its
->it_value
)) {
7338 return -TARGET_EFAULT
;
7344 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7345 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7346 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7347 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
7348 struct itimerspec
*host_its
)
7350 if (host_to_target_timespec64(target_addr
+
7351 offsetof(struct target__kernel_itimerspec
,
7353 &host_its
->it_interval
) ||
7354 host_to_target_timespec64(target_addr
+
7355 offsetof(struct target__kernel_itimerspec
,
7357 &host_its
->it_value
)) {
7358 return -TARGET_EFAULT
;
7364 #if defined(TARGET_NR_adjtimex) || \
7365 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7366 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7367 abi_long target_addr
)
7369 struct target_timex
*target_tx
;
7371 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7372 return -TARGET_EFAULT
;
7375 __get_user(host_tx
->modes
, &target_tx
->modes
);
7376 __get_user(host_tx
->offset
, &target_tx
->offset
);
7377 __get_user(host_tx
->freq
, &target_tx
->freq
);
7378 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7379 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7380 __get_user(host_tx
->status
, &target_tx
->status
);
7381 __get_user(host_tx
->constant
, &target_tx
->constant
);
7382 __get_user(host_tx
->precision
, &target_tx
->precision
);
7383 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7384 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7385 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7386 __get_user(host_tx
->tick
, &target_tx
->tick
);
7387 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7388 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7389 __get_user(host_tx
->shift
, &target_tx
->shift
);
7390 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7391 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7392 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7393 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7394 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7395 __get_user(host_tx
->tai
, &target_tx
->tai
);
7397 unlock_user_struct(target_tx
, target_addr
, 0);
7401 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7402 struct timex
*host_tx
)
7404 struct target_timex
*target_tx
;
7406 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7407 return -TARGET_EFAULT
;
7410 __put_user(host_tx
->modes
, &target_tx
->modes
);
7411 __put_user(host_tx
->offset
, &target_tx
->offset
);
7412 __put_user(host_tx
->freq
, &target_tx
->freq
);
7413 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7414 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7415 __put_user(host_tx
->status
, &target_tx
->status
);
7416 __put_user(host_tx
->constant
, &target_tx
->constant
);
7417 __put_user(host_tx
->precision
, &target_tx
->precision
);
7418 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7419 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7420 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7421 __put_user(host_tx
->tick
, &target_tx
->tick
);
7422 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7423 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7424 __put_user(host_tx
->shift
, &target_tx
->shift
);
7425 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7426 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7427 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7428 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7429 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7430 __put_user(host_tx
->tai
, &target_tx
->tai
);
7432 unlock_user_struct(target_tx
, target_addr
, 1);
7438 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7439 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7440 abi_long target_addr
)
7442 struct target__kernel_timex
*target_tx
;
7444 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7445 offsetof(struct target__kernel_timex
,
7447 return -TARGET_EFAULT
;
7450 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7451 return -TARGET_EFAULT
;
7454 __get_user(host_tx
->modes
, &target_tx
->modes
);
7455 __get_user(host_tx
->offset
, &target_tx
->offset
);
7456 __get_user(host_tx
->freq
, &target_tx
->freq
);
7457 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7458 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7459 __get_user(host_tx
->status
, &target_tx
->status
);
7460 __get_user(host_tx
->constant
, &target_tx
->constant
);
7461 __get_user(host_tx
->precision
, &target_tx
->precision
);
7462 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7463 __get_user(host_tx
->tick
, &target_tx
->tick
);
7464 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7465 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7466 __get_user(host_tx
->shift
, &target_tx
->shift
);
7467 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7468 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7469 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7470 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7471 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7472 __get_user(host_tx
->tai
, &target_tx
->tai
);
7474 unlock_user_struct(target_tx
, target_addr
, 0);
7478 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7479 struct timex
*host_tx
)
7481 struct target__kernel_timex
*target_tx
;
7483 if (copy_to_user_timeval64(target_addr
+
7484 offsetof(struct target__kernel_timex
, time
),
7486 return -TARGET_EFAULT
;
7489 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7490 return -TARGET_EFAULT
;
7493 __put_user(host_tx
->modes
, &target_tx
->modes
);
7494 __put_user(host_tx
->offset
, &target_tx
->offset
);
7495 __put_user(host_tx
->freq
, &target_tx
->freq
);
7496 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7497 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7498 __put_user(host_tx
->status
, &target_tx
->status
);
7499 __put_user(host_tx
->constant
, &target_tx
->constant
);
7500 __put_user(host_tx
->precision
, &target_tx
->precision
);
7501 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7502 __put_user(host_tx
->tick
, &target_tx
->tick
);
7503 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7504 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7505 __put_user(host_tx
->shift
, &target_tx
->shift
);
7506 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7507 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7508 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7509 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7510 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7511 __put_user(host_tx
->tai
, &target_tx
->tai
);
7513 unlock_user_struct(target_tx
, target_addr
, 1);
7518 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7519 #define sigev_notify_thread_id _sigev_un._tid
7522 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7523 abi_ulong target_addr
)
7525 struct target_sigevent
*target_sevp
;
7527 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7528 return -TARGET_EFAULT
;
7531 /* This union is awkward on 64 bit systems because it has a 32 bit
7532 * integer and a pointer in it; we follow the conversion approach
7533 * used for handling sigval types in signal.c so the guest should get
7534 * the correct value back even if we did a 64 bit byteswap and it's
7535 * using the 32 bit integer.
7537 host_sevp
->sigev_value
.sival_ptr
=
7538 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7539 host_sevp
->sigev_signo
=
7540 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7541 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7542 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7544 unlock_user_struct(target_sevp
, target_addr
, 1);
7548 #if defined(TARGET_NR_mlockall)
7549 static inline int target_to_host_mlockall_arg(int arg
)
7553 if (arg
& TARGET_MCL_CURRENT
) {
7554 result
|= MCL_CURRENT
;
7556 if (arg
& TARGET_MCL_FUTURE
) {
7557 result
|= MCL_FUTURE
;
7560 if (arg
& TARGET_MCL_ONFAULT
) {
7561 result
|= MCL_ONFAULT
;
7569 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7570 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7571 defined(TARGET_NR_newfstatat))
7572 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7573 abi_ulong target_addr
,
7574 struct stat
*host_st
)
7576 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7577 if (((CPUARMState
*)cpu_env
)->eabi
) {
7578 struct target_eabi_stat64
*target_st
;
7580 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7581 return -TARGET_EFAULT
;
7582 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7583 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7584 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7585 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7586 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7588 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7589 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7590 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7591 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7592 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7593 __put_user(host_st
->st_size
, &target_st
->st_size
);
7594 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7595 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7596 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7597 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7598 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7599 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7600 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7601 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7602 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7604 unlock_user_struct(target_st
, target_addr
, 1);
7608 #if defined(TARGET_HAS_STRUCT_STAT64)
7609 struct target_stat64
*target_st
;
7611 struct target_stat
*target_st
;
7614 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7615 return -TARGET_EFAULT
;
7616 memset(target_st
, 0, sizeof(*target_st
));
7617 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7618 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7619 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7620 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7622 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7623 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7624 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7625 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7626 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7627 /* XXX: better use of kernel struct */
7628 __put_user(host_st
->st_size
, &target_st
->st_size
);
7629 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7630 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7631 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7632 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7633 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7634 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7635 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7636 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7637 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7639 unlock_user_struct(target_st
, target_addr
, 1);
7646 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7647 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7648 abi_ulong target_addr
)
7650 struct target_statx
*target_stx
;
7652 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7653 return -TARGET_EFAULT
;
7655 memset(target_stx
, 0, sizeof(*target_stx
));
7657 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7658 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7659 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7660 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7661 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7662 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7663 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7664 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7665 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7666 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7667 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7668 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7669 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7670 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7671 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7672 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7673 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7674 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7675 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7676 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7677 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7678 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7679 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7681 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Invoke the host futex syscall, selecting between the classic futex
 * and futex_time64 entry points depending on the host's time_t width.
 * Returns the raw host syscall result (errno convention of sys_*).
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7712 static int do_safe_futex(int *uaddr
, int op
, int val
,
7713 const struct timespec
*timeout
, int *uaddr2
,
7716 #if HOST_LONG_BITS == 64
7717 #if defined(__NR_futex)
7718 /* always a 64-bit time_t, it doesn't define _time64 version */
7719 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7721 #else /* HOST_LONG_BITS == 64 */
7722 #if defined(__NR_futex_time64)
7723 if (sizeof(timeout
->tv_sec
) == 8) {
7724 /* _time64 function on 32bit arch */
7725 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7729 #if defined(__NR_futex)
7730 /* old function on 32bit arch */
7731 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7733 #endif /* HOST_LONG_BITS == 64 */
7734 return -TARGET_ENOSYS
;
7737 /* ??? Using host futex calls even when target atomic operations
7738 are not really atomic probably breaks things. However implementing
7739 futexes locally would make futexes shared between multiple processes
7740 tricky. However they're probably useless because guest atomic
7741 operations won't work either. */
7742 #if defined(TARGET_NR_futex)
7743 static int do_futex(CPUState
*cpu
, target_ulong uaddr
, int op
, int val
,
7744 target_ulong timeout
, target_ulong uaddr2
, int val3
)
7746 struct timespec ts
, *pts
;
7749 /* ??? We assume FUTEX_* constants are the same on both host
7751 #ifdef FUTEX_CMD_MASK
7752 base_op
= op
& FUTEX_CMD_MASK
;
7758 case FUTEX_WAIT_BITSET
:
7761 target_to_host_timespec(pts
, timeout
);
7765 return do_safe_futex(g2h(cpu
, uaddr
),
7766 op
, tswap32(val
), pts
, NULL
, val3
);
7768 return do_safe_futex(g2h(cpu
, uaddr
),
7769 op
, val
, NULL
, NULL
, 0);
7771 return do_safe_futex(g2h(cpu
, uaddr
),
7772 op
, val
, NULL
, NULL
, 0);
7774 case FUTEX_CMP_REQUEUE
:
7776 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7777 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7778 But the prototype takes a `struct timespec *'; insert casts
7779 to satisfy the compiler. We do not need to tswap TIMEOUT
7780 since it's not compared to guest memory. */
7781 pts
= (struct timespec
*)(uintptr_t) timeout
;
7782 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7783 (base_op
== FUTEX_CMP_REQUEUE
7784 ? tswap32(val3
) : val3
));
7786 return -TARGET_ENOSYS
;
7791 #if defined(TARGET_NR_futex_time64)
7792 static int do_futex_time64(CPUState
*cpu
, target_ulong uaddr
, int op
,
7793 int val
, target_ulong timeout
,
7794 target_ulong uaddr2
, int val3
)
7796 struct timespec ts
, *pts
;
7799 /* ??? We assume FUTEX_* constants are the same on both host
7801 #ifdef FUTEX_CMD_MASK
7802 base_op
= op
& FUTEX_CMD_MASK
;
7808 case FUTEX_WAIT_BITSET
:
7811 if (target_to_host_timespec64(pts
, timeout
)) {
7812 return -TARGET_EFAULT
;
7817 return do_safe_futex(g2h(cpu
, uaddr
), op
,
7818 tswap32(val
), pts
, NULL
, val3
);
7820 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7822 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7824 case FUTEX_CMP_REQUEUE
:
7826 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7827 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7828 But the prototype takes a `struct timespec *'; insert casts
7829 to satisfy the compiler. We do not need to tswap TIMEOUT
7830 since it's not compared to guest memory. */
7831 pts
= (struct timespec
*)(uintptr_t) timeout
;
7832 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7833 (base_op
== FUTEX_CMP_REQUEUE
7834 ? tswap32(val3
) : val3
));
7836 return -TARGET_ENOSYS
;
7841 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7842 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7843 abi_long handle
, abi_long mount_id
,
7846 struct file_handle
*target_fh
;
7847 struct file_handle
*fh
;
7851 unsigned int size
, total_size
;
7853 if (get_user_s32(size
, handle
)) {
7854 return -TARGET_EFAULT
;
7857 name
= lock_user_string(pathname
);
7859 return -TARGET_EFAULT
;
7862 total_size
= sizeof(struct file_handle
) + size
;
7863 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7865 unlock_user(name
, pathname
, 0);
7866 return -TARGET_EFAULT
;
7869 fh
= g_malloc0(total_size
);
7870 fh
->handle_bytes
= size
;
7872 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7873 unlock_user(name
, pathname
, 0);
7875 /* man name_to_handle_at(2):
7876 * Other than the use of the handle_bytes field, the caller should treat
7877 * the file_handle structure as an opaque data type
7880 memcpy(target_fh
, fh
, total_size
);
7881 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7882 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7884 unlock_user(target_fh
, handle
, total_size
);
7886 if (put_user_s32(mid
, mount_id
)) {
7887 return -TARGET_EFAULT
;
7895 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7896 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7899 struct file_handle
*target_fh
;
7900 struct file_handle
*fh
;
7901 unsigned int size
, total_size
;
7904 if (get_user_s32(size
, handle
)) {
7905 return -TARGET_EFAULT
;
7908 total_size
= sizeof(struct file_handle
) + size
;
7909 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7911 return -TARGET_EFAULT
;
7914 fh
= g_memdup(target_fh
, total_size
);
7915 fh
->handle_bytes
= size
;
7916 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7918 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7919 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7923 unlock_user(target_fh
, handle
, total_size
);
7929 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7931 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7934 target_sigset_t
*target_mask
;
7938 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
7939 return -TARGET_EINVAL
;
7941 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7942 return -TARGET_EFAULT
;
7945 target_to_host_sigset(&host_mask
, target_mask
);
7947 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7949 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7951 fd_trans_register(ret
, &target_signalfd_trans
);
7954 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7974 static int open_self_cmdline(void *cpu_env
, int fd
)
7976 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7977 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7980 for (i
= 0; i
< bprm
->argc
; i
++) {
7981 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7983 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7991 static int open_self_maps(void *cpu_env
, int fd
)
7993 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7994 TaskState
*ts
= cpu
->opaque
;
7995 GSList
*map_info
= read_self_maps();
7999 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
8000 MapInfo
*e
= (MapInfo
*) s
->data
;
8002 if (h2g_valid(e
->start
)) {
8003 unsigned long min
= e
->start
;
8004 unsigned long max
= e
->end
;
8005 int flags
= page_get_flags(h2g(min
));
8008 max
= h2g_valid(max
- 1) ?
8009 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
8011 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
8015 if (h2g(min
) == ts
->info
->stack_limit
) {
8021 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8022 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
8023 h2g(min
), h2g(max
- 1) + 1,
8024 (flags
& PAGE_READ
) ? 'r' : '-',
8025 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8026 (flags
& PAGE_EXEC
) ? 'x' : '-',
8027 e
->is_priv
? 'p' : 's',
8028 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
8030 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8037 free_self_maps(map_info
);
8039 #ifdef TARGET_VSYSCALL_PAGE
8041 * We only support execution from the vsyscall page.
8042 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8044 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
8045 " --xp 00000000 00:00 0",
8046 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
8047 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
8053 static int open_self_stat(void *cpu_env
, int fd
)
8055 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
8056 TaskState
*ts
= cpu
->opaque
;
8057 g_autoptr(GString
) buf
= g_string_new(NULL
);
8060 for (i
= 0; i
< 44; i
++) {
8063 g_string_printf(buf
, FMT_pid
" ", getpid());
8064 } else if (i
== 1) {
8066 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8067 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8068 g_string_printf(buf
, "(%.15s) ", bin
);
8069 } else if (i
== 3) {
8071 g_string_printf(buf
, FMT_pid
" ", getppid());
8072 } else if (i
== 21) {
8074 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8075 } else if (i
== 27) {
8077 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8079 /* for the rest, there is MasterCard */
8080 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8083 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
8091 static int open_self_auxv(void *cpu_env
, int fd
)
8093 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
8094 TaskState
*ts
= cpu
->opaque
;
8095 abi_ulong auxv
= ts
->info
->saved_auxv
;
8096 abi_ulong len
= ts
->info
->auxv_len
;
8100 * Auxiliary vector is stored in target process stack.
8101 * read in whole auxv vector and copy it to file
8103 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
8107 r
= write(fd
, ptr
, len
);
8114 lseek(fd
, 0, SEEK_SET
);
8115 unlock_user(ptr
, auxv
, len
);
/*
 * Return nonzero when FILENAME names ENTRY under this process's own
 * /proc directory: "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric pid: accept only if it matches our own. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
8145 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8146 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match predicate used for absolute /proc paths. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8153 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8154 static int open_net_route(void *cpu_env
, int fd
)
8161 fp
= fopen("/proc/net/route", "r");
8168 read
= getline(&line
, &len
, fp
);
8169 dprintf(fd
, "%s", line
);
8173 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8175 uint32_t dest
, gw
, mask
;
8176 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8179 fields
= sscanf(line
,
8180 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8181 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8182 &mask
, &mtu
, &window
, &irtt
);
8186 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8187 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8188 metric
, tswap32(mask
), mtu
, window
, irtt
);
8198 #if defined(TARGET_SPARC)
/* Minimal /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8206 #if defined(TARGET_HPPA)
/* Minimal /proc/cpuinfo for HPPA guests, modelled on a 9000/778/B160L. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
8218 #if defined(TARGET_M68K)
/* Minimal /proc/hardware for M68K guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
8226 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8229 const char *filename
;
8230 int (*fill
)(void *cpu_env
, int fd
);
8231 int (*cmp
)(const char *s1
, const char *s2
);
8233 const struct fake_open
*fake_open
;
8234 static const struct fake_open fakes
[] = {
8235 { "maps", open_self_maps
, is_proc_myself
},
8236 { "stat", open_self_stat
, is_proc_myself
},
8237 { "auxv", open_self_auxv
, is_proc_myself
},
8238 { "cmdline", open_self_cmdline
, is_proc_myself
},
8239 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8240 { "/proc/net/route", open_net_route
, is_proc
},
8242 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8243 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8245 #if defined(TARGET_M68K)
8246 { "/proc/hardware", open_hardware
, is_proc
},
8248 { NULL
, NULL
, NULL
}
8251 if (is_proc_myself(pathname
, "exe")) {
8252 int execfd
= qemu_getauxval(AT_EXECFD
);
8253 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8256 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8257 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8262 if (fake_open
->filename
) {
8264 char filename
[PATH_MAX
];
8267 /* create temporary file to map stat to */
8268 tmpdir
= getenv("TMPDIR");
8271 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8272 fd
= mkstemp(filename
);
8278 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8284 lseek(fd
, 0, SEEK_SET
);
8289 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8292 #define TIMER_MAGIC 0x0caf0000
8293 #define TIMER_MAGIC_MASK 0xffff0000
8295 /* Convert QEMU provided timer ID back to internal 16bit index format */
8296 static target_timer_t
get_timer_id(abi_long arg
)
8298 target_timer_t timerid
= arg
;
8300 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8301 return -TARGET_EINVAL
;
8306 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8307 return -TARGET_EINVAL
;
8313 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8315 abi_ulong target_addr
,
8318 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8319 unsigned host_bits
= sizeof(*host_mask
) * 8;
8320 abi_ulong
*target_mask
;
8323 assert(host_size
>= target_size
);
8325 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8327 return -TARGET_EFAULT
;
8329 memset(host_mask
, 0, host_size
);
8331 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8332 unsigned bit
= i
* target_bits
;
8335 __get_user(val
, &target_mask
[i
]);
8336 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8337 if (val
& (1UL << j
)) {
8338 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8343 unlock_user(target_mask
, target_addr
, 0);
8347 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8349 abi_ulong target_addr
,
8352 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8353 unsigned host_bits
= sizeof(*host_mask
) * 8;
8354 abi_ulong
*target_mask
;
8357 assert(host_size
>= target_size
);
8359 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8361 return -TARGET_EFAULT
;
8364 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8365 unsigned bit
= i
* target_bits
;
8368 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8369 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8373 __put_user(val
, &target_mask
[i
]);
8376 unlock_user(target_mask
, target_addr
, target_size
);
8380 #ifdef TARGET_NR_getdents
8381 static int do_getdents(abi_long dirfd
, abi_long arg2
, abi_long count
)
8383 g_autofree
void *hdirp
= NULL
;
8385 int hlen
, hoff
, toff
;
8386 int hreclen
, treclen
;
8387 off64_t prev_diroff
= 0;
8389 hdirp
= g_try_malloc(count
);
8391 return -TARGET_ENOMEM
;
8394 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8395 hlen
= sys_getdents(dirfd
, hdirp
, count
);
8397 hlen
= sys_getdents64(dirfd
, hdirp
, count
);
8400 hlen
= get_errno(hlen
);
8401 if (is_error(hlen
)) {
8405 tdirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8407 return -TARGET_EFAULT
;
8410 for (hoff
= toff
= 0; hoff
< hlen
; hoff
+= hreclen
, toff
+= treclen
) {
8411 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8412 struct linux_dirent
*hde
= hdirp
+ hoff
;
8414 struct linux_dirent64
*hde
= hdirp
+ hoff
;
8416 struct target_dirent
*tde
= tdirp
+ toff
;
8420 namelen
= strlen(hde
->d_name
);
8421 hreclen
= hde
->d_reclen
;
8422 treclen
= offsetof(struct target_dirent
, d_name
) + namelen
+ 2;
8423 treclen
= QEMU_ALIGN_UP(treclen
, __alignof(struct target_dirent
));
8425 if (toff
+ treclen
> count
) {
8427 * If the host struct is smaller than the target struct, or
8428 * requires less alignment and thus packs into less space,
8429 * then the host can return more entries than we can pass
8433 toff
= -TARGET_EINVAL
; /* result buffer is too small */
8437 * Return what we have, resetting the file pointer to the
8438 * location of the first record not returned.
8440 lseek64(dirfd
, prev_diroff
, SEEK_SET
);
8444 prev_diroff
= hde
->d_off
;
8445 tde
->d_ino
= tswapal(hde
->d_ino
);
8446 tde
->d_off
= tswapal(hde
->d_off
);
8447 tde
->d_reclen
= tswap16(treclen
);
8448 memcpy(tde
->d_name
, hde
->d_name
, namelen
+ 1);
8451 * The getdents type is in what was formerly a padding byte at the
8452 * end of the structure.
8454 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8455 type
= *((uint8_t *)hde
+ hreclen
- 1);
8459 *((uint8_t *)tde
+ treclen
- 1) = type
;
8462 unlock_user(tdirp
, arg2
, toff
);
8465 #endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): read host directory entries into a bounce
 * buffer, then repack them one record at a time into the guest buffer,
 * byte-swapping each field for the target.  Returns the number of
 * bytes written to the guest buffer, or a -TARGET_* errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;  /* host-side bounce buffer */
    void *tdirp;                    /* guest buffer, locked for write */
    int hlen, hoff, toff;           /* host length; host/target offsets */
    int hreclen, treclen;           /* per-record sizes, host and target */
    off64_t prev_diroff = 0;        /* d_off of the last record emitted */

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* Copy the trailing NUL along with the name. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
8533 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8534 _syscall2(int, pivot_root
, const char *, new_root
, const char *, put_old
)
8537 /* This is an internal helper for do_syscall so that it is easier
8538 * to have a single return point, so that actions, such as logging
8539 * of syscall results, can be performed.
8540 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8542 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8543 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8544 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8547 CPUState
*cpu
= env_cpu(cpu_env
);
8549 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8550 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8551 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8552 || defined(TARGET_NR_statx)
8555 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8556 || defined(TARGET_NR_fstatfs)
8562 case TARGET_NR_exit
:
8563 /* In old applications this may be used to implement _exit(2).
8564 However in threaded applications it is used for thread termination,
8565 and _exit_group is used for application termination.
8566 Do thread termination if we have more then one thread. */
8568 if (block_signals()) {
8569 return -QEMU_ERESTARTSYS
;
8572 pthread_mutex_lock(&clone_lock
);
8574 if (CPU_NEXT(first_cpu
)) {
8575 TaskState
*ts
= cpu
->opaque
;
8577 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8578 object_unref(OBJECT(cpu
));
8580 * At this point the CPU should be unrealized and removed
8581 * from cpu lists. We can clean-up the rest of the thread
8582 * data without the lock held.
8585 pthread_mutex_unlock(&clone_lock
);
8587 if (ts
->child_tidptr
) {
8588 put_user_u32(0, ts
->child_tidptr
);
8589 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8590 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8594 rcu_unregister_thread();
8598 pthread_mutex_unlock(&clone_lock
);
8599 preexit_cleanup(cpu_env
, arg1
);
8601 return 0; /* avoid warning */
8602 case TARGET_NR_read
:
8603 if (arg2
== 0 && arg3
== 0) {
8604 return get_errno(safe_read(arg1
, 0, 0));
8606 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8607 return -TARGET_EFAULT
;
8608 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8610 fd_trans_host_to_target_data(arg1
)) {
8611 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8613 unlock_user(p
, arg2
, ret
);
8616 case TARGET_NR_write
:
8617 if (arg2
== 0 && arg3
== 0) {
8618 return get_errno(safe_write(arg1
, 0, 0));
8620 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8621 return -TARGET_EFAULT
;
8622 if (fd_trans_target_to_host_data(arg1
)) {
8623 void *copy
= g_malloc(arg3
);
8624 memcpy(copy
, p
, arg3
);
8625 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8627 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8631 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8633 unlock_user(p
, arg2
, 0);
8636 #ifdef TARGET_NR_open
8637 case TARGET_NR_open
:
8638 if (!(p
= lock_user_string(arg1
)))
8639 return -TARGET_EFAULT
;
8640 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8641 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8643 fd_trans_unregister(ret
);
8644 unlock_user(p
, arg1
, 0);
8647 case TARGET_NR_openat
:
8648 if (!(p
= lock_user_string(arg2
)))
8649 return -TARGET_EFAULT
;
8650 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8651 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8653 fd_trans_unregister(ret
);
8654 unlock_user(p
, arg2
, 0);
8656 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8657 case TARGET_NR_name_to_handle_at
:
8658 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8661 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8662 case TARGET_NR_open_by_handle_at
:
8663 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8664 fd_trans_unregister(ret
);
8667 case TARGET_NR_close
:
8668 fd_trans_unregister(arg1
);
8669 return get_errno(close(arg1
));
8672 return do_brk(arg1
);
8673 #ifdef TARGET_NR_fork
8674 case TARGET_NR_fork
:
8675 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8677 #ifdef TARGET_NR_waitpid
8678 case TARGET_NR_waitpid
:
8681 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8682 if (!is_error(ret
) && arg2
&& ret
8683 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8684 return -TARGET_EFAULT
;
8688 #ifdef TARGET_NR_waitid
8689 case TARGET_NR_waitid
:
8693 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8694 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8695 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8696 return -TARGET_EFAULT
;
8697 host_to_target_siginfo(p
, &info
);
8698 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8703 #ifdef TARGET_NR_creat /* not on alpha */
8704 case TARGET_NR_creat
:
8705 if (!(p
= lock_user_string(arg1
)))
8706 return -TARGET_EFAULT
;
8707 ret
= get_errno(creat(p
, arg2
));
8708 fd_trans_unregister(ret
);
8709 unlock_user(p
, arg1
, 0);
8712 #ifdef TARGET_NR_link
8713 case TARGET_NR_link
:
8716 p
= lock_user_string(arg1
);
8717 p2
= lock_user_string(arg2
);
8719 ret
= -TARGET_EFAULT
;
8721 ret
= get_errno(link(p
, p2
));
8722 unlock_user(p2
, arg2
, 0);
8723 unlock_user(p
, arg1
, 0);
8727 #if defined(TARGET_NR_linkat)
8728 case TARGET_NR_linkat
:
8732 return -TARGET_EFAULT
;
8733 p
= lock_user_string(arg2
);
8734 p2
= lock_user_string(arg4
);
8736 ret
= -TARGET_EFAULT
;
8738 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8739 unlock_user(p
, arg2
, 0);
8740 unlock_user(p2
, arg4
, 0);
8744 #ifdef TARGET_NR_unlink
8745 case TARGET_NR_unlink
:
8746 if (!(p
= lock_user_string(arg1
)))
8747 return -TARGET_EFAULT
;
8748 ret
= get_errno(unlink(p
));
8749 unlock_user(p
, arg1
, 0);
8752 #if defined(TARGET_NR_unlinkat)
8753 case TARGET_NR_unlinkat
:
8754 if (!(p
= lock_user_string(arg2
)))
8755 return -TARGET_EFAULT
;
8756 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8757 unlock_user(p
, arg2
, 0);
8760 case TARGET_NR_execve
:
8762 char **argp
, **envp
;
8765 abi_ulong guest_argp
;
8766 abi_ulong guest_envp
;
8772 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8773 if (get_user_ual(addr
, gp
))
8774 return -TARGET_EFAULT
;
8781 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8782 if (get_user_ual(addr
, gp
))
8783 return -TARGET_EFAULT
;
8789 argp
= g_new0(char *, argc
+ 1);
8790 envp
= g_new0(char *, envc
+ 1);
8792 for (gp
= guest_argp
, q
= argp
; gp
;
8793 gp
+= sizeof(abi_ulong
), q
++) {
8794 if (get_user_ual(addr
, gp
))
8798 if (!(*q
= lock_user_string(addr
)))
8803 for (gp
= guest_envp
, q
= envp
; gp
;
8804 gp
+= sizeof(abi_ulong
), q
++) {
8805 if (get_user_ual(addr
, gp
))
8809 if (!(*q
= lock_user_string(addr
)))
8814 if (!(p
= lock_user_string(arg1
)))
8816 /* Although execve() is not an interruptible syscall it is
8817 * a special case where we must use the safe_syscall wrapper:
8818 * if we allow a signal to happen before we make the host
8819 * syscall then we will 'lose' it, because at the point of
8820 * execve the process leaves QEMU's control. So we use the
8821 * safe syscall wrapper to ensure that we either take the
8822 * signal as a guest signal, or else it does not happen
8823 * before the execve completes and makes it the other
8824 * program's problem.
8826 ret
= get_errno(safe_execve(p
, argp
, envp
));
8827 unlock_user(p
, arg1
, 0);
8832 ret
= -TARGET_EFAULT
;
8835 for (gp
= guest_argp
, q
= argp
; *q
;
8836 gp
+= sizeof(abi_ulong
), q
++) {
8837 if (get_user_ual(addr
, gp
)
8840 unlock_user(*q
, addr
, 0);
8842 for (gp
= guest_envp
, q
= envp
; *q
;
8843 gp
+= sizeof(abi_ulong
), q
++) {
8844 if (get_user_ual(addr
, gp
)
8847 unlock_user(*q
, addr
, 0);
8854 case TARGET_NR_chdir
:
8855 if (!(p
= lock_user_string(arg1
)))
8856 return -TARGET_EFAULT
;
8857 ret
= get_errno(chdir(p
));
8858 unlock_user(p
, arg1
, 0);
8860 #ifdef TARGET_NR_time
8861 case TARGET_NR_time
:
8864 ret
= get_errno(time(&host_time
));
8867 && put_user_sal(host_time
, arg1
))
8868 return -TARGET_EFAULT
;
8872 #ifdef TARGET_NR_mknod
8873 case TARGET_NR_mknod
:
8874 if (!(p
= lock_user_string(arg1
)))
8875 return -TARGET_EFAULT
;
8876 ret
= get_errno(mknod(p
, arg2
, arg3
));
8877 unlock_user(p
, arg1
, 0);
8880 #if defined(TARGET_NR_mknodat)
8881 case TARGET_NR_mknodat
:
8882 if (!(p
= lock_user_string(arg2
)))
8883 return -TARGET_EFAULT
;
8884 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8885 unlock_user(p
, arg2
, 0);
8888 #ifdef TARGET_NR_chmod
8889 case TARGET_NR_chmod
:
8890 if (!(p
= lock_user_string(arg1
)))
8891 return -TARGET_EFAULT
;
8892 ret
= get_errno(chmod(p
, arg2
));
8893 unlock_user(p
, arg1
, 0);
8896 #ifdef TARGET_NR_lseek
8897 case TARGET_NR_lseek
:
8898 return get_errno(lseek(arg1
, arg2
, arg3
));
8900 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8901 /* Alpha specific */
8902 case TARGET_NR_getxpid
:
8903 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8904 return get_errno(getpid());
8906 #ifdef TARGET_NR_getpid
8907 case TARGET_NR_getpid
:
8908 return get_errno(getpid());
8910 case TARGET_NR_mount
:
8912 /* need to look at the data field */
8916 p
= lock_user_string(arg1
);
8918 return -TARGET_EFAULT
;
8924 p2
= lock_user_string(arg2
);
8927 unlock_user(p
, arg1
, 0);
8929 return -TARGET_EFAULT
;
8933 p3
= lock_user_string(arg3
);
8936 unlock_user(p
, arg1
, 0);
8938 unlock_user(p2
, arg2
, 0);
8939 return -TARGET_EFAULT
;
8945 /* FIXME - arg5 should be locked, but it isn't clear how to
8946 * do that since it's not guaranteed to be a NULL-terminated
8950 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8952 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8954 ret
= get_errno(ret
);
8957 unlock_user(p
, arg1
, 0);
8959 unlock_user(p2
, arg2
, 0);
8961 unlock_user(p3
, arg3
, 0);
8965 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8966 #if defined(TARGET_NR_umount)
8967 case TARGET_NR_umount
:
8969 #if defined(TARGET_NR_oldumount)
8970 case TARGET_NR_oldumount
:
8972 if (!(p
= lock_user_string(arg1
)))
8973 return -TARGET_EFAULT
;
8974 ret
= get_errno(umount(p
));
8975 unlock_user(p
, arg1
, 0);
8978 #ifdef TARGET_NR_stime /* not on alpha */
8979 case TARGET_NR_stime
:
8983 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8984 return -TARGET_EFAULT
;
8986 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8989 #ifdef TARGET_NR_alarm /* not on alpha */
8990 case TARGET_NR_alarm
:
8993 #ifdef TARGET_NR_pause /* not on alpha */
8994 case TARGET_NR_pause
:
8995 if (!block_signals()) {
8996 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8998 return -TARGET_EINTR
;
9000 #ifdef TARGET_NR_utime
9001 case TARGET_NR_utime
:
9003 struct utimbuf tbuf
, *host_tbuf
;
9004 struct target_utimbuf
*target_tbuf
;
9006 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9007 return -TARGET_EFAULT
;
9008 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9009 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9010 unlock_user_struct(target_tbuf
, arg2
, 0);
9015 if (!(p
= lock_user_string(arg1
)))
9016 return -TARGET_EFAULT
;
9017 ret
= get_errno(utime(p
, host_tbuf
));
9018 unlock_user(p
, arg1
, 0);
9022 #ifdef TARGET_NR_utimes
9023 case TARGET_NR_utimes
:
9025 struct timeval
*tvp
, tv
[2];
9027 if (copy_from_user_timeval(&tv
[0], arg2
)
9028 || copy_from_user_timeval(&tv
[1],
9029 arg2
+ sizeof(struct target_timeval
)))
9030 return -TARGET_EFAULT
;
9035 if (!(p
= lock_user_string(arg1
)))
9036 return -TARGET_EFAULT
;
9037 ret
= get_errno(utimes(p
, tvp
));
9038 unlock_user(p
, arg1
, 0);
9042 #if defined(TARGET_NR_futimesat)
9043 case TARGET_NR_futimesat
:
9045 struct timeval
*tvp
, tv
[2];
9047 if (copy_from_user_timeval(&tv
[0], arg3
)
9048 || copy_from_user_timeval(&tv
[1],
9049 arg3
+ sizeof(struct target_timeval
)))
9050 return -TARGET_EFAULT
;
9055 if (!(p
= lock_user_string(arg2
))) {
9056 return -TARGET_EFAULT
;
9058 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9059 unlock_user(p
, arg2
, 0);
9063 #ifdef TARGET_NR_access
9064 case TARGET_NR_access
:
9065 if (!(p
= lock_user_string(arg1
))) {
9066 return -TARGET_EFAULT
;
9068 ret
= get_errno(access(path(p
), arg2
));
9069 unlock_user(p
, arg1
, 0);
9072 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9073 case TARGET_NR_faccessat
:
9074 if (!(p
= lock_user_string(arg2
))) {
9075 return -TARGET_EFAULT
;
9077 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9078 unlock_user(p
, arg2
, 0);
9081 #ifdef TARGET_NR_nice /* not on alpha */
9082 case TARGET_NR_nice
:
9083 return get_errno(nice(arg1
));
9085 case TARGET_NR_sync
:
9088 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9089 case TARGET_NR_syncfs
:
9090 return get_errno(syncfs(arg1
));
9092 case TARGET_NR_kill
:
9093 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9094 #ifdef TARGET_NR_rename
9095 case TARGET_NR_rename
:
9098 p
= lock_user_string(arg1
);
9099 p2
= lock_user_string(arg2
);
9101 ret
= -TARGET_EFAULT
;
9103 ret
= get_errno(rename(p
, p2
));
9104 unlock_user(p2
, arg2
, 0);
9105 unlock_user(p
, arg1
, 0);
9109 #if defined(TARGET_NR_renameat)
9110 case TARGET_NR_renameat
:
9113 p
= lock_user_string(arg2
);
9114 p2
= lock_user_string(arg4
);
9116 ret
= -TARGET_EFAULT
;
9118 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9119 unlock_user(p2
, arg4
, 0);
9120 unlock_user(p
, arg2
, 0);
9124 #if defined(TARGET_NR_renameat2)
9125 case TARGET_NR_renameat2
:
9128 p
= lock_user_string(arg2
);
9129 p2
= lock_user_string(arg4
);
9131 ret
= -TARGET_EFAULT
;
9133 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9135 unlock_user(p2
, arg4
, 0);
9136 unlock_user(p
, arg2
, 0);
9140 #ifdef TARGET_NR_mkdir
9141 case TARGET_NR_mkdir
:
9142 if (!(p
= lock_user_string(arg1
)))
9143 return -TARGET_EFAULT
;
9144 ret
= get_errno(mkdir(p
, arg2
));
9145 unlock_user(p
, arg1
, 0);
9148 #if defined(TARGET_NR_mkdirat)
9149 case TARGET_NR_mkdirat
:
9150 if (!(p
= lock_user_string(arg2
)))
9151 return -TARGET_EFAULT
;
9152 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9153 unlock_user(p
, arg2
, 0);
9156 #ifdef TARGET_NR_rmdir
9157 case TARGET_NR_rmdir
:
9158 if (!(p
= lock_user_string(arg1
)))
9159 return -TARGET_EFAULT
;
9160 ret
= get_errno(rmdir(p
));
9161 unlock_user(p
, arg1
, 0);
9165 ret
= get_errno(dup(arg1
));
9167 fd_trans_dup(arg1
, ret
);
9170 #ifdef TARGET_NR_pipe
9171 case TARGET_NR_pipe
:
9172 return do_pipe(cpu_env
, arg1
, 0, 0);
9174 #ifdef TARGET_NR_pipe2
9175 case TARGET_NR_pipe2
:
9176 return do_pipe(cpu_env
, arg1
,
9177 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9179 case TARGET_NR_times
:
9181 struct target_tms
*tmsp
;
9183 ret
= get_errno(times(&tms
));
9185 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9187 return -TARGET_EFAULT
;
9188 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9189 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9190 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9191 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9194 ret
= host_to_target_clock_t(ret
);
9197 case TARGET_NR_acct
:
9199 ret
= get_errno(acct(NULL
));
9201 if (!(p
= lock_user_string(arg1
))) {
9202 return -TARGET_EFAULT
;
9204 ret
= get_errno(acct(path(p
)));
9205 unlock_user(p
, arg1
, 0);
9208 #ifdef TARGET_NR_umount2
9209 case TARGET_NR_umount2
:
9210 if (!(p
= lock_user_string(arg1
)))
9211 return -TARGET_EFAULT
;
9212 ret
= get_errno(umount2(p
, arg2
));
9213 unlock_user(p
, arg1
, 0);
9216 case TARGET_NR_ioctl
:
9217 return do_ioctl(arg1
, arg2
, arg3
);
9218 #ifdef TARGET_NR_fcntl
9219 case TARGET_NR_fcntl
:
9220 return do_fcntl(arg1
, arg2
, arg3
);
9222 case TARGET_NR_setpgid
:
9223 return get_errno(setpgid(arg1
, arg2
));
9224 case TARGET_NR_umask
:
9225 return get_errno(umask(arg1
));
9226 case TARGET_NR_chroot
:
9227 if (!(p
= lock_user_string(arg1
)))
9228 return -TARGET_EFAULT
;
9229 ret
= get_errno(chroot(p
));
9230 unlock_user(p
, arg1
, 0);
9232 #ifdef TARGET_NR_dup2
9233 case TARGET_NR_dup2
:
9234 ret
= get_errno(dup2(arg1
, arg2
));
9236 fd_trans_dup(arg1
, arg2
);
9240 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9241 case TARGET_NR_dup3
:
9245 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9248 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9249 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9251 fd_trans_dup(arg1
, arg2
);
9256 #ifdef TARGET_NR_getppid /* not on alpha */
9257 case TARGET_NR_getppid
:
9258 return get_errno(getppid());
9260 #ifdef TARGET_NR_getpgrp
9261 case TARGET_NR_getpgrp
:
9262 return get_errno(getpgrp());
9264 case TARGET_NR_setsid
:
9265 return get_errno(setsid());
9266 #ifdef TARGET_NR_sigaction
9267 case TARGET_NR_sigaction
:
9269 #if defined(TARGET_MIPS)
9270 struct target_sigaction act
, oact
, *pact
, *old_act
;
9273 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9274 return -TARGET_EFAULT
;
9275 act
._sa_handler
= old_act
->_sa_handler
;
9276 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9277 act
.sa_flags
= old_act
->sa_flags
;
9278 unlock_user_struct(old_act
, arg2
, 0);
9284 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9286 if (!is_error(ret
) && arg3
) {
9287 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9288 return -TARGET_EFAULT
;
9289 old_act
->_sa_handler
= oact
._sa_handler
;
9290 old_act
->sa_flags
= oact
.sa_flags
;
9291 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9292 old_act
->sa_mask
.sig
[1] = 0;
9293 old_act
->sa_mask
.sig
[2] = 0;
9294 old_act
->sa_mask
.sig
[3] = 0;
9295 unlock_user_struct(old_act
, arg3
, 1);
9298 struct target_old_sigaction
*old_act
;
9299 struct target_sigaction act
, oact
, *pact
;
9301 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9302 return -TARGET_EFAULT
;
9303 act
._sa_handler
= old_act
->_sa_handler
;
9304 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9305 act
.sa_flags
= old_act
->sa_flags
;
9306 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9307 act
.sa_restorer
= old_act
->sa_restorer
;
9309 unlock_user_struct(old_act
, arg2
, 0);
9314 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9315 if (!is_error(ret
) && arg3
) {
9316 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9317 return -TARGET_EFAULT
;
9318 old_act
->_sa_handler
= oact
._sa_handler
;
9319 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9320 old_act
->sa_flags
= oact
.sa_flags
;
9321 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9322 old_act
->sa_restorer
= oact
.sa_restorer
;
9324 unlock_user_struct(old_act
, arg3
, 1);
9330 case TARGET_NR_rt_sigaction
:
9333 * For Alpha and SPARC this is a 5 argument syscall, with
9334 * a 'restorer' parameter which must be copied into the
9335 * sa_restorer field of the sigaction struct.
9336 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9337 * and arg5 is the sigsetsize.
9339 #if defined(TARGET_ALPHA)
9340 target_ulong sigsetsize
= arg4
;
9341 target_ulong restorer
= arg5
;
9342 #elif defined(TARGET_SPARC)
9343 target_ulong restorer
= arg4
;
9344 target_ulong sigsetsize
= arg5
;
9346 target_ulong sigsetsize
= arg4
;
9347 target_ulong restorer
= 0;
9349 struct target_sigaction
*act
= NULL
;
9350 struct target_sigaction
*oact
= NULL
;
9352 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9353 return -TARGET_EINVAL
;
9355 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9356 return -TARGET_EFAULT
;
9358 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9359 ret
= -TARGET_EFAULT
;
9361 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9363 unlock_user_struct(oact
, arg3
, 1);
9367 unlock_user_struct(act
, arg2
, 0);
9371 #ifdef TARGET_NR_sgetmask /* not on alpha */
9372 case TARGET_NR_sgetmask
:
9375 abi_ulong target_set
;
9376 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9378 host_to_target_old_sigset(&target_set
, &cur_set
);
9384 #ifdef TARGET_NR_ssetmask /* not on alpha */
9385 case TARGET_NR_ssetmask
:
9388 abi_ulong target_set
= arg1
;
9389 target_to_host_old_sigset(&set
, &target_set
);
9390 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9392 host_to_target_old_sigset(&target_set
, &oset
);
9398 #ifdef TARGET_NR_sigprocmask
9399 case TARGET_NR_sigprocmask
:
9401 #if defined(TARGET_ALPHA)
9402 sigset_t set
, oldset
;
9407 case TARGET_SIG_BLOCK
:
9410 case TARGET_SIG_UNBLOCK
:
9413 case TARGET_SIG_SETMASK
:
9417 return -TARGET_EINVAL
;
9420 target_to_host_old_sigset(&set
, &mask
);
9422 ret
= do_sigprocmask(how
, &set
, &oldset
);
9423 if (!is_error(ret
)) {
9424 host_to_target_old_sigset(&mask
, &oldset
);
9426 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9429 sigset_t set
, oldset
, *set_ptr
;
9433 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9435 return -TARGET_EFAULT
;
9437 target_to_host_old_sigset(&set
, p
);
9438 unlock_user(p
, arg2
, 0);
9441 case TARGET_SIG_BLOCK
:
9444 case TARGET_SIG_UNBLOCK
:
9447 case TARGET_SIG_SETMASK
:
9451 return -TARGET_EINVAL
;
9457 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9458 if (!is_error(ret
) && arg3
) {
9459 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9460 return -TARGET_EFAULT
;
9461 host_to_target_old_sigset(p
, &oldset
);
9462 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9468 case TARGET_NR_rt_sigprocmask
:
9471 sigset_t set
, oldset
, *set_ptr
;
9473 if (arg4
!= sizeof(target_sigset_t
)) {
9474 return -TARGET_EINVAL
;
9478 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9480 return -TARGET_EFAULT
;
9482 target_to_host_sigset(&set
, p
);
9483 unlock_user(p
, arg2
, 0);
9486 case TARGET_SIG_BLOCK
:
9489 case TARGET_SIG_UNBLOCK
:
9492 case TARGET_SIG_SETMASK
:
9496 return -TARGET_EINVAL
;
9502 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9503 if (!is_error(ret
) && arg3
) {
9504 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9505 return -TARGET_EFAULT
;
9506 host_to_target_sigset(p
, &oldset
);
9507 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9511 #ifdef TARGET_NR_sigpending
9512 case TARGET_NR_sigpending
:
9515 ret
= get_errno(sigpending(&set
));
9516 if (!is_error(ret
)) {
9517 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9518 return -TARGET_EFAULT
;
9519 host_to_target_old_sigset(p
, &set
);
9520 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9525 case TARGET_NR_rt_sigpending
:
9529 /* Yes, this check is >, not != like most. We follow the kernel's
9530 * logic and it does it like this because it implements
9531 * NR_sigpending through the same code path, and in that case
9532 * the old_sigset_t is smaller in size.
9534 if (arg2
> sizeof(target_sigset_t
)) {
9535 return -TARGET_EINVAL
;
9538 ret
= get_errno(sigpending(&set
));
9539 if (!is_error(ret
)) {
9540 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9541 return -TARGET_EFAULT
;
9542 host_to_target_sigset(p
, &set
);
9543 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9547 #ifdef TARGET_NR_sigsuspend
9548 case TARGET_NR_sigsuspend
:
9552 #if defined(TARGET_ALPHA)
9553 TaskState
*ts
= cpu
->opaque
;
9554 /* target_to_host_old_sigset will bswap back */
9555 abi_ulong mask
= tswapal(arg1
);
9556 set
= &ts
->sigsuspend_mask
;
9557 target_to_host_old_sigset(set
, &mask
);
9559 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
9564 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9565 finish_sigsuspend_mask(ret
);
9569 case TARGET_NR_rt_sigsuspend
:
9573 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
9577 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9578 finish_sigsuspend_mask(ret
);
9581 #ifdef TARGET_NR_rt_sigtimedwait
9582 case TARGET_NR_rt_sigtimedwait
:
9585 struct timespec uts
, *puts
;
9588 if (arg4
!= sizeof(target_sigset_t
)) {
9589 return -TARGET_EINVAL
;
9592 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9593 return -TARGET_EFAULT
;
9594 target_to_host_sigset(&set
, p
);
9595 unlock_user(p
, arg1
, 0);
9598 if (target_to_host_timespec(puts
, arg3
)) {
9599 return -TARGET_EFAULT
;
9604 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9606 if (!is_error(ret
)) {
9608 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9611 return -TARGET_EFAULT
;
9613 host_to_target_siginfo(p
, &uinfo
);
9614 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9616 ret
= host_to_target_signal(ret
);
9621 #ifdef TARGET_NR_rt_sigtimedwait_time64
9622 case TARGET_NR_rt_sigtimedwait_time64
:
9625 struct timespec uts
, *puts
;
9628 if (arg4
!= sizeof(target_sigset_t
)) {
9629 return -TARGET_EINVAL
;
9632 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9634 return -TARGET_EFAULT
;
9636 target_to_host_sigset(&set
, p
);
9637 unlock_user(p
, arg1
, 0);
9640 if (target_to_host_timespec64(puts
, arg3
)) {
9641 return -TARGET_EFAULT
;
9646 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9648 if (!is_error(ret
)) {
9650 p
= lock_user(VERIFY_WRITE
, arg2
,
9651 sizeof(target_siginfo_t
), 0);
9653 return -TARGET_EFAULT
;
9655 host_to_target_siginfo(p
, &uinfo
);
9656 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9658 ret
= host_to_target_signal(ret
);
9663 case TARGET_NR_rt_sigqueueinfo
:
9667 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9669 return -TARGET_EFAULT
;
9671 target_to_host_siginfo(&uinfo
, p
);
9672 unlock_user(p
, arg3
, 0);
9673 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9676 case TARGET_NR_rt_tgsigqueueinfo
:
9680 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9682 return -TARGET_EFAULT
;
9684 target_to_host_siginfo(&uinfo
, p
);
9685 unlock_user(p
, arg4
, 0);
9686 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9689 #ifdef TARGET_NR_sigreturn
9690 case TARGET_NR_sigreturn
:
9691 if (block_signals()) {
9692 return -QEMU_ERESTARTSYS
;
9694 return do_sigreturn(cpu_env
);
9696 case TARGET_NR_rt_sigreturn
:
9697 if (block_signals()) {
9698 return -QEMU_ERESTARTSYS
;
9700 return do_rt_sigreturn(cpu_env
);
9701 case TARGET_NR_sethostname
:
9702 if (!(p
= lock_user_string(arg1
)))
9703 return -TARGET_EFAULT
;
9704 ret
= get_errno(sethostname(p
, arg2
));
9705 unlock_user(p
, arg1
, 0);
9707 #ifdef TARGET_NR_setrlimit
9708 case TARGET_NR_setrlimit
:
9710 int resource
= target_to_host_resource(arg1
);
9711 struct target_rlimit
*target_rlim
;
9713 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9714 return -TARGET_EFAULT
;
9715 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9716 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9717 unlock_user_struct(target_rlim
, arg2
, 0);
9719 * If we just passed through resource limit settings for memory then
9720 * they would also apply to QEMU's own allocations, and QEMU will
9721 * crash or hang or die if its allocations fail. Ideally we would
9722 * track the guest allocations in QEMU and apply the limits ourselves.
9723 * For now, just tell the guest the call succeeded but don't actually
9726 if (resource
!= RLIMIT_AS
&&
9727 resource
!= RLIMIT_DATA
&&
9728 resource
!= RLIMIT_STACK
) {
9729 return get_errno(setrlimit(resource
, &rlim
));
9735 #ifdef TARGET_NR_getrlimit
9736 case TARGET_NR_getrlimit
:
9738 int resource
= target_to_host_resource(arg1
);
9739 struct target_rlimit
*target_rlim
;
9742 ret
= get_errno(getrlimit(resource
, &rlim
));
9743 if (!is_error(ret
)) {
9744 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9745 return -TARGET_EFAULT
;
9746 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9747 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9748 unlock_user_struct(target_rlim
, arg2
, 1);
9753 case TARGET_NR_getrusage
:
9755 struct rusage rusage
;
9756 ret
= get_errno(getrusage(arg1
, &rusage
));
9757 if (!is_error(ret
)) {
9758 ret
= host_to_target_rusage(arg2
, &rusage
);
9762 #if defined(TARGET_NR_gettimeofday)
9763 case TARGET_NR_gettimeofday
:
9768 ret
= get_errno(gettimeofday(&tv
, &tz
));
9769 if (!is_error(ret
)) {
9770 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9771 return -TARGET_EFAULT
;
9773 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9774 return -TARGET_EFAULT
;
9780 #if defined(TARGET_NR_settimeofday)
9781 case TARGET_NR_settimeofday
:
9783 struct timeval tv
, *ptv
= NULL
;
9784 struct timezone tz
, *ptz
= NULL
;
9787 if (copy_from_user_timeval(&tv
, arg1
)) {
9788 return -TARGET_EFAULT
;
9794 if (copy_from_user_timezone(&tz
, arg2
)) {
9795 return -TARGET_EFAULT
;
9800 return get_errno(settimeofday(ptv
, ptz
));
9803 #if defined(TARGET_NR_select)
9804 case TARGET_NR_select
:
9805 #if defined(TARGET_WANT_NI_OLD_SELECT)
9806 /* some architectures used to have old_select here
9807 * but now ENOSYS it.
9809 ret
= -TARGET_ENOSYS
;
9810 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9811 ret
= do_old_select(arg1
);
9813 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9817 #ifdef TARGET_NR_pselect6
9818 case TARGET_NR_pselect6
:
9819 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9821 #ifdef TARGET_NR_pselect6_time64
9822 case TARGET_NR_pselect6_time64
:
9823 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9825 #ifdef TARGET_NR_symlink
9826 case TARGET_NR_symlink
:
9829 p
= lock_user_string(arg1
);
9830 p2
= lock_user_string(arg2
);
9832 ret
= -TARGET_EFAULT
;
9834 ret
= get_errno(symlink(p
, p2
));
9835 unlock_user(p2
, arg2
, 0);
9836 unlock_user(p
, arg1
, 0);
9840 #if defined(TARGET_NR_symlinkat)
9841 case TARGET_NR_symlinkat
:
9844 p
= lock_user_string(arg1
);
9845 p2
= lock_user_string(arg3
);
9847 ret
= -TARGET_EFAULT
;
9849 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9850 unlock_user(p2
, arg3
, 0);
9851 unlock_user(p
, arg1
, 0);
9855 #ifdef TARGET_NR_readlink
9856 case TARGET_NR_readlink
:
9859 p
= lock_user_string(arg1
);
9860 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9862 ret
= -TARGET_EFAULT
;
9864 /* Short circuit this for the magic exe check. */
9865 ret
= -TARGET_EINVAL
;
9866 } else if (is_proc_myself((const char *)p
, "exe")) {
9867 char real
[PATH_MAX
], *temp
;
9868 temp
= realpath(exec_path
, real
);
9869 /* Return value is # of bytes that we wrote to the buffer. */
9871 ret
= get_errno(-1);
9873 /* Don't worry about sign mismatch as earlier mapping
9874 * logic would have thrown a bad address error. */
9875 ret
= MIN(strlen(real
), arg3
);
9876 /* We cannot NUL terminate the string. */
9877 memcpy(p2
, real
, ret
);
9880 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9882 unlock_user(p2
, arg2
, ret
);
9883 unlock_user(p
, arg1
, 0);
9887 #if defined(TARGET_NR_readlinkat)
9888 case TARGET_NR_readlinkat
:
9891 p
= lock_user_string(arg2
);
9892 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9894 ret
= -TARGET_EFAULT
;
9895 } else if (is_proc_myself((const char *)p
, "exe")) {
9896 char real
[PATH_MAX
], *temp
;
9897 temp
= realpath(exec_path
, real
);
9898 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9899 snprintf((char *)p2
, arg4
, "%s", real
);
9901 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9903 unlock_user(p2
, arg3
, ret
);
9904 unlock_user(p
, arg2
, 0);
9908 #ifdef TARGET_NR_swapon
9909 case TARGET_NR_swapon
:
9910 if (!(p
= lock_user_string(arg1
)))
9911 return -TARGET_EFAULT
;
9912 ret
= get_errno(swapon(p
, arg2
));
9913 unlock_user(p
, arg1
, 0);
9916 case TARGET_NR_reboot
:
9917 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9918 /* arg4 must be ignored in all other cases */
9919 p
= lock_user_string(arg4
);
9921 return -TARGET_EFAULT
;
9923 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9924 unlock_user(p
, arg4
, 0);
9926 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9929 #ifdef TARGET_NR_mmap
9930 case TARGET_NR_mmap
:
9931 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9932 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9933 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9934 || defined(TARGET_S390X)
9937 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9938 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9939 return -TARGET_EFAULT
;
9946 unlock_user(v
, arg1
, 0);
9947 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9948 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9952 /* mmap pointers are always untagged */
9953 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9954 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9960 #ifdef TARGET_NR_mmap2
9961 case TARGET_NR_mmap2
:
9963 #define MMAP_SHIFT 12
9965 ret
= target_mmap(arg1
, arg2
, arg3
,
9966 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9967 arg5
, arg6
<< MMAP_SHIFT
);
9968 return get_errno(ret
);
9970 case TARGET_NR_munmap
:
9971 arg1
= cpu_untagged_addr(cpu
, arg1
);
9972 return get_errno(target_munmap(arg1
, arg2
));
9973 case TARGET_NR_mprotect
:
9974 arg1
= cpu_untagged_addr(cpu
, arg1
);
9976 TaskState
*ts
= cpu
->opaque
;
9977 /* Special hack to detect libc making the stack executable. */
9978 if ((arg3
& PROT_GROWSDOWN
)
9979 && arg1
>= ts
->info
->stack_limit
9980 && arg1
<= ts
->info
->start_stack
) {
9981 arg3
&= ~PROT_GROWSDOWN
;
9982 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9983 arg1
= ts
->info
->stack_limit
;
9986 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9987 #ifdef TARGET_NR_mremap
9988 case TARGET_NR_mremap
:
9989 arg1
= cpu_untagged_addr(cpu
, arg1
);
9990 /* mremap new_addr (arg5) is always untagged */
9991 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9993 /* ??? msync/mlock/munlock are broken for softmmu. */
9994 #ifdef TARGET_NR_msync
9995 case TARGET_NR_msync
:
9996 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
9998 #ifdef TARGET_NR_mlock
9999 case TARGET_NR_mlock
:
10000 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
10002 #ifdef TARGET_NR_munlock
10003 case TARGET_NR_munlock
:
10004 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10006 #ifdef TARGET_NR_mlockall
10007 case TARGET_NR_mlockall
:
10008 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10010 #ifdef TARGET_NR_munlockall
10011 case TARGET_NR_munlockall
:
10012 return get_errno(munlockall());
10014 #ifdef TARGET_NR_truncate
10015 case TARGET_NR_truncate
:
10016 if (!(p
= lock_user_string(arg1
)))
10017 return -TARGET_EFAULT
;
10018 ret
= get_errno(truncate(p
, arg2
));
10019 unlock_user(p
, arg1
, 0);
10022 #ifdef TARGET_NR_ftruncate
10023 case TARGET_NR_ftruncate
:
10024 return get_errno(ftruncate(arg1
, arg2
));
10026 case TARGET_NR_fchmod
:
10027 return get_errno(fchmod(arg1
, arg2
));
10028 #if defined(TARGET_NR_fchmodat)
10029 case TARGET_NR_fchmodat
:
10030 if (!(p
= lock_user_string(arg2
)))
10031 return -TARGET_EFAULT
;
10032 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10033 unlock_user(p
, arg2
, 0);
10036 case TARGET_NR_getpriority
:
10037 /* Note that negative values are valid for getpriority, so we must
10038 differentiate based on errno settings. */
10040 ret
= getpriority(arg1
, arg2
);
10041 if (ret
== -1 && errno
!= 0) {
10042 return -host_to_target_errno(errno
);
10044 #ifdef TARGET_ALPHA
10045 /* Return value is the unbiased priority. Signal no error. */
10046 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
10048 /* Return value is a biased priority to avoid negative numbers. */
10052 case TARGET_NR_setpriority
:
10053 return get_errno(setpriority(arg1
, arg2
, arg3
));
10054 #ifdef TARGET_NR_statfs
10055 case TARGET_NR_statfs
:
10056 if (!(p
= lock_user_string(arg1
))) {
10057 return -TARGET_EFAULT
;
10059 ret
= get_errno(statfs(path(p
), &stfs
));
10060 unlock_user(p
, arg1
, 0);
10062 if (!is_error(ret
)) {
10063 struct target_statfs
*target_stfs
;
10065 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10066 return -TARGET_EFAULT
;
10067 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10068 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10069 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10070 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10071 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10072 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10073 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10074 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10075 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10076 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10077 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10078 #ifdef _STATFS_F_FLAGS
10079 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10081 __put_user(0, &target_stfs
->f_flags
);
10083 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10084 unlock_user_struct(target_stfs
, arg2
, 1);
10088 #ifdef TARGET_NR_fstatfs
10089 case TARGET_NR_fstatfs
:
10090 ret
= get_errno(fstatfs(arg1
, &stfs
));
10091 goto convert_statfs
;
10093 #ifdef TARGET_NR_statfs64
10094 case TARGET_NR_statfs64
:
10095 if (!(p
= lock_user_string(arg1
))) {
10096 return -TARGET_EFAULT
;
10098 ret
= get_errno(statfs(path(p
), &stfs
));
10099 unlock_user(p
, arg1
, 0);
10101 if (!is_error(ret
)) {
10102 struct target_statfs64
*target_stfs
;
10104 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10105 return -TARGET_EFAULT
;
10106 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10107 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10108 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10109 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10110 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10111 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10112 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10113 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10114 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10115 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10116 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10117 #ifdef _STATFS_F_FLAGS
10118 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10120 __put_user(0, &target_stfs
->f_flags
);
10122 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10123 unlock_user_struct(target_stfs
, arg3
, 1);
10126 case TARGET_NR_fstatfs64
:
10127 ret
= get_errno(fstatfs(arg1
, &stfs
));
10128 goto convert_statfs64
;
10130 #ifdef TARGET_NR_socketcall
10131 case TARGET_NR_socketcall
:
10132 return do_socketcall(arg1
, arg2
);
10134 #ifdef TARGET_NR_accept
10135 case TARGET_NR_accept
:
10136 return do_accept4(arg1
, arg2
, arg3
, 0);
10138 #ifdef TARGET_NR_accept4
10139 case TARGET_NR_accept4
:
10140 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10142 #ifdef TARGET_NR_bind
10143 case TARGET_NR_bind
:
10144 return do_bind(arg1
, arg2
, arg3
);
10146 #ifdef TARGET_NR_connect
10147 case TARGET_NR_connect
:
10148 return do_connect(arg1
, arg2
, arg3
);
10150 #ifdef TARGET_NR_getpeername
10151 case TARGET_NR_getpeername
:
10152 return do_getpeername(arg1
, arg2
, arg3
);
10154 #ifdef TARGET_NR_getsockname
10155 case TARGET_NR_getsockname
:
10156 return do_getsockname(arg1
, arg2
, arg3
);
10158 #ifdef TARGET_NR_getsockopt
10159 case TARGET_NR_getsockopt
:
10160 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10162 #ifdef TARGET_NR_listen
10163 case TARGET_NR_listen
:
10164 return get_errno(listen(arg1
, arg2
));
10166 #ifdef TARGET_NR_recv
10167 case TARGET_NR_recv
:
10168 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10170 #ifdef TARGET_NR_recvfrom
10171 case TARGET_NR_recvfrom
:
10172 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10174 #ifdef TARGET_NR_recvmsg
10175 case TARGET_NR_recvmsg
:
10176 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10178 #ifdef TARGET_NR_send
10179 case TARGET_NR_send
:
10180 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10182 #ifdef TARGET_NR_sendmsg
10183 case TARGET_NR_sendmsg
:
10184 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10186 #ifdef TARGET_NR_sendmmsg
10187 case TARGET_NR_sendmmsg
:
10188 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10190 #ifdef TARGET_NR_recvmmsg
10191 case TARGET_NR_recvmmsg
:
10192 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10194 #ifdef TARGET_NR_sendto
10195 case TARGET_NR_sendto
:
10196 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10198 #ifdef TARGET_NR_shutdown
10199 case TARGET_NR_shutdown
:
10200 return get_errno(shutdown(arg1
, arg2
));
10202 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10203 case TARGET_NR_getrandom
:
10204 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10206 return -TARGET_EFAULT
;
10208 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10209 unlock_user(p
, arg1
, ret
);
10212 #ifdef TARGET_NR_socket
10213 case TARGET_NR_socket
:
10214 return do_socket(arg1
, arg2
, arg3
);
10216 #ifdef TARGET_NR_socketpair
10217 case TARGET_NR_socketpair
:
10218 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10220 #ifdef TARGET_NR_setsockopt
10221 case TARGET_NR_setsockopt
:
10222 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10224 #if defined(TARGET_NR_syslog)
10225 case TARGET_NR_syslog
:
10230 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10231 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10232 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10233 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10234 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10235 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10236 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10237 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10238 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10239 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10240 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10241 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10244 return -TARGET_EINVAL
;
10249 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10251 return -TARGET_EFAULT
;
10253 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10254 unlock_user(p
, arg2
, arg3
);
10258 return -TARGET_EINVAL
;
10263 case TARGET_NR_setitimer
:
10265 struct itimerval value
, ovalue
, *pvalue
;
10269 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10270 || copy_from_user_timeval(&pvalue
->it_value
,
10271 arg2
+ sizeof(struct target_timeval
)))
10272 return -TARGET_EFAULT
;
10276 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10277 if (!is_error(ret
) && arg3
) {
10278 if (copy_to_user_timeval(arg3
,
10279 &ovalue
.it_interval
)
10280 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10282 return -TARGET_EFAULT
;
10286 case TARGET_NR_getitimer
:
10288 struct itimerval value
;
10290 ret
= get_errno(getitimer(arg1
, &value
));
10291 if (!is_error(ret
) && arg2
) {
10292 if (copy_to_user_timeval(arg2
,
10293 &value
.it_interval
)
10294 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10296 return -TARGET_EFAULT
;
10300 #ifdef TARGET_NR_stat
10301 case TARGET_NR_stat
:
10302 if (!(p
= lock_user_string(arg1
))) {
10303 return -TARGET_EFAULT
;
10305 ret
= get_errno(stat(path(p
), &st
));
10306 unlock_user(p
, arg1
, 0);
10309 #ifdef TARGET_NR_lstat
10310 case TARGET_NR_lstat
:
10311 if (!(p
= lock_user_string(arg1
))) {
10312 return -TARGET_EFAULT
;
10314 ret
= get_errno(lstat(path(p
), &st
));
10315 unlock_user(p
, arg1
, 0);
10318 #ifdef TARGET_NR_fstat
10319 case TARGET_NR_fstat
:
10321 ret
= get_errno(fstat(arg1
, &st
));
10322 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10325 if (!is_error(ret
)) {
10326 struct target_stat
*target_st
;
10328 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10329 return -TARGET_EFAULT
;
10330 memset(target_st
, 0, sizeof(*target_st
));
10331 __put_user(st
.st_dev
, &target_st
->st_dev
);
10332 __put_user(st
.st_ino
, &target_st
->st_ino
);
10333 __put_user(st
.st_mode
, &target_st
->st_mode
);
10334 __put_user(st
.st_uid
, &target_st
->st_uid
);
10335 __put_user(st
.st_gid
, &target_st
->st_gid
);
10336 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10337 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10338 __put_user(st
.st_size
, &target_st
->st_size
);
10339 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10340 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10341 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10342 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10343 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10344 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10345 __put_user(st
.st_atim
.tv_nsec
,
10346 &target_st
->target_st_atime_nsec
);
10347 __put_user(st
.st_mtim
.tv_nsec
,
10348 &target_st
->target_st_mtime_nsec
);
10349 __put_user(st
.st_ctim
.tv_nsec
,
10350 &target_st
->target_st_ctime_nsec
);
10352 unlock_user_struct(target_st
, arg2
, 1);
10357 case TARGET_NR_vhangup
:
10358 return get_errno(vhangup());
10359 #ifdef TARGET_NR_syscall
10360 case TARGET_NR_syscall
:
10361 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10362 arg6
, arg7
, arg8
, 0);
10364 #if defined(TARGET_NR_wait4)
10365 case TARGET_NR_wait4
:
10368 abi_long status_ptr
= arg2
;
10369 struct rusage rusage
, *rusage_ptr
;
10370 abi_ulong target_rusage
= arg4
;
10371 abi_long rusage_err
;
10373 rusage_ptr
= &rusage
;
10376 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10377 if (!is_error(ret
)) {
10378 if (status_ptr
&& ret
) {
10379 status
= host_to_target_waitstatus(status
);
10380 if (put_user_s32(status
, status_ptr
))
10381 return -TARGET_EFAULT
;
10383 if (target_rusage
) {
10384 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10393 #ifdef TARGET_NR_swapoff
10394 case TARGET_NR_swapoff
:
10395 if (!(p
= lock_user_string(arg1
)))
10396 return -TARGET_EFAULT
;
10397 ret
= get_errno(swapoff(p
));
10398 unlock_user(p
, arg1
, 0);
10401 case TARGET_NR_sysinfo
:
10403 struct target_sysinfo
*target_value
;
10404 struct sysinfo value
;
10405 ret
= get_errno(sysinfo(&value
));
10406 if (!is_error(ret
) && arg1
)
10408 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10409 return -TARGET_EFAULT
;
10410 __put_user(value
.uptime
, &target_value
->uptime
);
10411 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10412 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10413 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10414 __put_user(value
.totalram
, &target_value
->totalram
);
10415 __put_user(value
.freeram
, &target_value
->freeram
);
10416 __put_user(value
.sharedram
, &target_value
->sharedram
);
10417 __put_user(value
.bufferram
, &target_value
->bufferram
);
10418 __put_user(value
.totalswap
, &target_value
->totalswap
);
10419 __put_user(value
.freeswap
, &target_value
->freeswap
);
10420 __put_user(value
.procs
, &target_value
->procs
);
10421 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10422 __put_user(value
.freehigh
, &target_value
->freehigh
);
10423 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10424 unlock_user_struct(target_value
, arg1
, 1);
10428 #ifdef TARGET_NR_ipc
10429 case TARGET_NR_ipc
:
10430 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10432 #ifdef TARGET_NR_semget
10433 case TARGET_NR_semget
:
10434 return get_errno(semget(arg1
, arg2
, arg3
));
10436 #ifdef TARGET_NR_semop
10437 case TARGET_NR_semop
:
10438 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10440 #ifdef TARGET_NR_semtimedop
10441 case TARGET_NR_semtimedop
:
10442 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10444 #ifdef TARGET_NR_semtimedop_time64
10445 case TARGET_NR_semtimedop_time64
:
10446 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10448 #ifdef TARGET_NR_semctl
10449 case TARGET_NR_semctl
:
10450 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10452 #ifdef TARGET_NR_msgctl
10453 case TARGET_NR_msgctl
:
10454 return do_msgctl(arg1
, arg2
, arg3
);
10456 #ifdef TARGET_NR_msgget
10457 case TARGET_NR_msgget
:
10458 return get_errno(msgget(arg1
, arg2
));
10460 #ifdef TARGET_NR_msgrcv
10461 case TARGET_NR_msgrcv
:
10462 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10464 #ifdef TARGET_NR_msgsnd
10465 case TARGET_NR_msgsnd
:
10466 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10468 #ifdef TARGET_NR_shmget
10469 case TARGET_NR_shmget
:
10470 return get_errno(shmget(arg1
, arg2
, arg3
));
10472 #ifdef TARGET_NR_shmctl
10473 case TARGET_NR_shmctl
:
10474 return do_shmctl(arg1
, arg2
, arg3
);
10476 #ifdef TARGET_NR_shmat
10477 case TARGET_NR_shmat
:
10478 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10480 #ifdef TARGET_NR_shmdt
10481 case TARGET_NR_shmdt
:
10482 return do_shmdt(arg1
);
10484 case TARGET_NR_fsync
:
10485 return get_errno(fsync(arg1
));
10486 case TARGET_NR_clone
:
10487 /* Linux manages to have three different orderings for its
10488 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10489 * match the kernel's CONFIG_CLONE_* settings.
10490 * Microblaze is further special in that it uses a sixth
10491 * implicit argument to clone for the TLS pointer.
10493 #if defined(TARGET_MICROBLAZE)
10494 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10495 #elif defined(TARGET_CLONE_BACKWARDS)
10496 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10497 #elif defined(TARGET_CLONE_BACKWARDS2)
10498 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10500 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10503 #ifdef __NR_exit_group
10504 /* new thread calls */
10505 case TARGET_NR_exit_group
:
10506 preexit_cleanup(cpu_env
, arg1
);
10507 return get_errno(exit_group(arg1
));
10509 case TARGET_NR_setdomainname
:
10510 if (!(p
= lock_user_string(arg1
)))
10511 return -TARGET_EFAULT
;
10512 ret
= get_errno(setdomainname(p
, arg2
));
10513 unlock_user(p
, arg1
, 0);
10515 case TARGET_NR_uname
:
10516 /* no need to transcode because we use the linux syscall */
10518 struct new_utsname
* buf
;
10520 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10521 return -TARGET_EFAULT
;
10522 ret
= get_errno(sys_uname(buf
));
10523 if (!is_error(ret
)) {
10524 /* Overwrite the native machine name with whatever is being
10526 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10527 sizeof(buf
->machine
));
10528 /* Allow the user to override the reported release. */
10529 if (qemu_uname_release
&& *qemu_uname_release
) {
10530 g_strlcpy(buf
->release
, qemu_uname_release
,
10531 sizeof(buf
->release
));
10534 unlock_user_struct(buf
, arg1
, 1);
10538 case TARGET_NR_modify_ldt
:
10539 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10540 #if !defined(TARGET_X86_64)
10541 case TARGET_NR_vm86
:
10542 return do_vm86(cpu_env
, arg1
, arg2
);
10545 #if defined(TARGET_NR_adjtimex)
10546 case TARGET_NR_adjtimex
:
10548 struct timex host_buf
;
10550 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10551 return -TARGET_EFAULT
;
10553 ret
= get_errno(adjtimex(&host_buf
));
10554 if (!is_error(ret
)) {
10555 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10556 return -TARGET_EFAULT
;
10562 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10563 case TARGET_NR_clock_adjtime
:
10565 struct timex htx
, *phtx
= &htx
;
10567 if (target_to_host_timex(phtx
, arg2
) != 0) {
10568 return -TARGET_EFAULT
;
10570 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10571 if (!is_error(ret
) && phtx
) {
10572 if (host_to_target_timex(arg2
, phtx
) != 0) {
10573 return -TARGET_EFAULT
;
10579 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10580 case TARGET_NR_clock_adjtime64
:
10584 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10585 return -TARGET_EFAULT
;
10587 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10588 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10589 return -TARGET_EFAULT
;
10594 case TARGET_NR_getpgid
:
10595 return get_errno(getpgid(arg1
));
10596 case TARGET_NR_fchdir
:
10597 return get_errno(fchdir(arg1
));
10598 case TARGET_NR_personality
:
10599 return get_errno(personality(arg1
));
10600 #ifdef TARGET_NR__llseek /* Not on alpha */
10601 case TARGET_NR__llseek
:
10604 #if !defined(__NR_llseek)
10605 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10607 ret
= get_errno(res
);
10612 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10614 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10615 return -TARGET_EFAULT
;
10620 #ifdef TARGET_NR_getdents
10621 case TARGET_NR_getdents
:
10622 return do_getdents(arg1
, arg2
, arg3
);
10623 #endif /* TARGET_NR_getdents */
10624 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10625 case TARGET_NR_getdents64
:
10626 return do_getdents64(arg1
, arg2
, arg3
);
10627 #endif /* TARGET_NR_getdents64 */
10628 #if defined(TARGET_NR__newselect)
10629 case TARGET_NR__newselect
:
10630 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10632 #ifdef TARGET_NR_poll
10633 case TARGET_NR_poll
:
10634 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10636 #ifdef TARGET_NR_ppoll
10637 case TARGET_NR_ppoll
:
10638 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10640 #ifdef TARGET_NR_ppoll_time64
10641 case TARGET_NR_ppoll_time64
:
10642 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10644 case TARGET_NR_flock
:
10645 /* NOTE: the flock constant seems to be the same for every
10647 return get_errno(safe_flock(arg1
, arg2
));
10648 case TARGET_NR_readv
:
10650 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10652 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10653 unlock_iovec(vec
, arg2
, arg3
, 1);
10655 ret
= -host_to_target_errno(errno
);
10659 case TARGET_NR_writev
:
10661 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10663 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10664 unlock_iovec(vec
, arg2
, arg3
, 0);
10666 ret
= -host_to_target_errno(errno
);
10670 #if defined(TARGET_NR_preadv)
10671 case TARGET_NR_preadv
:
10673 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10675 unsigned long low
, high
;
10677 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10678 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10679 unlock_iovec(vec
, arg2
, arg3
, 1);
10681 ret
= -host_to_target_errno(errno
);
10686 #if defined(TARGET_NR_pwritev)
10687 case TARGET_NR_pwritev
:
10689 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10691 unsigned long low
, high
;
10693 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10694 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10695 unlock_iovec(vec
, arg2
, arg3
, 0);
10697 ret
= -host_to_target_errno(errno
);
10702 case TARGET_NR_getsid
:
10703 return get_errno(getsid(arg1
));
10704 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10705 case TARGET_NR_fdatasync
:
10706 return get_errno(fdatasync(arg1
));
10708 case TARGET_NR_sched_getaffinity
:
10710 unsigned int mask_size
;
10711 unsigned long *mask
;
10714 * sched_getaffinity needs multiples of ulong, so need to take
10715 * care of mismatches between target ulong and host ulong sizes.
10717 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10718 return -TARGET_EINVAL
;
10720 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10722 mask
= alloca(mask_size
);
10723 memset(mask
, 0, mask_size
);
10724 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10726 if (!is_error(ret
)) {
10728 /* More data returned than the caller's buffer will fit.
10729 * This only happens if sizeof(abi_long) < sizeof(long)
10730 * and the caller passed us a buffer holding an odd number
10731 * of abi_longs. If the host kernel is actually using the
10732 * extra 4 bytes then fail EINVAL; otherwise we can just
10733 * ignore them and only copy the interesting part.
10735 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10736 if (numcpus
> arg2
* 8) {
10737 return -TARGET_EINVAL
;
10742 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10743 return -TARGET_EFAULT
;
10748 case TARGET_NR_sched_setaffinity
:
10750 unsigned int mask_size
;
10751 unsigned long *mask
;
10754 * sched_setaffinity needs multiples of ulong, so need to take
10755 * care of mismatches between target ulong and host ulong sizes.
10757 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10758 return -TARGET_EINVAL
;
10760 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10761 mask
= alloca(mask_size
);
10763 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10768 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10770 case TARGET_NR_getcpu
:
10772 unsigned cpu
, node
;
10773 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10774 arg2
? &node
: NULL
,
10776 if (is_error(ret
)) {
10779 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10780 return -TARGET_EFAULT
;
10782 if (arg2
&& put_user_u32(node
, arg2
)) {
10783 return -TARGET_EFAULT
;
10787 case TARGET_NR_sched_setparam
:
10789 struct target_sched_param
*target_schp
;
10790 struct sched_param schp
;
10793 return -TARGET_EINVAL
;
10795 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10796 return -TARGET_EFAULT
;
10798 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10799 unlock_user_struct(target_schp
, arg2
, 0);
10800 return get_errno(sys_sched_setparam(arg1
, &schp
));
10802 case TARGET_NR_sched_getparam
:
10804 struct target_sched_param
*target_schp
;
10805 struct sched_param schp
;
10808 return -TARGET_EINVAL
;
10810 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10811 if (!is_error(ret
)) {
10812 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10813 return -TARGET_EFAULT
;
10815 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10816 unlock_user_struct(target_schp
, arg2
, 1);
10820 case TARGET_NR_sched_setscheduler
:
10822 struct target_sched_param
*target_schp
;
10823 struct sched_param schp
;
10825 return -TARGET_EINVAL
;
10827 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10828 return -TARGET_EFAULT
;
10830 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10831 unlock_user_struct(target_schp
, arg3
, 0);
10832 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10834 case TARGET_NR_sched_getscheduler
:
10835 return get_errno(sys_sched_getscheduler(arg1
));
10836 case TARGET_NR_sched_getattr
:
10838 struct target_sched_attr
*target_scha
;
10839 struct sched_attr scha
;
10841 return -TARGET_EINVAL
;
10843 if (arg3
> sizeof(scha
)) {
10844 arg3
= sizeof(scha
);
10846 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
10847 if (!is_error(ret
)) {
10848 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10849 if (!target_scha
) {
10850 return -TARGET_EFAULT
;
10852 target_scha
->size
= tswap32(scha
.size
);
10853 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
10854 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
10855 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
10856 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
10857 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
10858 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
10859 target_scha
->sched_period
= tswap64(scha
.sched_period
);
10860 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
10861 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
10862 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
10864 unlock_user(target_scha
, arg2
, arg3
);
10868 case TARGET_NR_sched_setattr
:
10870 struct target_sched_attr
*target_scha
;
10871 struct sched_attr scha
;
10875 return -TARGET_EINVAL
;
10877 if (get_user_u32(size
, arg2
)) {
10878 return -TARGET_EFAULT
;
10881 size
= offsetof(struct target_sched_attr
, sched_util_min
);
10883 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
10884 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10885 return -TARGET_EFAULT
;
10887 return -TARGET_E2BIG
;
10890 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
10893 } else if (zeroed
== 0) {
10894 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10895 return -TARGET_EFAULT
;
10897 return -TARGET_E2BIG
;
10899 if (size
> sizeof(struct target_sched_attr
)) {
10900 size
= sizeof(struct target_sched_attr
);
10903 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
10904 if (!target_scha
) {
10905 return -TARGET_EFAULT
;
10908 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
10909 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
10910 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
10911 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
10912 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
10913 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
10914 scha
.sched_period
= tswap64(target_scha
->sched_period
);
10915 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
10916 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
10917 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
10919 unlock_user(target_scha
, arg2
, 0);
10920 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
10922 case TARGET_NR_sched_yield
:
10923 return get_errno(sched_yield());
10924 case TARGET_NR_sched_get_priority_max
:
10925 return get_errno(sched_get_priority_max(arg1
));
10926 case TARGET_NR_sched_get_priority_min
:
10927 return get_errno(sched_get_priority_min(arg1
));
10928 #ifdef TARGET_NR_sched_rr_get_interval
10929 case TARGET_NR_sched_rr_get_interval
:
10931 struct timespec ts
;
10932 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10933 if (!is_error(ret
)) {
10934 ret
= host_to_target_timespec(arg2
, &ts
);
10939 #ifdef TARGET_NR_sched_rr_get_interval_time64
10940 case TARGET_NR_sched_rr_get_interval_time64
:
10942 struct timespec ts
;
10943 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10944 if (!is_error(ret
)) {
10945 ret
= host_to_target_timespec64(arg2
, &ts
);
10950 #if defined(TARGET_NR_nanosleep)
10951 case TARGET_NR_nanosleep
:
10953 struct timespec req
, rem
;
10954 target_to_host_timespec(&req
, arg1
);
10955 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10956 if (is_error(ret
) && arg2
) {
10957 host_to_target_timespec(arg2
, &rem
);
10962 case TARGET_NR_prctl
:
10963 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
10965 #ifdef TARGET_NR_arch_prctl
10966 case TARGET_NR_arch_prctl
:
10967 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10969 #ifdef TARGET_NR_pread64
10970 case TARGET_NR_pread64
:
10971 if (regpairs_aligned(cpu_env
, num
)) {
10975 if (arg2
== 0 && arg3
== 0) {
10976 /* Special-case NULL buffer and zero length, which should succeed */
10979 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10981 return -TARGET_EFAULT
;
10984 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10985 unlock_user(p
, arg2
, ret
);
10987 case TARGET_NR_pwrite64
:
10988 if (regpairs_aligned(cpu_env
, num
)) {
10992 if (arg2
== 0 && arg3
== 0) {
10993 /* Special-case NULL buffer and zero length, which should succeed */
10996 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10998 return -TARGET_EFAULT
;
11001 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11002 unlock_user(p
, arg2
, 0);
11005 case TARGET_NR_getcwd
:
11006 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11007 return -TARGET_EFAULT
;
11008 ret
= get_errno(sys_getcwd1(p
, arg2
));
11009 unlock_user(p
, arg1
, ret
);
11011 case TARGET_NR_capget
:
11012 case TARGET_NR_capset
:
11014 struct target_user_cap_header
*target_header
;
11015 struct target_user_cap_data
*target_data
= NULL
;
11016 struct __user_cap_header_struct header
;
11017 struct __user_cap_data_struct data
[2];
11018 struct __user_cap_data_struct
*dataptr
= NULL
;
11019 int i
, target_datalen
;
11020 int data_items
= 1;
11022 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11023 return -TARGET_EFAULT
;
11025 header
.version
= tswap32(target_header
->version
);
11026 header
.pid
= tswap32(target_header
->pid
);
11028 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11029 /* Version 2 and up takes pointer to two user_data structs */
11033 target_datalen
= sizeof(*target_data
) * data_items
;
11036 if (num
== TARGET_NR_capget
) {
11037 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11039 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11041 if (!target_data
) {
11042 unlock_user_struct(target_header
, arg1
, 0);
11043 return -TARGET_EFAULT
;
11046 if (num
== TARGET_NR_capset
) {
11047 for (i
= 0; i
< data_items
; i
++) {
11048 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11049 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11050 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11057 if (num
== TARGET_NR_capget
) {
11058 ret
= get_errno(capget(&header
, dataptr
));
11060 ret
= get_errno(capset(&header
, dataptr
));
11063 /* The kernel always updates version for both capget and capset */
11064 target_header
->version
= tswap32(header
.version
);
11065 unlock_user_struct(target_header
, arg1
, 1);
11068 if (num
== TARGET_NR_capget
) {
11069 for (i
= 0; i
< data_items
; i
++) {
11070 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11071 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11072 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11074 unlock_user(target_data
, arg2
, target_datalen
);
11076 unlock_user(target_data
, arg2
, 0);
11081 case TARGET_NR_sigaltstack
:
11082 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11084 #ifdef CONFIG_SENDFILE
11085 #ifdef TARGET_NR_sendfile
11086 case TARGET_NR_sendfile
:
11088 off_t
*offp
= NULL
;
11091 ret
= get_user_sal(off
, arg3
);
11092 if (is_error(ret
)) {
11097 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11098 if (!is_error(ret
) && arg3
) {
11099 abi_long ret2
= put_user_sal(off
, arg3
);
11100 if (is_error(ret2
)) {
11107 #ifdef TARGET_NR_sendfile64
11108 case TARGET_NR_sendfile64
:
11110 off_t
*offp
= NULL
;
11113 ret
= get_user_s64(off
, arg3
);
11114 if (is_error(ret
)) {
11119 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11120 if (!is_error(ret
) && arg3
) {
11121 abi_long ret2
= put_user_s64(off
, arg3
);
11122 if (is_error(ret2
)) {
11130 #ifdef TARGET_NR_vfork
11131 case TARGET_NR_vfork
:
11132 return get_errno(do_fork(cpu_env
,
11133 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11136 #ifdef TARGET_NR_ugetrlimit
11137 case TARGET_NR_ugetrlimit
:
11139 struct rlimit rlim
;
11140 int resource
= target_to_host_resource(arg1
);
11141 ret
= get_errno(getrlimit(resource
, &rlim
));
11142 if (!is_error(ret
)) {
11143 struct target_rlimit
*target_rlim
;
11144 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11145 return -TARGET_EFAULT
;
11146 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11147 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11148 unlock_user_struct(target_rlim
, arg2
, 1);
11153 #ifdef TARGET_NR_truncate64
11154 case TARGET_NR_truncate64
:
11155 if (!(p
= lock_user_string(arg1
)))
11156 return -TARGET_EFAULT
;
11157 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11158 unlock_user(p
, arg1
, 0);
11161 #ifdef TARGET_NR_ftruncate64
11162 case TARGET_NR_ftruncate64
:
11163 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11165 #ifdef TARGET_NR_stat64
11166 case TARGET_NR_stat64
:
11167 if (!(p
= lock_user_string(arg1
))) {
11168 return -TARGET_EFAULT
;
11170 ret
= get_errno(stat(path(p
), &st
));
11171 unlock_user(p
, arg1
, 0);
11172 if (!is_error(ret
))
11173 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11176 #ifdef TARGET_NR_lstat64
11177 case TARGET_NR_lstat64
:
11178 if (!(p
= lock_user_string(arg1
))) {
11179 return -TARGET_EFAULT
;
11181 ret
= get_errno(lstat(path(p
), &st
));
11182 unlock_user(p
, arg1
, 0);
11183 if (!is_error(ret
))
11184 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11187 #ifdef TARGET_NR_fstat64
11188 case TARGET_NR_fstat64
:
11189 ret
= get_errno(fstat(arg1
, &st
));
11190 if (!is_error(ret
))
11191 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11194 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11195 #ifdef TARGET_NR_fstatat64
11196 case TARGET_NR_fstatat64
:
11198 #ifdef TARGET_NR_newfstatat
11199 case TARGET_NR_newfstatat
:
11201 if (!(p
= lock_user_string(arg2
))) {
11202 return -TARGET_EFAULT
;
11204 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11205 unlock_user(p
, arg2
, 0);
11206 if (!is_error(ret
))
11207 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11210 #if defined(TARGET_NR_statx)
11211 case TARGET_NR_statx
:
11213 struct target_statx
*target_stx
;
11217 p
= lock_user_string(arg2
);
11219 return -TARGET_EFAULT
;
11221 #if defined(__NR_statx)
11224 * It is assumed that struct statx is architecture independent.
11226 struct target_statx host_stx
;
11229 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11230 if (!is_error(ret
)) {
11231 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11232 unlock_user(p
, arg2
, 0);
11233 return -TARGET_EFAULT
;
11237 if (ret
!= -TARGET_ENOSYS
) {
11238 unlock_user(p
, arg2
, 0);
11243 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11244 unlock_user(p
, arg2
, 0);
11246 if (!is_error(ret
)) {
11247 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11248 return -TARGET_EFAULT
;
11250 memset(target_stx
, 0, sizeof(*target_stx
));
11251 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11252 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11253 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11254 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11255 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11256 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11257 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11258 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11259 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11260 __put_user(st
.st_size
, &target_stx
->stx_size
);
11261 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11262 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11263 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11264 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11265 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11266 unlock_user_struct(target_stx
, arg5
, 1);
11271 #ifdef TARGET_NR_lchown
11272 case TARGET_NR_lchown
:
11273 if (!(p
= lock_user_string(arg1
)))
11274 return -TARGET_EFAULT
;
11275 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11276 unlock_user(p
, arg1
, 0);
11279 #ifdef TARGET_NR_getuid
11280 case TARGET_NR_getuid
:
11281 return get_errno(high2lowuid(getuid()));
11283 #ifdef TARGET_NR_getgid
11284 case TARGET_NR_getgid
:
11285 return get_errno(high2lowgid(getgid()));
11287 #ifdef TARGET_NR_geteuid
11288 case TARGET_NR_geteuid
:
11289 return get_errno(high2lowuid(geteuid()));
11291 #ifdef TARGET_NR_getegid
11292 case TARGET_NR_getegid
:
11293 return get_errno(high2lowgid(getegid()));
11295 case TARGET_NR_setreuid
:
11296 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11297 case TARGET_NR_setregid
:
11298 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11299 case TARGET_NR_getgroups
:
11301 int gidsetsize
= arg1
;
11302 target_id
*target_grouplist
;
11306 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11307 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11308 if (gidsetsize
== 0)
11310 if (!is_error(ret
)) {
11311 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11312 if (!target_grouplist
)
11313 return -TARGET_EFAULT
;
11314 for(i
= 0;i
< ret
; i
++)
11315 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11316 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11320 case TARGET_NR_setgroups
:
11322 int gidsetsize
= arg1
;
11323 target_id
*target_grouplist
;
11324 gid_t
*grouplist
= NULL
;
11327 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11328 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11329 if (!target_grouplist
) {
11330 return -TARGET_EFAULT
;
11332 for (i
= 0; i
< gidsetsize
; i
++) {
11333 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11335 unlock_user(target_grouplist
, arg2
, 0);
11337 return get_errno(setgroups(gidsetsize
, grouplist
));
11339 case TARGET_NR_fchown
:
11340 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11341 #if defined(TARGET_NR_fchownat)
11342 case TARGET_NR_fchownat
:
11343 if (!(p
= lock_user_string(arg2
)))
11344 return -TARGET_EFAULT
;
11345 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11346 low2highgid(arg4
), arg5
));
11347 unlock_user(p
, arg2
, 0);
11350 #ifdef TARGET_NR_setresuid
11351 case TARGET_NR_setresuid
:
11352 return get_errno(sys_setresuid(low2highuid(arg1
),
11354 low2highuid(arg3
)));
11356 #ifdef TARGET_NR_getresuid
11357 case TARGET_NR_getresuid
:
11359 uid_t ruid
, euid
, suid
;
11360 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11361 if (!is_error(ret
)) {
11362 if (put_user_id(high2lowuid(ruid
), arg1
)
11363 || put_user_id(high2lowuid(euid
), arg2
)
11364 || put_user_id(high2lowuid(suid
), arg3
))
11365 return -TARGET_EFAULT
;
11370 #ifdef TARGET_NR_getresgid
11371 case TARGET_NR_setresgid
:
11372 return get_errno(sys_setresgid(low2highgid(arg1
),
11374 low2highgid(arg3
)));
11376 #ifdef TARGET_NR_getresgid
11377 case TARGET_NR_getresgid
:
11379 gid_t rgid
, egid
, sgid
;
11380 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11381 if (!is_error(ret
)) {
11382 if (put_user_id(high2lowgid(rgid
), arg1
)
11383 || put_user_id(high2lowgid(egid
), arg2
)
11384 || put_user_id(high2lowgid(sgid
), arg3
))
11385 return -TARGET_EFAULT
;
11390 #ifdef TARGET_NR_chown
11391 case TARGET_NR_chown
:
11392 if (!(p
= lock_user_string(arg1
)))
11393 return -TARGET_EFAULT
;
11394 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11395 unlock_user(p
, arg1
, 0);
11398 case TARGET_NR_setuid
:
11399 return get_errno(sys_setuid(low2highuid(arg1
)));
11400 case TARGET_NR_setgid
:
11401 return get_errno(sys_setgid(low2highgid(arg1
)));
11402 case TARGET_NR_setfsuid
:
11403 return get_errno(setfsuid(arg1
));
11404 case TARGET_NR_setfsgid
:
11405 return get_errno(setfsgid(arg1
));
11407 #ifdef TARGET_NR_lchown32
11408 case TARGET_NR_lchown32
:
11409 if (!(p
= lock_user_string(arg1
)))
11410 return -TARGET_EFAULT
;
11411 ret
= get_errno(lchown(p
, arg2
, arg3
));
11412 unlock_user(p
, arg1
, 0);
11415 #ifdef TARGET_NR_getuid32
11416 case TARGET_NR_getuid32
:
11417 return get_errno(getuid());
11420 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11421 /* Alpha specific */
11422 case TARGET_NR_getxuid
:
11426 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11428 return get_errno(getuid());
11430 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11431 /* Alpha specific */
11432 case TARGET_NR_getxgid
:
11436 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11438 return get_errno(getgid());
11440 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11441 /* Alpha specific */
11442 case TARGET_NR_osf_getsysinfo
:
11443 ret
= -TARGET_EOPNOTSUPP
;
11445 case TARGET_GSI_IEEE_FP_CONTROL
:
11447 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11448 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11450 swcr
&= ~SWCR_STATUS_MASK
;
11451 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11453 if (put_user_u64 (swcr
, arg2
))
11454 return -TARGET_EFAULT
;
11459 /* case GSI_IEEE_STATE_AT_SIGNAL:
11460 -- Not implemented in linux kernel.
11462 -- Retrieves current unaligned access state; not much used.
11463 case GSI_PROC_TYPE:
11464 -- Retrieves implver information; surely not used.
11465 case GSI_GET_HWRPB:
11466 -- Grabs a copy of the HWRPB; surely not used.
11471 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11472 /* Alpha specific */
11473 case TARGET_NR_osf_setsysinfo
:
11474 ret
= -TARGET_EOPNOTSUPP
;
11476 case TARGET_SSI_IEEE_FP_CONTROL
:
11478 uint64_t swcr
, fpcr
;
11480 if (get_user_u64 (swcr
, arg2
)) {
11481 return -TARGET_EFAULT
;
11485 * The kernel calls swcr_update_status to update the
11486 * status bits from the fpcr at every point that it
11487 * could be queried. Therefore, we store the status
11488 * bits only in FPCR.
11490 ((CPUAlphaState
*)cpu_env
)->swcr
11491 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11493 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11494 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11495 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11496 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11501 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11503 uint64_t exc
, fpcr
, fex
;
11505 if (get_user_u64(exc
, arg2
)) {
11506 return -TARGET_EFAULT
;
11508 exc
&= SWCR_STATUS_MASK
;
11509 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11511 /* Old exceptions are not signaled. */
11512 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11514 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11515 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11517 /* Update the hardware fpcr. */
11518 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11519 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11522 int si_code
= TARGET_FPE_FLTUNK
;
11523 target_siginfo_t info
;
11525 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11526 si_code
= TARGET_FPE_FLTUND
;
11528 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11529 si_code
= TARGET_FPE_FLTRES
;
11531 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11532 si_code
= TARGET_FPE_FLTUND
;
11534 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11535 si_code
= TARGET_FPE_FLTOVF
;
11537 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11538 si_code
= TARGET_FPE_FLTDIV
;
11540 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11541 si_code
= TARGET_FPE_FLTINV
;
11544 info
.si_signo
= SIGFPE
;
11546 info
.si_code
= si_code
;
11547 info
._sifields
._sigfault
._addr
11548 = ((CPUArchState
*)cpu_env
)->pc
;
11549 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11550 QEMU_SI_FAULT
, &info
);
11556 /* case SSI_NVPAIRS:
11557 -- Used with SSIN_UACPROC to enable unaligned accesses.
11558 case SSI_IEEE_STATE_AT_SIGNAL:
11559 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11560 -- Not implemented in linux kernel
11565 #ifdef TARGET_NR_osf_sigprocmask
11566 /* Alpha specific. */
11567 case TARGET_NR_osf_sigprocmask
:
11571 sigset_t set
, oldset
;
11574 case TARGET_SIG_BLOCK
:
11577 case TARGET_SIG_UNBLOCK
:
11580 case TARGET_SIG_SETMASK
:
11584 return -TARGET_EINVAL
;
11587 target_to_host_old_sigset(&set
, &mask
);
11588 ret
= do_sigprocmask(how
, &set
, &oldset
);
11590 host_to_target_old_sigset(&mask
, &oldset
);
11597 #ifdef TARGET_NR_getgid32
11598 case TARGET_NR_getgid32
:
11599 return get_errno(getgid());
11601 #ifdef TARGET_NR_geteuid32
11602 case TARGET_NR_geteuid32
:
11603 return get_errno(geteuid());
11605 #ifdef TARGET_NR_getegid32
11606 case TARGET_NR_getegid32
:
11607 return get_errno(getegid());
11609 #ifdef TARGET_NR_setreuid32
11610 case TARGET_NR_setreuid32
:
11611 return get_errno(setreuid(arg1
, arg2
));
11613 #ifdef TARGET_NR_setregid32
11614 case TARGET_NR_setregid32
:
11615 return get_errno(setregid(arg1
, arg2
));
11617 #ifdef TARGET_NR_getgroups32
11618 case TARGET_NR_getgroups32
:
11620 int gidsetsize
= arg1
;
11621 uint32_t *target_grouplist
;
11625 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11626 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11627 if (gidsetsize
== 0)
11629 if (!is_error(ret
)) {
11630 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11631 if (!target_grouplist
) {
11632 return -TARGET_EFAULT
;
11634 for(i
= 0;i
< ret
; i
++)
11635 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11636 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11641 #ifdef TARGET_NR_setgroups32
11642 case TARGET_NR_setgroups32
:
11644 int gidsetsize
= arg1
;
11645 uint32_t *target_grouplist
;
11649 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11650 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11651 if (!target_grouplist
) {
11652 return -TARGET_EFAULT
;
11654 for(i
= 0;i
< gidsetsize
; i
++)
11655 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11656 unlock_user(target_grouplist
, arg2
, 0);
11657 return get_errno(setgroups(gidsetsize
, grouplist
));
11660 #ifdef TARGET_NR_fchown32
11661 case TARGET_NR_fchown32
:
11662 return get_errno(fchown(arg1
, arg2
, arg3
));
11664 #ifdef TARGET_NR_setresuid32
11665 case TARGET_NR_setresuid32
:
11666 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11668 #ifdef TARGET_NR_getresuid32
11669 case TARGET_NR_getresuid32
:
11671 uid_t ruid
, euid
, suid
;
11672 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11673 if (!is_error(ret
)) {
11674 if (put_user_u32(ruid
, arg1
)
11675 || put_user_u32(euid
, arg2
)
11676 || put_user_u32(suid
, arg3
))
11677 return -TARGET_EFAULT
;
11682 #ifdef TARGET_NR_setresgid32
11683 case TARGET_NR_setresgid32
:
11684 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11686 #ifdef TARGET_NR_getresgid32
11687 case TARGET_NR_getresgid32
:
11689 gid_t rgid
, egid
, sgid
;
11690 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11691 if (!is_error(ret
)) {
11692 if (put_user_u32(rgid
, arg1
)
11693 || put_user_u32(egid
, arg2
)
11694 || put_user_u32(sgid
, arg3
))
11695 return -TARGET_EFAULT
;
11700 #ifdef TARGET_NR_chown32
11701 case TARGET_NR_chown32
:
11702 if (!(p
= lock_user_string(arg1
)))
11703 return -TARGET_EFAULT
;
11704 ret
= get_errno(chown(p
, arg2
, arg3
));
11705 unlock_user(p
, arg1
, 0);
11708 #ifdef TARGET_NR_setuid32
11709 case TARGET_NR_setuid32
:
11710 return get_errno(sys_setuid(arg1
));
11712 #ifdef TARGET_NR_setgid32
11713 case TARGET_NR_setgid32
:
11714 return get_errno(sys_setgid(arg1
));
11716 #ifdef TARGET_NR_setfsuid32
11717 case TARGET_NR_setfsuid32
:
11718 return get_errno(setfsuid(arg1
));
11720 #ifdef TARGET_NR_setfsgid32
11721 case TARGET_NR_setfsgid32
:
11722 return get_errno(setfsgid(arg1
));
11724 #ifdef TARGET_NR_mincore
11725 case TARGET_NR_mincore
:
11727 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11729 return -TARGET_ENOMEM
;
11731 p
= lock_user_string(arg3
);
11733 ret
= -TARGET_EFAULT
;
11735 ret
= get_errno(mincore(a
, arg2
, p
));
11736 unlock_user(p
, arg3
, ret
);
11738 unlock_user(a
, arg1
, 0);
11742 #ifdef TARGET_NR_arm_fadvise64_64
11743 case TARGET_NR_arm_fadvise64_64
:
11744 /* arm_fadvise64_64 looks like fadvise64_64 but
11745 * with different argument order: fd, advice, offset, len
11746 * rather than the usual fd, offset, len, advice.
11747 * Note that offset and len are both 64-bit so appear as
11748 * pairs of 32-bit registers.
11750 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11751 target_offset64(arg5
, arg6
), arg2
);
11752 return -host_to_target_errno(ret
);
11755 #if TARGET_ABI_BITS == 32
11757 #ifdef TARGET_NR_fadvise64_64
11758 case TARGET_NR_fadvise64_64
:
11759 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11760 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11768 /* 6 args: fd, offset (high, low), len (high, low), advice */
11769 if (regpairs_aligned(cpu_env
, num
)) {
11770 /* offset is in (3,4), len in (5,6) and advice in 7 */
11778 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11779 target_offset64(arg4
, arg5
), arg6
);
11780 return -host_to_target_errno(ret
);
11783 #ifdef TARGET_NR_fadvise64
11784 case TARGET_NR_fadvise64
:
11785 /* 5 args: fd, offset (high, low), len, advice */
11786 if (regpairs_aligned(cpu_env
, num
)) {
11787 /* offset is in (3,4), len in 5 and advice in 6 */
11793 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11794 return -host_to_target_errno(ret
);
11797 #else /* not a 32-bit ABI */
11798 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11799 #ifdef TARGET_NR_fadvise64_64
11800 case TARGET_NR_fadvise64_64
:
11802 #ifdef TARGET_NR_fadvise64
11803 case TARGET_NR_fadvise64
:
11805 #ifdef TARGET_S390X
11807 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11808 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11809 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11810 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11814 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11816 #endif /* end of 64-bit ABI fadvise handling */
11818 #ifdef TARGET_NR_madvise
11819 case TARGET_NR_madvise
:
11820 /* A straight passthrough may not be safe because qemu sometimes
11821 turns private file-backed mappings into anonymous mappings.
11822 This will break MADV_DONTNEED.
11823 This is a hint, so ignoring and returning success is ok. */
11826 #ifdef TARGET_NR_fcntl64
11827 case TARGET_NR_fcntl64
:
11831 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11832 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11835 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11836 copyfrom
= copy_from_user_oabi_flock64
;
11837 copyto
= copy_to_user_oabi_flock64
;
11841 cmd
= target_to_host_fcntl_cmd(arg2
);
11842 if (cmd
== -TARGET_EINVAL
) {
11847 case TARGET_F_GETLK64
:
11848 ret
= copyfrom(&fl
, arg3
);
11852 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11854 ret
= copyto(arg3
, &fl
);
11858 case TARGET_F_SETLK64
:
11859 case TARGET_F_SETLKW64
:
11860 ret
= copyfrom(&fl
, arg3
);
11864 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11867 ret
= do_fcntl(arg1
, arg2
, arg3
);
11873 #ifdef TARGET_NR_cacheflush
11874 case TARGET_NR_cacheflush
:
11875 /* self-modifying code is handled automatically, so nothing needed */
11878 #ifdef TARGET_NR_getpagesize
11879 case TARGET_NR_getpagesize
:
11880 return TARGET_PAGE_SIZE
;
11882 case TARGET_NR_gettid
:
11883 return get_errno(sys_gettid());
11884 #ifdef TARGET_NR_readahead
11885 case TARGET_NR_readahead
:
11886 #if TARGET_ABI_BITS == 32
11887 if (regpairs_aligned(cpu_env
, num
)) {
11892 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11894 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11899 #ifdef TARGET_NR_setxattr
11900 case TARGET_NR_listxattr
:
11901 case TARGET_NR_llistxattr
:
11905 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11907 return -TARGET_EFAULT
;
11910 p
= lock_user_string(arg1
);
11912 if (num
== TARGET_NR_listxattr
) {
11913 ret
= get_errno(listxattr(p
, b
, arg3
));
11915 ret
= get_errno(llistxattr(p
, b
, arg3
));
11918 ret
= -TARGET_EFAULT
;
11920 unlock_user(p
, arg1
, 0);
11921 unlock_user(b
, arg2
, arg3
);
11924 case TARGET_NR_flistxattr
:
11928 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11930 return -TARGET_EFAULT
;
11933 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11934 unlock_user(b
, arg2
, arg3
);
11937 case TARGET_NR_setxattr
:
11938 case TARGET_NR_lsetxattr
:
11940 void *p
, *n
, *v
= 0;
11942 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11944 return -TARGET_EFAULT
;
11947 p
= lock_user_string(arg1
);
11948 n
= lock_user_string(arg2
);
11950 if (num
== TARGET_NR_setxattr
) {
11951 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11953 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11956 ret
= -TARGET_EFAULT
;
11958 unlock_user(p
, arg1
, 0);
11959 unlock_user(n
, arg2
, 0);
11960 unlock_user(v
, arg3
, 0);
11963 case TARGET_NR_fsetxattr
:
11967 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11969 return -TARGET_EFAULT
;
11972 n
= lock_user_string(arg2
);
11974 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11976 ret
= -TARGET_EFAULT
;
11978 unlock_user(n
, arg2
, 0);
11979 unlock_user(v
, arg3
, 0);
11982 case TARGET_NR_getxattr
:
11983 case TARGET_NR_lgetxattr
:
11985 void *p
, *n
, *v
= 0;
11987 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11989 return -TARGET_EFAULT
;
11992 p
= lock_user_string(arg1
);
11993 n
= lock_user_string(arg2
);
11995 if (num
== TARGET_NR_getxattr
) {
11996 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11998 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12001 ret
= -TARGET_EFAULT
;
12003 unlock_user(p
, arg1
, 0);
12004 unlock_user(n
, arg2
, 0);
12005 unlock_user(v
, arg3
, arg4
);
12008 case TARGET_NR_fgetxattr
:
12012 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12014 return -TARGET_EFAULT
;
12017 n
= lock_user_string(arg2
);
12019 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12021 ret
= -TARGET_EFAULT
;
12023 unlock_user(n
, arg2
, 0);
12024 unlock_user(v
, arg3
, arg4
);
12027 case TARGET_NR_removexattr
:
12028 case TARGET_NR_lremovexattr
:
12031 p
= lock_user_string(arg1
);
12032 n
= lock_user_string(arg2
);
12034 if (num
== TARGET_NR_removexattr
) {
12035 ret
= get_errno(removexattr(p
, n
));
12037 ret
= get_errno(lremovexattr(p
, n
));
12040 ret
= -TARGET_EFAULT
;
12042 unlock_user(p
, arg1
, 0);
12043 unlock_user(n
, arg2
, 0);
12046 case TARGET_NR_fremovexattr
:
12049 n
= lock_user_string(arg2
);
12051 ret
= get_errno(fremovexattr(arg1
, n
));
12053 ret
= -TARGET_EFAULT
;
12055 unlock_user(n
, arg2
, 0);
12059 #endif /* CONFIG_ATTR */
12060 #ifdef TARGET_NR_set_thread_area
12061 case TARGET_NR_set_thread_area
:
12062 #if defined(TARGET_MIPS)
12063 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12065 #elif defined(TARGET_CRIS)
12067 ret
= -TARGET_EINVAL
;
12069 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12073 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12074 return do_set_thread_area(cpu_env
, arg1
);
12075 #elif defined(TARGET_M68K)
12077 TaskState
*ts
= cpu
->opaque
;
12078 ts
->tp_value
= arg1
;
12082 return -TARGET_ENOSYS
;
12085 #ifdef TARGET_NR_get_thread_area
12086 case TARGET_NR_get_thread_area
:
12087 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12088 return do_get_thread_area(cpu_env
, arg1
);
12089 #elif defined(TARGET_M68K)
12091 TaskState
*ts
= cpu
->opaque
;
12092 return ts
->tp_value
;
12095 return -TARGET_ENOSYS
;
12098 #ifdef TARGET_NR_getdomainname
12099 case TARGET_NR_getdomainname
:
12100 return -TARGET_ENOSYS
;
12103 #ifdef TARGET_NR_clock_settime
12104 case TARGET_NR_clock_settime
:
12106 struct timespec ts
;
12108 ret
= target_to_host_timespec(&ts
, arg2
);
12109 if (!is_error(ret
)) {
12110 ret
= get_errno(clock_settime(arg1
, &ts
));
12115 #ifdef TARGET_NR_clock_settime64
12116 case TARGET_NR_clock_settime64
:
12118 struct timespec ts
;
12120 ret
= target_to_host_timespec64(&ts
, arg2
);
12121 if (!is_error(ret
)) {
12122 ret
= get_errno(clock_settime(arg1
, &ts
));
12127 #ifdef TARGET_NR_clock_gettime
12128 case TARGET_NR_clock_gettime
:
12130 struct timespec ts
;
12131 ret
= get_errno(clock_gettime(arg1
, &ts
));
12132 if (!is_error(ret
)) {
12133 ret
= host_to_target_timespec(arg2
, &ts
);
12138 #ifdef TARGET_NR_clock_gettime64
12139 case TARGET_NR_clock_gettime64
:
12141 struct timespec ts
;
12142 ret
= get_errno(clock_gettime(arg1
, &ts
));
12143 if (!is_error(ret
)) {
12144 ret
= host_to_target_timespec64(arg2
, &ts
);
12149 #ifdef TARGET_NR_clock_getres
12150 case TARGET_NR_clock_getres
:
12152 struct timespec ts
;
12153 ret
= get_errno(clock_getres(arg1
, &ts
));
12154 if (!is_error(ret
)) {
12155 host_to_target_timespec(arg2
, &ts
);
12160 #ifdef TARGET_NR_clock_getres_time64
12161 case TARGET_NR_clock_getres_time64
:
12163 struct timespec ts
;
12164 ret
= get_errno(clock_getres(arg1
, &ts
));
12165 if (!is_error(ret
)) {
12166 host_to_target_timespec64(arg2
, &ts
);
12171 #ifdef TARGET_NR_clock_nanosleep
12172 case TARGET_NR_clock_nanosleep
:
12174 struct timespec ts
;
12175 if (target_to_host_timespec(&ts
, arg3
)) {
12176 return -TARGET_EFAULT
;
12178 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12179 &ts
, arg4
? &ts
: NULL
));
12181 * if the call is interrupted by a signal handler, it fails
12182 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12183 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12185 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12186 host_to_target_timespec(arg4
, &ts
)) {
12187 return -TARGET_EFAULT
;
12193 #ifdef TARGET_NR_clock_nanosleep_time64
12194 case TARGET_NR_clock_nanosleep_time64
:
12196 struct timespec ts
;
12198 if (target_to_host_timespec64(&ts
, arg3
)) {
12199 return -TARGET_EFAULT
;
12202 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12203 &ts
, arg4
? &ts
: NULL
));
12205 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12206 host_to_target_timespec64(arg4
, &ts
)) {
12207 return -TARGET_EFAULT
;
12213 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12214 case TARGET_NR_set_tid_address
:
12215 return get_errno(set_tid_address((int *)g2h(cpu
, arg1
)));
12218 case TARGET_NR_tkill
:
12219 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12221 case TARGET_NR_tgkill
:
12222 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12223 target_to_host_signal(arg3
)));
12225 #ifdef TARGET_NR_set_robust_list
12226 case TARGET_NR_set_robust_list
:
12227 case TARGET_NR_get_robust_list
:
12228 /* The ABI for supporting robust futexes has userspace pass
12229 * the kernel a pointer to a linked list which is updated by
12230 * userspace after the syscall; the list is walked by the kernel
12231 * when the thread exits. Since the linked list in QEMU guest
12232 * memory isn't a valid linked list for the host and we have
12233 * no way to reliably intercept the thread-death event, we can't
12234 * support these. Silently return ENOSYS so that guest userspace
12235 * falls back to a non-robust futex implementation (which should
12236 * be OK except in the corner case of the guest crashing while
12237 * holding a mutex that is shared with another process via
12240 return -TARGET_ENOSYS
;
12243 #if defined(TARGET_NR_utimensat)
12244 case TARGET_NR_utimensat
:
12246 struct timespec
*tsp
, ts
[2];
12250 if (target_to_host_timespec(ts
, arg3
)) {
12251 return -TARGET_EFAULT
;
12253 if (target_to_host_timespec(ts
+ 1, arg3
+
12254 sizeof(struct target_timespec
))) {
12255 return -TARGET_EFAULT
;
12260 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12262 if (!(p
= lock_user_string(arg2
))) {
12263 return -TARGET_EFAULT
;
12265 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12266 unlock_user(p
, arg2
, 0);
12271 #ifdef TARGET_NR_utimensat_time64
12272 case TARGET_NR_utimensat_time64
:
12274 struct timespec
*tsp
, ts
[2];
12278 if (target_to_host_timespec64(ts
, arg3
)) {
12279 return -TARGET_EFAULT
;
12281 if (target_to_host_timespec64(ts
+ 1, arg3
+
12282 sizeof(struct target__kernel_timespec
))) {
12283 return -TARGET_EFAULT
;
12288 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12290 p
= lock_user_string(arg2
);
12292 return -TARGET_EFAULT
;
12294 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12295 unlock_user(p
, arg2
, 0);
12300 #ifdef TARGET_NR_futex
12301 case TARGET_NR_futex
:
12302 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12304 #ifdef TARGET_NR_futex_time64
12305 case TARGET_NR_futex_time64
:
12306 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12308 #ifdef CONFIG_INOTIFY
12309 #if defined(TARGET_NR_inotify_init)
12310 case TARGET_NR_inotify_init
:
12311 ret
= get_errno(inotify_init());
12313 fd_trans_register(ret
, &target_inotify_trans
);
12317 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12318 case TARGET_NR_inotify_init1
:
12319 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12320 fcntl_flags_tbl
)));
12322 fd_trans_register(ret
, &target_inotify_trans
);
12326 #if defined(TARGET_NR_inotify_add_watch)
12327 case TARGET_NR_inotify_add_watch
:
12328 p
= lock_user_string(arg2
);
12329 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12330 unlock_user(p
, arg2
, 0);
12333 #if defined(TARGET_NR_inotify_rm_watch)
12334 case TARGET_NR_inotify_rm_watch
:
12335 return get_errno(inotify_rm_watch(arg1
, arg2
));
12339 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12340 case TARGET_NR_mq_open
:
12342 struct mq_attr posix_mq_attr
;
12343 struct mq_attr
*pposix_mq_attr
;
12346 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12347 pposix_mq_attr
= NULL
;
12349 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12350 return -TARGET_EFAULT
;
12352 pposix_mq_attr
= &posix_mq_attr
;
12354 p
= lock_user_string(arg1
- 1);
12356 return -TARGET_EFAULT
;
12358 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12359 unlock_user (p
, arg1
, 0);
12363 case TARGET_NR_mq_unlink
:
12364 p
= lock_user_string(arg1
- 1);
12366 return -TARGET_EFAULT
;
12368 ret
= get_errno(mq_unlink(p
));
12369 unlock_user (p
, arg1
, 0);
12372 #ifdef TARGET_NR_mq_timedsend
12373 case TARGET_NR_mq_timedsend
:
12375 struct timespec ts
;
12377 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12379 if (target_to_host_timespec(&ts
, arg5
)) {
12380 return -TARGET_EFAULT
;
12382 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12383 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12384 return -TARGET_EFAULT
;
12387 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12389 unlock_user (p
, arg2
, arg3
);
12393 #ifdef TARGET_NR_mq_timedsend_time64
12394 case TARGET_NR_mq_timedsend_time64
:
12396 struct timespec ts
;
12398 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12400 if (target_to_host_timespec64(&ts
, arg5
)) {
12401 return -TARGET_EFAULT
;
12403 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12404 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12405 return -TARGET_EFAULT
;
12408 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12410 unlock_user(p
, arg2
, arg3
);
12415 #ifdef TARGET_NR_mq_timedreceive
12416 case TARGET_NR_mq_timedreceive
:
12418 struct timespec ts
;
12421 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12423 if (target_to_host_timespec(&ts
, arg5
)) {
12424 return -TARGET_EFAULT
;
12426 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12428 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12429 return -TARGET_EFAULT
;
12432 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12435 unlock_user (p
, arg2
, arg3
);
12437 put_user_u32(prio
, arg4
);
12441 #ifdef TARGET_NR_mq_timedreceive_time64
12442 case TARGET_NR_mq_timedreceive_time64
:
12444 struct timespec ts
;
12447 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12449 if (target_to_host_timespec64(&ts
, arg5
)) {
12450 return -TARGET_EFAULT
;
12452 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12454 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12455 return -TARGET_EFAULT
;
12458 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12461 unlock_user(p
, arg2
, arg3
);
12463 put_user_u32(prio
, arg4
);
12469 /* Not implemented for now... */
12470 /* case TARGET_NR_mq_notify: */
12473 case TARGET_NR_mq_getsetattr
:
12475 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12478 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12479 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12480 &posix_mq_attr_out
));
12481 } else if (arg3
!= 0) {
12482 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12484 if (ret
== 0 && arg3
!= 0) {
12485 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12491 #ifdef CONFIG_SPLICE
12492 #ifdef TARGET_NR_tee
12493 case TARGET_NR_tee
:
12495 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12499 #ifdef TARGET_NR_splice
12500 case TARGET_NR_splice
:
12502 loff_t loff_in
, loff_out
;
12503 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12505 if (get_user_u64(loff_in
, arg2
)) {
12506 return -TARGET_EFAULT
;
12508 ploff_in
= &loff_in
;
12511 if (get_user_u64(loff_out
, arg4
)) {
12512 return -TARGET_EFAULT
;
12514 ploff_out
= &loff_out
;
12516 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12518 if (put_user_u64(loff_in
, arg2
)) {
12519 return -TARGET_EFAULT
;
12523 if (put_user_u64(loff_out
, arg4
)) {
12524 return -TARGET_EFAULT
;
12530 #ifdef TARGET_NR_vmsplice
12531 case TARGET_NR_vmsplice
:
12533 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12535 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12536 unlock_iovec(vec
, arg2
, arg3
, 0);
12538 ret
= -host_to_target_errno(errno
);
12543 #endif /* CONFIG_SPLICE */
12544 #ifdef CONFIG_EVENTFD
12545 #if defined(TARGET_NR_eventfd)
12546 case TARGET_NR_eventfd
:
12547 ret
= get_errno(eventfd(arg1
, 0));
12549 fd_trans_register(ret
, &target_eventfd_trans
);
12553 #if defined(TARGET_NR_eventfd2)
12554 case TARGET_NR_eventfd2
:
12556 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12557 if (arg2
& TARGET_O_NONBLOCK
) {
12558 host_flags
|= O_NONBLOCK
;
12560 if (arg2
& TARGET_O_CLOEXEC
) {
12561 host_flags
|= O_CLOEXEC
;
12563 ret
= get_errno(eventfd(arg1
, host_flags
));
12565 fd_trans_register(ret
, &target_eventfd_trans
);
12570 #endif /* CONFIG_EVENTFD */
12571 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12572 case TARGET_NR_fallocate
:
12573 #if TARGET_ABI_BITS == 32
12574 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12575 target_offset64(arg5
, arg6
)));
12577 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12581 #if defined(CONFIG_SYNC_FILE_RANGE)
12582 #if defined(TARGET_NR_sync_file_range)
12583 case TARGET_NR_sync_file_range
:
12584 #if TARGET_ABI_BITS == 32
12585 #if defined(TARGET_MIPS)
12586 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12587 target_offset64(arg5
, arg6
), arg7
));
12589 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12590 target_offset64(arg4
, arg5
), arg6
));
12591 #endif /* !TARGET_MIPS */
12593 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12597 #if defined(TARGET_NR_sync_file_range2) || \
12598 defined(TARGET_NR_arm_sync_file_range)
12599 #if defined(TARGET_NR_sync_file_range2)
12600 case TARGET_NR_sync_file_range2
:
12602 #if defined(TARGET_NR_arm_sync_file_range)
12603 case TARGET_NR_arm_sync_file_range
:
12605 /* This is like sync_file_range but the arguments are reordered */
12606 #if TARGET_ABI_BITS == 32
12607 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12608 target_offset64(arg5
, arg6
), arg2
));
12610 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12615 #if defined(TARGET_NR_signalfd4)
12616 case TARGET_NR_signalfd4
:
12617 return do_signalfd4(arg1
, arg2
, arg4
);
12619 #if defined(TARGET_NR_signalfd)
12620 case TARGET_NR_signalfd
:
12621 return do_signalfd4(arg1
, arg2
, 0);
12623 #if defined(CONFIG_EPOLL)
12624 #if defined(TARGET_NR_epoll_create)
12625 case TARGET_NR_epoll_create
:
12626 return get_errno(epoll_create(arg1
));
12628 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12629 case TARGET_NR_epoll_create1
:
12630 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12632 #if defined(TARGET_NR_epoll_ctl)
12633 case TARGET_NR_epoll_ctl
:
12635 struct epoll_event ep
;
12636 struct epoll_event
*epp
= 0;
12638 if (arg2
!= EPOLL_CTL_DEL
) {
12639 struct target_epoll_event
*target_ep
;
12640 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12641 return -TARGET_EFAULT
;
12643 ep
.events
= tswap32(target_ep
->events
);
12645 * The epoll_data_t union is just opaque data to the kernel,
12646 * so we transfer all 64 bits across and need not worry what
12647 * actual data type it is.
12649 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12650 unlock_user_struct(target_ep
, arg4
, 0);
12653 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12654 * non-null pointer, even though this argument is ignored.
12659 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12663 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12664 #if defined(TARGET_NR_epoll_wait)
12665 case TARGET_NR_epoll_wait
:
12667 #if defined(TARGET_NR_epoll_pwait)
12668 case TARGET_NR_epoll_pwait
:
12671 struct target_epoll_event
*target_ep
;
12672 struct epoll_event
*ep
;
12674 int maxevents
= arg3
;
12675 int timeout
= arg4
;
12677 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12678 return -TARGET_EINVAL
;
12681 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12682 maxevents
* sizeof(struct target_epoll_event
), 1);
12684 return -TARGET_EFAULT
;
12687 ep
= g_try_new(struct epoll_event
, maxevents
);
12689 unlock_user(target_ep
, arg2
, 0);
12690 return -TARGET_ENOMEM
;
12694 #if defined(TARGET_NR_epoll_pwait)
12695 case TARGET_NR_epoll_pwait
:
12697 sigset_t
*set
= NULL
;
12700 ret
= process_sigsuspend_mask(&set
, arg5
, arg6
);
12706 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12707 set
, SIGSET_T_SIZE
));
12710 finish_sigsuspend_mask(ret
);
12715 #if defined(TARGET_NR_epoll_wait)
12716 case TARGET_NR_epoll_wait
:
12717 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12722 ret
= -TARGET_ENOSYS
;
12724 if (!is_error(ret
)) {
12726 for (i
= 0; i
< ret
; i
++) {
12727 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12728 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12730 unlock_user(target_ep
, arg2
,
12731 ret
* sizeof(struct target_epoll_event
));
12733 unlock_user(target_ep
, arg2
, 0);
12740 #ifdef TARGET_NR_prlimit64
12741 case TARGET_NR_prlimit64
:
12743 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12744 struct target_rlimit64
*target_rnew
, *target_rold
;
12745 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12746 int resource
= target_to_host_resource(arg2
);
12748 if (arg3
&& (resource
!= RLIMIT_AS
&&
12749 resource
!= RLIMIT_DATA
&&
12750 resource
!= RLIMIT_STACK
)) {
12751 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12752 return -TARGET_EFAULT
;
12754 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12755 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12756 unlock_user_struct(target_rnew
, arg3
, 0);
12760 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12761 if (!is_error(ret
) && arg4
) {
12762 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12763 return -TARGET_EFAULT
;
12765 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12766 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12767 unlock_user_struct(target_rold
, arg4
, 1);
12772 #ifdef TARGET_NR_gethostname
12773 case TARGET_NR_gethostname
:
12775 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12777 ret
= get_errno(gethostname(name
, arg2
));
12778 unlock_user(name
, arg1
, arg2
);
12780 ret
= -TARGET_EFAULT
;
12785 #ifdef TARGET_NR_atomic_cmpxchg_32
12786 case TARGET_NR_atomic_cmpxchg_32
:
12788 /* should use start_exclusive from main.c */
12789 abi_ulong mem_value
;
12790 if (get_user_u32(mem_value
, arg6
)) {
12791 target_siginfo_t info
;
12792 info
.si_signo
= SIGSEGV
;
12794 info
.si_code
= TARGET_SEGV_MAPERR
;
12795 info
._sifields
._sigfault
._addr
= arg6
;
12796 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12797 QEMU_SI_FAULT
, &info
);
12801 if (mem_value
== arg2
)
12802 put_user_u32(arg1
, arg6
);
12806 #ifdef TARGET_NR_atomic_barrier
12807 case TARGET_NR_atomic_barrier
:
12808 /* Like the kernel implementation and the
12809 qemu arm barrier, no-op this? */
12813 #ifdef TARGET_NR_timer_create
12814 case TARGET_NR_timer_create
:
12816 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12818 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12821 int timer_index
= next_free_host_timer();
12823 if (timer_index
< 0) {
12824 ret
= -TARGET_EAGAIN
;
12826 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12829 phost_sevp
= &host_sevp
;
12830 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12836 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12840 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12841 return -TARGET_EFAULT
;
12849 #ifdef TARGET_NR_timer_settime
12850 case TARGET_NR_timer_settime
:
12852 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12853 * struct itimerspec * old_value */
12854 target_timer_t timerid
= get_timer_id(arg1
);
12858 } else if (arg3
== 0) {
12859 ret
= -TARGET_EINVAL
;
12861 timer_t htimer
= g_posix_timers
[timerid
];
12862 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12864 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12865 return -TARGET_EFAULT
;
12868 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12869 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12870 return -TARGET_EFAULT
;
12877 #ifdef TARGET_NR_timer_settime64
12878 case TARGET_NR_timer_settime64
:
12880 target_timer_t timerid
= get_timer_id(arg1
);
12884 } else if (arg3
== 0) {
12885 ret
= -TARGET_EINVAL
;
12887 timer_t htimer
= g_posix_timers
[timerid
];
12888 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12890 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12891 return -TARGET_EFAULT
;
12894 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12895 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12896 return -TARGET_EFAULT
;
12903 #ifdef TARGET_NR_timer_gettime
12904 case TARGET_NR_timer_gettime
:
12906 /* args: timer_t timerid, struct itimerspec *curr_value */
12907 target_timer_t timerid
= get_timer_id(arg1
);
12911 } else if (!arg2
) {
12912 ret
= -TARGET_EFAULT
;
12914 timer_t htimer
= g_posix_timers
[timerid
];
12915 struct itimerspec hspec
;
12916 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12918 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12919 ret
= -TARGET_EFAULT
;
12926 #ifdef TARGET_NR_timer_gettime64
12927 case TARGET_NR_timer_gettime64
:
12929 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12930 target_timer_t timerid
= get_timer_id(arg1
);
12934 } else if (!arg2
) {
12935 ret
= -TARGET_EFAULT
;
12937 timer_t htimer
= g_posix_timers
[timerid
];
12938 struct itimerspec hspec
;
12939 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12941 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12942 ret
= -TARGET_EFAULT
;
12949 #ifdef TARGET_NR_timer_getoverrun
12950 case TARGET_NR_timer_getoverrun
:
12952 /* args: timer_t timerid */
12953 target_timer_t timerid
= get_timer_id(arg1
);
12958 timer_t htimer
= g_posix_timers
[timerid
];
12959 ret
= get_errno(timer_getoverrun(htimer
));
12965 #ifdef TARGET_NR_timer_delete
12966 case TARGET_NR_timer_delete
:
12968 /* args: timer_t timerid */
12969 target_timer_t timerid
= get_timer_id(arg1
);
12974 timer_t htimer
= g_posix_timers
[timerid
];
12975 ret
= get_errno(timer_delete(htimer
));
12976 g_posix_timers
[timerid
] = 0;
12982 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12983 case TARGET_NR_timerfd_create
:
12984 return get_errno(timerfd_create(arg1
,
12985 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12988 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12989 case TARGET_NR_timerfd_gettime
:
12991 struct itimerspec its_curr
;
12993 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12995 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12996 return -TARGET_EFAULT
;
13002 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13003 case TARGET_NR_timerfd_gettime64
:
13005 struct itimerspec its_curr
;
13007 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13009 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13010 return -TARGET_EFAULT
;
13016 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13017 case TARGET_NR_timerfd_settime
:
13019 struct itimerspec its_new
, its_old
, *p_new
;
13022 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13023 return -TARGET_EFAULT
;
13030 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13032 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13033 return -TARGET_EFAULT
;
13039 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13040 case TARGET_NR_timerfd_settime64
:
13042 struct itimerspec its_new
, its_old
, *p_new
;
13045 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13046 return -TARGET_EFAULT
;
13053 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13055 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13056 return -TARGET_EFAULT
;
13062 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13063 case TARGET_NR_ioprio_get
:
13064 return get_errno(ioprio_get(arg1
, arg2
));
13067 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13068 case TARGET_NR_ioprio_set
:
13069 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13072 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13073 case TARGET_NR_setns
:
13074 return get_errno(setns(arg1
, arg2
));
13076 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13077 case TARGET_NR_unshare
:
13078 return get_errno(unshare(arg1
));
13080 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13081 case TARGET_NR_kcmp
:
13082 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13084 #ifdef TARGET_NR_swapcontext
13085 case TARGET_NR_swapcontext
:
13086 /* PowerPC specific. */
13087 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13089 #ifdef TARGET_NR_memfd_create
13090 case TARGET_NR_memfd_create
:
13091 p
= lock_user_string(arg1
);
13093 return -TARGET_EFAULT
;
13095 ret
= get_errno(memfd_create(p
, arg2
));
13096 fd_trans_unregister(ret
);
13097 unlock_user(p
, arg1
, 0);
13100 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13101 case TARGET_NR_membarrier
:
13102 return get_errno(membarrier(arg1
, arg2
));
13105 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13106 case TARGET_NR_copy_file_range
:
13108 loff_t inoff
, outoff
;
13109 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13112 if (get_user_u64(inoff
, arg2
)) {
13113 return -TARGET_EFAULT
;
13118 if (get_user_u64(outoff
, arg4
)) {
13119 return -TARGET_EFAULT
;
13123 /* Do not sign-extend the count parameter. */
13124 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13125 (abi_ulong
)arg5
, arg6
));
13126 if (!is_error(ret
) && ret
> 0) {
13128 if (put_user_u64(inoff
, arg2
)) {
13129 return -TARGET_EFAULT
;
13133 if (put_user_u64(outoff
, arg4
)) {
13134 return -TARGET_EFAULT
;
13142 #if defined(TARGET_NR_pivot_root)
13143 case TARGET_NR_pivot_root
:
13146 p
= lock_user_string(arg1
); /* new_root */
13147 p2
= lock_user_string(arg2
); /* put_old */
13149 ret
= -TARGET_EFAULT
;
13151 ret
= get_errno(pivot_root(p
, p2
));
13153 unlock_user(p2
, arg2
, 0);
13154 unlock_user(p
, arg1
, 0);
13160 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13161 return -TARGET_ENOSYS
;
13166 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13167 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13168 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13171 CPUState
*cpu
= env_cpu(cpu_env
);
13174 #ifdef DEBUG_ERESTARTSYS
13175 /* Debug-only code for exercising the syscall-restart code paths
13176 * in the per-architecture cpu main loops: restart every syscall
13177 * the guest makes once before letting it through.
13183 return -QEMU_ERESTARTSYS
;
13188 record_syscall_start(cpu
, num
, arg1
,
13189 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13191 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13192 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13195 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13196 arg5
, arg6
, arg7
, arg8
);
13198 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13199 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13200 arg3
, arg4
, arg5
, arg6
);
13203 record_syscall_return(cpu
, num
, ret
);